repo_name | hexsha | file_path | code | apis
---|---|---|---|---|
panodata/python_dwd | [
"a9ee1bdf21b8fc12f6b6b33628ca804e656f310d"
] | [
"tests/dwd/observations/test_api_sites_geo.py"
] | [
"from pathlib import Path\n\nimport pytest\nimport numpy as np\nfrom datetime import datetime\nfrom unittest.mock import patch, MagicMock\nimport pandas as pd\n\nfrom wetterdienst.dwd.metadata.column_map import METADATA_DTYPE_MAPPING\nfrom wetterdienst.util.geo import derive_nearest_neighbours\nfrom wetterdienst.util.geo import Coordinates\nfrom wetterdienst.dwd.observations import (\n DWDObservationSites,\n DWDObservationParameterSet,\n DWDObservationPeriod,\n DWDObservationResolution,\n)\nfrom wetterdienst.exceptions import InvalidParameterCombination\n\n\nHERE = Path(__file__).parent\nMETADATA_FILE = HERE / \"FIXED_METADATA.JSON\"\nMETADATA_DF = pd.read_json(METADATA_FILE)\nMETADATA_DF = METADATA_DF.astype(METADATA_DTYPE_MAPPING)\n\n\n@patch(\n \"wetterdienst.dwd.observations.stations.metadata_for_climate_observations\",\n MagicMock(return_value=METADATA_DF),\n)\ndef test_dwd_observation_sites_nearby_number_success():\n\n # Test for one nearest station\n sites = DWDObservationSites(\n DWDObservationParameterSet.TEMPERATURE_AIR,\n DWDObservationResolution.HOURLY,\n DWDObservationPeriod.RECENT,\n datetime(2020, 1, 1),\n datetime(2020, 1, 20),\n )\n\n nearby_station = sites.nearby_number(\n 50.0,\n 8.9,\n 1,\n )\n nearby_station = nearby_station.drop(\"TO_DATE\", axis=\"columns\")\n nearby_station.STATION_ID = nearby_station.STATION_ID.astype(np.int64)\n\n pd.testing.assert_frame_equal(\n nearby_station,\n pd.DataFrame(\n [\n [\n np.int64(4411),\n np.datetime64(\"2002-01-24\"),\n 155.0,\n 49.9195,\n 8.9671,\n \"Schaafheim-Schlierbach\",\n \"Hessen\",\n 11.65302672,\n ]\n ],\n columns=[\n \"STATION_ID\",\n \"FROM_DATE\",\n \"STATION_HEIGHT\",\n \"LAT\",\n \"LON\",\n \"STATION_NAME\",\n \"STATE\",\n \"DISTANCE_TO_LOCATION\",\n ],\n ),\n )\n\n nearby_station = DWDObservationSites(\n DWDObservationParameterSet.TEMPERATURE_AIR,\n DWDObservationResolution.HOURLY,\n DWDObservationPeriod.RECENT,\n datetime(2020, 1, 1),\n datetime(2020, 1, 20),\n ).nearby_radius(\n 50.0,\n 8.9,\n 20,\n )\n nearby_station = nearby_station.drop(\"TO_DATE\", axis=\"columns\")\n nearby_station.STATION_ID = nearby_station.STATION_ID.astype(np.int64)\n\n pd.testing.assert_frame_equal(\n nearby_station,\n pd.DataFrame(\n [\n [\n np.int64(4411),\n np.datetime64(\"2002-01-24 00:00:00\"),\n 155.0,\n 49.9195,\n 8.9671,\n \"Schaafheim-Schlierbach\",\n \"Hessen\",\n 11.653026716750542,\n ],\n [\n np.int64(2480),\n np.datetime64(\"2004-09-01 00:00:00\"),\n 108.0,\n 50.0643,\n 8.993,\n \"Kahl/Main\",\n \"Bayern\",\n 12.572153957087247,\n ],\n [\n np.int64(7341),\n np.datetime64(\"2005-07-16 00:00:00\"),\n 119.0,\n 50.09,\n 8.7862,\n \"Offenbach-Wetterpark\",\n \"Hessen\",\n 16.13301589362613,\n ],\n ],\n columns=[\n \"STATION_ID\",\n \"FROM_DATE\",\n \"STATION_HEIGHT\",\n \"LAT\",\n \"LON\",\n \"STATION_NAME\",\n \"STATE\",\n \"DISTANCE_TO_LOCATION\",\n ],\n ),\n )\n\n\n@patch(\n \"wetterdienst.dwd.observations.stations.metadata_for_climate_observations\",\n MagicMock(return_value=METADATA_DF),\n)\ndef test_dwd_observation_sites_nearby_number_fail_1():\n\n with pytest.raises(ValueError):\n DWDObservationSites(\n DWDObservationParameterSet.TEMPERATURE_AIR,\n DWDObservationResolution.HOURLY,\n DWDObservationPeriod.RECENT,\n datetime(2020, 1, 1),\n datetime(2020, 1, 20),\n ).nearby_number(\n 51.4,\n 9.3,\n 0,\n )\n\n\n@patch(\n \"wetterdienst.dwd.observations.stations.metadata_for_climate_observations\",\n MagicMock(return_value=METADATA_DF),\n)\ndef test_dwd_observation_sites_nearby_number_fail_2():\n\n with 
pytest.raises(InvalidParameterCombination):\n DWDObservationSites(\n DWDObservationParameterSet.SOIL,\n DWDObservationResolution.MINUTE_10,\n DWDObservationPeriod.RECENT,\n datetime(2020, 1, 1),\n datetime(2020, 1, 20),\n ).nearby_number(\n 51.4,\n 9.3,\n 1,\n )\n\n\n@patch(\n \"wetterdienst.dwd.observations.stations.metadata_for_climate_observations\",\n MagicMock(return_value=METADATA_DF),\n)\ndef test_dwd_observation_sites_nearby_distance():\n nearby_station = DWDObservationSites(\n DWDObservationParameterSet.TEMPERATURE_AIR,\n DWDObservationResolution.HOURLY,\n DWDObservationPeriod.RECENT,\n datetime(2020, 1, 1),\n datetime(2020, 1, 20),\n ).nearby_radius(\n 50.0,\n 8.9,\n 10,\n )\n assert nearby_station.empty is True\n\n\ndef test_derive_nearest_neighbours():\n coords = Coordinates(np.array([50.0, 51.4]), np.array([8.9, 9.3]))\n\n metadata = pd.read_json(METADATA_FILE)\n\n distances, indices_nearest_neighbours = derive_nearest_neighbours(\n metadata.LAT.values, metadata.LON.values, coords\n )\n\n np.testing.assert_array_almost_equal(distances, np.array([0.00182907, 0.00227919]))\n\n np.testing.assert_array_almost_equal(\n indices_nearest_neighbours, np.array([432, 655])\n )\n"
] | [
[
"numpy.array",
"numpy.datetime64",
"pandas.read_json",
"numpy.int64"
]
] |
UKPLab/coling2018-fake-news-challenge- | [
"6446c4459b520b7f7713bc66117917e341d899dc"
] | [
"fnc/pipeline.py"
] | [
"import sys\nimport datetime\nimport argparse\nimport os\nimport csv\nimport numpy as np\nimport os.path as path\nfrom builtins import isinstance\nsys.path.append(path.dirname(path.dirname(path.abspath(__file__))))\nimport fnc.refs.fnc1.scorer as scorer\nimport fnc.utils.score_calculation as score_calculation\nimport fnc.utils.estimator_definitions as esitmator_definitions\nfrom fnc.refs.utils.score import LABELS, score_submission\nfrom fnc.settings import myConstants\nfrom fnc.utils import printout_manager\nfrom fnc.models.MultiThreadingFeedForwardMLP import MultiThreadingFeedForwardMLP\nfrom fnc.src.models import Model\nfrom fnc.refs.utils.generate_test_splits import kfold_split, get_stances_for_folds\n#FNC challenge features from Athene\nfrom fnc.refs.feature_engineering import NMF_fit_all_incl_holdout_and_test, \\\n latent_dirichlet_allocation_incl_holdout_and_test, latent_semantic_indexing_gensim_holdout_and_test,\\\n NMF_fit_all_concat_300_and_test, word_ngrams_concat_tf5000_l2_w_holdout_and_test, NMF_fit_all, \\\n latent_dirichlet_allocation, latent_semantic_indexing_gensim_test, NMF_fit_all_concat_300, word_ngrams_concat_tf5000_l2_w_holdout\n#FNC challenge features from baseline implementation and from Benjamin Schiller\nfrom fnc.refs.feature_engineering import refuting_features, polarity_features, hand_features, word_overlap_features, \\\n gen_non_bleeding_feats, gen_or_load_feats, \\\n word_unigrams_5000_concat_tf_l2_holdout_unlbled_test, NMF_cos_300_holdout_unlbled_test, \\\n NMF_concat_300_holdout_unlbled_test, latent_dirichlet_allocation_25_holdout_unlbled_test, \\\n latent_semantic_indexing_gensim_300_concat_holdout_unlbled_test, \\\n NMF_cos_50, latent_dirichlet_allocation_25, \\\n latent_semantic_indexing_gensim_300_concat_holdout, NMF_concat_300_holdout, word_unigrams_5000_concat_tf_l2_holdout, \\\n sen2sen_similarity_max, word_mover_distance_similarity_sentence_min, \\\n word_mover_distance_wholebody, stanford_ppdb_score, stanford_ppdb_score_1sent, stanford_ppdb_score_2sent, stanford_ppdb_score_3sent, \\\n stanford_sentiment, stanford_sentiment_1sent, stanford_sentiment_2sent, stanford_sentiment_3sent, \\\n stanford_negation_features, stanford_negation_features_1sent, stanford_negation_features_2sent, stanford_negation_features_3sent, \\\n stanford_based_verb_noun_sim, stanford_based_verb_noun_sim_1sent, stanford_based_verb_noun_sim_2sent, stanford_based_verb_noun_sim_3sent, \\\n sdm_sim, stanford_avg_words_per_sent, stanford_avg_words_per_sent_1sent, stanford_avg_words_per_sent_2sent, stanford_avg_words_per_sent_3sent, \\\n hedging_features, ppdb, discuss_features, single_flat_LSTM_50d_100, latent_dirichlet_allocation_300, NMF_cos_300, \\\n char_3grams_5000_concat_all_data, \\\n lexical_features,max_diff_twitter_uni_bigrams,mpqa_unigrams, negated_context_word_12grams_concat_tf5000_l2_all_data, \\\n nrc_emo_lex,nrc_hashtag_sentiment_unigram, nrc_hashtag_sentiment_unigram_POS, POS_features, readability_features , \\\n sentiment140_unigrams, structural_features\n\nsys.path.append(path.dirname(path.dirname(path.abspath(__file__))))\n\ndef get_args():\n ''' This function parses and return arguments passed in'''\n parser = argparse.ArgumentParser(description='Scorer pipeline')\n parser.add_argument('-p', '--pipeline_type', type=str, nargs='+', help='Pipeline Type (crossv,holdout,ftrain,ftest), e.g. 
-p ftrain', required=True)\n parser.add_argument('-s', '--scorer_type', type=str, help='Scorer Type (baselines, CNN, tf_idf, avg_embed, sdm, doc2vec, word_mover_sentence, word_mover_wholeText)', required=False)\n parser.add_argument('-t', '--threshold', type=float, help='Threshold', required=False)\n\n args = parser.parse_args()\n pipeline_type = args.pipeline_type\n scorer_type = args.scorer_type\n threshold = args.threshold\n return pipeline_type, scorer_type, threshold\n\n\ndef generate_features(stances, dataset, name, feature_list, features_dir):\n \"\"\"\n Creates feature vectors out of the provided dataset\n \"\"\"\n h, b, y, bodyId, headId = [], [], [], [], []\n\n feature_dict = {'overlap': word_overlap_features,\n 'refuting': refuting_features,\n 'polarity': polarity_features,\n 'hand': hand_features,\n 'word_unigrams_5000_concat_tf_l2_holdout_unlbled_test': word_unigrams_5000_concat_tf_l2_holdout_unlbled_test,\n 'NMF_cos_300_holdout_unlbled_test': NMF_cos_300_holdout_unlbled_test,\n 'NMF_concat_300_holdout_unlbled_test': NMF_concat_300_holdout_unlbled_test,\n 'latent_dirichlet_allocation_25_holdout_unlbled_test': latent_dirichlet_allocation_25_holdout_unlbled_test,\n 'latent_semantic_indexing_gensim_300_concat_holdout_unlbled_test': latent_semantic_indexing_gensim_300_concat_holdout_unlbled_test,\n 'NMF_fit_all_incl_holdout_and_test': NMF_fit_all_incl_holdout_and_test,\n 'latent_dirichlet_allocation_incl_holdout_and_test': latent_dirichlet_allocation_incl_holdout_and_test,\n 'latent_semantic_indexing_gensim_holdout_and_test': latent_semantic_indexing_gensim_holdout_and_test,\n 'NMF_fit_all_concat_300_and_test': NMF_fit_all_concat_300_and_test,\n 'word_ngrams_concat_tf5000_l2_w_holdout_and_test': word_ngrams_concat_tf5000_l2_w_holdout_and_test,\n 'NMF_fit_all': NMF_fit_all,\n 'word_ngrams_concat_tf5000_l2_w_holdout': word_ngrams_concat_tf5000_l2_w_holdout,\n 'latent_dirichlet_allocation': latent_dirichlet_allocation,\n 'latent_semantic_indexing_gensim_test': latent_semantic_indexing_gensim_test,\n 'NMF_fit_all_concat_300': NMF_fit_all_concat_300,\n 'NMF_cos_50': NMF_cos_50,\n 'latent_dirichlet_allocation_25': latent_dirichlet_allocation_25,\n 'latent_semantic_indexing_gensim_300_concat_holdout': latent_semantic_indexing_gensim_300_concat_holdout,\n 'NMF_concat_300_holdout': NMF_concat_300_holdout,\n 'word_unigrams_5000_concat_tf_l2_holdout': word_unigrams_5000_concat_tf_l2_holdout,\n 'ppdb': ppdb,\n 'stanford_ppdb': stanford_ppdb_score,\n 'stanford_ppdb_1sent': stanford_ppdb_score_1sent,\n 'stanford_ppdb_2sent': stanford_ppdb_score_2sent,\n 'stanford_ppdb_3sent': stanford_ppdb_score_3sent,\n 'stanford_sentiment': stanford_sentiment,\n 'stanford_sentiment_1sent': stanford_sentiment_1sent,\n 'stanford_sentiment_2sent': stanford_sentiment_2sent,\n 'stanford_sentiment_3sent': stanford_sentiment_3sent,\n 'stanford_wordsim': stanford_based_verb_noun_sim,\n 'stanford_wordsim_1sent': stanford_based_verb_noun_sim_1sent,\n 'stanford_wordsim_2sent': stanford_based_verb_noun_sim_2sent,\n 'stanford_wordsim_3sent': stanford_based_verb_noun_sim_3sent,\n 'stanford_negation': stanford_negation_features,\n 'stanford_negation_1sent': stanford_negation_features_1sent,\n 'stanford_negation_2sent': stanford_negation_features_2sent,\n 'stanford_negation_3sent': stanford_negation_features_3sent,\n 'stanford_avg_words_per_sent': stanford_avg_words_per_sent,\n 'stanford_avg_words_per_sent_1sent': stanford_avg_words_per_sent_1sent,\n 'stanford_avg_words_per_sent_2sent': stanford_avg_words_per_sent_2sent,\n 
'stanford_avg_words_per_sent_3sent': stanford_avg_words_per_sent_3sent,\n 'hedging': hedging_features,\n 'sen2sen': sen2sen_similarity_max,\n 'wmdsenSen': word_mover_distance_similarity_sentence_min,\n 'wmdsenDoc': word_mover_distance_wholebody,\n 'sdm_sim': sdm_sim,\n 'discuss': discuss_features,\n 'single_flat_LSTM_50d_100': single_flat_LSTM_50d_100,\n 'char_3grams_5000_concat_all_data': char_3grams_5000_concat_all_data,\n 'lexical_features': lexical_features,\n 'max_diff_twitter_uni_bigrams': max_diff_twitter_uni_bigrams,\n 'mpqa_unigrams': mpqa_unigrams,\n 'negated_context_word_12grams_concat_tf5000_l2_all_data': negated_context_word_12grams_concat_tf5000_l2_all_data,\n 'nrc_emo_lex': nrc_emo_lex,\n 'nrc_hashtag_sentiment_unigram': nrc_hashtag_sentiment_unigram,\n 'nrc_hashtag_sentiment_unigram_POS': nrc_hashtag_sentiment_unigram_POS,\n #'POS_features': POS_features,\n 'readability_features': readability_features,\n 'sentiment140_unigrams': sentiment140_unigrams,\n 'structural_features': structural_features,\n 'latent_dirichlet_allocation_300': latent_dirichlet_allocation_300,\n 'NMF_cos_300': NMF_cos_300\n }\n\n stanceCounter = 0\n for stance in stances:\n y.append(LABELS.index(stance['Stance']))\n h.append(stance['Headline'])\n b.append(dataset.articles[stance['Body ID']])\n bodyId.append(stance['Body ID'])\n headId.append(name+str(stanceCounter))\n stanceCounter += 1\n\n X_feat = []\n feat_list = []\n last_index = 0\n for feature in feature_list:\n feat = gen_or_load_feats(feature_dict[feature], h, b, features_dir+\"/\"+feature+\".\"+name+'.npy', bodyId, feature, headId, fold=name)\n feat_list.append((last_index, last_index+len(feat[0]), str(feature)))\n last_index += len(feat[0])\n X_feat.append(feat)\n X = np.concatenate(X_feat, axis=1)\n\n return X, y, feat_list\n\ndef generate_features_test(stances, dataset, name, feature_list, features_dir):\n \"\"\"\n Equal to generate_features(), but creates features for the unlabeled test data\n \"\"\"\n h, b, bodyId, headId = [], [], [], []\n\n feature_dict = {'overlap': word_overlap_features,\n 'refuting': refuting_features,\n 'polarity': polarity_features,\n 'hand': hand_features,\n 'word_unigrams_5000_concat_tf_l2_holdout_unlbled_test': word_unigrams_5000_concat_tf_l2_holdout_unlbled_test,\n 'NMF_cos_300_holdout_unlbled_test': NMF_cos_300_holdout_unlbled_test,\n 'NMF_concat_300_holdout_unlbled_test': NMF_concat_300_holdout_unlbled_test,\n 'latent_dirichlet_allocation_25_holdout_unlbled_test': latent_dirichlet_allocation_25_holdout_unlbled_test,\n 'latent_semantic_indexing_gensim_300_concat_holdout_unlbled_test': latent_semantic_indexing_gensim_300_concat_holdout_unlbled_test,\n 'NMF_fit_all_incl_holdout_and_test': NMF_fit_all_incl_holdout_and_test,\n 'latent_dirichlet_allocation_incl_holdout_and_test': latent_dirichlet_allocation_incl_holdout_and_test,\n 'latent_semantic_indexing_gensim_holdout_and_test': latent_semantic_indexing_gensim_holdout_and_test,\n 'NMF_fit_all_concat_300_and_test': NMF_fit_all_concat_300_and_test,\n 'word_ngrams_concat_tf5000_l2_w_holdout_and_test': word_ngrams_concat_tf5000_l2_w_holdout_and_test,\n 'NMF_fit_all': NMF_fit_all,\n 'word_ngrams_concat_tf5000_l2_w_holdout': word_ngrams_concat_tf5000_l2_w_holdout,\n 'latent_dirichlet_allocation': latent_dirichlet_allocation,\n 'latent_semantic_indexing_gensim_test': latent_semantic_indexing_gensim_test,\n 'NMF_fit_all_concat_300': NMF_fit_all_concat_300,\n 'NMF_cos_50': NMF_cos_50,\n 'latent_dirichlet_allocation_25': latent_dirichlet_allocation_25,\n 
'latent_semantic_indexing_gensim_300_concat_holdout': latent_semantic_indexing_gensim_300_concat_holdout,\n 'NMF_concat_300_holdout': NMF_concat_300_holdout,\n 'word_unigrams_5000_concat_tf_l2_holdout': word_unigrams_5000_concat_tf_l2_holdout,\n 'ppdb': ppdb,\n 'stanford_ppdb': stanford_ppdb_score,\n 'stanford_ppdb_1sent': stanford_ppdb_score_1sent,\n 'stanford_ppdb_2sent': stanford_ppdb_score_2sent,\n 'stanford_ppdb_3sent': stanford_ppdb_score_3sent,\n 'stanford_sentiment': stanford_sentiment,\n 'stanford_sentiment_1sent': stanford_sentiment_1sent,\n 'stanford_sentiment_2sent': stanford_sentiment_2sent,\n 'stanford_sentiment_3sent': stanford_sentiment_3sent,\n 'stanford_wordsim': stanford_based_verb_noun_sim,\n 'stanford_wordsim_1sent': stanford_based_verb_noun_sim_1sent,\n 'stanford_wordsim_2sent': stanford_based_verb_noun_sim_2sent,\n 'stanford_wordsim_3sent': stanford_based_verb_noun_sim_3sent,\n 'stanford_negation': stanford_negation_features,\n 'stanford_negation_1sent': stanford_negation_features_1sent,\n 'stanford_negation_2sent': stanford_negation_features_2sent,\n 'stanford_negation_3sent': stanford_negation_features_3sent,\n 'stanford_avg_words_per_sent': stanford_avg_words_per_sent,\n 'stanford_avg_words_per_sent_1sent': stanford_avg_words_per_sent_1sent,\n 'stanford_avg_words_per_sent_2sent': stanford_avg_words_per_sent_2sent,\n 'stanford_avg_words_per_sent_3sent': stanford_avg_words_per_sent_3sent,\n 'hedging': hedging_features,\n 'sen2sen': sen2sen_similarity_max,\n 'wmdsenSen': word_mover_distance_similarity_sentence_min,\n 'wmdsenDoc': word_mover_distance_wholebody,\n 'sdm_sim': sdm_sim,\n 'discuss': discuss_features,\n 'single_flat_LSTM_50d_100': single_flat_LSTM_50d_100,\n 'char_3grams_5000_concat_all_data': char_3grams_5000_concat_all_data,\n 'lexical_features': lexical_features,\n 'max_diff_twitter_uni_bigrams': max_diff_twitter_uni_bigrams,\n 'mpqa_unigrams': mpqa_unigrams,\n 'negated_context_word_12grams_concat_tf5000_l2_all_data': negated_context_word_12grams_concat_tf5000_l2_all_data,\n 'nrc_emo_lex': nrc_emo_lex,\n 'nrc_hashtag_sentiment_unigram': nrc_hashtag_sentiment_unigram,\n 'nrc_hashtag_sentiment_unigram_POS': nrc_hashtag_sentiment_unigram_POS,\n #'POS_features': POS_features,\n 'readability_features': readability_features,\n 'sentiment140_unigrams': sentiment140_unigrams,\n 'structural_features': structural_features,\n 'latent_dirichlet_allocation_300': latent_dirichlet_allocation_300,\n 'NMF_cos_300': NMF_cos_300\n }\n\n stanceCounter = 0\n for stance in stances:\n h.append(stance['Headline'])\n b.append(dataset.articles[stance['Body ID']])\n bodyId.append(stance['Body ID'])\n headId.append(name+str(stanceCounter))\n stanceCounter += 1\n\n X_feat = []\n for feature in feature_list:\n print(\"calculate feature: \" + str(feature))\n feat = gen_or_load_feats(feature_dict[feature], h, b, features_dir+\"/\"+feature+\"_test.\"+name+'.npy', bodyId, feature, headId, fold=name)\n X_feat.append(feat)\n print(len(feat))\n X = np.concatenate(X_feat, axis=1)\n return X\n\ndef generate_non_bleeding_features(fold_stances, hold_out_stances, no_folds, BOW_feature_list, features_dir, d):\n \"\"\"\n Does the same as generate_features(), just for non-bleeding features. It prevents bleeding by training certain features\n (e.g. 
word unigrams) strictly on the training data, instead of training+test data.\n Feature extraction methods in feature_engineering.py have to provide an extended parameter list in order to use this\n (method_name(headlines, bodies, headlines_test, bodies_test)). The saved feature files have the following structure:\n - e.g. feature_name_0.py will hold the features of the folds from 1 to 9 and feature_name_0.test.py\n will hold the features of fold 0, derived of the folds 1 to 9\n\n This method (and feature methods based on this one) is just to get more reliable (non-bleeding) score results and cannot be used for\n the training of the final classifier.\n \"\"\"\n\n # holds all bag of words features and their feature extraction methods\n non_bleeding_feature_dict = {}\n\n def generate_holdout_BOW_features():\n for fold in range(no_folds):\n ids = list(range(no_folds))\n merged = []\n for i in ids:\n merged.extend(fold_stances[i])\n\n h = []\n b = []\n for stance in merged:\n h.append(stance['Headline'])\n b.append(d.articles[stance['Body ID']])\n\n h_test = []\n b_test = []\n for stance in hold_out_stances:\n h_test.append(stance['Headline'])\n b_test.append(d.articles[stance['Body ID']])\n\n gen_non_bleeding_feats(non_bleeding_feature_dict[feature], h, b, h_test, b_test,\n features_dir, feature, 'holdout')\n\n def generate_fold_BOW_features():\n for fold in range(no_folds):\n ids = list(range(no_folds))\n del ids[fold]\n\n merged = []\n for i in ids:\n merged.extend(fold_stances[i])\n\n # 9 of 10 folds merged for training BOW features\n h = []\n b = []\n for stance in merged:\n h.append(stance['Headline'])\n b.append(d.articles[stance['Body ID']])\n\n # 1 fold (test) to extract features out of the generated BOW\n h_test = []\n b_test = []\n for stance in fold_stances[fold]:\n h_test.append(stance['Headline'])\n b_test.append(d.articles[stance['Body ID']])\n\n gen_non_bleeding_feats(non_bleeding_feature_dict[feature],\n h, b, h_test, b_test, features_dir, feature,\n fold)\n\n for feature in BOW_feature_list:\n generate_fold_BOW_features()\n generate_holdout_BOW_features()\n\n\ndef concat_non_bleeding_features(X_train, X_test, BOW_feature_list, features_dir, fold):\n \"\"\"\n Concatenates the given train and test feature vectors with all the non bleeding features\n specified in the non_bleeding_feature_list.\n \"\"\"\n feat_list = []\n for feature in BOW_feature_list:\n X_train_part = np.load(\"%s/%s.%s.npy\" % (features_dir, feature, fold))\n last_index = len(X_train[0])\n X_train = np.concatenate([X_train, X_train_part], axis=1)\n feat_list.append((last_index, last_index+len(X_train_part[0]), str(feature)))\n X_test_part = np.load(\"%s/%s.%s.test.npy\" % (features_dir, feature, fold))\n X_test = np.concatenate([X_test, X_test_part], axis=1)\n return X_train, X_test, feat_list\n\ndef print_score_from_restored_model(clf, X_test, y_test):\n \"\"\"\n Takes a fitted classifier, predicts on base on the given X,\n compares to the actual y and prints the score.\n \"\"\"\n y_predicted = clf.predict(X_test)\n predicted = [LABELS[int(a)] for a in y_predicted]\n actual = [LABELS[int(a)] for a in y_test]\n\n # calc FNC score\n fold_score, _ = score_submission(actual, predicted)\n max_fold_score, _ = score_submission(actual, actual)\n score = fold_score / max_fold_score\n\n print(\"FNC-1 score from restored model: \" + str(score) +\"\\n\")\n\n return score\n\ndef save_model(clf, save_folder, filename):\n \"\"\"\n Dumps a given classifier to the specific folder with the given name\n \"\"\"\n import pickle\n 
path = save_folder + filename\n with open(path, 'wb') as handle:\n pickle.dump(clf, handle, protocol=pickle.HIGHEST_PROTOCOL)\n\ndef load_model(save_folder, filename):\n \"\"\"\n Loads and returns a classifier at the given folder with the given name\n \"\"\"\n print(\"Warning: Make sure older models with this name have been trained on the same features! Otherwise,\"\n \"if the lengths of the features the model has been trained on, differ, an error will occur!\")\n import pickle\n path = save_folder + filename\n with open(path, 'rb') as handle:\n return pickle.load(handle)\n\ndef get_save_folder(parent_folder, scorer_type):\n \"\"\"\n Returns an unused save location for a classifier based on its name\n \"\"\"\n if not os.path.exists(parent_folder):\n os.makedirs(parent_folder)\n\n # in order to prevent overwriting existing learned models, always create a new folder\n save_folder = parent_folder + scorer_type\n id = 0\n while os.path.exists(save_folder+\"_\"+str(id)):\n id += 1\n save_folder += \"_\" + str(id) +\"/\"\n os.makedirs(save_folder)\n return save_folder\n\ndef cross_validation(fold_stances, folds, Xs, ys, non_bleeding_features, features_dir,\n scorer_type, all_accuracies_related, all_accuracies_stance,\n all_f1_related, all_f1_stance, all_scores, result_string, learning_rate_string):\n best_score = 0\n\n for fold in fold_stances:\n ids = list(range(len(folds)))\n del ids[fold]\n\n X_train = np.vstack(tuple([Xs[i] for i in ids]))\n y_train = np.hstack(tuple([ys[i] for i in ids]))\n\n X_test = Xs[fold]\n y_test = ys[fold]\n\n # Add BOW features to current feature vectors\n # The features are specified in BOW_feature_list\n X_train, X_test, _ = concat_non_bleeding_features(\n X_train, X_test,\n non_bleeding_features, features_dir, fold)\n\n # get the estimator for this loop\n clf = esitmator_definitions.get_estimator(scorer_type)\n\n print(\"Begin fitting at: \" + str(datetime.datetime.now()).split('.')[0] + \"\\n\")\n\n # start fitting the estimator\n clf.fit(X_train, y_train)\n\n # predict the labes for fitted classifier with the test data\n predicted_int = clf.predict(X_test)\n\n #Baseline \"hack\" - uncomment to calculate the baseline\n #predicted_int = np.empty(len(y_test))\n #predicted_int.fill(3)\n\n predicted = [LABELS[int(a)] for a in predicted_int]\n actual = [LABELS[int(a)] for a in y_test]\n\n # calculate the FNC-1 score based on the predicted and the actual labels\n fold_score, _ = score_submission(actual, predicted)\n max_fold_score, _ = score_submission(actual, actual)\n score = fold_score / max_fold_score\n\n # calculates accuracy and f1-macro scores\n accuracy_stance = score_calculation.get_accuracy(predicted_int, y_test, stance=True)\n accuracy_related = score_calculation.get_accuracy(predicted_int, y_test, stance=False)\n f1_stance = score_calculation.get_f1score(predicted_int, y_test, stance=True)\n f1_related = score_calculation.get_f1score(predicted_int, y_test, stance=False)\n\n # add the scores to the list holding the stores of all folds\n all_accuracies_related.append(accuracy_related)\n all_accuracies_stance.append(accuracy_stance)\n all_f1_related.append(f1_related)\n all_f1_stance.append(f1_stance)\n\n # get best score of all folds\n all_scores.append(score)\n if score > best_score:\n best_score = score\n\n # Prepare printout for fold result\n printout = printout_manager.get_foldwise_printout(fold, accuracy_related, accuracy_stance, f1_related,\n f1_stance, score)\n print(printout) # print results for this fold\n result_string += printout # add results to 
final result file\n\n # add to special file that shows learning rate and loss of optimizer\n if isinstance(clf, MultiThreadingFeedForwardMLP):\n learning_rate_string += clf.get_learning_rates(fold) + \"\\n\"\n\n # Prepare printout for final result\n printout = printout_manager.get_cross_validation_printout(\n all_accuracies_related, all_accuracies_stance, all_f1_related, all_f1_stance, all_scores, best_score)\n print(printout) # print cross validation results\n result_string += printout # add cross validation results to result file\n\n return result_string, learning_rate_string\n\n#Taken from Benjamins LSTM\ndef append_to_loss_monitor_file(text, filepath):\n with open(filepath, 'a+') as the_file:\n the_file.write(text+\"\\n\")\n\ndef validate_holdout(Xs, ys, X_holdout, y_holdout, non_bleeding_features, features_dir,\n scorer_type, feat_indices, result_string, learning_rate_string, features):\n \"\"\"\n Trains the classifier on all of the train+test data and tests it on the holdout set\n :param Xs: All the training data's feature vectors, split in their folds\n :param ys: All the training data's labels, split in their folds\n :param X_holdout: The holdout feature vectors\n :param y_holdout: The holdout labels\n :param non_bleeding_features: The list of non-bleeding features that has to be concatenated to the existing feature vectors\n :param features_dir: the directory where the features are stored\n :param scorer_type: the scorer type, e.g. MLB_base (see estimator_definitions.py in utils folder)\n :param feat_indices: indices returned by generate_features() method. They indicate at what index of the feature vector a specific\n feature starts and where it ends. This is used for printing out the feature importances by the RandomForest classifier\n :param result_string: The current result string in order to add the holdout results\n :param learning_rate_string: The current learning rate string in order to add information about the learning rate\n :return: the updated result_string and learning_rate_string\n \"\"\"\n # define folder to save the classifier and create it if not existing\n parent_folder = \"%s/data/fnc-1/mlp_models/\" % (path.dirname(path.dirname(path.abspath(__file__))))\n\n # create the new save folder\n save_folder = get_save_folder(parent_folder, scorer_type+\"_new\")\n\n # only pass a save folder if the classifier should be saved\n best_clf = esitmator_definitions.get_estimator(scorer_type, save_folder=save_folder)\n\n # stack all the feature vectors of all the folds\n X_train = np.vstack(tuple([Xs[i] for i in range(10)]))\n y_train = np.hstack(tuple([ys[i] for i in range(10)]))\n\n # concat non-bleeding features\n X_train, X_holdout, feat_indices_holdout = concat_non_bleeding_features(\n X_train, X_holdout,\n non_bleeding_features, features_dir, 'holdout')\n\n # test for oversampling: fits the current classifier, oversampled with a given\n # method and checks the score on the holdout set\n use_over_sampling = False\n if use_over_sampling == True:\n from imblearn.over_sampling import SMOTE\n kind = ['regular', 'borderline1', 'borderline2', 'svm']\n for m in kind:\n sm = SMOTE(kind=m)\n X_res, y_res = sm.fit_sample(X_train, y_train)\n best_clf.fit(X_res, y_res)\n y_predicted = best_clf.predict(X_holdout)\n predicted = [LABELS[int(a)] for a in y_predicted]\n actual = [LABELS[int(a)] for a in y_holdout]\n fold_score, _ = score_submission(actual, predicted)\n max_fold_score, _ = score_submission(actual, actual)\n score = fold_score / max_fold_score\n print(\"Score \" + m + \":\" 
+ str(score))\n\n\n #Taken from Benjamins LSTM\n loss_monitor_file_dir = \"%s/data/fnc-1/model_results/loss_results/\" % (\n path.dirname(path.dirname(path.abspath(__file__))))\n loss_filename = loss_monitor_file_dir + str(datetime.datetime.now().strftime(\"%Y-%m-%d_%H-%M\")) + \".txt\"\n # fit the classifier\n if 'f_ext' in scorer_type:\n append_to_loss_monitor_file(\"\\n\\nFOLD holdout and classifier: \" + scorer_type + \"\\n\", loss_filename)\n append_to_loss_monitor_file(str(datetime.datetime.now()).split('.')[0], loss_filename)\n best_clf.fit(X_train, y_train, X_holdout, np.array(y_holdout), 'holdout', loss_filename)\n else:\n best_clf.fit(X_train, y_train)\n\n # predict labels\n y_predicted = best_clf.predict(X_holdout)\n predicted = [LABELS[int(a)] for a in y_predicted]\n actual = [LABELS[int(a)] for a in y_holdout]\n\n # calc FNC score\n fold_score, cm = score_submission(actual, predicted)\n max_fold_score, _ = score_submission(actual, actual)\n score = fold_score / max_fold_score\n\n # calc accuracy for related/unrelated and stances\n accuracy_stance = score_calculation.get_accuracy(y_predicted, y_holdout, stance=True)\n accuracy_related = score_calculation.get_accuracy(y_predicted, y_holdout, stance=False)\n f1_stance = score_calculation.get_f1score(y_predicted, y_holdout, stance=True)\n f1_related = score_calculation.get_f1score(y_predicted, y_holdout, stance=False)\n\n # prepare printout for final results of holdout set\n printout = printout_manager.get_holdout_printout(save_folder, accuracy_related, accuracy_stance, f1_related, f1_stance, score)\n printout += printout_manager.calculate_confusion_matrix(cm)\n print(printout) # print holdout results\n result_string += printout + \"\\n\"# add results to string that is going to be saved into a file\n\n result_file_folder = \"%s\" % (path.dirname(path.dirname(path.abspath(__file__))))\n printout_manager.save_file(result_string, result_file_folder + \"/fnc_results_holdout.txt\", \"a+\")\n\n #aligned printout for ablation:\n summary = printout_manager.get_holdout_ablation_printout(features, score,f1_stance,save_folder)\n printout_manager.save_file(summary, result_file_folder + \"/fnc_results_holdout_summary.txt\", \"a+\")\n\n # test saving and restoring model\n #filename = scorer_type + \".sav\"\n #save_model(best_clf, save_folder,filename)\n #load_clf = load_model(parent_folder + scorer_type + \"_new_0/\", filename) # the 0th folder should always exist\n #print_score_from_restored_model(load_clf, X_holdout, y_holdout)\n\n # add to special file that shows learning rate and loss of optimizer\n if isinstance(best_clf, MultiThreadingFeedForwardMLP):\n learning_rate_string += best_clf.get_learning_rates('holdout') + \"\\n\"\n\n # print feature importances\n if scorer_type == 'randomforest':\n result_file_folder = \"%s\" % (path.dirname(path.dirname(path.abspath(__file__))))\n importances = best_clf.feature_importances_\n std = np.std([tree.feature_importances_ for tree in best_clf.estimators_],\n axis=0)\n indices = np.argsort(importances)[::-1]\n feat_indices.append(feat_indices_holdout)\n\n feat_importance_string = str(feat_indices) + \"\\n\"\n for i in indices:\n feat_importance_string += str(i) + \";\" + str(importances[i]) + \";\" + str(std[i]) + \"\\n\"\n\n # save feature importances as file\n printout_manager.save_file(feat_importance_string, result_file_folder + \"/feat_importance_rf.txt\", \"a+\")\n\n return result_string, learning_rate_string\n\n\ndef final_clf_training(Xs, ys, X_holdout, y_holdout, scorer_type, 
sanity_check=False, oversampling=False):\n \"\"\"\n Train final classifier on all of the data to prepare it for the prediction of the FNC-1's unlabeled data\n :param Xs: All the training data's feature vectors, split in their folds\n :param ys: All the training data's labels, split in their folds\n :param X_holdout: The holdout feature vectors\n :param y_holdout: The holdout labels\n :param scorer_type: the scorer type, e.g. MLB_base (see estimator_definitions.py in utils folder)\n :param sanity_check: If true, the trained classifier predicts the labels of the data it was trained on and prints out the score\n :return: the final classifier\n \"\"\"\n\n # stack all the feature vectors of all the folds\n X_train = np.vstack(tuple([Xs[i] for i in range(10)]))\n y_train = np.hstack(tuple([ys[i] for i in range(10)]))\n\n # stack the holdout feature vectors on the feature vectors of all folds\n X_all = np.concatenate([X_train, X_holdout], axis=0)\n y_all = np.concatenate([y_train, y_holdout], axis=0)\n\n # define and create parent folder to save all trained classifiers into\n parent_folder = \"%s/data/fnc-1/mlp_models/\" % (path.dirname(path.dirname(path.abspath(__file__))))\n\n # create the new save folder for the specific classifer\n scorer_folder_name = scorer_type+\"_final\"\n save_folder = get_save_folder(parent_folder, scorer_folder_name+\"_new\")\n\n # get classifier and only pass a save folder if the classifier should be saved\n clf = esitmator_definitions.get_estimator(scorer_type, save_folder=save_folder)\n\n #perform oversampling if selected\n if oversampling == True:\n if 'f_ext' in scorer_type:\n print(\"Oversampling not defined for LSTM\")\n exit()\n\n import datetime\n start = datetime.datetime.now().time()\n print(\"Started oversampling/undersampling at: \" + str(start))\n # uncomment following lines for the different sampling methods #####\n # Oversampling\n from imblearn.over_sampling import SMOTE, ADASYN, RandomOverSampler\n print(\"Oversampling data\")\n #kind = ['regular', 'borderline1', 'borderline2', 'svm']\n #sm = SMOTE(kind='regular',)\n #X_res, y_res = sm.fit_sample(X_all, y_all)\n\n #ros = RandomOverSampler()\n #X_res, y_res = ros.fit_sample(X_all, y_all)\n\n #ada = ADASYN()\n #X_res, y_res = ada.fit_sample(X_all, y_all)\n\n ######################################################\n # Undersampling\n from imblearn.under_sampling import TomekLinks, EditedNearestNeighbours, CondensedNearestNeighbour, \\\n NeighbourhoodCleaningRule, InstanceHardnessThreshold\n # remove Tomek links\n tl = TomekLinks(return_indices=True)\n X_res, y_res, idx_resampled = tl.fit_sample(X_all, y_all)\n\n #enn = EditedNearestNeighbours(random_state=0)\n #X_res, y_res = enn.fit_sample(X_all, y_all)\n\n #cnn = CondensedNearestNeighbour(random_state=0)\n #X_res, y_res = cnn.fit_sample(X_all, y_all)\n\n #ncr = NeighbourhoodCleaningRule(random_state=0)\n #X_res, y_res = ncr.fit_sample(X_all, y_all)\n\n #iht = InstanceHardnessThreshold(random_state=0, estimator=clf)\n #X_res, y_res = iht.fit_sample(X_all, y_all)\n\n\n ##################\n # Combination of Undersampling and oversampling\n\n from imblearn.combine import SMOTEENN, SMOTETomek\n #smote_enn = SMOTEENN(random_state=0)\n #X_res, y_res = smote_enn.fit_sample(X_all, y_all)\n\n #smote_tomek = SMOTETomek(random_state=0)\n #X_res, y_res = smote_tomek.fit_sample(X_all, y_all)\n\n end = datetime.datetime.now().time()\n print(\"Ended oversampling/undersampling at: \" + str(end))\n\n clf.fit(X_res, y_res)\n else: # if oversampling is false\n import 
datetime\n # fit the final classifier\n loss_monitor_file_dir = \"%s/data/fnc-1/model_results/loss_results/\" % (\n path.dirname(path.dirname(path.abspath(__file__))))\n loss_filename = loss_monitor_file_dir + str(datetime.datetime.now().strftime(\"%Y-%m-%d_%H-%M\")) + \".txt\"\n # fit the classifier\n if 'f_ext' in scorer_type:\n append_to_loss_monitor_file(\"\\n\\nFOLD holdout and classifier: \" + scorer_type + \"\\n\", loss_filename)\n append_to_loss_monitor_file(str(datetime.datetime.now()).split('.')[0], loss_filename)\n clf.fit(X_train, y_train, X_holdout, np.array(y_holdout), 'holdout', loss_filename)\n else:\n clf.fit(X_all, y_all)\n\n # save the model\n filename = scorer_folder_name + \".sav\"\n save_model(clf, save_folder, filename) # save model with filename to specific folder\n\n # predict on the data the classifier was trained on => should give near perfect score\n if sanity_check == True:\n # get predicted and actual labels\n y_predicted = clf.predict(X_all)\n predicted = [LABELS[int(a)] for a in y_predicted]\n actual = [LABELS[int(a)] for a in y_all]\n\n # calc FNC score\n fold_score, _ = score_submission(actual, predicted)\n max_fold_score, _ = score_submission(actual, actual)\n score = fold_score / max_fold_score\n\n # calc accuracy, f1 macro\n accuracy_stance = score_calculation.get_accuracy(y_predicted, y_all, stance=True)\n accuracy_related = score_calculation.get_accuracy(y_predicted, y_all, stance=False)\n f1_stance = score_calculation.get_f1score(y_predicted, y_all, stance=True)\n f1_related = score_calculation.get_f1score(y_predicted, y_all, stance=False)\n\n # printout results\n printout = printout_manager.get_holdout_printout(save_folder, accuracy_related, accuracy_stance, f1_related,\n f1_stance, score)\n print(\"SANITY CHECK (predict on train data):\")\n print(printout)\n return clf\n\ndef final_clf_prediction(data_path, features, features_dir, scorer_type, run_final_train, final_clf):\n \"\"\"\n Run the prediction on the final model. In order to do that, the features vectors of the unlabeled FNC-1 data are\n generated first.\n :param data_path: data_path to the unlabeled stances and the corresponding bodies\n :param features: The feature list\n :param features_dir: The directory where the features are stored\n :param scorer_type: the scorer type, e.g. MLB_base (see estimator_definitions.py in utils folder)\n :param run_final_train: Sanity check: if the final classifier has been trained in this run, check if the prediction of it\n compared to the classifier that is being loaded in this method, are the same. 
If yes, they represent the same model.\n :param final_clf: The classifier that was trained in this run (IF a classifier was trained)\n :return:\n \"\"\"\n\n d = myConstants.testdataset\n\n # generate features for the unlabeled testing set\n X_final_test = generate_features_test(d.stances, d, str(\"final_test\"), features, features_dir)\n\n # define and create parent folder to save all trained classifiers into\n parent_folder = \"%s/data/fnc-1/mlp_models/\" % (path.dirname(path.dirname(path.abspath(__file__))))\n fnc_result_folder = \"%s/data/fnc-1/fnc_results/\" % (path.dirname(path.dirname(path.abspath(__file__))))\n\n # load model [scorer_type]_final_2 classifier\n filename = scorer_type + \"_final.sav\"\n load_clf = load_model(parent_folder + scorer_type + myConstants.model_name, filename)\n # The model is set in settings.py in class \"myConstants\"\n\n print(\"Load model for final prediction of test set: \" + parent_folder + scorer_type + myConstants.model_name + filename)\n\n # predict classes and turn into labels\n y_predicted = load_clf.predict(X_final_test)\n predicted = [LABELS[int(a)] for a in y_predicted]\n\n # create folder to save the file\n if not os.path.exists(parent_folder):\n os.makedirs(parent_folder)\n if not os.path.exists(fnc_result_folder):\n os.makedirs(fnc_result_folder)\n\n # save the submission file, including the prediction for the labels\n with open(fnc_result_folder + scorer_type + \"_submission.csv\", 'w') as csvfile:\n fieldnames = [\"Headline\", \"Body ID\", \"Stance\"]\n writer = csv.DictWriter(csvfile, fieldnames=fieldnames)\n writer.writeheader()\n i = 0\n for stance in d.stances:\n writer.writerow(\n {'Headline': stance['Headline'], 'Body ID': stance['Body ID'], 'Stance': predicted[i]})\n i += 1\n\n\n # save the probabilities file, including the prediction for the labels\n if (\"voting_\" not in scorer_type) and (\"f_ext\" not in scorer_type) and (\"MLP_base_2\" not in scorer_type) and (\"featMLP\" not in scorer_type) and (\"stackLSTM\" not in scorer_type):\n print(\"Generating submission_probabilities.csv\")\n predicted_proba = load_clf.predict_proba(X_final_test)\n with open(fnc_result_folder + scorer_type + \"_probabilities.csv\", 'w') as csvfile:\n fieldnames = [\"Headline\", \"Body ID\", \"Agree\", \"Disagree\", \"Discuss\", \"Unrelated\"]\n writer = csv.DictWriter(csvfile, fieldnames=fieldnames)\n writer.writeheader()\n i = 0\n for stance in d.stances:\n writer.writerow(\n {'Headline': stance['Headline'], 'Body ID': stance['Body ID'], 'Agree': predicted_proba[i][0],\n 'Disagree': predicted_proba[i][1], 'Discuss': predicted_proba[i][2],\n 'Unrelated': predicted_proba[i][3]})\n i += 1\n else:\n print(\"Not generating submission_probabilities.csv - because classifier contains \\\"voting\\\", \\\"f_ext\\\" or \\\"MLP_base_2\\\"\")\n\n # check whether loaded clf from disk and just trained clf return the same results\n if (run_final_train == True) and not (final_clf is None):\n print(\"Check whether loaded final model and just trained final model retrieve the same results.\"\n \"The results are only equal (=None) if they are the same model. This is a check to see whether \"\n \"saving and loading the model works correctly:\")\n print(np.testing.assert_array_equal(y_predicted, final_clf.predict(X_final_test)))\n\ndef delete_ffmlp_data():\n \"\"\"\n In order to copy the structure of Sklearn's BaseEstimator (fit(), predict(), ...) the MultiThreadingFeedForwardMLP\n has to save its graph after fitting. 
If its argument \"save_folder\" doesn't get a specific folder, it's seen as a\n temporary model (lifetime of the model is just for the runtime). The model will be saved in a special temporary folder.\n This method is called after the pipeline run has finished and deletes all the temporarily saved models of\n MultiThreadingFeedForwardMLP.\n \"\"\"\n import shutil\n ffmlp_dir = \"%s/data/fnc-1/mlp_models/temp_models\" % (\n path.dirname(path.dirname(path.abspath(__file__))))\n if (os.path.exists(ffmlp_dir)):\n for the_file in os.listdir(ffmlp_dir):\n file_path = os.path.join(ffmlp_dir, the_file)\n try:\n if os.path.isfile(file_path):\n os.unlink(file_path)\n elif os.path.isdir(file_path):\n shutil.rmtree(file_path)\n except Exception as e:\n print(e)\n\ndef pipeline():\n # define data paths\n result_file_folder = \"%s\" % (path.dirname(path.dirname(path.abspath(__file__))))\n data_path = \"%s/data/fnc-1\" % (path.dirname(path.dirname(path.abspath(__file__))))\n embeddPath = \"%s/data/embeddings/google_news/GoogleNews-vectors-negative300.bin.gz\" % (path.dirname(path.dirname(path.abspath(__file__))))\n\n # get arguments for pipeline call\n pipeline_type, scorer_type, threshold = get_args()\n splits_dir = myConstants.splits_dir\n features_dir = myConstants.features_dir\n\n # configure pipeline runs by using given terminal arguments\n run_CV = False\n if \"crossv\" in pipeline_type:\n run_CV = True # run cross validation\n\n run_validation = False\n if \"holdout\" in pipeline_type:\n run_validation = True # run validation on holdout set\n\n run_final_train = False\n if \"ftrain\" in pipeline_type:\n run_final_train = True # train classifier on all the data available\n\n run_final_prediction = False\n if \"ftest\" in pipeline_type:\n run_final_prediction = True # run prediction on test data provided by FNC-1 challenge\n\n if \"analysis\" in pipeline_type:\n # parse in train bodies and stances for analysis\n bodyDict = myConstants.testdataset.articles\n train_data = myConstants.testdataset.stances\n\n if scorer_type == None:\n raise AttributeError(\"Please specify scorer_type\")\n if threshold == None:\n raise AttributeError(\"Please specify Threshold\")\n model = Model(scorer_type, embeddPath=embeddPath)\n model.analyze_data(train_data, bodyDict, threshold=threshold)\n\n perform_oversampling = myConstants.perform_oversampling\n\n # train the model / predict on basis of the model\n if True in [run_CV, run_validation, run_final_train, run_final_prediction]:\n\n if sys.version_info.major < 3:\n sys.stderr.write('Please use Python version 3 and above\\n')\n sys.exit(1)\n\n d = myConstants.d\n\n folds, hold_out = kfold_split(d, n_folds=10, base_dir=splits_dir)\n fold_stances, hold_out_stances = get_stances_for_folds(d,folds,hold_out)\n\n Xs = dict()\n ys = dict()\n\n feature_list = myConstants.feature_list\n\n for scorer_type, features, non_bleeding_features in feature_list:\n\n # print classifier and features for this loop\n print(scorer_type)\n print(features)\n print(non_bleeding_features)\n\n # generate bow features for later use\n generate_non_bleeding_features(fold_stances, hold_out_stances, 10, non_bleeding_features, features_dir, d)\n\n # Load/Precompute all features now\n X_holdout, y_holdout, feat_indices = generate_features(hold_out_stances, d, \"holdout\", features, features_dir)\n for fold in fold_stances:\n Xs[fold], ys[fold], _ = generate_features(fold_stances[fold], d, str(fold), features, features_dir)\n\n # initialize lists needed to save results for each fold\n 
all_accuracies_related = []\n all_accuracies_stance = []\n all_f1_related = []\n all_f1_stance = []\n all_scores = []\n\n # get head for result file\n file_head = printout_manager.get_printout_file_head(scorer_type, features, non_bleeding_features)\n result_string = file_head # use head for result file\n learning_rate_string = file_head # use head for learning rate file\n\n # run cross validation on the specified folds\n if run_CV == True:\n result_string, learning_rate_string = cross_validation(fold_stances, folds, Xs, ys, non_bleeding_features, features_dir,\n scorer_type, all_accuracies_related, all_accuracies_stance,\n all_f1_related, all_f1_stance, all_scores, result_string, learning_rate_string)\n\n # Test on holdout set\n if run_validation == True:\n result_string, learning_rate_string = validate_holdout(Xs, ys, X_holdout, y_holdout, non_bleeding_features,\n features_dir, scorer_type, feat_indices, result_string, learning_rate_string, features)\n\n # Train the final classifer\n if run_final_train == True:\n final_clf = final_clf_training(Xs, ys, X_holdout, y_holdout, scorer_type, sanity_check=True, oversampling=perform_oversampling)\n\n # Run the final classifier on the test data\n if run_final_prediction == True:\n if run_final_train == True:\n final_clf_prediction(data_path, features, features_dir, scorer_type, run_final_train, final_clf)\n else:\n final_clf_prediction(data_path, features, features_dir, scorer_type, run_final_train, None)\n\n # calculate FNC Score\n fnc_result_folder = \"%s/data/fnc-1/fnc_results/%s_submission.csv\" % (path.dirname(path.dirname(path.abspath(__file__))), scorer_type)\n fnc_gold_labels_file = myConstants.test_stances_gold\n predicted_set = scorer.load_dataset(fnc_result_folder)\n fnc_gold_labels = scorer.load_dataset(fnc_gold_labels_file)\n test_score, cm, f1_score = scorer.score_submission(fnc_gold_labels, predicted_set)\n null_score, max_score = scorer.score_defaults(fnc_gold_labels)\n\n fnc_results = \"################################################ \\n\"\n fnc_results += \"Corpora: \" + myConstants.datasetName + \"\\n\"\n fnc_results += \"Model:\" + scorer_type + myConstants.model_name + \"\\n\"\n if perform_oversampling == True:\n fnc_results += \"Using oversampling \\n\"\n fnc_results += result_string + \"\\n\" + printout_manager.calculate_confusion_matrix(cm)\n fnc_results += scorer.SCORE_REPORT.format(max_score, null_score, test_score) + \"\\n\"\n fnc_results += \"\\nRelative FNC Score: {:.3f}\".format(100/max_score*test_score) + \"% \\n\"\n fnc_results += \"\\n\" + f1_score + \"\\n\"\n\n print(fnc_results)\n printout_manager.save_file(fnc_results, result_file_folder + \"/fnc_results.txt\", \"a+\")\n\n # save file with results to disk\n printout_manager.save_file(result_string, result_file_folder + \"/result_file_temp.txt\", \"a+\")\n\n # save file with learning rates to disk\n learning_rate_string += \"===================================\\n\"\n printout_manager.save_file(learning_rate_string, result_file_folder + \"/learning_rate_file_temp.txt\", \"a+\")\n\n # delete temporary saved MultiThreadingFeedForwardMLP models if existing\n delete_ffmlp_data()\n\nif __name__ == '__main__':\n pipeline()\n"
] | [
[
"numpy.load",
"numpy.argsort",
"numpy.array",
"numpy.std",
"numpy.concatenate"
]
] |
PaullMP/TensorFlowT | [
"b9b3b5b19971671fe24868273ca5274c1ec7169f"
] | [
"tensorflow/python/__init__.py"
] | [
"# Copyright 2015 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Import core names of TensorFlow.\n\nPrograms that want to build TensorFlow Ops and Graphs without having to import\nthe constructors and utilities individually can import this file:\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow as tf\n\n\"\"\"\n\nimport ctypes\nimport importlib\nimport inspect\nimport sys\nimport traceback\n\n# go/tf-wildcard-import\n# pylint: disable=wildcard-import,g-bad-import-order,g-import-not-at-top\n\n# On UNIX-based platforms, pywrap_tensorflow is a SWIG-generated\n# python library that dynamically loads _pywrap_tensorflow.so. The\n# default mode for loading keeps all the symbol private and not\n# visible to other libraries that may be loaded. Setting the mode to\n# RTLD_GLOBAL to make the symbols visible, so that custom op libraries\n# imported using `tf.load_op_library()` can access symbols defined in\n# _pywrap_tensorflow.so.\nimport numpy as np\ntry:\n if hasattr(sys, 'getdlopenflags') and hasattr(sys, 'setdlopenflags'):\n _default_dlopen_flags = sys.getdlopenflags()\n sys.setdlopenflags(_default_dlopen_flags | ctypes.RTLD_GLOBAL)\n from tensorflow.python import pywrap_tensorflow\n sys.setdlopenflags(_default_dlopen_flags)\n else:\n # TODO(keveman,mrry): Support dynamic op loading on platforms that do not\n # use `dlopen()` for dynamic loading.\n from tensorflow.python import pywrap_tensorflow\nexcept ImportError:\n msg = \"\"\"%s\\n\\nError importing tensorflow. 
Unless you are using bazel,\nyou should not try to import tensorflow from its source directory;\nplease exit the tensorflow source tree, and relaunch your python interpreter\nfrom there.\"\"\" % traceback.format_exc()\n raise ImportError(msg)\n\n# Protocol buffers\nfrom tensorflow.core.framework.graph_pb2 import *\nfrom tensorflow.core.framework.node_def_pb2 import *\nfrom tensorflow.core.framework.summary_pb2 import *\nfrom tensorflow.core.framework.attr_value_pb2 import *\nfrom tensorflow.core.protobuf.config_pb2 import *\nfrom tensorflow.core.util.event_pb2 import *\n\n# Framework\nfrom tensorflow.python.framework.framework_lib import *\nfrom tensorflow.python.framework.versions import *\nfrom tensorflow.python.framework import errors\n\n# Session\nfrom tensorflow.python.client.client_lib import *\n\n# Ops\nfrom tensorflow.python.ops.standard_ops import *\n\n# Bring in subpackages.\nfrom tensorflow.python.ops import nn\nfrom tensorflow.python.ops import sdca_ops as sdca\nfrom tensorflow.python.ops import image_ops as image\nfrom tensorflow.python.user_ops import user_ops\nfrom tensorflow.python.util import compat\nfrom tensorflow.python.summary import summary\n\n# Import the names from python/training.py as train.Name.\nfrom tensorflow.python.training import training as train\n\n# Sub-package for performing i/o directly instead of via ops in a graph.\nfrom tensorflow.python.lib.io import python_io\n\n# Make some application and test modules available.\nfrom tensorflow.python.platform import app\nfrom tensorflow.python.platform import flags\nfrom tensorflow.python.platform import gfile\nfrom tensorflow.python.platform import tf_logging as logging\nfrom tensorflow.python.platform import resource_loader\nfrom tensorflow.python.platform import sysconfig\nfrom tensorflow.python.platform import test\n\nfrom tensorflow.python.util.all_util import remove_undocumented\nfrom tensorflow.python.util.all_util import make_all\n\n# Import modules whose docstrings contribute, for use by remove_undocumented\n# below.\nfrom tensorflow.python.client import client_lib\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import framework_lib\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import check_ops\nfrom tensorflow.python.ops import control_flow_ops\nfrom tensorflow.python.ops import functional_ops\nfrom tensorflow.python.ops import histogram_ops\nfrom tensorflow.python.ops import io_ops\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops import script_ops\nfrom tensorflow.python.ops import session_ops\nfrom tensorflow.python.ops import sparse_ops\nfrom tensorflow.python.ops import state_ops\nfrom tensorflow.python.ops import string_ops\nfrom tensorflow.python.ops import tensor_array_ops\n\n# Symbols whitelisted for export without documentation.\n# TODO(cwhipkey): review these and move to contrib, expose through\n# documentation, or remove.\n_allowed_symbols = [\n 'AttrValue',\n 'ConfigProto',\n 'DeviceSpec',\n 'Event',\n 'GPUOptions',\n 'GRAPH_DEF_VERSION',\n 'GRAPH_DEF_VERSION_MIN_CONSUMER',\n 'GRAPH_DEF_VERSION_MIN_PRODUCER',\n 'GraphDef',\n 'GraphOptions',\n 'HistogramProto',\n 'LogMessage',\n 'NameAttrList',\n 'NodeDef',\n 'OptimizerOptions',\n 'RunOptions',\n 'RunMetadata',\n 'SessionLog',\n 'Summary',\n]\n\n# The following symbols are kept for compatibility. 
It is our plan\n# to remove them in the future.\n_allowed_symbols.extend([\n 'arg_max',\n 'arg_min',\n 'create_partitioned_variables',\n 'deserialize_many_sparse',\n 'lin_space',\n 'list_diff', # Use tf.listdiff instead.\n 'parse_single_sequence_example',\n 'serialize_many_sparse',\n 'serialize_sparse',\n 'sparse_matmul', ## use tf.matmul instead.\n])\n\n# This is needed temporarily because we import it explicitly.\n_allowed_symbols.extend([\n 'platform', ## This is included by the tf.learn main template.\n 'pywrap_tensorflow',\n])\n\n# Dtypes exported by framework/dtypes.py.\n# TODO(cwhipkey): expose these through documentation.\n_allowed_symbols.extend([\n 'QUANTIZED_DTYPES',\n 'bfloat16',\n 'bfloat16_ref',\n 'bool',\n 'bool_ref',\n 'complex64',\n 'complex64_ref',\n 'complex128',\n 'complex128_ref',\n 'double',\n 'double_ref',\n 'half',\n 'half_ref',\n 'float16',\n 'float16_ref',\n 'float32',\n 'float32_ref',\n 'float64',\n 'float64_ref',\n 'int16',\n 'int16_ref',\n 'int32',\n 'int32_ref',\n 'int64',\n 'int64_ref',\n 'int8',\n 'int8_ref',\n 'qint16',\n 'qint16_ref',\n 'qint32',\n 'qint32_ref',\n 'qint8',\n 'qint8_ref',\n 'quint16',\n 'quint16_ref',\n 'quint8',\n 'quint8_ref',\n 'string',\n 'string_ref',\n 'uint16',\n 'uint16_ref',\n 'uint8',\n 'uint8_ref',\n 'resource',\n 'resource_ref',\n])\n\n# Export modules and constants.\n_allowed_symbols.extend([\n 'app',\n 'compat',\n 'errors',\n 'flags',\n 'gfile',\n 'image',\n 'logging',\n 'newaxis',\n 'nn',\n 'python_io',\n 'resource_loader',\n 'sdca',\n 'summary',\n 'sysconfig',\n 'test',\n 'train',\n 'user_ops',\n])\n\n# Variables framework.versions:\n_allowed_symbols.extend([\n 'VERSION',\n 'GIT_VERSION',\n 'COMPILER_VERSION',\n])\n\n# Remove all extra symbols that don't have a docstring or are not explicitly\n# referenced in the whitelist.\nremove_undocumented(__name__, _allowed_symbols,\n [framework_lib, array_ops, client_lib, check_ops,\n compat, constant_op, control_flow_ops, functional_ops,\n histogram_ops, io_ops, math_ops, nn, script_ops,\n session_ops, sparse_ops, state_ops, string_ops,\n summary, tensor_array_ops, train])\n\n# Special dunders that we choose to export:\n_exported_dunders = set([\n '__version__',\n '__git_version__',\n '__compiler_version__',\n])\n\n# Expose symbols minus dunders, unless they are whitelisted above.\n# This is necessary to export our dunders.\n__all__ = [s for s in dir() if s in _exported_dunders or not s.startswith('_')]\n"
] | [
[
"tensorflow.python.util.all_util.remove_undocumented"
]
] |
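The TensorFlow `__init__.py` in the row above curates the public namespace by whitelisting symbols, exporting a few chosen dunders, and pruning everything undocumented via `remove_undocumented`. A minimal, self-contained sketch of that export pattern — the names `VERSION`, `public_api`, and `_internal_helper` are hypothetical stand-ins, not TensorFlow symbols:

```python
# Sketch of whitelist-driven exports, assuming the same idiom as above:
# keep whitelisted names and chosen dunders, hide everything else.
_allowed_symbols = ['VERSION', 'public_api']
_exported_dunders = {'__version__'}

__version__ = '0.1'
VERSION = '0.1'

def public_api():
    """A documented public entry point; whitelisted, so exported."""

def _internal_helper():
    """Underscore-prefixed and not whitelisted, so hidden."""

# Mirrors the final `__all__` comprehension in the file above: dunders are
# exported only if explicitly chosen, other names only if whitelisted.
__all__ = [s for s in dir() if s in _exported_dunders
           or (not s.startswith('_') and s in _allowed_symbols)]
```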
KOLANICH-ML/rbfopt | [
"2243135f7307b4cb9a99292220e2381a1e776fbf"
] | [
"tests/test_rbfopt_degree0_models.py"
] | [
"\"\"\"Test the successful creation of Pyomo 0-degree models in RBFOpt.\n\nThis module contains unit tests for the module rbfopt_degree0_models.\n\nLicensed under Revised BSD license, see LICENSE.\n(C) Copyright International Business Machines Corporation 2016.\n\n\"\"\"\n\nfrom __future__ import print_function\nfrom __future__ import division\nfrom __future__ import absolute_import\n\nimport unittest\nimport numpy as np\nimport pyomo.environ\nimport rbfopt\nimport rbfopt.rbfopt_utils as ru\nimport rbfopt.rbfopt_degree0_models as d0\nfrom rbfopt.rbfopt_settings import RbfoptSettings\n\nclass TestMultiquadricModels(unittest.TestCase):\n \"\"\"Test the rbfopt_degree0_models module using multiquadric RBF.\"\"\"\n\n def setUp(self):\n \"\"\"Generate data to simulate an optimization problem.\"\"\"\n np.random.seed(71294123)\n self.settings = RbfoptSettings(rbf = 'multiquadric')\n self.n = 3\n self.k = 5\n self.var_lower = np.array([i for i in range(self.n)])\n self.var_upper = np.array([i + 10 for i in range(self.n)])\n self.node_pos = np.array([self.var_lower, self.var_upper,\n [1, 2, 3], [9, 5, 8.8], [5.5, 7, 12]])\n self.node_val = np.array([2*i for i in range(self.k)])\n Amat = [[1.0, 17.349351572897476, 1.9999999999999998,\n 12.009995836801943, 12.932517156377562, 1.0],\n [17.349351572897476, 1.0, 15.620499351813308,\n 6.945502141674135, 6.103277807866851, 1.0],\n [1.9999999999999998, 15.620499351813308, 1.0,\n 10.374969879474351, 11.280514172678478, 1.0],\n [12.009995836801943, 6.945502141674135, 10.374969879474351,\n 1.0, 5.243090691567331, 1.0], \n [12.932517156377562, 6.103277807866851, 11.280514172678478,\n 5.243090691567331, 1.0, 1.0], \n [1.0, 1.0, 1.0, 1.0, 1.0, 0.0]]\n self.Amat = np.matrix(Amat)\n self.Amatinv = self.Amat.getI()\n self.rbf_lambda = np.array([1.981366489986409, 0.6262004309283905,\n -1.8477896263093248, -0.10028069928913483,\n -0.65949659531634])\n self.rbf_h = np.array([0.5833631458309435])\n self.integer_vars = np.array([1])\n # -- end function \n\n def test_create_min_rbf_model(self):\n \"\"\"Test the create_min_rbf_model function.\n\n This test simply checks whether the function returns a valid\n pyomo.ConcreteModel object.\n \"\"\"\n model = d0.create_min_rbf_model(self.settings, self.n, self.k,\n self.var_lower, self.var_upper,\n self.integer_vars, None, self.node_pos,\n self.rbf_lambda, self.rbf_h)\n self.assertIsInstance(model, pyomo.environ.ConcreteModel)\n model = d0.create_min_rbf_model(\n self.settings, 10, 20, np.array([0] * 10),np.array([1] * 10),\n np.array([i for i in range(10)]),\n (np.array([0]), np.array([]),\n [(0, 0, np.array([i for i in range(10)]))]),\n np.random.randint(0, 2, size=(20, 10)),\n np.random.uniform(size=20), np.array([-1]))\n self.assertIsInstance(model, pyomo.environ.ConcreteModel)\n \n\n def test_create_max_one_over_mu_model(self):\n \"\"\"Test the create_max_one_over_mu_model function.\n\n This test simply checks whether the function returns a valid\n pyomo.ConcreteModel object.\n \"\"\"\n model = d0.create_max_one_over_mu_model(self.settings, self.n, self.k,\n self.var_lower, self.var_upper,\n self.integer_vars, None,\n self.node_pos, self.Amat)\n self.assertIsInstance(model, pyomo.environ.ConcreteModel)\n\n def test_create_max_h_k_model(self):\n \"\"\"Test the create_max_h_k_model function.\n\n This test simply checks whether the function returns a valid\n pyomo.ConcreteModel object.\n \"\"\"\n model = d0.create_max_h_k_model(self.settings, self.n, self.k,\n self.var_lower, self.var_upper,\n self.integer_vars, None,\n 
self.node_pos, self.rbf_lambda,\n self.rbf_h, self.Amat, -1)\n self.assertIsInstance(model, pyomo.environ.ConcreteModel)\n\n def test_create_min_bump_model(self):\n \"\"\"Test the create_min_bump_model function.\n\n This test simply checks whether the function returns a valid\n pyomo.ConcreteModel object.\n \"\"\"\n Phimat = self.Amat[:self.k, :self.k]\n Pmat = self.Amat[:self.k, self.k:]\n node_err_bounds = np.array([[- 2, + 2] for i in range(self.k)])\n model = d0.create_min_bump_model(self.settings, self.n, self.k, \n Phimat, Pmat, self.node_val,\n node_err_bounds)\n self.assertIsInstance(model, pyomo.environ.ConcreteModel)\n\n def test_create_maximin_dist_model(self):\n \"\"\"Test the create_maximin_dist_model function.\n\n This test simply checks whether the function returns a valid\n pyomo.ConcreteModel object.\n \"\"\"\n model = d0.create_maximin_dist_model(self.settings, self.n, self.k,\n self.var_lower, self.var_upper, \n self.integer_vars, None,\n self.node_pos)\n self.assertIsInstance(model, pyomo.environ.ConcreteModel)\n\n def test_create_min_msrsm_model(self):\n \"\"\"Test the create_min_msrsm_model function.\n\n This test simply checks whether the function returns a valid\n pyomo.ConcreteModel object.\n \"\"\"\n model = d0.create_min_msrsm_model(self.settings, self.n, self.k,\n self.var_lower, self.var_upper,\n self.integer_vars, None,\n self.node_pos, self.rbf_lambda,\n self.rbf_h, 0.5, 0.0, 1.0,\n min(self.node_val),\n max(self.node_val))\n self.assertIsInstance(model, pyomo.environ.ConcreteModel)\n\n# -- end class\n\nclass TestLinearModels(unittest.TestCase):\n \"\"\"Test the rbfopt_degree0_models module using linear RBF.\"\"\"\n\n def setUp(self):\n \"\"\"Generate data to simulate an optimization problem.\"\"\"\n self.settings = RbfoptSettings(rbf = 'linear')\n self.n = 3\n self.k = 5\n self.var_lower = np.array([i for i in range(self.n)])\n self.var_upper = np.array([i + 10 for i in range(self.n)])\n self.node_pos = np.array([self.var_lower, self.var_upper,\n [1, 2, 3], [9, 5, 8.8], [5.5, 7, 12]])\n self.node_val = np.array([2*i for i in range(self.k)])\n Amat = [[0.0, 17.320508075688775, 1.7320508075688772,\n 11.968291440301744, 12.893796958227627, 1.0],\n [17.320508075688775, 0.0, 15.588457268119896,\n 6.873136110975833, 6.020797289396148, 1.0],\n [1.7320508075688772, 15.588457268119896, 0.0,\n 10.32666451474047, 11.236102527122116, 1.0],\n [11.968291440301744, 6.873136110975833, \n 10.32666451474047, 0.0, 5.146843692983108, 1.0],\n [12.893796958227627, 6.020797289396148,\n 11.236102527122116, 5.146843692983108, 0.0, 1.0], \n [1.0, 1.0, 1.0, 1.0, 1.0, 0.0]]\n self.Amat = np.matrix(Amat)\n self.Amatinv = self.Amat.getI()\n self.rbf_lambda = np.array([1.1704846814048488, 0.5281643269521171,\n -0.9920149389974761, -0.1328847504999134,\n -0.5737493188595765])\n self.rbf_h = np.array([1.5583564301976252])\n self.integer_vars = np.array([1])\n # -- end function \n\n def test_create_min_rbf_model(self):\n \"\"\"Test the create_min_rbf_model function.\n\n This test simply checks whether the function returns a valid\n pyomo.ConcreteModel object.\n \"\"\"\n model = d0.create_min_rbf_model(self.settings, self.n, self.k,\n self.var_lower, self.var_upper,\n self.integer_vars, None, self.node_pos,\n self.rbf_lambda, self.rbf_h)\n self.assertIsInstance(model, pyomo.environ.ConcreteModel)\n model = d0.create_min_rbf_model(\n self.settings, 10, 20, np.array([0] * 10),np.array([1] * 10),\n np.array([i for i in range(10)]),\n (np.array([0]), np.array([]),\n [(0, 0, np.array([i for i in 
range(10)]))]),\n np.random.randint(0, 2, size=(20, 10)),\n np.random.uniform(size=20), np.array([-1]))\n self.assertIsInstance(model, pyomo.environ.ConcreteModel)\n\n\n def test_create_max_one_over_mu_model(self):\n \"\"\"Test the create_max_one_over_mu_model function.\n\n This test simply checks whether the function returns a valid\n pyomo.ConcreteModel object.\n \"\"\"\n model = d0.create_max_one_over_mu_model(self.settings, self.n, self.k,\n self.var_lower, self.var_upper,\n self.integer_vars, None,\n self.node_pos, self.Amat)\n self.assertIsInstance(model, pyomo.environ.ConcreteModel)\n\n def test_create_max_h_k_model(self):\n \"\"\"Test the create_max_h_k_model function.\n\n This test simply checks whether the function returns a valid\n pyomo.ConcreteModel object.\n \"\"\"\n model = d0.create_max_h_k_model(self.settings, self.n, self.k,\n self.var_lower, self.var_upper,\n self.integer_vars, None,\n self.node_pos, self.rbf_lambda,\n self.rbf_h, self.Amat, -1)\n self.assertIsInstance(model, pyomo.environ.ConcreteModel)\n\n def test_create_min_bump_model(self):\n \"\"\"Test the create_min_bump_model function.\n\n This test simply checks whether the function returns a valid\n pyomo.ConcreteModel object.\n \"\"\"\n Phimat = self.Amat[:self.k, :self.k]\n Pmat = self.Amat[:self.k, self.k:]\n node_err_bounds = np.array([[- 2, + 2] for i in range(self.k)])\n model = d0.create_min_bump_model(self.settings, self.n, self.k, \n Phimat, Pmat, self.node_val,\n node_err_bounds)\n self.assertIsInstance(model, pyomo.environ.ConcreteModel)\n\n def test_create_maximin_dist_model(self):\n \"\"\"Test the create_maximin_dist_model function.\n\n This test simply checks whether the function returns a valid\n pyomo.ConcreteModel object.\n \"\"\"\n model = d0.create_maximin_dist_model(self.settings, self.n, self.k,\n self.var_lower, self.var_upper, \n self.integer_vars, None,\n self.node_pos)\n self.assertIsInstance(model, pyomo.environ.ConcreteModel)\n\n def test_create_min_msrsm_model(self):\n \"\"\"Test the create_min_msrsm_model function.\n\n This test simply checks whether the function returns a valid\n pyomo.ConcreteModel object.\n \"\"\"\n model = d0.create_min_msrsm_model(self.settings, self.n, self.k,\n self.var_lower, self.var_upper,\n self.integer_vars, None,\n self.node_pos, self.rbf_lambda,\n self.rbf_h, 0.5, 0.0, 1.0,\n min(self.node_val),\n max(self.node_val))\n self.assertIsInstance(model, pyomo.environ.ConcreteModel)\n\n# -- end class\n"
] | [
[
"numpy.random.uniform",
"numpy.matrix",
"numpy.random.seed",
"numpy.array",
"numpy.random.randint"
]
] |
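Each test in the row above seeds NumPy for reproducible fixtures and asserts only that the factory returns a `pyomo.environ.ConcreteModel` — a type-level smoke test. A standalone sketch of that pattern, where `build_model` is a hypothetical stand-in for the `d0.create_*` factories; note also that the `np.matrix`/`.getI()` calls in the fixtures are deprecated in modern NumPy in favor of `np.array` and `np.linalg.inv`:

```python
import unittest
import numpy as np

def build_model(node_pos, node_val):
    """Hypothetical factory; rbfopt's factories return ConcreteModel instead."""
    return {'nodes': node_pos, 'values': node_val}

class TestModelFactory(unittest.TestCase):
    """Seed the RNG in setUp, then assert only on the constructed type."""

    def setUp(self):
        np.random.seed(71294123)                     # reproducible fixtures
        self.node_pos = np.random.uniform(size=(5, 3))
        self.node_val = np.arange(5, dtype=float)

    def test_returns_expected_type(self):
        model = build_model(self.node_pos, self.node_val)
        self.assertIsInstance(model, dict)           # type-level smoke test

if __name__ == '__main__':
    unittest.main()
```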
derdon/sunpy | [
"619102cd48c73a326c45263369446be9b74366e8"
] | [
"sunpy/wcs/wcs.py"
] | [
"from __future__ import absolute_import\n\nimport numpy as np\nimport sunpy.sun as sun\n\nimport astropy.units as u\n\nrsun_meters = sun.constants.radius.si.value\n\n__all__ = ['_convert_angle_units', 'convert_pixel_to_data', 'convert_hpc_hg',\n 'convert_data_to_pixel', 'convert_hpc_hcc', 'convert_hcc_hpc',\n 'convert_hcc_hg', 'convert_hg_hcc', 'proj_tan',\n 'convert_hg_hpc', 'convert_to_coord',\n 'get_center']\n\ndef _convert_angle_units(unit='arcsec'):\n \"\"\"Determine the conversion factor between the data units and radians.\"\"\"\n if unit == 'degrees':\n return np.deg2rad(1)\n elif unit == 'arcmin':\n return np.deg2rad(1) / 60.0\n elif unit == 'arcsec':\n return np.deg2rad(1) / (60 * 60.0)\n elif unit == 'mas':\n return np.deg2rad(1) / (60 * 60 * 1000.0)\n else:\n raise ValueError(\"The units specified are either invalid or is not supported at this time.\")\n\ndef convert_pixel_to_data(size, scale, reference_pixel,\n reference_coordinate, x=None, y=None):\n \"\"\"Calculate the data coordinate for particular pixel indices.\n\n Parameters\n ----------\n size : 2d ndarray\n Number of pixels in width and height.\n scale : 2d ndarray\n The size of a pixel (dx,dy) in data coordinates (equivalent to WCS/CDELT)\n reference_pixel : 2d ndarray\n The reference pixel (x,y) at which the reference coordinate is given (equivalent to WCS/CRPIX)\n reference_coordinate : 2d ndarray\n The data coordinate (x, y) as measured at the reference pixel (equivalent to WCS/CRVAL)\n x,y : int or ndarray\n The pixel values at which data coordinates are requested. If none are given,\n returns coordinates for every pixel.\n\n Returns\n -------\n out : ndarray\n The data coordinates at pixel (x,y).\n\n Notes\n -----\n This function assumes a gnomic projection which is correct for a detector at the focus\n of an optic observing the Sun.\n\n Examples\n --------\n\n \"\"\"\n cdelt = np.array(scale)\n crpix = np.array(reference_pixel)\n crval = np.array(reference_coordinate)\n\n # first assume that coord is just [x,y]\n if (x is None) and (y is None):\n x, y = np.meshgrid(np.arange(size[0]), np.arange(size[1]))\n\n # note that crpix[] counts pixels starting at 1\n\n coordx = (x - (crpix[0] - 1)) * cdelt[0] + crval[0]\n coordy = (y - (crpix[1] - 1)) * cdelt[1] + crval[1]\n\n # Correct for Gnomic projection\n coordx, coordy = proj_tan(coordx, coordy)\n\n return coordx, coordy\n\ndef get_center(size, scale, reference_pixel, reference_coordinate):\n \"\"\"Returns the center of the image in data coordinates.\n\n Parameters\n ----------\n size : 2d ndarray\n Number of pixels in width and height.\n scale : 2d ndarray\n The size of a pixel (dx,dy) in data coordinates (equivalent to WCS/CDELT)\n reference_pixel : 2d ndarray\n The reference pixel (x,y) at which the reference coordinate is given (equivalent to WCS/CRPIX)\n reference_coordinate : 2d ndarray\n The data coordinate (x, y) as measured at the reference pixel (equivalent to WCS/CRVAL)\n\n Returns\n -------\n out : ndarray\n The data coordinates\n\n Examples\n --------\n\n \"\"\"\n return scale * (size - 1 * u.pix) / 2. 
+ reference_coordinate - (reference_pixel - 1 * u.pix) * scale\n\ndef convert_data_to_pixel(x, y, scale, reference_pixel, reference_coordinate):\n \"\"\"Calculate the pixel indices for a given data coordinate.\n\n Parameters\n ----------\n x, y : float\n Data coordinate in same units as reference coordinate\n scale : 2d ndarray\n The size of a pixel (dx,dy) in data coordinates (equivalent to WCS/CDELT)\n reference_pixel : 2d ndarray\n The reference pixel (x,y) at which the reference coordinate is given (equivalent to WCS/CRPIX)\n reference_coordinate : 2d ndarray\n The data coordinate (x, y) as measured at the reference pixel (equivalent to WCS/CRVAL)\n\n Returns\n -------\n out : ndarray\n The pixel coordinates (x,y) at that data coordinate.\n\n Examples\n --------\n\n \"\"\"\n\n # TODO: Needs to check what coordinate system the data is given in\n cdelt = np.array(scale)\n crpix = np.array(reference_pixel)\n crval = np.array(reference_coordinate)\n # De-apply any tabular projections.\n # coord = inv_proj_tan(coord)\n\n # note that crpix[] counts pixels starting at 1\n pixelx = (x - crval[0]) / cdelt[0] + (crpix[0] - 1)\n pixely = (y - crval[1]) / cdelt[1] + (crpix[1] - 1)\n\n return pixelx, pixely\n\ndef convert_hpc_hcc(x, y, dsun_meters=None, angle_units='arcsec', z=False):\n \"\"\"Converts from Helioprojective-Cartesian (HPC) coordinates into\n Heliocentric-Cartesian (HCC) coordinates. Returns all three dimensions, x, y, z in\n meters.\n\n Parameters\n ----------\n x, y : float\n Data coordinate in angle units (default is arcsec)\n dsun_meters : float\n Distance from the observer to the Sun in meters. Default is 1 AU.\n angle_units : str\n Units of the data coordinates (e.g. arcsec, arcmin, degrees). Default is arcsec.\n z : Bool\n If true return the z coordinate as well.\n\n Returns\n -------\n out : ndarray\n The data coordinates (x,y,z) in heliocentric cartesian coordinates in meters.\n\n Notes\n -----\n Implements Eq. (15) of Thompson (2006), A&A, 449, 791.\n\n Examples\n --------\n >>> import sunpy.wcs\n >>> sunpy.wcs.convert_hpc_hcc(40.0, 32.0, z=True)\n (28876152.176423457, 23100922.071266972, 694524220.8157959)\n\n \"\"\"\n c = np.array([_convert_angle_units(unit=angle_units),\n _convert_angle_units(unit=angle_units)])\n\n cosx = np.cos(x * c[0])\n sinx = np.sin(x * c[0])\n cosy = np.cos(y * c[1])\n siny = np.sin(y * c[1])\n\n if dsun_meters is None:\n dsun_meters = sun.constants.au.si.value\n elif isinstance(dsun_meters, u.Quantity):\n dsun_meters = dsun_meters.si.value\n\n q = dsun_meters * cosy * cosx\n distance = q ** 2 - dsun_meters ** 2 + rsun_meters ** 2\n # distance[np.where(distance < 0)] = np.sqrt(-1)\n distance = q - np.sqrt(distance)\n\n rx = distance * cosy * sinx\n ry = distance * siny\n rz = dsun_meters - distance * cosy * cosx\n\n\n if np.all(z == True):\n return rx, ry, rz\n else:\n return rx, ry\n\ndef convert_hcc_hpc(x, y, dsun_meters=None, angle_units='arcsec'):\n \"\"\"Convert Heliocentric-Cartesian (HCC) to angular\n Helioprojective-Cartesian (HPC) coordinates (in degrees).\n\n Parameters\n ----------\n x, y : float (meters)\n Data coordinate in meters.\n dsun_meters : float\n Distance from the observer to the Sun in meters. Default is 1 AU.\n angle_units : str\n Units of the data coordinates (e.g. arcsec, arcmin, degrees). Default is arcsec.\n\n Returns\n -------\n out : ndarray\n The data coordinates (x,y) in helioprojective cartesian coordinates in arcsec.\n\n Notes\n -----\n Implements Eq. 
(16) of Thompson (2006), A&A, 449, 791.\n\n Examples\n --------\n >>> import sunpy.wcs\n >>> sunpy.wcs.convert_hcc_hpc(28748691, 22998953)\n (39.823439773829705, 31.858751644835717)\n\n \"\"\"\n\n # Calculate the z coordinate by assuming that it is on the surface of the Sun\n z = np.sqrt(rsun_meters ** 2 - x ** 2 - y ** 2)\n\n if dsun_meters is None:\n dsun_meters = sun.constants.au.si.value\n elif isinstance(dsun_meters, u.Quantity):\n dsun_meters = dsun_meters.si.value\n\n zeta = dsun_meters - z\n distance = np.sqrt(x**2 + y**2 + zeta**2)\n hpcx = np.rad2deg(np.arctan2(x, zeta))\n hpcy = np.rad2deg(np.arcsin(y / distance))\n\n if angle_units == 'arcsec':\n hpcx = 60 * 60 * hpcx\n hpcy = 60 * 60 * hpcy\n elif angle_units == 'arcmin':\n hpcx = 60 * hpcx\n hpcy = 60 * hpcy\n\n return hpcx, hpcy\n\ndef convert_hcc_hg(x, y, z=None, b0_deg=0, l0_deg=0, radius=False):\n \"\"\"Convert from Heliocentric-Cartesian (HCC) (given in meters) to\n Stonyhurst Heliographic coordinates (HG) given in degrees, with\n radial output in meters.\n\n Parameters\n ----------\n x, y : float (meters)\n Data coordinate in meters.\n z : float (meters)\n Data coordinate in meters. If None, then the z-coordinate is assumed\n to be on the Sun.\n b0_deg : float (degrees)\n Tilt of the solar North rotational axis toward the observer\n (heliographic latitude of the observer). Usually given as SOLAR_B0,\n HGLT_OBS, or CRLT_OBS. Default is 0.\n l0_deg : float (degrees)\n Carrington longitude of central meridian as seen from Earth. Default is 0.\n radius : Bool\n If true, forces the output to return a triple of (lon, lat, r). If\n false, return (lon, lat) only.\n\n Returns\n -------\n out : ndarray (degrees, meters)\n if radius is false, return the data coordinates (lon, lat). If\n radius=True, return the data coordinates (lon, lat, r). The quantities\n (lon, lat) are the heliographic coordinates in degrees. The quantity\n 'r' is the heliographic radius in meters.\n\n Notes\n -----\n Implements Eq. (12) of Thompson (2006), A&A, 449, 791.\n\n Examples\n --------\n >>> import sunpy.wcs\n >>> sunpy.wcs.convert_hcc_hg(230000.0,45000000.0,\n ... z=695508000.0 + 8000000.0, radius=True)\n (0.01873188196651189, 3.6599471896203317, 704945784.41465974)\n \"\"\"\n if z is None:\n z = np.sqrt(rsun_meters**2 - x**2 - y**2)\n\n cosb = np.cos(np.deg2rad(b0_deg))\n sinb = np.sin(np.deg2rad(b0_deg))\n\n hecr = np.sqrt(x**2 + y**2 + z**2)\n hgln = np.arctan2(x, z * cosb - y * sinb) + np.deg2rad(l0_deg)\n hglt = np.arcsin((y * cosb + z * sinb) / hecr)\n\n if radius:\n return np.rad2deg(hgln), np.rad2deg(hglt), hecr\n else:\n return np.rad2deg(hgln), np.rad2deg(hglt)\n\ndef convert_hg_hcc(hglon_deg, hglat_deg, b0_deg=0, l0_deg=0, occultation=False,\n z=False, r=rsun_meters):\n \"\"\"Convert from Stonyhurst Heliographic coordinates (given in degrees) to\n Heliocentric-Cartesian coordinates (given in meters).\n\n Parameters\n ----------\n hglon_deg, hglat_deg : float (degrees)\n Heliographic longitude and latitude in degrees.\n b0_deg : float (degrees)\n Tilt of the solar North rotational axis toward the observer\n (heliographic latitude of the observer). Usually given as SOLAR_B0,\n HGLT_OBS, or CRLT_OBS. Default is 0.\n l0_deg : float (degrees)\n Carrington longitude of central meridian as seen from Earth. Default is 0.\n occultation : Bool\n If true set all points behind the Sun (e.g. 
not visible) to Nan.\n z : Bool\n If true return the z coordinate as well.\n r : float (meters)\n Heliographic radius\n\n Returns\n -------\n out : ndarray (meters)\n The data coordinates in Heliocentric-Cartesian coordinates.\n\n Notes\n -----\n Implements Eq. (11) of Thompson (2006), A&A, 449, 791, with the default\n assumption that the value 'r' in Eq. (11) is identical to the radius of the\n Sun.\n\n Examples\n --------\n >>> import sunpy.wcs\n >>> sunpy.wcs.convert_hg_hcc(0.01873188196651189, 3.6599471896203317,\n ... r=704945784.41465974, z=True)\n (230000.0, 45000000.0, 703508000.0)\n \"\"\"\n lon = np.deg2rad(hglon_deg)\n lat = np.deg2rad(hglat_deg)\n\n cosb = np.cos(np.deg2rad(b0_deg))\n sinb = np.sin(np.deg2rad(b0_deg))\n\n lon = lon - np.deg2rad(l0_deg)\n\n cosx = np.cos(lon)\n sinx = np.sin(lon)\n cosy = np.cos(lat)\n siny = np.sin(lat)\n\n # Perform the conversion.\n x = r * cosy * sinx\n y = r * (siny * cosb - cosy * cosx * sinb)\n zz = r * (siny * sinb + cosy * cosx * cosb)\n\n if occultation:\n x[zz < 0] = np.nan\n y[zz < 0] = np.nan\n\n if np.all(z == True):\n return x, y, zz\n else:\n return x, y\n\ndef convert_hg_hpc(hglon_deg, hglat_deg, b0_deg=0, l0_deg=0, dsun_meters=None, angle_units='arcsec',\n occultation=False):\n \"\"\"Convert from Heliographic coordinates (HG) to Helioprojective-Cartesian\n (HPC).\n\n Parameters\n ----------\n hglon_deg, hglat_deg : float (degrees)\n Heliographic longitude and latitude in degrees.\n b0_deg : float (degrees)\n Tilt of the solar North rotational axis toward the observer\n (heliographic latitude of the observer). Usually given as SOLAR_B0,\n HGLT_OBS, or CRLT_OBS. Default is 0.\n l0_deg : float (degrees)\n Carrington longitude of central meridian as seen from Earth. Default is 0.\n occultation : Bool\n If true set all points behind the Sun (e.g. not visible) to Nan.\n dsun_meters : float (meters)\n Distance between the observer and the Sun.\n angle_units : str\n\n\n Returns\n -------\n out : ndarray (arcsec)\n The data coordinates (x,y) in Helioprojective-Cartesian coordinates.\n\n Notes\n -----\n Uses equations 11 and 16 in Thompson (2006), A&A, 449, 791-803.\n\n Examples\n --------\n >>> import sunpy.wcs\n >>> sunpy.wcs.convert_hg_hpc(34.0, 45.0, b0_deg=-7.064078, l0_deg=0.0)\n (380.05656560308898, 743.78281283290016)\n \"\"\"\n\n tempx, tempy = convert_hg_hcc(hglon_deg, hglat_deg, b0_deg=b0_deg, l0_deg=l0_deg, occultation=occultation)\n x, y = convert_hcc_hpc(tempx, tempy, dsun_meters=dsun_meters, angle_units=angle_units)\n return x, y\n\ndef convert_hpc_hg(x, y, b0_deg=0, l0_deg=0, dsun_meters=None, angle_units='arcsec'):\n \"\"\"Convert from Helioprojective-Cartesian (HPC) to Heliographic coordinates\n (HG) in degrees.\n\n Parameters\n ----------\n x, y : float ()\n Data coordinate in angle units.\n b0 : float (degrees)\n Tilt of the solar North rotational axis toward the observer\n (heliographic latitude of the observer). Usually given as SOLAR_B0,\n HGLT_OBS, or CRLT_OBS. Default is 0.\n l0 : float (degrees)\n Carrington longitude of central meridian as seen from Earth. Default is 0.\n dsun_meters : float (meters)\n Distance between the observer and the Sun.\n angle_units : str\n Units used for input x and y. 
Default is arcsec.\n\n Returns\n -------\n out : ndarray (degrees)\n The data coordinates (hglongitude, hglatitude) in Heliographic coordinates.\n\n Notes\n -----\n Uses equations 15 and 12 in Thompson (2006), A&A, 449, 791-803.\n\n Examples\n --------\n >>> import sunpy.wcs\n >>> sunpy.wcs.convert_hpc_hg(382, 748, b0_deg=-7.064078, l0_deg=0.0)\n (34.504653439914669, 45.443143275518182)\n \"\"\"\n tempx, tempy = convert_hpc_hcc(x, y, dsun_meters=dsun_meters, angle_units=angle_units)\n lon, lat = convert_hcc_hg(tempx, tempy, b0_deg=b0_deg, l0_deg=l0_deg)\n return lon, lat\n\ndef proj_tan(x, y, force=False):\n \"\"\"Applies the gnomonic (TAN) projection to intermediate relative\n coordinates. This function is not currently implemented!\"\"\"\n # if pixels are within 3 degrees of the Sun then skip the calculation unless\n # force is True. This applies to all sdo images so this function is just\n # here as a place holder for the future\n # TODO: write proj_tan function\n return x, y\n\ndef convert_to_coord(x, y, from_coord, to_coord, b0_deg=0, l0_deg=0, dsun_meters=None, angle_units='arcsec'):\n \"\"\"Apply a coordinate transform to coordinates. Right now can only do hpc\n to hcc to hg\"\"\"\n\n if (from_coord == 'hcc') and (to_coord == 'hg'):\n rx, ry = convert_hcc_hg(x, y, b0_deg=b0_deg, l0_deg=l0_deg)\n elif (from_coord == 'hpc') and (to_coord == 'hg'):\n rx, ry = convert_hpc_hg(x, y, b0_deg=b0_deg, l0_deg=l0_deg, dsun_meters=dsun_meters, angle_units=angle_units)\n elif (from_coord == 'hg') and (to_coord == 'hcc'):\n rx, ry = convert_hg_hcc(x, y, b0_deg=b0_deg, l0_deg=l0_deg)\n elif (from_coord == 'hcc') and (to_coord == 'hpc'):\n rx, ry = convert_hcc_hpc(x, y, dsun_meters=dsun_meters, angle_units=angle_units)\n elif (from_coord == 'hg') and (to_coord == 'hpc'):\n rx, ry = convert_hg_hpc(x, y, b0_deg=b0_deg, l0_deg=l0_deg, dsun_meters=dsun_meters, angle_units=angle_units)\n elif (from_coord == 'hpc') and (to_coord == 'hcc'):\n rx, ry = convert_hpc_hcc(x, y, dsun_meters=dsun_meters, angle_units=angle_units)\n\n return rx, ry\n"
] | [
[
"numpy.sqrt",
"numpy.arctan2",
"numpy.arcsin",
"numpy.rad2deg",
"numpy.cos",
"numpy.arange",
"numpy.all",
"numpy.array",
"numpy.sin",
"numpy.deg2rad"
]
] |
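The sunpy module in the row above implements the linear FITS-WCS mapping (CRPIX/CDELT/CRVAL) plus the Thompson (2006) coordinate transforms. A minimal round-trip sketch of just the pixel/data mapping under the same 1-based CRPIX convention (the numeric values are made up; note that `pixelx` must use `crpix[0]`, as corrected in `convert_data_to_pixel` above):

```python
import numpy as np

cdelt = np.array([0.6, 0.6])      # arcsec per pixel (WCS CDELT)
crpix = np.array([512.5, 512.5])  # reference pixel, 1-based (WCS CRPIX)
crval = np.array([0.0, 0.0])      # data coordinate at CRPIX (WCS CRVAL)

def pixel_to_data(px, py):
    # crpix counts from 1, pixel indices from 0, hence the (crpix - 1)
    return ((px - (crpix[0] - 1)) * cdelt[0] + crval[0],
            (py - (crpix[1] - 1)) * cdelt[1] + crval[1])

def data_to_pixel(x, y):
    # exact inverse of pixel_to_data, component by component
    return ((x - crval[0]) / cdelt[0] + (crpix[0] - 1),
            (y - crval[1]) / cdelt[1] + (crpix[1] - 1))

x, y = pixel_to_data(100.0, 200.0)
assert np.allclose(data_to_pixel(x, y), (100.0, 200.0))
```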
Julio-Felix/socket-python | [
"93b6ce44dd88c2af49e7702bb16c69bc4f55240d"
] | [
"transmissor.py"
] | [
"import socket\r\nimport numpy as np # pip install numpy\r\n\r\n\r\nsocketUDP = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\r\ntransmissor = (\"127.0.0.1\", 2020)\r\nreceptor = (\"127.0.0.1\", 3030)\r\nsocketUDP.bind(transmissor)\r\nbuff_size = 10000\r\n\r\nnext_sequence_number = 0\r\n\r\n\r\ndef calculate_checksum(data):\r\n data_sum = np.uint16(0)\r\n for element in data:\r\n data_sum += element\r\n return np.invert(data_sum)\r\n\r\n\r\ndef verify_checksum(data):\r\n data_sum = np.uint16(0)\r\n for element in data:\r\n data_sum += element\r\n return data_sum == 0xFFFF\r\n\r\n\r\ndef udt_send(packet):\r\n socketUDP.sendto(packet.tobytes(), receptor)\r\n\r\n\r\ndef rdt_rcv():\r\n while True:\r\n message, source = socketUDP.recvfrom(buff_size)\r\n if source == receptor:\r\n return np.frombuffer(message, dtype=np.uint16)\r\n\r\n\r\ndef rdt_send(data):\r\n global next_sequence_number\r\n\r\n sndpkt = np.array([], np.uint16)\r\n sndpkt = np.append(sndpkt, np.uint16(next_sequence_number))\r\n sndpkt = np.append(sndpkt, np.uint16(0)) # checksum\r\n sndpkt = np.concatenate((sndpkt, data))\r\n\r\n sndpkt[1] = calculate_checksum(sndpkt)\r\n udt_send(sndpkt)\r\n\r\n while True:\r\n rcvpkt = rdt_rcv()\r\n is_corrupt = not verify_checksum(rcvpkt)\r\n is_ack = rcvpkt[2] == True\r\n is_nack = rcvpkt[2] == False\r\n print(\"Dados recebidos \", rcvpkt)\r\n print(\"Está corrompido? \", is_corrupt)\r\n print(\"Está Nack? \", is_nack)\r\n print(\"Está Ack? \", is_ack)\r\n print(\"Seq Num? \", next_sequence_number)\r\n \r\n if is_corrupt or is_nack:\r\n udt_send(sndpkt)\r\n if is_ack and not is_corrupt:\r\n break\r\n\r\n if next_sequence_number == 0:\r\n next_sequence_number = 1\r\n else:\r\n next_sequence_number = 0\r\n\r\n\r\nif __name__ == \"__main__\":\r\n i = 1\r\n while i <= 3:\r\n dados = np.random.randint(5, size=10, dtype=np.uint16)\r\n print(f'Dados a serem enviados {dados}')\r\n rdt_send(dados)\r\n i+=1\r\n"
] | [
[
"numpy.invert",
"numpy.uint16",
"numpy.array",
"numpy.concatenate",
"numpy.random.randint",
"numpy.frombuffer"
]
] |
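`transmissor.py` above is a stop-and-wait sender: each packet carries an alternating sequence number and a 16-bit complemented wraparound checksum, and corrupt or NACKed packets are retransmitted until a clean ACK arrives. A pure-Python restatement of just the checksum invariant — the helper names are hypothetical, and explicit masking replaces NumPy's silent `uint16` overflow:

```python
MASK = 0xFFFF

def checksum16(words):
    s = 0
    for w in words:
        s = (s + w) & MASK        # discard carries, as uint16 addition does
    return (~s) & MASK            # complement of the running sum

def verify16(words_with_checksum):
    s = 0
    for w in words_with_checksum:
        s = (s + w) & MASK
    return s == MASK              # S + ~S == 0xFFFF when the packet is intact

payload = [7, 1024, 65535, 3]
pkt = payload + [checksum16(payload)]
assert verify16(pkt)
assert not verify16([w + 1 for w in pkt[:1]] + pkt[1:])   # flipped word fails
```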
UKPLab/linspector | [
"46a7cca6ad34dc673feb47c4d452f1248d5e635b"
] | [
"intrinsic/evaluation/classifiers/embeddings/sentence_embedding.py"
] | [
"import codecs\nfrom collections import defaultdict\n\nimport torch\nfrom allennlp.common import Params\nfrom allennlp.data import Vocabulary\nfrom allennlp.modules.token_embedders.token_embedder import TokenEmbedder\n\n\[email protected](\"sentence_embedding\")\nclass SentenceEmbedding(TokenEmbedder):\n \"\"\"\n Embedder for contextual embeddings. which reads a file of the format 'sentence TAB index TAB vector'.\n \"\"\"\n\n def read_file(self, path):\n self.embs = defaultdict(lambda: defaultdict())\n with codecs.open(path, encoding='utf-8') as f:\n for line in f:\n # Read sentence, index and word vector\n sp = line.split(\"\\t\")\n vector_str = sp[2]\n vector = []\n for n in vector_str.split(\" \"):\n try:\n vector.append(float(n))\n except ValueError:\n break\n index = int(sp[1])\n sentence = sp[0]\n\n # Save vector in a dict\n self.embs[sentence][index] = vector\n\n def get_output_dim(self) -> int:\n return self.output_dim\n\n def forward(self, # pylint: disable=arguments-differ\n inputs: torch.Tensor,\n word_inputs: torch.Tensor = None) -> torch.Tensor:\n \"\"\"\n\n :param inputs: list of sentences (sentence = list of token indices)\n :param word_inputs: not used\n :return: tensor which contains a list of embedded sentences (every sentence is a list of word vectors)\n \"\"\"\n if self.output_dim is None or self.output_dim == 0:\n raise NotImplementedError\n\n # Get tokens from token indices\n max_sentences_length = len(inputs[0].tolist())\n sentences = []\n for i in inputs:\n token_list = []\n for j in i:\n if j.item() != 0:\n token = self.vocab.get_token_from_index(j.item())\n token_list += [token]\n sentences += [token_list]\n\n sentence_emb = []\n\n # Read the embeddings from the dict\n for sentence_list in sentences:\n sentence = \" \".join(sentence_list[0:-1])\n index = int(sentence_list[-1])\n\n try:\n word_embedding = self.embs[sentence][index]\n except KeyError:\n print(\"KEY ERROR \" + sentence + \" INDEX \" + str(index))\n word_embedding = [0] * self.output_dim\n\n vector_list = []\n\n # Add zeros to the returning tensor for all tokens without vectors. AllenNLP wants an embedding for every token\n if index != 0:\n for i in range(0, index):\n vector_list += [[0] * self.output_dim]\n vector_list += [word_embedding]\n\n for i in range(0, max_sentences_length - index - 1):\n vector_list += [[0] * self.output_dim]\n\n sentence_emb += [vector_list]\n\n # Create tensor\n device = inputs.device\n # print(sentence_emb)\n tensor = torch.tensor(sentence_emb, device=device)\n\n return tensor\n\n @classmethod\n def from_params(cls, vocab: Vocabulary, params: Params) -> 'SentenceEmbedding':\n cls.vocab = vocab\n embedding_dim = params[\"embedding_dim\"]\n pretrained_file = params[\"pretrained_vector_file\"]\n return cls(pretrained_file, embedding_dim)\n\n def __init__(self, file, vector_size) -> None:\n super().__init__()\n self.embs = {}\n self.output_dim = vector_size\n self.read_file(file)\n"
] | [
[
"torch.tensor"
]
] |
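`SentenceEmbedding.read_file` above parses lines of the form `sentence<TAB>index<TAB>vector` into a nested dict keyed by sentence and token index. A self-contained sketch of that parser, under the same format assumption (the function name is hypothetical):

```python
from collections import defaultdict

def read_contextual_embeddings(lines):
    """Parse 'sentence<TAB>index<TAB>vector' lines into embs[sentence][index]."""
    embs = defaultdict(dict)
    for line in lines:
        sentence, index, vector_str = line.rstrip('\n').split('\t')
        vector = [float(n) for n in vector_str.split() if n]
        embs[sentence][int(index)] = vector
    return embs

demo = ["the cat sat\t1\t0.1 0.2 0.3\n"]
embs = read_contextual_embeddings(demo)
assert embs["the cat sat"][1] == [0.1, 0.2, 0.3]
```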
spatchcock/models | [
"b97eef75d080c903cc6280b1d5955033d14bcf84",
"b97eef75d080c903cc6280b1d5955033d14bcf84"
] | [
"normal.py",
"foraminifera/foraminiferal_test_accumulation_time_evolution.py"
] | [
"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Jan 24 22:11:12 2014\n\n@author: spatchcock\n\"\"\"\n\nimport math\nimport numpy\nimport matplotlib.pyplot as plt\n\n# Plot the normal distribution function as well as its first and second derivatives\n# \n# Use the numpy.vectorize function to handle array manupulation\n\n# http://statistics.about.com/od/Mathstat/a/Inflection-Points-Of-The-Probability-Density-Function-Of-A-Normal-Distribution.htm\n\ndef norm(x, mean, sd):\n var = sd**2\n pi = 3.1415926\n denom = (2*pi*var)**.5\n num = math.exp(-(x-mean)**2/(2*var))\n return num/denom\n \ndef norm_first_deriv(x, mean, std):\n return -(x-mean)*norm(x, mean, std)/std**2\n\n \ndef norm_second_deriv(x, mean, std):\n return -norm(x, mean, std)/std**2 + (x-mean)**2*norm(x, mean, std)/std**4\n \n \nv_norm = numpy.vectorize(norm)\nv_norm_first_deriv = numpy.vectorize(norm_first_deriv)\nv_norm_second_deriv = numpy.vectorize(norm_second_deriv)\n\n\nmean = 0\nstd = 1.9\na = numpy.arange(-5,5,0.1)\nb = v_norm(a, mean, std)\nc = v_norm_first_deriv(a, mean, std)\nd = v_norm_second_deriv(a, mean, std)\n\nfig = plt.figure()\n\nnorm = fig.add_subplot(111, xlim=(-6, 6), ylim=(-1, 1))\nnorm.grid()\n\nline, = norm.plot([], [], lw=3, color='r')\nline.set_data(a,b)\n\nfirst = fig.add_subplot(111, xlim=(-6, 6), ylim=(-1, 1))\nline, = first.plot([], [], lw=3, color='b')\nline.set_data(a,c)\n\nsecond = fig.add_subplot(111, xlim=(-6, 6), ylim=(-1, 1))\nline, = second.plot([], [], lw=3, color='g')\nline.set_data(a,d)\n\n\nstddev = fig.add_subplot(111, xlim=(-6, 6), ylim=(-1, 1))\nline, = stddev.plot([], [], lw=3, color='y')\nline.set_data([-std, -std],[-1,1])\n\n\nconstant = fig.add_subplot(111, xlim=(-6, 6), ylim=(-1, 1))\nline, = constant.plot([], [], lw=3, color='b')\nline.set_data([-6, 6],[0.1,0.1])",
"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Apr 15 22:52:11 2014\n\n@author: spatchcock\n\"\"\"\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.animation as animation\n\n# Advection - diffusion - Decay - production\n#\n# Differential equation\n#\n# dC/dt = D(d^2C/dx^2) - w(dC/dx) - uC + Ra(x)\n# \n# Difference equation\n#\n# (C{x,t} = C{x,t-1} + dt * [D*(C{x+1,t-1} - 2C{x, t-1} + C{x-1,t-1})/dx^2 - w(C{x+1,t-1} - C{x-1,t-1})/2dx - u*C{x,t-1} + Ra{x}]\n#\n# Initial conditions\n#\n# C(x,0) = R{x}\n#\n\n# %% DEFINE NUMERICAL SCHEME\n\nmax_depth = 30.0 # maximum depth of domain of interest\nN_x = 101 # number of nodes across 1D domain\ndx = max_depth/N_x # cell size (regular/uniform)\nsigma = 0.1 # CFL sigma value. Scales the timestep according to the depth step. \n # Ensures timestep is sufficiently smaller that distance step to provide\n # stability (although this also depends on the sedimentation rate)\ndt = sigma*dx # time step\n\n\n# %% SET UP PLACEHOLDER ARRAYS FOR PARAMETERS AND VARIABLES\n\n# Each parameter and variable will be represented by a value at each node across\n# the 1D domain.\n\n# Dependent and independent variables (C, x)\nx = np.linspace(0.0,max_depth,N_x) # depth\nC = np.zeros(N_x) # concentration\n\n# Parameters - Each parameter can, in principle vary with depth, x. Initialise arrays \n# for each, although we can set a constant value for all x if required.\nRa = np.zeros(N_x) # production (product of standing crop, a, and reproduction rate, R)\nD = np.zeros(N_x) # diffusion (mixing rate)\nu = np.zeros(N_x) # taphonomic decay rate\nw = np.zeros(N_x) # advection speed (sedimentation rate)\nCu = np.zeros(N_x) # placeholder for memoizing previous timestep concentrations\n\n\n# %% DEFINE DEPTH-DEPENDENT FUNCTION FOR TAPHONOMIC DECAY\n\n# It is likely that taphonomic decay decreases with depth so most circumstances probably\n# require a function for the taphonomic decay rate that decrease through the domain. In\n# some circumstances, considering decay rates to be constant across some or all of the domain\n# might be appropriate. Three choices are presented below. Comment/uncomment as required\n# or set u[] to another appropriate function of depth.\n\n# Constant function\n\n# This simply sets the same decay rate for all values of x.\n\n# u[:] = 0.005\n\n\n# Decreasing function\n\n# This drescribes taphonic decay rate as decreasing exponential with depth frmom\n# some maximum value at the surface. This is the simplest decreasing function that\n# asymptotes with depth.\n\nu_0 = 0.005 # value at surface, i.e. x = 0\nu_attenuation = 0.5 # rate at which decay rate decreases with depth\nu[0:] = u_0 * np.exp(-u_attenuation*x[0:]) # exponentially decreasing taphonomic decay rate\n\n\n# Step function\n\n# This sets the decay rate as a constant across some limited upper interval of the\n# sediment. This resembles the commonly invoked concept of the Taphonomically Active Zone\n# (the \"TAZ\"). Of course, any other more complicated step function could be defined in a \n# similar way.\n\n# max_depth_decay = 10.0 # Maximum depth of decay\n# max_x_decay = int(max_depth_decay/max_depth*N_x) # Index of maximum decay depth\n# u[0:max_x_decay] = 0.005 # Step function\n\n\n# %% DEFINE DEPTH DEPENDENT FUNCTION FOR SEDIMENTATION RATE\n\n# In principle, sedimentation rate may have varied during the time in which a given\n# sediment interval has accumulated. 
For now, we'll just assume that it is constant.\n\n# Constant function\nw[:] = 0.6 \n\n\n# %% DEFINE DEPTH DEPENDENT FUNCTION FOR MIXING/BIOTURBATION\n\n# constant in upper mixed zone, zero below\nmax_depth_mixing = 15.0\nmax_x_mixing = int(max_depth_mixing/max_depth*N_x)\nD[0:max_x_mixing] = 0.2399 \n\n\n# %% DEFINE DEPTH-DEPENDENT FUNCTION FOR TEST PRODUCTION\n\nRa_0 = 30.0\nRa_attenuation = 0.05\nRa_peak_depth = 2\nRa_gamma = 4\nmax_x_Ra = int(Ra_peak_depth/max_depth*N_x)\n\n#Ra[0:max_x_Ra] = Ra_0 # constant over interval\n#Ra[0:] = Ra_0 * np.exp(-Ra_attenuation*x[0:]) # exponential decrease\nRa[0:] = Ra_0 * np.exp(-Ra_attenuation*(x[0:]-Ra_peak_depth)**Ra_gamma) # subsurface peak, normally distributed\n\n\n# %% IMPLEMENT DISCRETIZED EQUATION AS INVOKABLE TIMESTEP FUNCTION\n\ndef step():\n # memoize last timestep\n Cu[:] = C[:]\n \n # boundary, surficial layer (x=0)\n C[0] = dt * Ra[0]\n \n # Interior points \n C[1:-1] = Cu[1:-1] + dt * (D[1:-1]*(Cu[2:] - 2.0*Cu[1:-1] + Cu[0:-2])/dx**2.0 - w[1:-1]*(Cu[2:] - Cu[0:-2])/(2.0*dx) - u[1:-1]*Cu[1:-1] + Ra[1:-1])\n \n # boundary, bottommost layer (x=max_depth)\n C[-1] = C[-2] \n\n\n# %% SET UP PLOTS\n\nfig = plt.figure()\n\nRa_plot = fig.add_subplot(151, ylim=(max_depth, 0), xlim=(0, max(Ra)*1.5))\nRa_line, = Ra_plot.plot([], [], lw=3)\nRa_plot.grid()\nRa_plot.axes.get_xaxis().set_ticks([0.0, max(Ra)])\nRa_plot.set_xlabel('Ra')\n\nD_plot = fig.add_subplot(152, ylim=(max_depth, 0), xlim=(0, max(D)*1.5))\nD_line, = D_plot.plot([], [], lw=3)\nD_plot.grid()\nD_plot.axes.get_yaxis().set_ticklabels([])\nD_plot.axes.get_xaxis().set_ticks([0.0, max(D)])\nD_plot.set_xlabel('D')\n\nw_plot = fig.add_subplot(153, ylim=(max_depth, 0), xlim=(0, max(w)*1.5))\nw_line, = w_plot.plot([], [], lw=3)\nw_plot.grid()\nw_plot.axes.get_yaxis().set_ticklabels([])\nw_plot.axes.get_xaxis().set_ticks([0.0, max(w)])\nw_plot.set_xlabel('w')\n\nu_plot = fig.add_subplot(154, ylim=(max_depth, 0), xlim=(0, max(u)*1.5))\nu_line, = u_plot.plot([], [], lw=3)\nu_plot.grid()\nu_plot.axes.get_yaxis().set_ticklabels([])\nu_plot.axes.get_xaxis().set_ticks([0.0, max(u)])\nu_plot.set_xlabel('u')\n\nC_plot = fig.add_subplot(155, ylim=(max_depth, 0), xlim=(0, 1000))\nC_line, = C_plot.plot([], [], lw=3)\nstep_text = C_plot.text(0.2, 0.02, '', transform=C_plot.transAxes)\nC_plot.grid()\nC_plot.axes.get_yaxis().set_ticklabels([])\nC_plot.set_xlabel('C')\n\nplt.subplots_adjust(wspace=0.1)\n\n# %% SET ANIMATION\n\n# Clear frame on each iteration\ndef init():\n # Reset each line\n Ra_line.set_data([], [])\n D_line.set_data([], [])\n w_line.set_data([], [])\n u_line.set_data([], [])\n C_line.set_data([], [])\n \n return Ra_line,D_line,w_line,u_line,C_line, \n\n\n# Invoke model timestep and replot data on each iteration\ndef animate(i):\n # Iterate model\n step()\n \n # Update each line\n Ra_line.set_data(Ra, x)\n D_line.set_data(D, x)\n w_line.set_data(w, x)\n u_line.set_data(u, x)\n C_line.set_data(C, x)\n\n step_text.set_text('iter: %.1f' % i)\n\n return Ra_line,D_line,w_line,u_line,C_line,step_text\n\n\n# %% RUN ANIMATION\nani = animation.FuncAnimation(fig, animate, frames=10000000, interval=1, blit=True, init_func=init)\n\n\n\n "
] | [
[
"numpy.arange",
"matplotlib.pyplot.figure",
"numpy.vectorize"
],
[
"numpy.zeros",
"matplotlib.pyplot.figure",
"numpy.exp",
"matplotlib.pyplot.subplots_adjust",
"matplotlib.animation.FuncAnimation",
"numpy.linspace"
]
] |
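The second file in the row above integrates dC/dt = D d²C/dx² − w dC/dx − uC + Ra with an explicit forward-time central-space (FTCS) scheme. A stripped-down sketch of one such timestep without the animation machinery — parameter values are illustrative only, and the central advection difference is divided by `(2.0*dx)`, matching the difference equation stated in the file's header and the correction applied above:

```python
import numpy as np

N, dx, dt = 101, 0.3, 0.03                   # grid and CFL-safe timestep
x = np.linspace(0.0, (N - 1) * dx, N)
C = np.zeros(N)
D, w, u = 0.24, 0.6, 0.005                   # mixing, advection, decay
Ra = 30.0 * np.exp(-0.05 * (x - 2.0) ** 4)   # subsurface production peak

def step(C):
    Cn = C.copy()
    Cn[1:-1] = C[1:-1] + dt * (
        D * (C[2:] - 2.0 * C[1:-1] + C[:-2]) / dx**2   # diffusion
        - w * (C[2:] - C[:-2]) / (2.0 * dx)            # advection (central)
        - u * C[1:-1]                                  # taphonomic decay
        + Ra[1:-1]                                     # production
    )
    Cn[0] = dt * Ra[0]        # surface boundary, as in the model above
    Cn[-1] = Cn[-2]           # zero-gradient bottom boundary
    return Cn

for _ in range(1000):
    C = step(C)
```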
magood/MarkeplacePredict | [
"f74ea035d6b861b9594ec2b91b38adad18e1bb00"
] | [
"eda.py"
] | [
"# Exploratory data analysis\n# py 3, using \"mplace\" conda env.\n\nimport numpy as np\nimport pandas as pd\nimport pickle, itertools, os\nfrom matplotlib import pyplot as plt\nimport seaborn as sns\nfrom yahoofinancials import YahooFinancials as YF\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.preprocessing import LabelEncoder\nfrom sklearn.ensemble import RandomForestClassifier\nimport utils\n\nmusic_fn = 'music.csv'\nsp_ticker = '^GSPC'\ndow_ticker = '^DJI'\nnasdaq_ticker = '^IXIC'\nall_tickers = [sp_ticker, dow_ticker, nasdaq_ticker]\nnice_markers = ('o', 'v', '^', '<', '>', '1', 's', 'P', '*', '+', 'X', 'D', '_', '|')\nrf_outpath = os.path.join('.', 'output', 'RF')\nrf_feature_imp_fn = 'rf_feature_imp.csv'\n\n\ndef download(ticker, start_date='2018-02-15', end_date='2015-08-19'):\n yf = YF(ticker)\n # this worked but looks like the dates are reversed?\n # hst = yf.get_historical_price_data('2015-08-19', '2018-02-15', 'daily')\n hst = yf.get_historical_price_data(start_date, end_date, 'daily')\n pricelist = hst[ticker]['prices']\n # example: [{'date': 1439991000, 'high': 2096.169921875, 'low': 2070.530029296875, 'open': 2095.68994140625, 'close': 2079.610107421875, 'volume': 3512920000, 'adjclose': 2079.610107421875, 'formatted_date': '2015-08-19'}]\n df = pd.DataFrame(pricelist)\n df['date'] = pd.to_datetime(df['formatted_date'])\n df.set_index('date', inplace=True)\n df.drop('formatted_date', axis=1, inplace=True)\n return df\n\n\ndef get_ticker_data(ticker, start_date, end_date):\n try:\n df = pd.read_pickle(f\"./{ticker}.pkl\")\n return df\n except FileNotFoundError:\n df = download(ticker, start_date, end_date)\n df.to_pickle(f\"./{ticker}.pkl\")\n return df\n\n\ndef augment_financials(df):\n df['swing'] = df['high'] - df['low']\n df['return'] = 0.\n df['return'] = (df['adjclose'] / df['adjclose'].shift(1)) - 1\n\n\ndef get_index_music(ticker):\n sp = get_ticker_data(ticker)\n augment_financials(sp)\n df = pd.DataFrame(index=sp.index)\n mdf = pd.read_csv(music_fn)\n mdf['date'] = pd.to_datetime(mdf['Date'])\n mdf.set_index('date', inplace=True)\n mdf.drop('Date', axis=1, inplace=True)\n mdf = mdf[mdf['Music'].isnull() == False]\n df = sp.join(mdf, how='inner')\n return df\n\n\ndef get_music_df():\n mdf = pd.read_csv(music_fn)\n mdf['date'] = pd.to_datetime(mdf['Date'])\n mdf.set_index('date', inplace=True)\n mdf.drop('Date', axis=1, inplace=True)\n mdf = mdf[mdf['Music'].isnull() == False]\n return mdf\n\n\ndef build_index_df(tickers, mindate, maxdate):\n df = None\n for ticker in tickers:\n idx_df = get_ticker_data(ticker, mindate, maxdate)\n augment_financials(idx_df)\n # rename columns with index postfix\n idx_df = idx_df.add_suffix('_' + ticker)\n if df is None:\n df = pd.DataFrame(index=idx_df.index)\n df = idx_df.join(df, how='inner')\n # Now possibly do any inter-index calculations.\n # What is the difference in return across indices from highest to lowest?\n df['max_return'] = df[['return_^GSPC', 'return_^IXIC', 'return_^DJI']].max(axis=1)\n df['min_return'] = df[['return_^GSPC', 'return_^IXIC', 'return_^DJI']].min(axis=1)\n df['return_diff'] = df['max_return'] - df['min_return']\n df = df.dropna()\n return df\n\n\ndef get_all_df(tickers):\n mdf = get_music_df()\n mindate = mdf.index.min().strftime('%Y-%m-%d')\n maxdate = mdf.index.max().strftime('%Y-%m-%d')\n df = build_index_df(tickers, mindate, maxdate)\n df = df.join(mdf, how='inner')\n return df\n\n\ndef scatter_markers(df, xcol, ycol):\n # ensure we have markers for each music selection, looping if 
necessary.\n music = list(df.Music.unique())\n # ensure we have markers for each music selection, looping if necessary.\n infmarkers = itertools.cycle(nice_markers)\n markers = list(itertools.islice(infmarkers, len(music)))\n for tune, symbol in zip(music, markers):\n df_tune = df[df['Music'] == tune]\n x = df_tune[xcol]\n y = df_tune[ycol]\n plt.scatter(x, y, marker=symbol, label=tune)\n plt.legend()\n plt.xlabel(xcol)\n plt.ylabel(ycol)\n plt.title(\"Marketplace Music Selection\")\n\n\ndef rf_feature_imp(X, y, columns):\n np.random.seed(0)\n X = StandardScaler().fit_transform(X)\n dims = [2, 3, 4, 5, 6, 7, 8, 9, 10, 15, 20, 25, 30, 35, 40, 45] # grid of dimensions to select\n dims = [d for d in dims if d < X.shape[1]]\n # Always include the actual number of features, too, as a baseline\n if X.shape[1] not in dims:\n dims += [X.shape[1]]\n\n rfc = RandomForestClassifier(n_estimators=100, class_weight='balanced', random_state=0, n_jobs=8)\n fs = rfc.fit(X, y).feature_importances_\n fi = dict(zip(columns, list(fs)))\n ordered_fi = [(k, fi[k]) for k in sorted(fi, key=fi.get, reverse=True)]\n ordered_fi_df = pd.DataFrame(ordered_fi)\n ordered_fi_df.columns = ['feature','importance']\n ordered_fi_df.to_csv(os.path.join(rf_outpath, rf_feature_imp_fn))\n return rfc\n\n\ndef plot_feature_imp(columns):\n plt.close()\n df = pd.read_csv(os.path.join(rf_outpath, rf_feature_imp_fn))\n ax = df.plot.bar(x='feature', y='importance', rot=0, figsize=(40, 10))\n plt.ylabel('Importance')\n plt.title('Feature Importances by Randomized Forest')\n plt.savefig(os.path.join(rf_outpath, 'full_feature_imp.png'), bbox_inches='tight')\n\n\ndef plot_correlations(df):\n plt.close()\n f = plt.figure(figsize=(25, 25))\n df2 = pd.get_dummies(df)\n sns.heatmap(df2.corr(), cmap=sns.diverging_palette(220, 10, as_cmap=True), center=0, linewidths=.5, square=True)\n plt.yticks(rotation=0)\n plt.xticks(rotation=90)\n plt.title('Correlation Matrix', fontsize=16)\n plt.savefig(os.path.join(rf_outpath, 'corr_matrix.png'), bbox_inches='tight')\n return f\n\n\ndef drop_useless_dim_prefixes(df, remove_field_prefixes):\n \"\"\"\n Drops dimensions/columns from the df that do not appear to be useful.\n Provide a list of prefixes for useless columns (remove_field_prefixes).\n \"\"\"\n droplist = []\n for t in all_tickers:\n for pfx in remove_field_prefixes:\n droplist.append(f'{pfx}_{t}')\n df.drop(droplist, axis=1, inplace=True)\n return df\n\n\nif __name__ == '__main__':\n df = get_all_df(all_tickers)\n # should get rid of a bunch of stuff we don't think will be predictive before doing a bunch of plots because it's confusing.\n target_field = 'Music'\n columns = df.drop(target_field, 1).columns\n X = df.drop(target_field, 1).copy().values\n y_categorical = df[target_field].copy().values\n le = LabelEncoder()\n le.fit(y_categorical)\n y = le.transform(y_categorical)\n # Scikit learn really wants floats or the scaler will complain\n X = X.astype(np.float64)\n rfc = rf_feature_imp(X, y, columns)\n plot_feature_imp(columns)\n\n # Look at some correlations here...\n cf = plot_correlations(df)\n \n # Items that are correlated to the music are:\n # Volume, return, swing, return diff, max return, min return.\n # We can see that there are many highly-correlated features, so we can remove many of those.\n # High, low, open, close, adjclose all worthless.\n remove_field_prefixes = ['adjclose', 'close', 'high', 'low', 'open']\n df = drop_useless_dim_prefixes(df, remove_field_prefixes)\n\n df.to_csv(utils.ix.ds_csv_file_name)\n\n # 
print(df.describe())\n # scatter_markers(df, 'return_^GSPC', 'swing_^GSPC')\n # df.groupby('Music').hist()\n # plt.show()\n # some other nice data vis examples: https://machinelearningmastery.com/quick-and-dirty-data-analysis-with-pandas/\n # Also, conda install -c conda-forge pandas-profiling, then import pandas_profiling, df.profile_report()"
] | [
[
"pandas.read_pickle",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.scatter",
"matplotlib.pyplot.xticks",
"pandas.read_csv",
"matplotlib.pyplot.figure",
"pandas.DataFrame",
"numpy.random.seed",
"matplotlib.pyplot.title",
"pandas.to_datetime",
"matplotlib.pyplot.ylabel",
"sklearn.ensemble.RandomForestClassifier",
"matplotlib.pyplot.close",
"sklearn.preprocessing.LabelEncoder",
"matplotlib.pyplot.yticks",
"matplotlib.pyplot.xlabel",
"pandas.get_dummies",
"sklearn.preprocessing.StandardScaler"
]
] |
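`rf_feature_imp` above scales the features, fits a class-balanced random forest, and ranks `feature_importances_`. A self-contained sketch of that pattern on synthetic data (the column names are made up); note also that `df.drop(target_field, 1)` in the script relies on a positional `axis` argument that newer pandas rejects — `df.drop(target_field, axis=1)` is the durable spelling:

```python
import numpy as np
import pandas as pd
from sklearn.ensemble import RandomForestClassifier
from sklearn.preprocessing import StandardScaler

# Synthetic stand-in for the market/music DataFrame: the label depends
# mostly on the first column, so it should rank highest.
rng = np.random.default_rng(0)
X = rng.normal(size=(200, 4))
y = (X[:, 0] + 0.1 * rng.normal(size=200) > 0).astype(int)
columns = ['return', 'swing', 'volume', 'return_diff']

X_scaled = StandardScaler().fit_transform(X)
rfc = RandomForestClassifier(n_estimators=100, class_weight='balanced',
                             random_state=0)
rfc.fit(X_scaled, y)

ranked = (pd.DataFrame({'feature': columns,
                        'importance': rfc.feature_importances_})
          .sort_values('importance', ascending=False))
print(ranked)
```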
ongchinkiat/CarND-Capstone | [
"abd768450825a03975f2b7b87f1379285357347b"
] | [
"ros/src/visual/visual.py"
] | [
"#!/usr/bin/env python\n\nimport numpy as np\nimport rospy\nimport matplotlib\nmatplotlib.use('Qt5Agg')\n\nfrom matplotlib import pyplot as plt\nfrom geometry_msgs.msg import PoseStamped\nfrom geometry_msgs.msg import TwistStamped\nfrom sensor_msgs.msg import Image\nfrom styx_msgs.msg import TrafficLightArray, TrafficLight\nfrom styx_msgs.msg import Lane\nfrom scipy.spatial import KDTree\nfrom cv_bridge import CvBridge\nimport cv2\n\nimport math\n\n'''\nThis node is for visualizing data\n\npip install -U matplotlib\n\napt-get install x11-apps\napt-get install gnome-calculator\napt-get install qtbase5-dev\napt-get install python-tk\napt-get install python-gtk2-dev\nexport DISPLAY=:0\n'''\n\nLOOKAHEAD_WPS = 200 # Number of waypoints we will publish. You can change this number\n\n\nclass Visual(object):\n def __init__(self):\n rospy.init_node('visual')\n\n self.pose = None\n self.base_waypoints = None\n self.waypoints_2d = None\n self.waypoint_tree = None\n self.fig = plt.figure(figsize=(15, 15))\n plt.ion()\n plt.show()\n self.table1 = self.fig.add_subplot(2,2,1)\n # x1,x2,y1,y2\n self.table1.axis([0, 2500, 1000, 3100])\n self.table2 = self.fig.add_subplot(2,2,2)\n self.table2.axis([0, 2500, 1000, 3100])\n self.table3 = self.fig.add_subplot(2,2,3)\n self.table4 = self.fig.add_subplot(2,2,4)\n self.waypoints_x = []\n self.waypoints_y = []\n self.pose_x = []\n self.pose_y = []\n self.recent_pose_x = []\n self.recent_pose_y = []\n self.vel_x = []\n self.vel_y = []\n self.lights_x = []\n self.lights_y = []\n self.final_waypoints_x = []\n self.final_waypoints_y = []\n\n self.has_image = False\n self.cv_image = None\n self.bridge = CvBridge()\n self.lights = None\n\t\t\n # prevent refreshing graph while updating pose array\n self.updatelock = 0\n self.start_seconds = rospy.get_time()\n self.last_image_time = rospy.get_time()\n self.image_update = 0\n self.pose_update = 0\n self.vel_update = 0\n\n rospy.Subscriber('/current_pose', PoseStamped, self.pose_cb)\n rospy.Subscriber('/base_waypoints', Lane, self.waypoints_cb)\n rospy.Subscriber('/current_velocity', TwistStamped, self.velocity_cb)\n sub6 = rospy.Subscriber('/image_color', Image, self.image_cb)\n sub3 = rospy.Subscriber('/vehicle/traffic_lights', TrafficLightArray, self.traffic_cb)\n\n rospy.Subscriber('/final_waypoints', Lane, self.final_waypoints_cb)\n\n # TODO: Add a subscriber for /traffic_waypoint and /obstacle_waypoint below\n\n # TODO: Add other member variables you need below\n self.loop()\n\n def loop(self):\n rate = rospy.Rate(2) # can go as low as 30Hz\n while not rospy.is_shutdown():\n if self.pose and self.base_waypoints and self.table1:\n #table1.clear()\n self.table1.plot(self.waypoints_x, self.waypoints_y)\n if self.updatelock == 0:\n self.table1.plot(self.pose_x, self.pose_y)\n if self.updatelock == 0:\n self.table1.plot(self.lights_x, self.lights_y)\n if self.updatelock == 0:\n self.table4.plot(self.vel_x, self.vel_y)\n if (self.updatelock == 0) and self.has_image and (self.image_update == 1):\n self.table3.cla()\n self.table3.imshow(self.cv_image)\n self.image_update = 0\n\n if self.updatelock == 0:\n self.table2.cla()\n if self.updatelock == 0:\n self.table2.plot(self.recent_pose_x, self.recent_pose_y)\n if self.updatelock == 0:\n self.table2.plot(self.final_waypoints_x, self.final_waypoints_y)\n\n plt.draw()\n plt.pause(0.001)\n #print(\"loop\")\n rate.sleep()\n\n def get_closest_waypoint_idx(self):\n x = self.pose.pose.position.x\n y = self.pose.pose.position.y\n closest_idx = self.waypoint_tree.query([x, y], 1)[1]\n\n 
# check if closest is ahead or behind vehilcle\n closest_coord = self.waypoints_2d[closest_idx]\n prev_coord = self.waypoints_2d[closest_idx - 1]\n\n # equation for hyperplane through closest_coords\n cl_vect = np.array(closest_coord)\n prev_vect = np.array(prev_coord)\n pos_vect = np.array([x, y])\n\n val = np.dot(cl_vect - prev_vect, pos_vect - cl_vect)\n\n if val > 0:\n closest_idx = (closest_idx + 1) % len(self.waypoints_2d)\n return closest_idx\n\n def publish_waypoints(self):\n if self.waypoint_tree:\n closest_wp_idx = self.get_closest_waypoint_idx()\n farthest_wp_idx = closest_wp_idx + LOOKAHEAD_WPS\n\n lane = Lane()\n lane.header = self.base_waypoints.header\n lane.waypoints = self.base_waypoints.waypoints[closest_wp_idx:farthest_wp_idx]\n self.final_waypoints_pub.publish(lane)\n\n def traffic_cb(self, msg):\n self.lights = msg.lights\n self.updatelock = 1\n self.lights_x = []\n self.lights_y = []\t\t\n for light in self.lights:\n self.lights_x.append(light.pose.pose.position.x);\n self.lights_y.append(light.pose.pose.position.y);\n self.updatelock = 0\n\t\t\t\n def image_cb(self, msg):\n \"\"\"\n Args:\n msg (Image): image from car-mounted camera\n\n \"\"\"\n now_time = rospy.get_time()\n if (self.image_update == 0) and (now_time > (self.last_image_time + 1)):\n self.last_image_time = now_time\n self.has_image = True\n self.updatelock = 1\n self.cv_image = cv2.cvtColor(self.bridge.imgmsg_to_cv2(msg, \"bgr8\"), cv2.COLOR_BGR2RGB)\n\n self.cv_image = self.cv_image.astype(int)\n self.cv_image = cv2.normalize(self.cv_image, None, 255,0, cv2.NORM_MINMAX, cv2.CV_8UC1)\n self.updatelock = 0\n self.image_update = 1\n\t\t\n\t\t\t\n def pose_cb(self, msg):\n self.pose = msg # around 50 Hz\n self.updatelock = 1\n self.pose_x.append(msg.pose.position.x)\n self.pose_y.append(msg.pose.position.y)\n if len(self.pose_x) > 21:\n self.recent_pose_x = self.pose_x[-20:]\n self.recent_pose_y = self.pose_y[-20:]\n else:\n self.recent_pose_x = self.pose_x\n self.recent_pose_y = self.pose_y\n self.updatelock = 0\n #print(\"new pose\")\n\n def velocity_cb(self, msg):\n self.updatelock = 1\n timenow = rospy.get_time() - self.start_seconds\n self.vel_x.append(timenow)\n self.vel_y.append(msg.twist.linear.x)\n self.updatelock = 0\n #print(\"new vel\")\n\t\t\n def waypoints_cb(self, waypoints):\n # load base waypoints\n print(\"new waypoints\")\n self.base_waypoints = waypoints\n if not self.waypoints_2d:\n # convert waypoints to (x,y) list\n self.waypoints_2d = [\n [\n waypoint.pose.pose.position.x,\n waypoint.pose.pose.position.y\n ] for waypoint in waypoints.waypoints\n ]\n maxdist = 0\n prev_x = -1\n prev_y = -1\n for waypoint in waypoints.waypoints:\n self.waypoints_x.append(waypoint.pose.pose.position.x);\n self.waypoints_y.append(waypoint.pose.pose.position.y);\n if prev_x >= 0:\n x = waypoint.pose.pose.position.x - prev_x\n y = waypoint.pose.pose.position.y - prev_y\n\n dist = math.sqrt((x*x) + (y*y))\n if dist > maxdist:\n maxdist = dist\n prev_x = waypoint.pose.pose.position.x\n prev_y = waypoint.pose.pose.position.y\n # build KDTree\n self.waypoint_tree = KDTree(self.waypoints_2d)\n # for Highway map, maxdist = 2.6486\n print(\"Waypoints max distance between points = \",maxdist)\n\n def final_waypoints_cb(self, waypoints):\n self.updatelock = 1\n self.final_waypoints_x = []\n self.final_waypoints_y = []\n for waypoint in waypoints.waypoints:\n self.final_waypoints_x.append(waypoint.pose.pose.position.x);\n self.final_waypoints_y.append(waypoint.pose.pose.position.y);\n self.updatelock = 0\n\n def 
obstacle_cb(self, msg):\n # TODO: Callback for /obstacle_waypoint message. We will implement it later\n pass\n\n def get_waypoint_velocity(self, waypoint):\n return waypoint.twist.twist.linear.x\n\n def set_waypoint_velocity(self, waypoints, waypoint, velocity):\n waypoints[waypoint].twist.twist.linear.x = velocity\n\n def distance(self, waypoints, wp1, wp2):\n dist = 0\n dl = lambda a, b: math.sqrt((a.x - b.x) ** 2 + (a.y - b.y) ** 2 + (a.z - b.z) ** 2)\n for i in range(wp1, wp2 + 1):\n dist += dl(waypoints[wp1].pose.pose.position, waypoints[i].pose.pose.position)\n wp1 = i\n return dist\n\n\nif __name__ == '__main__':\n try:\n Visual()\n except rospy.ROSInterruptException:\n rospy.logerr('Could not start visual node.')\n"
] | [
[
"matplotlib.pyplot.pause",
"matplotlib.pyplot.draw",
"scipy.spatial.KDTree",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.show",
"matplotlib.use",
"matplotlib.pyplot.ion",
"numpy.dot",
"numpy.array"
]
] |
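`get_closest_waypoint_idx` above combines a `KDTree` nearest-neighbour query with a dot-product test against the previous waypoint to decide whether the nearest waypoint lies ahead of or behind the vehicle. The same logic isolated on a toy straight-line track:

```python
import numpy as np
from scipy.spatial import KDTree

waypoints = [[float(i), 0.0] for i in range(10)]   # toy track along x-axis
tree = KDTree(waypoints)

def closest_ahead(x, y):
    idx = tree.query([x, y], 1)[1]                 # nearest waypoint index
    cl = np.array(waypoints[idx])
    prev = np.array(waypoints[idx - 1])
    pos = np.array([x, y])
    # Positive dot product: the nearest waypoint is behind us, take the next.
    if np.dot(cl - prev, pos - cl) > 0:
        idx = (idx + 1) % len(waypoints)
    return idx

assert closest_ahead(3.2, 0.5) == 4   # waypoint 3 is nearest but already passed
```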
aphedges/pytorch-lightning | [
"160e7e128909abc8489261287a562777cf1ada02"
] | [
"pytorch_lightning/loops/utilities.py"
] | [
"# Copyright The PyTorch Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom collections import OrderedDict\nfrom contextlib import contextmanager\nfrom typing import Any, Dict, Generator, Iterator, Mapping, Optional, Sequence, Tuple\n\nimport torch\nfrom torch.optim import Optimizer\n\nimport pytorch_lightning as pl\nfrom pytorch_lightning.plugins import ParallelPlugin\nfrom pytorch_lightning.trainer.connectors.logger_connector.result import ResultCollection\nfrom pytorch_lightning.utilities.apply_func import apply_to_collection\nfrom pytorch_lightning.utilities.exceptions import MisconfigurationException\nfrom pytorch_lightning.utilities.fetching import AbstractDataFetcher, DataLoaderIterDataFetcher\nfrom pytorch_lightning.utilities.signature_utils import is_param_in_hook_signature\nfrom pytorch_lightning.utilities.types import STEP_OUTPUT\n\n\ndef check_finite_loss(loss: Optional[torch.Tensor]) -> None:\n \"\"\"Checks for finite loss value.\n\n Args:\n loss: the loss value to check to be finite\n \"\"\"\n if loss is not None and not torch.isfinite(loss).all():\n raise ValueError(f\"The loss returned in `training_step` is {loss}.\")\n\n\ndef _check_training_step_output(model: \"pl.LightningModule\", training_step_output: STEP_OUTPUT) -> None:\n \"\"\"Sanity checks that training produced a valid output and optimizer step has already been called in manual\n optimization.\n\n Args:\n model: a reference to the trainer\n training_step_output: the output of the training step (before wrapping in an AttributeDict)\n \"\"\"\n if isinstance(training_step_output, torch.Tensor) and not model.automatic_optimization:\n if training_step_output.grad_fn is None:\n # TODO: Find why - RuntimeError: Expected to mark a variable ready only once ...\n raise MisconfigurationException(\"In manual optimization, `training_step` should not return a Tensor\")\n elif model.automatic_optimization:\n if not any(\n (\n isinstance(training_step_output, torch.Tensor),\n (isinstance(training_step_output, Mapping) and \"loss\" in training_step_output),\n training_step_output is None,\n )\n ):\n raise MisconfigurationException(\n \"In automatic optimization, `training_step` must either return a Tensor, \"\n \"a dict with key 'loss' or None (where the step will be skipped).\"\n )\n\n\ndef _process_training_step_output(\n trainer: \"pl.Trainer\", training_step_output: STEP_OUTPUT\n) -> Tuple[Optional[ResultCollection], Optional[Any]]:\n \"\"\"Adds the :param:`training_step_output` to the trainer's results.\n\n Args:\n trainer: a reference to the trainer\n training_step_output: the output of the training step (before wrapping into an AttributeDict)\n\n Returns:\n the updated results (None if the training_step's output was None) and hiddens exract from the results\n \"\"\"\n if training_step_output is None:\n return None, None\n\n results = trainer._results\n\n loss = None\n hiddens = None\n\n # handle dict return\n if isinstance(training_step_output, dict):\n # this should not modify the 
`training_step_output`, as the user could be using it after `training_step_end`\n loss = training_step_output.get(\"loss\")\n hiddens = training_step_output.get(\"hiddens\")\n # detach hiddens to avoid `RuntimeError: Trying to backward through the graph a second time`\n hiddens = apply_to_collection(hiddens, torch.Tensor, lambda t: t.detach())\n # use the setter instead of `dict.update` because it calls `detach` on the tensor items\n results.extra = {k: v for k, v in training_step_output.items() if k not in (\"loss\", \"hiddens\")}\n\n # handle scalar return\n elif isinstance(training_step_output, torch.Tensor):\n loss = training_step_output\n\n if trainer.terminate_on_nan:\n check_finite_loss(loss)\n\n # the loss shouldn't be moved to cpu.\n if trainer.move_metrics_to_cpu:\n results.cpu()\n\n # map to results under the hood\n results.minimize = loss\n\n return results, hiddens\n\n\ndef _build_training_step_kwargs(\n lightning_module: \"pl.LightningModule\",\n optimizers: Sequence[Optimizer],\n batch: Any,\n batch_idx: int,\n opt_idx: Optional[int],\n hiddens: Optional[Any],\n) -> Dict[str, Any]:\n \"\"\"Builds the keyword arguments for training_step.\n\n Args:\n lightning_module: the LightningModule with a `training_step` hook implementation\n optimizers: the list of optimizers from the Trainer\n batch: the batch to train on\n batch_idx: the index of the current batch\n opt_idx: the index of the current optimizer\n hiddens: the hidden state of the previous RNN iteration\n\n Returns:\n the keyword arguments for the training step\n \"\"\"\n # enable not needing to add opt_idx to training_step\n step_kwargs = OrderedDict([(\"batch\", batch)])\n\n training_step_fx = getattr(lightning_module, \"training_step\")\n\n if is_param_in_hook_signature(training_step_fx, \"batch_idx\", min_args=2):\n step_kwargs[\"batch_idx\"] = batch_idx\n\n if len(optimizers) > 1:\n has_opt_idx_in_train_step = is_param_in_hook_signature(training_step_fx, \"optimizer_idx\")\n if has_opt_idx_in_train_step:\n if not lightning_module.automatic_optimization:\n raise ValueError(\n \"Your `LightningModule.training_step` signature contains an `optimizer_idx` argument but\"\n \" in manual optimization optimizers must be handled by the user. Remove the optimizer_idx\"\n \" argument or set `self.automatic_optimization = True`.\"\n )\n step_kwargs[\"optimizer_idx\"] = opt_idx\n elif not has_opt_idx_in_train_step and lightning_module.automatic_optimization:\n raise ValueError(\n f\"Your LightningModule defines {len(optimizers)} optimizers but\"\n \" `training_step` is missing the `optimizer_idx` argument.\"\n )\n\n # pass hiddens if using tbptt\n if lightning_module.truncated_bptt_steps > 0:\n step_kwargs[\"hiddens\"] = hiddens\n\n return step_kwargs\n\n\ndef _prepare_dataloader_iter(data_fetcher: AbstractDataFetcher, batch_idx: int) -> Iterator:\n \"\"\"Attach the dataloader.\"\"\"\n if not isinstance(data_fetcher, DataLoaderIterDataFetcher):\n # restore iteration\n dataloader_iter = enumerate(data_fetcher, batch_idx)\n else:\n dataloader_iter = iter(data_fetcher)\n return dataloader_iter\n\n\n@contextmanager\ndef _block_parallel_sync_behavior(trainer: \"pl.Trainer\", block: bool = True) -> Generator[None, None, None]:\n \"\"\"Blocks synchronization in :class:`~pytorch_lightning.plugins.training_type.parallel.ParallelPlugin`. 
This is\n useful, for example, when accumulating gradients to reduce communication when it is not needed.\n\n Args:\n trainer: the trainer instance with a reference to a training type plugin\n block: whether the context manager is enabled or not\n\n Returns:\n context manager with sync behaviour off\n \"\"\"\n if isinstance(trainer.training_type_plugin, ParallelPlugin) and block:\n with trainer.training_type_plugin.block_backward_sync():\n yield None\n else:\n yield None\n"
] | [
[
"torch.isfinite"
]
] |
fengtony686/workspace | [
"9e382a02439cb510df5fb2c278ae4e206d830336"
] | [
"MachineLearning/MINST/CNN.py"
] | [
"import os\nimport torch\nimport torch.nn as nn\nimport torch.utils.data as Data\nimport torchvision\n\n\nEPOCH = 1\nBATCH_SIZE = 50\nLR = 0.001\nDOWNLOAD_MNIST = False\n\n\nif not(os.path.exists('./mnist/')) or not os.listdir('./mnist/'):\n DOWNLOAD_MNIST = True\n\n\ntrain_data = torchvision.datasets.MNIST(\n root='./mnist/',\n train=True,\n transform=torchvision.transforms.ToTensor(),\n download=DOWNLOAD_MNIST,\n)\n\n\nprint(train_data.data.size())\nprint(train_data.targets.size())\n\n\ntrain_loader = Data.DataLoader(dataset=train_data, batch_size=BATCH_SIZE, shuffle=True) # batch shape:(50,1,28,28)\n\n\ntest_data = torchvision.datasets.MNIST(root='./mnist/', train=False)\ntest_x = torch.unsqueeze(test_data.data, dim=1).type(torch.FloatTensor)[:2000]/255. # shape:(2000,1,28,28)\ntest_y = test_data.targets[:2000]\n\n\nclass CNN(nn.Module):\n def __init__(self):\n super(CNN, self).__init__()\n self.conv1 = nn.Sequential(\n nn.Conv2d(1, 16, 5, 1, 2),\n nn.ReLU(),\n nn.MaxPool2d(kernel_size=2),\n )\n self.conv2 = nn.Sequential(\n nn.Conv2d(16, 32, 5, 1, 2),\n nn.ReLU(),\n nn.MaxPool2d(2),\n )\n self.out = nn.Linear(32*7*7, 10)\n\n def forward(self, x):\n x = self.conv1(x)\n x = self.conv2(x)\n x = x.view(x.size(0), -1)\n out = self.out(x)\n return out, x\n\n\ncnn = CNN()\nprint(cnn)\noptimizer = torch.optim.Adam(cnn.parameters(), lr=LR)\nloss_func = nn.CrossEntropyLoss()\n\n\nfor epoch in range(EPOCH):\n for step, (b_x, b_y) in enumerate(train_loader):\n output = cnn(b_x)[0]\n loss = loss_func(output, b_y)\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n if step % 50 == 0:\n test_output, last_layer = cnn(test_x)\n pred_y = torch.max(test_output, 1)[1].data.numpy()\n accuracy = float((pred_y == test_y.data.numpy()).astype(int).sum())/float(test_y.size(0))\n print('Epoch: ', epoch, '| Training Loss: %.4f' % loss.data.numpy(), '| Test Accuracy: %.2f' % accuracy)\n\n\ntest_output, _ = cnn(test_x[:20])\npred_y = torch.max(test_output, 1)[1].data.numpy()\nprint(pred_y, 'Prediction Number')\nprint(test_y[:20].numpy(), 'Real Number')"
] | [
[
"torch.unsqueeze",
"torch.utils.data.DataLoader",
"torch.nn.MaxPool2d",
"torch.nn.Linear",
"torch.nn.CrossEntropyLoss",
"torch.nn.Conv2d",
"torch.max",
"torch.nn.ReLU"
]
] |
kundajelab/bias_correction | [
"521678ea8739473f793b0ce85e22e622d13df6fe"
] | [
"genomewide_gc/get_gc_content.py"
] | [
"import pandas as pd\nimport pysam\nimport argparse\ndef parse_args():\n parser=argparse.ArgumentParser(description=\"get gc content from a bed file\")\n parser.add_argument(\"--input_bed\")\n parser.add_argument(\"--ref_fasta\")\n parser.add_argument(\"--split_chroms\",action=\"store_true\",default=False)\n parser.add_argument(\"--out_prefix\")\n parser.add_argument(\"--center_summit\",action=\"store_true\",default=False)\n parser.add_argument(\"--flank_size\",type=int,default=500)\n parser.add_argument(\"--store_seq\",action=\"store_true\",default=False) \n return parser.parse_args()\ndef get_line_narrowPeak(row,args):\n chrom=row[0]\n start=row[1]\n end=row[2] \n if args.center_summit==True:\n summit=start+row[9]\n start=summit-args.flank_size\n end=summit+args.flank_size\n return chrom,start,end\n\ndef get_line_hdf5(index):\n chrom=index[0]\n start=index[1]\n end=index[2]\n return chrom, start,end \n\n\ndef main():\n args=parse_args()\n ref=pysam.FastaFile(args.ref_fasta)\n outputs=dict()\n outf=None\n is_narrowPeak=True\n if args.input_bed.endswith('.hdf5'):\n #load as hdf5\n is_narrowPeak=False\n data=pd.read_hdf(args.input_bed,header=0,sep='\\t')\n else:\n #load csv\n data=pd.read_csv(args.input_bed,header=0,sep='\\t')\n print(\"loaded bed file\")\n num_rows=str(data.shape[0])\n print(\"num_rows:\"+num_rows) \n cur_row=0 \n for index,row in data.iterrows():\n if cur_row%1000==0:\n print(str(cur_row)+\"/\"+num_rows)\n cur_row+=1\n if is_narrowPeak is True:\n chrom,start,end=get_line_narrowPeak(row,args)\n else:\n chrom,start,end=get_line_hdf5(index)\n #extract fasta\n seq=ref.fetch(chrom,start,end).upper()\n g=seq.count('G')\n c=seq.count('C')\n gc=g+c\n gc_fract=round(gc/len(seq),2)\n if args.split_chroms is True:\n if chrom not in outputs:\n outputs[chrom]=open(args.out_prefix+'.'+chrom,'w')\n print(\"created:\"+str(args.out_prefix+'.'+chrom))\n outputs[chrom].write(chrom+'\\t'+str(start)+'\\t'+str(end)+'\\t'+str(gc_fract))\n if args.store_seq is True:\n outputs[chrom].write('\\t'+seq+'\\n')\n else:\n outputs[chrom].write('\\n')\n else:\n if outf is None:\n outf=open(args.out_prefix,'w')\n print(\"created:\"+str(args.out_prefix))\n outf.write(chrom+'\\t'+str(start)+'\\t'+str(end)+'\\t'+str(gc_fract))\n if args.store_seq is True:\n outf.write('\\t'+seq+'\\n')\n else:\n outf.write('\\n')\n #close files\n if args.split_chroms is True:\n for chrom in outputs:\n outputs[chrom].close()\n else:\n outf.close()\n \nif __name__==\"__main__\":\n main()\n"
] | [
[
"pandas.read_csv",
"pandas.read_hdf"
]
] |
repos-cl/akshare | [
"94fa42fb095ac4bfa5d8d58673b805d36cc0128e"
] | [
"akshare/index/index_eri.py"
] | [
"# -*- coding:utf-8 -*-\n# /usr/bin/env python\n\"\"\"\nDate: 2021/5/9 16:16\nDesc: 浙江省排污权交易指数\nhttps://zs.zjpwq.net/\n\"\"\"\nimport requests\nimport pandas as pd\n\n\ndef index_eri() -> pd.DataFrame:\n \"\"\"\n 浙江省排污权交易指数\n https://zs.zjpwq.net\n :return: 浙江省排污权交易指数\n :rtype: pandas.DataFrame\n \"\"\"\n url = \"https://zs.zjpwq.net/zhe-jiang-pwq-webapi/indexData\"\n params = {\n \"indexId\": \"1\",\n \"areaCode\": \"330000\",\n \"cycle\": \"MONTH\",\n \"structCode\": \"01\",\n }\n r = requests.get(url, params=params)\n data_json = r.json()\n temp_df = pd.DataFrame(data_json[\"data\"])\n del temp_df[\"id\"]\n del temp_df[\"indexId\"]\n del temp_df[\"stageId\"]\n del temp_df[\"structCode\"]\n del temp_df[\"areaCode\"]\n del temp_df[\"rawValue\"]\n temp_df.columns = [\n \"value\",\n \"date\",\n ]\n temp_df = temp_df[\n [\n \"date\",\n \"value\",\n ]\n ]\n big_df = temp_df\n url = \"https://zs.zjpwq.net/zhe-jiang-pwq-webapi/rawValueStatistics\"\n params = {\n \"orderBy\": \"-date\",\n \"pageSize\": \"1000\",\n \"quotaType\": \"0\",\n \"index\": \"TOTAL_QUANTITY\",\n \"areaCode\": \"330000\",\n }\n r = requests.get(url, params=params)\n data_json = r.json()\n temp_df = pd.DataFrame(data_json[\"data\"])\n del temp_df[\"id\"]\n del temp_df[\"quotaType\"]\n del temp_df[\"index\"]\n temp_df.columns = [\n \"date\",\n \"value\",\n \"update\",\n ]\n big_df = big_df.merge(temp_df, on=\"date\")\n big_df.columns = [\n \"日期\",\n \"交易指数\",\n \"成交量\",\n \"更新时间\",\n ]\n return big_df\n\n\nif __name__ == \"__main__\":\n index_eri_df = index_eri()\n print(index_eri_df)\n"
] | [
[
"pandas.DataFrame"
]
] |
abc4pwm/abc4pwm | [
"29c9e833b076f8ce7e3e206c5ae8b560eff02b9e"
] | [
"build/lib/abc4pwm/clustering.py"
] | [
"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Oct 23 14:43:24 2019\n\n@author: omerali\n\"\"\"\n\nimport numpy as np\nimport os, shutil\nfrom pathlib import Path\nfrom time import gmtime, strftime\nimport json\nfrom glob import glob\nimport warnings\nwarnings.filterwarnings(\"ignore\")\nfrom distutils.dir_util import copy_tree\nimport multiprocessing as mp\nfrom abc4pwm.convert_count_to_pwm import motif_weight2p\nfrom abc4pwm.similarity_score import compute_similarity_score4alignment\nfrom abc4pwm.energy_to_p import read_energy_matrix\nfrom abc4pwm.non_dbd_clustering import non_dbd_ClusteringPwm\n\nfrom functools import partial\n\n\nfrom sklearn.cluster import AffinityPropagation\n\nclass ClusteringPwm():\n\n def __init__(self, input_folder_path, output_folder_path, in_dbd = True, minimum_pwms_in_dbd = 5, max_no_processors = 5):\n \"\"\"\n\n :param input_folder_path: this should point to folder which contain DBD folders\n :param output_folder_path: this should point to folder which contain clustered DBD folders\n :param in_dbd: this should be true if you want clustering inside dbd. Otherwise false.\n :param minimum_pwms_in_dbd: a dbd having less than this number of pwm will not be clustered\n :param max_no_processors: for parallel processing, select maximum number of processors. Default is 5\n\n \"\"\"\n print(\"\\nTask: Clustering of TFs based on their DNA Binding Domain\")\n\n self.output_folder_path = output_folder_path\n leaf_folder = Path(self.output_folder_path)\n out_dir = leaf_folder.parent\n\n if not os.path.exists(output_folder_path):\n os.makedirs(output_folder_path, exist_ok=True)\n\n if not in_dbd:\n clusteringClassobj = non_dbd_ClusteringPwm(input_folder_path, output_folder_path)\n exit()\n self.empty_dir(output_folder_path)\n self.minimum_pwms_in_dbd = minimum_pwms_in_dbd\n self.max_processors = max_no_processors\n self.total_clusters = 0\n self.unclustered_dbds = 0\n self.unclustered_pwms = 0\n\n copy_tree(input_folder_path,output_folder_path)\n input_folder_path = output_folder_path\n\n dbds = sorted(os.listdir(input_folder_path))\n for ind, i in enumerate(dbds):\n if i.startswith('.DS'):\n dbds.pop(ind)\n for i in dbds:\n path_to_dbd = os.path.join(input_folder_path, i)\n self.drive_clustering(self, path_to_dbd)\n\n\n path_to_text_reports = os.path.join(out_dir, 'reports_in_text/')\n\n if os.path.exists(os.path.join(path_to_text_reports,\"clusterSummary.txt\")):\n os.remove(os.path.join(path_to_text_reports,\"clusterSummary.txt\"))\n os.makedirs(path_to_text_reports, exist_ok=True)\n with open(os.path.join(path_to_text_reports,\"clusterSummary.txt\"),'w') as cs:\n cs.writelines(\"Clustering Time: \" + str(strftime(\"%a, %d %b %Y %H:%M:%S +0000\", gmtime()))+ \"\\n\"\n \"Clustering Technique: Affinity Propagation Clustering \\n\"\n \"Minimum PWMs in a DBD considered for Clustering: \" + str(self.minimum_pwms_in_dbd) + \"\\n\\n\"\n \"Total DBDs : \" + str(len(dbds)) + \"\\n\"\n \"Total Clusters made : \" + str(self.total_clusters) + \"\\n\"\n \"UnClustered DBDs due to less than threshold : \" + str(self.unclustered_dbds) + \"\\n\"\n \"UnClustered pwms in DBDs less than threshold: \" + str(self.unclustered_pwms) + \"\\n\")\n\n\n print(\"Task completed. 
\\n \"\n \"Please see clusters in : \", input_folder_path, \"<dbd_folder>/out \\n\"\n \"Clustering summary in data/out/reports_in_text\")\n\n\n @staticmethod\n def drive_clustering(self, inputdir):\n #this function prepares a similiarity matrix in parallel and send to a function for clustering\n #rename in files according to their clusters and put in respective cluster folder\n #also call representative motif function at the end\n\n leaf_folder = Path(self.output_folder_path)\n out_dir = leaf_folder.parent\n\n pwms = [i.split('/')[-1] for i in glob(os.path.join(inputdir, \"*.mlp\"))]\n\n if len(pwms) < int(self.minimum_pwms_in_dbd):\n # for x in pwms:\n # shutil.move(os.path.join(inputdir,x),dst_for_bad_pwms)\n\n\n\n self.unclustered_dbds+=1\n self.unclustered_pwms+=len(pwms)\n\n return 1\n else:\n n_processors = int(np.ceil(len(pwms)/30))\n if n_processors > self.max_processors:\n n_processors = self.max_processors\n pool = mp.Pool(processes=n_processors)\n\n start = 0\n processor_capacity = int(np.ceil(len(pwms) / n_processors))\n end = processor_capacity\n chunks_of_pwms = []\n for i in range(n_processors):\n chunks_of_pwms.append(pwms[start:end])\n start = end\n end = end + processor_capacity\n calculate_similarity = partial(self.calculate_similarity_matrix, inputdir=inputdir)\n chunks_similarity_matrix = pool.map(calculate_similarity, chunks_of_pwms)\n similarity_matrix = np.concatenate((chunks_similarity_matrix), axis=0)\n\n clusters_labels = self.clustering(similarity_matrix)\n\n\n\n self.renaming_mlp(self, inputdir, clusters_labels,pwms)\n self.total_clusters += len(np.unique(clusters_labels))\n\n self.folderizeclusters(os.path.join(inputdir,'out/'))\n\n @staticmethod\n def folderizeclusters(folder_path):\n # folder_path = \"test_out/\"\n\n mlpfiles = [i.split('/')[-1] for i in glob(os.path.join(folder_path, \"*.mlp\"))]\n\n for pwm in mlpfiles:\n folder_name = pwm.split('_')[0]\n\n new_path = os.path.join(folder_path, folder_name)\n if not os.path.exists(new_path):\n os.makedirs(new_path)\n\n old_mlp_path = os.path.join(folder_path, pwm)\n new_mlp_path = os.path.join(new_path, pwm)\n shutil.move(old_mlp_path, new_mlp_path)\n\n @staticmethod\n def calculate_similarity_matrix(pwms_full, inputdir):\n #this function extracts matrices from files, convert them to probablity and return similarity matrix\n\n\n\n full_pwms = [i.split('/')[-1] for i in glob(os.path.join(inputdir, \"*.mlp\"))]\n\n df = np.zeros((len(pwms_full), len(full_pwms)))\n for index1, i in enumerate(pwms_full):\n matrix1, matrix_string1, maximum_feq1, total_maximum1, info1 = read_energy_matrix(os.path.join(inputdir, i))\n matrix1 = motif_weight2p(matrix1)\n\n for index2, j in enumerate(full_pwms):\n matrix2, matrix_string2, maximum_feq2, total_maximum2, info2 = read_energy_matrix(os.path.join(inputdir, j))\n matrix2 = motif_weight2p(matrix2)\n\n df[index1, index2] = compute_similarity_score4alignment(matrix1, matrix2)\n\n\n similarityMatrix = np.asarray(df)\n return similarityMatrix\n\n\n\n @staticmethod\n def read_similarity_matrix():\n #funciton for reading similariy matrix stores in a json file\n with open('similarityMatrix.json') as f:\n similarityMatrix = np.array(json.load(f))\n return similarityMatrix\n\n @staticmethod\n def clustering(similarityMatrix):\n #function for clustering algorithm\n clusters = AffinityPropagation(damping=0.5, max_iter=400, convergence_iter=30, preference=None, affinity='precomputed', verbose=False).fit(similarityMatrix)\n return clusters.labels_\n\n\n @staticmethod\n def 
renaming_mlp(self, inputdir, clusters,pwms):\n #this function adds the cluster number to every file name\n\n\n if not os.path.exists(os.path.join(inputdir,'out/')):\n os.mkdir(os.path.join(inputdir,'out/'))\n outputdir = os.path.join(inputdir,'out/')\n self.empty_dir(outputdir)\n\n for ind, i in enumerate(pwms):\n src = os.path.join(inputdir,i) # renaming\n dst = str(clusters[ind]) + '_' + str(i)\n dst = os.path.join(outputdir,dst)\n\n os.rename(src, dst)\n\n @staticmethod\n def empty_dir(folder):\n #function for deleting files from a folder\n\n for filename in os.listdir(folder):\n file_path = os.path.join(folder, filename)\n try:\n if os.path.isfile(file_path) or os.path.islink(file_path):\n os.unlink(file_path)\n elif os.path.isdir(file_path):\n shutil.rmtree(file_path)\n except Exception as e:\n print(('Failed to delete %s. Reason: %s' % (file_path, e)))\n\n\n @staticmethod\n def moving_files_input(src_copy, dest_copy):\n #moving files to the input folder from the original files for the next run\n\n\n if not os.path.exists(dest_copy):\n os.mkdir(dest_copy,mode=0o777)\n\n pwms = os.listdir(src_copy)\n\n for indexpwm, i in enumerate(pwms):\n if i.startswith('.'):\n pwms.pop(indexpwm)\n\n for file_name in pwms:\n full_file_name = os.path.join(src_copy, file_name)\n if os.path.isfile(full_file_name):\n shutil.move(full_file_name, dest_copy)\n\n\n\nif __name__ == \"__main__\":\n clusteringClassobj = ClusteringPwm('../data/out/classification_out', '../data/out/clustering_out/')\n # clusteringClassobj = ClusteringPwm('../data/in/in_pwms', '../data/out/non_dbd_clustering_out/', False)"
] | [
[
"sklearn.cluster.AffinityPropagation",
"numpy.concatenate",
"numpy.unique",
"numpy.asarray"
]
] |
ezavesky/metadata-flatten-extractor | [
"5e81713424970087492b7835195235575f0024e2"
] | [
"contentai_metadata_flatten/parsers/yolo3.py"
] | [
"#! python\n# ===============LICENSE_START=======================================================\n# metadata-flatten-extractor Apache-2.0\n# ===================================================================================\n# Copyright (C) 2017-2020 AT&T Intellectual Property. All rights reserved.\n# ===================================================================================\n# This software file is distributed by AT&T \n# under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# This file is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ===============LICENSE_END=========================================================\n# -*- coding: utf-8 -*-\n\nfrom os import path\nimport json\nfrom pandas import DataFrame\n\nfrom contentai_metadata_flatten.parsers import Flatten\n\nclass Parser(Flatten):\n def __init__(self, path_content, logger=None):\n super().__init__(path_content, logger=logger)\n self.EXTRACTOR = \"yolo3\"\n\n @staticmethod\n def known_types():\n \"\"\"Return the output types for this generator\n :return: list. List of output types (file types) for this generator\n \"\"\"\n return ['tag']\n\n def parse(self, run_options):\n \"\"\"Flatten Yolo Classifier\n - https://pjreddie.com/darknet/yolo/\n\n :param: run_options (dict): specific runtime information\n :returns: (DataFrame): DataFrame on successful decoding and export, None (or exception) otherwise\n \"\"\"\n list_items = []\n\n dict_data = self.get_extractor_results(self.EXTRACTOR, \"data.json\")\n\n for local_obj in dict_data: # traverse items\n if \"results\" in local_obj or \"milliseconds\" in local_obj:\n # { \"milliseconds\": 5872.539205872539, \"frameNumber\": 176,\n # \"results\": [ { \"objects\": [ \n # { \"name\": \"person\", \"confidence\": 0.9987912774085999,\n # \"boundingBox\": { \"left\": 0.559375, \"top\": 0.03611, \"width\": 0.3, \"height\": 0.9611 } },\n # { \"name\": \"person\", \"confidence\": 0.9953850507736206,\n # \"boundingBox\": { \"left\": 0.134375, \"top\": 0.175, \"width\": 0.3078, \"height\": 0.80277 } }\n # ] } ] },\n\n time_frame = round(float(local_obj[\"milliseconds\"]) / 1000.0, self.ROUND_DIGITS)\n base_obj = { \"time_begin\": time_frame, \"time_event\": time_frame, \"time_end\": time_frame,\n \"tag_type\": \"tag\", \"source_event\": \"image\", \"extractor\": self.EXTRACTOR }\n for obj_result in local_obj[\"results\"]: # iterate through result sets\n if \"objects\" in obj_result:\n for instance_obj in obj_result[\"objects\"]: # iterate through objects\n details_obj = { 'box': {'w': round(instance_obj['boundingBox']['width'], self.ROUND_DIGITS), \n 'h': round(instance_obj['boundingBox']['height'], self.ROUND_DIGITS),\n 'l': round(instance_obj['boundingBox']['left'], self.ROUND_DIGITS), \n 't': round(instance_obj['boundingBox']['top'], self.ROUND_DIGITS) } }\n score_frame = round(float(instance_obj[\"confidence\"]), self.ROUND_DIGITS)\n obj_insert = { \"tag\": instance_obj[\"name\"], \"score\": score_frame, \n \"details\": json.dumps(details_obj) }\n obj_insert.update(base_obj)\n list_items.append(obj_insert)\n\n if len(list_items) > 0: # return the whole thing as dataframe\n return DataFrame(list_items)\n\n if run_options[\"verbose\"]:\n 
self.logger.critical(f\"No tag entries found in source '{self.EXTRACTOR}'\")\n return None\n"
] | [
[
"pandas.DataFrame"
]
] |
tiagotouso/TALENTOS_HUMANOS | [
"c391f7d7a331d5f8b186b27af6a9b61448620cc6"
] | [
"IMPORTAMBIENTE.py"
] | [
"'''\nARQUIVO PARA IMPORTAR OS AMBIENTES DOS SERVIDORES\n\nOSB: COLOCAR A LISTA DE SERVIDORES (XLSX) COM OS CAMPOS [SIAPE - AMBIENTE - SETOR EXERCÍCIO]\n'''\nimport os\nimport pandas as pd\n\nfrom SQL import sqlexecute\nfrom MENSAGEM import mensagemErro, mensagemInformacao\n\ndef importarAmbienteServidores():\n '''\n FUNÇÃO IMPORTAR AMBIENTE E EXERCÍCIO DOS SERVIDORES PARA O BANCO DE DADOS\n ENTRA\n PLANILHA DOS SERVIDORES DO SISTEMA INTEGRADO (RELATÓRIO)\n SAI\n BANCO DE DADOS ATUALIZADO COM AMBIENTE E EXERCÍCIO DOS SERVIDORES\n '''\n\n listdir = os.listdir('DADOS_EXTRATOR\\\\')\n if 'servidores.xlsx' in listdir:\n\n xls = 'DADOS_EXTRATOR\\\\servidores.xlsx'\n folha = 'Servidores'\n\n arq = pd.read_excel(xls, folha)\n dados = arq[['Siape', 'Ambiente', 'Exercício']]\n dados = dados[dados['Siape'].notnull()]\n dados['Siape'] = dados['Siape'].apply(lambda x: str(x).rjust(7, '0'))\n dados = dados.dropna(thresh=2)\n dados = dados.fillna('null')\n dados = dados[dados['Siape'].duplicated() == False]\n\n sql = '''delete from ts_sis_ambientes;'''\n sqlexecute(sql)\n\n sql = '''INSERT INTO ts_sis_ambientes\\n(GR_MATRICULA, AMBIENTE, EXERCICIO)\\nvalues\\n'''\n lx = ''\n for i in dados.values:\n if len(i[0]) == 7:\n lx = '''( '{0}', '{1}', '{2}' ),\\n'''.format(i[0], i[1], i[2])\n sql += lx\n sql = sql[:-2] + ';'\n sql = sql.replace('\\'null\\'', 'null')\n sqlexecute(sql)\n\n mensagemInformacao('Importação do AMBIENTE concluída.')\n else:\n mensagemErro('Arquivo \"servidores.xlsx\" não encontrado. (AMBIENTE)')\n\n\n"
] | [
[
"pandas.read_excel"
]
] |
treid5/probnum | [
"1c5499883672cfa029c12045848ea04491c69e08"
] | [
"src/probnum/quad/solvers/stopping_criteria/_rel_mean_change.py"
] | [
"\"\"\"Stopping criterion based on the relative change of the successive integral estimators.\"\"\"\n\nimport numpy as np\n\nfrom probnum.quad.solvers.bq_state import BQState\nfrom probnum.quad.solvers.stopping_criteria import BQStoppingCriterion\nfrom probnum.typing import FloatArgType\n\n# pylint: disable=too-few-public-methods\n\n\nclass RelativeMeanChange(BQStoppingCriterion):\n \"\"\"Stop once the relative change of consecutive integral estimates are smaller than\n a tolerance.\n\n The stopping criterion is: :math:`|\\\\hat{F}_{c} - \\\\hat{F}_{p}|/ |\\\\hat{F}_{c}| \\\\leq r`\n where :math:`\\\\hat{F}_{c}` and :math:`\\\\hat{F}_{p}` are the integral estimates of the current and previous iteration\n respectively, and :math:`r` is the relative tolerance.\n\n Parameters\n ----------\n rel_tol:\n Relative error tolerance on consecutive integral mean values.\n \"\"\"\n\n def __init__(self, rel_tol: FloatArgType):\n self.rel_tol = rel_tol\n\n def __call__(self, bq_state: BQState) -> bool:\n integral_belief = bq_state.integral_belief\n return (\n np.abs(\n (integral_belief.mean - bq_state.previous_integral_beliefs[-1].mean)\n / integral_belief.mean\n )\n <= self.rel_tol\n )\n"
] | [
[
"numpy.abs"
]
] |
JiangBowen0008/bop_toolkit | [
"375da05664c1b9b4249b191378f25d5815c305f9"
] | [
"bop_toolkit_lib/renderer_py.py"
] | [
"# Author: Tomas Hodan ([email protected])\n# Center for Machine Perception, Czech Technical University in Prague\n\n\"\"\"A Python based renderer.\"\"\"\n\nimport os\nimport numpy as np\nfrom glumpy import app, gloo, gl\n\nfrom bop_toolkit_lib import inout\nfrom bop_toolkit_lib import misc\nfrom bop_toolkit_lib import renderer\n\n# Set glumpy logging level.\nfrom glumpy.log import log\nimport logging\nlog.setLevel(logging.WARNING) # Options: ERROR, WARNING, DEBUG, INFO.\n\n# Set backend (http://glumpy.readthedocs.io/en/latest/api/app-backends.html).\n# app.use('glfw') # Options: 'glfw', 'qt5', 'pyside', 'pyglet'.\n\n\n# RGB vertex shader.\n_rgb_vertex_code = \"\"\"\nuniform mat4 u_mv;\nuniform mat4 u_nm;\nuniform mat4 u_mvp;\nuniform vec3 u_light_eye_pos;\n\nattribute vec3 a_position;\nattribute vec3 a_normal;\nattribute vec3 a_color;\nattribute vec2 a_texcoord;\n\nvarying vec3 v_color;\nvarying vec2 v_texcoord;\nvarying vec3 v_eye_pos;\nvarying vec3 v_L;\nvarying vec3 v_normal;\n\nvoid main() {\n gl_Position = u_mvp * vec4(a_position, 1.0);\n v_color = a_color;\n v_texcoord = a_texcoord;\n \n // The following points/vectors are expressed in the eye coordinates.\n v_eye_pos = (u_mv * vec4(a_position, 1.0)).xyz; // Vertex.\n v_L = normalize(u_light_eye_pos - v_eye_pos); // Vector to the light.\n v_normal = normalize(u_nm * vec4(a_normal, 1.0)).xyz; // Normal vector.\n}\n\"\"\"\n\n# RGB fragment shader - flat shading.\n_rgb_fragment_flat_code = \"\"\"\nuniform float u_light_ambient_w;\nuniform sampler2D u_texture;\nuniform int u_use_texture;\n\nvarying vec3 v_color;\nvarying vec2 v_texcoord;\nvarying vec3 v_eye_pos;\nvarying vec3 v_L;\n\nvoid main() {\n // Face normal in eye coords.\n vec3 f_normal = normalize(cross(dFdx(v_eye_pos), dFdy(v_eye_pos)));\n\n float light_diffuse_w = max(dot(normalize(v_L), normalize(f_normal)), 0.0);\n float light_w = u_light_ambient_w + light_diffuse_w;\n if(light_w > 1.0) light_w = 1.0;\n\n if(bool(u_use_texture)) {\n gl_FragColor = vec4(light_w * texture2D(u_texture, v_texcoord));\n }\n else {\n gl_FragColor = vec4(light_w * v_color, 1.0);\n }\n}\n\"\"\"\n\n# RGB fragment shader - Phong shading.\n_rgb_fragment_phong_code = \"\"\"\nuniform float u_light_ambient_w;\nuniform sampler2D u_texture;\nuniform int u_use_texture;\n\nvarying vec3 v_color;\nvarying vec2 v_texcoord;\nvarying vec3 v_eye_pos;\nvarying vec3 v_L;\nvarying vec3 v_normal;\n\nvoid main() {\n float light_diffuse_w = max(dot(normalize(v_L), normalize(v_normal)), 0.0);\n float light_w = u_light_ambient_w + light_diffuse_w;\n if(light_w > 1.0) light_w = 1.0;\n\n if(bool(u_use_texture)) {\n gl_FragColor = vec4(light_w * texture2D(u_texture, v_texcoord));\n }\n else {\n gl_FragColor = vec4(light_w * v_color, 1.0);\n }\n}\n\"\"\"\n\n# Depth vertex shader.\n# Ref: https://github.com/julienr/vertex_visibility/blob/master/depth.py\n#\n# Getting the depth from the depth buffer in OpenGL is doable, see here:\n# http://web.archive.org/web/20130416194336/http://olivers.posterous.com/linear-depth-in-glsl-for-real\n# http://web.archive.org/web/20130426093607/http://www.songho.ca/opengl/gl_projectionmatrix.html\n# http://stackoverflow.com/a/6657284/116067\n# but it is difficult to achieve high precision, as explained in this article:\n# http://dev.theomader.com/depth-precision/\n#\n# Once the vertex is in the view coordinates (view * model * v), its depth is\n# simply the Z axis. 
Hence, instead of reading from the depth buffer and undoing\n# the projection matrix, we store the Z coord of each vertex in the color\n# buffer. OpenGL allows for float32 color buffer components.\n_depth_vertex_code = \"\"\"\nuniform mat4 u_mv;\nuniform mat4 u_mvp;\nattribute vec3 a_position;\nattribute vec3 a_color;\nvarying float v_eye_depth;\n\nvoid main() {\n gl_Position = u_mvp * vec4(a_position, 1.0);\n vec3 v_eye_pos = (u_mv * vec4(a_position, 1.0)).xyz; // In eye coords.\n\n // OpenGL Z axis goes out of the screen, so depths are negative\n v_eye_depth = -v_eye_pos.z;\n}\n\"\"\"\n\n# Depth fragment shader.\n_depth_fragment_code = \"\"\"\nvarying float v_eye_depth;\n\nvoid main() {\n gl_FragColor = vec4(v_eye_depth, 0.0, 0.0, 1.0);\n}\n\"\"\"\n\n\n# Functions to calculate transformation matrices.\n# Note that OpenGL expects the matrices to be saved column-wise.\n# (Ref: http://www.songho.ca/opengl/gl_transform.html)\n\n\ndef _calc_model_view(model, view):\n \"\"\"Calculates the model-view matrix.\n\n :param model: 4x4 ndarray with the model matrix.\n :param view: 4x4 ndarray with the view matrix.\n :return: 4x4 ndarray with the model-view matrix.\n \"\"\"\n return np.dot(model, view)\n\n\ndef _calc_model_view_proj(model, view, proj):\n \"\"\"Calculates the model-view-projection matrix.\n\n :param model: 4x4 ndarray with the model matrix.\n :param view: 4x4 ndarray with the view matrix.\n :param proj: 4x4 ndarray with the projection matrix.\n :return: 4x4 ndarray with the model-view-projection matrix.\n \"\"\"\n return np.dot(np.dot(model, view), proj)\n\n\ndef _calc_normal_matrix(model, view):\n \"\"\"Calculates the normal matrix.\n\n Ref: http://www.songho.ca/opengl/gl_normaltransform.html\n\n :param model: 4x4 ndarray with the model matrix.\n :param view: 4x4 ndarray with the view matrix.\n :return: 4x4 ndarray with the normal matrix.\n \"\"\"\n return np.linalg.inv(np.dot(model, view)).T\n\n\ndef _calc_calib_proj(K, x0, y0, w, h, nc, fc, window_coords='y_down'):\n \"\"\"Conversion of Hartley-Zisserman intrinsic matrix to OpenGL proj. 
matrix.\n\n Ref:\n 1) https://strawlab.org/2011/11/05/augmented-reality-with-OpenGL\n 2) https://github.com/strawlab/opengl-hz/blob/master/src/calib_test_utils.py\n\n :param K: 3x3 ndarray with the intrinsic camera matrix.\n :param x0: The X coordinate of the camera image origin (typically 0).\n :param y0: The Y coordinate of the camera image origin (typically 0).\n :param w: Image width.\n :param h: Image height.\n :param nc: Near clipping plane.\n :param fc: Far clipping plane.\n :param window_coords: 'y_up' or 'y_down'.\n :return: 4x4 ndarray with the OpenGL projection matrix.\n \"\"\"\n depth = float(fc - nc)\n q = -(fc + nc) / depth\n qn = -2 * (fc * nc) / depth\n\n # Draw our images upside down, so that all the pixel-based coordinate\n # systems are the same.\n if window_coords == 'y_up':\n proj = np.array([\n [2 * K[0, 0] / w, -2 * K[0, 1] / w, (-2 * K[0, 2] + w + 2 * x0) / w, 0],\n [0, -2 * K[1, 1] / h, (-2 * K[1, 2] + h + 2 * y0) / h, 0],\n [0, 0, q, qn], # Sets near and far planes (glPerspective).\n [0, 0, -1, 0]\n ])\n\n # Draw the images upright and modify the projection matrix so that OpenGL\n # will generate window coords that compensate for the flipped image coords.\n else:\n assert window_coords == 'y_down'\n proj = np.array([\n [2 * K[0, 0] / w, -2 * K[0, 1] / w, (-2 * K[0, 2] + w + 2 * x0) / w, 0],\n [0, 2 * K[1, 1] / h, (2 * K[1, 2] - h + 2 * y0) / h, 0],\n [0, 0, q, qn], # Sets near and far planes (glPerspective).\n [0, 0, -1, 0]\n ])\n return proj.T\n\n\nclass RendererPython(renderer.Renderer):\n \"\"\"A Python based renderer.\"\"\"\n\n def __init__(self, width, height, mode='rgb+depth', shading='phong',\n bg_color=(0.0, 0.0, 0.0, 0.0)):\n \"\"\"Constructor.\n\n :param width: Width of the rendered image.\n :param height: Height of the rendered image.\n :param mode: Rendering mode ('rgb+depth', 'rgb', 'depth').\n :param shading: Type of shading ('flat', 'phong').\n :param bg_color: Color of the background (R, G, B, A).\n \"\"\"\n super(RendererPython, self).__init__(width, height)\n\n self.mode = mode\n self.shading = shading\n self.bg_color = bg_color\n\n # Indicators whether to render RGB and/or depth image.\n self.render_rgb = self.mode in ['rgb', 'rgb+depth']\n self.render_depth = self.mode in ['depth', 'rgb+depth']\n\n # Structures to store object models and related info.\n self.models = {}\n self.model_bbox_corners = {}\n self.model_textures = {}\n\n # Rendered images.\n self.rgb = None\n self.depth = None\n\n # Window for rendering.\n self.window = app.Window(visible=False)\n\n # Per-object vertex and index buffer.\n self.vertex_buffers = {}\n self.index_buffers = {}\n\n # Per-object OpenGL programs for rendering of RGB and depth images.\n self.rgb_programs = {}\n self.depth_programs = {}\n\n # The frame buffer object.\n rgb_buf = np.zeros(\n (self.height, self.width, 4), np.float32).view(gloo.TextureFloat2D)\n depth_buf = np.zeros(\n (self.height, self.width), np.float32).view(gloo.DepthTexture)\n self.fbo = gloo.FrameBuffer(color=rgb_buf, depth=depth_buf)\n\n # Activate the created frame buffer object.\n self.fbo.activate()\n\n def add_object(self, obj_id, model_path, **kwargs):\n \"\"\"See base class.\"\"\"\n # Color of the object model (the original color saved with the object model\n # will be used if None).\n surf_color = None\n if 'surf_color' in kwargs:\n surf_color = kwargs['surf_color']\n \n # Default to 1.0 (no scaling) so that `scale` is always defined.\n scale = kwargs.get('scale', 1.0)\n\n # Load the object model.\n model = inout.load_ply(model_path)\n model['pts'] = model['pts'] * scale\n 
self.models[obj_id] = model\n\n # Calculate the 3D bounding box of the model (will be used to set the near\n # and far clipping plane).\n bb = misc.calc_3d_bbox(\n model['pts'][:, 0], model['pts'][:, 1], model['pts'][:, 2])\n self.model_bbox_corners[obj_id] = np.array([\n [bb[0], bb[1], bb[2]],\n [bb[0], bb[1], bb[2] + bb[5]],\n [bb[0], bb[1] + bb[4], bb[2]],\n [bb[0], bb[1] + bb[4], bb[2] + bb[5]],\n [bb[0] + bb[3], bb[1], bb[2]],\n [bb[0] + bb[3], bb[1], bb[2] + bb[5]],\n [bb[0] + bb[3], bb[1] + bb[4], bb[2]],\n [bb[0] + bb[3], bb[1] + bb[4], bb[2] + bb[5]],\n ])\n\n # Set texture/color of vertices.\n self.model_textures[obj_id] = None\n\n # Use the specified uniform surface color.\n if surf_color is not None:\n colors = np.tile(list(surf_color) + [1.0], [model['pts'].shape[0], 1])\n\n # Set UV texture coordinates to dummy values.\n texture_uv = np.zeros((model['pts'].shape[0], 2), np.float32)\n\n # Use the model texture.\n elif 'texture_file' in self.models[obj_id].keys():\n model_texture_path = os.path.join(\n os.path.dirname(model_path), self.models[obj_id]['texture_file'])\n model_texture = inout.load_im(model_texture_path)\n\n # Normalize the texture image.\n if model_texture.max() > 1.0:\n model_texture = model_texture.astype(np.float32) / 255.0\n model_texture = np.flipud(model_texture)\n self.model_textures[obj_id] = model_texture\n\n # UV texture coordinates.\n texture_uv = model['texture_uv']\n\n # Set the per-vertex color to dummy values.\n colors = np.zeros((model['pts'].shape[0], 3), np.float32)\n\n # Use the original model color.\n elif 'colors' in model.keys():\n assert (model['pts'].shape[0] == model['colors'].shape[0])\n colors = model['colors']\n if colors.max() > 1.0:\n colors /= 255.0 # Color values are expected in range [0, 1].\n\n # Set UV texture coordinates to dummy values.\n texture_uv = np.zeros((model['pts'].shape[0], 2), np.float32)\n\n # Set the model color to gray.\n else:\n colors = np.ones((model['pts'].shape[0], 3), np.float32) * 0.5\n\n # Set UV texture coordinates to dummy values.\n texture_uv = np.zeros((model['pts'].shape[0], 2), np.float32)\n\n # Set the vertex data.\n if self.mode == 'depth':\n vertices_type = [\n ('a_position', np.float32, 3),\n ('a_color', np.float32, colors.shape[1])\n ]\n vertices = np.array(list(zip(model['pts'], colors)), vertices_type)\n else:\n if self.shading == 'flat':\n vertices_type = [\n ('a_position', np.float32, 3),\n ('a_color', np.float32, colors.shape[1]),\n ('a_texcoord', np.float32, 2)\n ]\n vertices = np.array(list(zip(model['pts'], colors, texture_uv)),\n vertices_type)\n elif self.shading == 'phong':\n vertices_type = [\n ('a_position', np.float32, 3),\n ('a_normal', np.float32, 3),\n ('a_color', np.float32, colors.shape[1]),\n ('a_texcoord', np.float32, 2)\n ]\n vertices = np.array(list(zip(model['pts'], model['normals'],\n colors, texture_uv)), vertices_type)\n else:\n raise ValueError('Unknown shading type.')\n\n # Create vertex and index buffer for the loaded object model.\n self.vertex_buffers[obj_id] = vertices.view(gloo.VertexBuffer)\n self.index_buffers[obj_id] = \\\n model['faces'].flatten().astype(np.uint32).view(gloo.IndexBuffer)\n\n # Set shader for the selected shading.\n if self.shading == 'flat':\n rgb_fragment_code = _rgb_fragment_flat_code\n elif self.shading == 'phong':\n rgb_fragment_code = _rgb_fragment_phong_code\n else:\n raise ValueError('Unknown shading type.')\n\n # Prepare the RGB OpenGL program.\n rgb_program = gloo.Program(_rgb_vertex_code, rgb_fragment_code)\n 
rgb_program.bind(self.vertex_buffers[obj_id])\n if self.model_textures[obj_id] is not None:\n rgb_program['u_use_texture'] = int(True)\n rgb_program['u_texture'] = self.model_textures[obj_id]\n else:\n rgb_program['u_use_texture'] = int(False)\n rgb_program['u_texture'] = np.zeros((1, 1, 4), np.float32)\n self.rgb_programs[obj_id] = rgb_program\n\n # Prepare the depth OpenGL program.\n depth_program = gloo.Program(_depth_vertex_code,_depth_fragment_code)\n depth_program.bind(self.vertex_buffers[obj_id])\n self.depth_programs[obj_id] = depth_program\n\n def remove_object(self, obj_id):\n \"\"\"See base class.\"\"\"\n del self.models[obj_id]\n del self.model_bbox_corners[obj_id]\n if obj_id in self.model_textures:\n del self.model_textures[obj_id]\n del self.vertex_buffers[obj_id]\n del self.index_buffers[obj_id]\n del self.rgb_programs[obj_id]\n del self.depth_programs[obj_id]\n\n def render_object(self, obj_id, R, t, fx, fy, cx, cy):\n \"\"\"See base class.\"\"\"\n\n # Define the following variables as global so their latest values are always\n # seen in function on_draw below.\n global curr_obj_id, mat_model, mat_view, mat_proj\n curr_obj_id = obj_id\n\n # Model matrix (from object space to world space).\n mat_model = np.eye(4, dtype=np.float32)\n\n # View matrix (from world space to eye space; transforms also the coordinate\n # system from OpenCV to OpenGL camera space).\n mat_view_cv = np.eye(4, dtype=np.float32)\n mat_view_cv[:3, :3], mat_view_cv[:3, 3] = R, t.squeeze()\n yz_flip = np.eye(4, dtype=np.float32)\n yz_flip[1, 1], yz_flip[2, 2] = -1, -1\n mat_view = yz_flip.dot(mat_view_cv) # OpenCV to OpenGL camera system.\n mat_view = mat_view.T # OpenGL expects column-wise matrix format.\n\n # Calculate the near and far clipping plane from the 3D bounding box.\n bbox_corners = self.model_bbox_corners[obj_id]\n bbox_corners_ht = np.concatenate(\n (bbox_corners, np.ones((bbox_corners.shape[0], 1))), axis=1).transpose()\n bbox_corners_eye_z = mat_view_cv[2, :].reshape((1, 4)).dot(bbox_corners_ht)\n clip_near = bbox_corners_eye_z.min()\n clip_far = bbox_corners_eye_z.max()\n\n # Projection matrix.\n K = np.array([[fx, 0.0, cx], [0.0, fy, cy], [0.0, 0.0, 1.0]])\n mat_proj = _calc_calib_proj(\n K, 0, 0, self.width, self.height, clip_near, clip_far)\n\n @self.window.event\n def on_draw(dt):\n self.window.clear()\n global curr_obj_id, mat_model, mat_view, mat_proj\n\n # Render the RGB image.\n if self.render_rgb:\n self.rgb = self._draw_rgb(\n curr_obj_id, mat_model, mat_view, mat_proj)\n\n # Render the depth image.\n if self.render_depth:\n self.depth = self._draw_depth(\n curr_obj_id, mat_model, mat_view, mat_proj)\n\n # The on_draw function is called framecount+1 times.\n app.run(framecount=0)\n\n if self.mode == 'rgb':\n return {'rgb': self.rgb}\n elif self.mode == 'depth':\n return {'depth': self.depth}\n elif self.mode == 'rgb+depth':\n return {'rgb': self.rgb, 'depth': self.depth}\n\n def _draw_rgb(self, obj_id, mat_model, mat_view, mat_proj):\n \"\"\"Renders an RGB image.\n\n :param obj_id: ID of the object model to render.\n :param mat_model: 4x4 ndarray with the model matrix.\n :param mat_view: 4x4 ndarray with the view matrix.\n :param mat_proj: 4x4 ndarray with the projection matrix.\n :return: HxWx3 ndarray with the rendered RGB image.\n \"\"\"\n # Update the OpenGL program.\n program = self.rgb_programs[obj_id]\n program['u_light_eye_pos'] = list(self.light_cam_pos)\n program['u_light_ambient_w'] = self.light_ambient_weight\n program['u_mv'] = _calc_model_view(mat_model, 
mat_view)\n program['u_nm'] = _calc_normal_matrix(mat_model, mat_view)\n program['u_mvp'] = _calc_model_view_proj(mat_model, mat_view, mat_proj)\n\n # OpenGL setup.\n gl.glEnable(gl.GL_DEPTH_TEST)\n gl.glClearColor(\n self.bg_color[0], self.bg_color[1], self.bg_color[2], self.bg_color[3])\n gl.glClear(gl.GL_COLOR_BUFFER_BIT | gl.GL_DEPTH_BUFFER_BIT)\n gl.glViewport(0, 0, self.width, self.height)\n\n # Keep the back-face culling disabled because of objects which do not have\n # well-defined surface (e.g. the lamp from the lm dataset).\n gl.glDisable(gl.GL_CULL_FACE)\n\n # Rendering.\n program.draw(gl.GL_TRIANGLES, self.index_buffers[obj_id])\n\n # Get the content of the FBO texture.\n rgb = np.zeros((self.height, self.width, 4), dtype=np.float32)\n gl.glReadPixels(0, 0, self.width, self.height, gl.GL_RGBA, gl.GL_FLOAT, rgb)\n rgb.shape = (self.height, self.width, 4)\n rgb = rgb[::-1, :]\n rgb = np.round(rgb[:, :, :3] * 255).astype(np.uint8) # Convert to [0, 255].\n\n return rgb\n\n def _draw_depth(self, obj_id, mat_model, mat_view, mat_proj):\n \"\"\"Renders a depth image.\n\n :param obj_id: ID of the object model to render.\n :param mat_model: 4x4 ndarray with the model matrix.\n :param mat_view: 4x4 ndarray with the view matrix.\n :param mat_proj: 4x4 ndarray with the projection matrix.\n :return: HxW ndarray with the rendered depth image.\n \"\"\"\n # Update the OpenGL program.\n program = self.depth_programs[obj_id]\n program['u_mv'] = _calc_model_view(mat_model, mat_view)\n program['u_mvp'] = _calc_model_view_proj(mat_model, mat_view, mat_proj)\n\n # OpenGL setup.\n gl.glEnable(gl.GL_DEPTH_TEST)\n gl.glClearColor(0.0, 0.0, 0.0, 0.0)\n gl.glClear(gl.GL_COLOR_BUFFER_BIT | gl.GL_DEPTH_BUFFER_BIT)\n gl.glViewport(0, 0, self.width, self.height)\n\n # Keep the back-face culling disabled because of objects which do not have\n # well-defined surface (e.g. the lamp from the lm dataset).\n gl.glDisable(gl.GL_CULL_FACE)\n\n # Rendering.\n program.draw(gl.GL_TRIANGLES, self.index_buffers[obj_id])\n\n # Get the content of the FBO texture.\n depth = np.zeros((self.height, self.width, 4), dtype=np.float32)\n gl.glReadPixels(\n 0, 0, self.width, self.height, gl.GL_RGBA, gl.GL_FLOAT, depth)\n depth.shape = (self.height, self.width, 4)\n depth = depth[::-1, :]\n depth = depth[:, :, 0] # Depth is saved in the first channel\n\n return depth\n"
] | [
[
"numpy.eye",
"numpy.ones",
"numpy.flipud",
"numpy.zeros",
"numpy.array",
"numpy.dot",
"numpy.round"
]
] |
falabrasil/kaldi-br | [
"2b11eb937c485941c2209f577af38c2f21bf9017"
] | [
"utils/clustering/cluster.py"
] | [
"#!/usr/bin/env python3\n#\n# author: dec 2020\n# cassio batista - https://cassota.gitlab.io\n#\n# sponsored by MidiaClip (Salvador - BA)\n\n\nimport sys\nimport os\nimport shutil\nimport glob\nimport argparse\nimport logging\nfrom collections import OrderedDict\n\nimport torch\nimport numpy as np\n\nfrom pyannote.pipeline.blocks.clustering import (\n HierarchicalAgglomerativeClustering\n)\n\n\nlogging.basicConfig(format=\"[%(filename)s] %(levelname)s: %(message)s\",\n level=logging.INFO)\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(\n description=\"Cluster audio files by speaker\")\n parser.add_argument(\"in_dir\", help=\"input dir\")\n parser.add_argument(\"out_dir\", help=\"output dir\")\n\n # parse args and minimally validate input\n args = parser.parse_args()\n if not os.path.isdir(args.in_dir):\n logging.error(\"input dir does not exist: '%s'\" % args.in_dir)\n sys.exit(1)\n if os.path.isdir(args.out_dir):\n logging.warning(\"output dir '%s' exists and *WILL NOT* \"\n \"be overwritten \" % args.out_dir)\n else:\n logging.info(\"creating output dir: '%s'\" % args.out_dir)\n os.mkdir(args.out_dir)\n\n # input dir is expected to contain only two subdirectories,\n # one for a male and another for a female speaker\n subdirs = []\n for d in os.listdir(args.in_dir):\n d = os.path.join(args.in_dir, d) # readlink -f\n if os.path.isdir(d):\n subdirs.append(d)\n\n if len(subdirs) < 1:\n logging.warning(\"expected at least one subdir in '%s'\" % args.in_dir)\n sys.exit(1)\n\n logging.info(\"loading pyannote's speaker embedding model\")\n model = torch.hub.load(\"pyannote/pyannote-audio\", \"emb\")\n clustering = HierarchicalAgglomerativeClustering()\n\n for d in subdirs:\n # get broadcaster name and gender tag + transmission date from dir name\n broadcaster, gtx = d.split(\"/\")[-1].split(\"_\")\n gender, txdate = gtx[0].upper(), gtx[1:]\n\n # sanity check on gender tag\n if gender != \"M\" and gender != \"F\":\n logging.error(\"gender flag expected to be either M or F. \"\n \"got '%s' instead\" % gender)\n sys.exit(1)\n\n # scan subdirs looking for wav and txt files\n # later check if the numbers match, abort if it doesn't\n wavlist = sorted(glob.glob(os.path.join(d, \"*.wav\")))\n txtlist = sorted(glob.glob(os.path.join(d, \"*.txt\")))\n if len(wavlist) != len(txtlist):\n logging.error(\"number of audio and transcription files do not \"\n \"match: %d vs %d\" % (len(wavlist), len(txtlist)))\n sys.exit(1)\n\n # clustering: check `_turn_level()` method from `SpeechTurnClustering`\n # https://github.com/pyannote/pyannote-audio/blob/master/pyannote/audio/pipeline/speech_turn_clustering.py#L162\n X, labels, num_emb = [], [], 0\n for i, wavfile in enumerate(wavlist):\n # label = re.sub('[/.-]', ' ', wavfile).split()[-2]\n label = os.path.basename(wavfile)\n\n logging.info(\"extracting embeddings from '%s'\" % wavfile)\n embedding = model(current_file={'audio': wavfile})\n num_emb += 1\n\n # I'm doing this because I found no way on earth to set a goddamn\n # `speech_turns` variable, which in turn contains a `Timeline`\n # object used for cropping\n # https://github.com/pyannote/pyannote-audio-hub#speaker-embedding\n # https://github.com/pyannote/pyannote-core/blob/develop/pyannote/core/timeline.py#L114\n for window, emb in embedding:\n x = embedding.crop(window)\n\n # TODO could I ignore this break and add multiple embedding\n # vectors for the same label? 
I know for a fact the mapping\n # label-cluster would be kept 1:1 if I moved in both `labels`\n # and `X` appends below...\n if len(x) > 0:\n break\n\n # FIXME skip labels so small we don't have any embedding for it\n if len(x) < 1:\n logging.warning(\"well, we'll have to think of something for \"\n \"utterances like '%s'\" % wavfile)\n continue\n\n labels.append(label)\n X.append(np.mean(x, axis=0))\n\n # apply clustering of label embeddings\n logging.info(\"clustering files from '%s' subdir\" % d)\n clusters = clustering(np.vstack(X)) # int indices\n\n # map each clustered label to its cluster (between 1 and N_CLUSTERS)\n # https://stackoverflow.com/questions/16772071/sort-dict-by-value-python\n mapping = {label: cluster for label, cluster in zip(labels, clusters)}\n mapping = OrderedDict(sorted(mapping.items(), key=lambda x:x[1]))\n\n # https://stackoverflow.com/questions/600268/mkdir-p-functionality-in-python/11101867#11101867\n for fileid, (label, cluster) in enumerate(mapping.items()):\n # dir names store tag and speaker id information\n tag = \"%s%s\" % (broadcaster, txdate) # andaiafm20201105\n spk = \"%s-%s%04d\" % (tag, gender, cluster) # andaiafm20201105-F0001\n\n src = os.path.join(d, label.replace(\".wav\", \"\"))\n dst = os.path.join(args.out_dir, tag, spk)\n if not os.path.isdir(dst):\n os.makedirs(dst, exist_ok=True)\n\n # andaiafm20201105/andaiafm20201105-F0001/andaiafm20201105F0001_000001.{wav,txt}\n dst = os.path.join(dst, \"%s_%06d\" % (spk.replace(\"-\", \"\"), fileid))\n logging.info(\"copy: '%s'.{wav,txt} -> '%s'.{wav,txt}\" % (src, dst))\n for ext in (\"wav\", \"txt\"):\n f = \"%s.%s\" % (src, ext) # from source\n t = \"%s.%s\" % (dst, ext) # to destination\n if os.path.isfile(t):\n logging.warning(\"dst file '%s' exists, that's odd\" % t)\n shutil.copy2(f, t)\n\n logging.info(\"done scanning subdir %s: %d embeddings extracted, \"\n \"%d embeddings processed\" % (d, num_emb, len(X)))\n"
] | [
[
"numpy.vstack",
"torch.hub.load",
"numpy.mean"
]
] |
RainingComputers/pykitml | [
"1c3e50cebcdb6c4da63979ef9a812b44d23a4857"
] | [
"tests/test_mnist.py"
] | [
"import sys\nimport os.path\n\nimport numpy as np\nimport pykitml as pk\nfrom pykitml.datasets import mnist\nfrom pykitml.testing import pktest_graph, pktest_nograph\n\ndef test_download():\n # Download the mnist data set\n mnist.get()\n # Test ran successfully\n assert True\n\n@pktest_graph\ndef test_adagrad():\n # Load dataset\n training_data, training_targets, testing_data, testing_targets = mnist.load()\n \n # Create a new neural network\n digit_classifier = pk.NeuralNetwork([784, 100, 10])\n\n # Train it\n digit_classifier.train(\n training_data=training_data,\n targets=training_targets, \n batch_size=50, \n epochs=1200, \n optimizer=pk.Adagrad(learning_rate=0.07, decay_rate=0.99), \n testing_data=testing_data, \n testing_targets=testing_targets,\n testing_freq=30,\n decay_freq=10\n )\n \n # Save it\n pk.save(digit_classifier, 'digit_classifier_network.pkl')\n\n # Show performance\n accuracy = digit_classifier.accuracy(training_data, training_targets)\n print('Train Accuracy:', accuracy) \n accuracy = digit_classifier.accuracy(testing_data, testing_targets)\n print('Test Accuracy:', accuracy)\n \n # Plot performance graph\n digit_classifier.plot_performance()\n\n # Show confusion matrix\n digit_classifier.confusion_matrix(training_data, training_targets)\n\n # Assert if it has enough accuracy\n assert digit_classifier.accuracy(training_data, training_targets) > 94\n\n@pktest_graph\ndef test_nesterov():\n # Load dataset\n training_data, training_targets, testing_data, testing_targets = mnist.load()\n \n # Create a new neural network\n digit_classifier = pk.NeuralNetwork([784, 100, 10])\n\n # Train it\n digit_classifier.train(\n training_data=training_data,\n targets=training_targets, \n batch_size=50, \n epochs=1200, \n optimizer=pk.Nesterov(learning_rate=0.1, decay_rate=0.99), \n testing_data=testing_data, \n testing_targets=testing_targets,\n testing_freq=30,\n decay_freq=10\n )\n \n # Save it\n pk.save(digit_classifier, 'digit_classifier_network.pkl')\n\n # Show performance\n accuracy = digit_classifier.accuracy(training_data, training_targets)\n print('Train Accuracy:', accuracy) \n accuracy = digit_classifier.accuracy(testing_data, testing_targets)\n print('Test Accuracy:', accuracy)\n \n # Plot performance graph\n digit_classifier.plot_performance()\n\n # Show confusion matrix\n digit_classifier.confusion_matrix(training_data, training_targets)\n\n # Assert if it has enough accuracy\n assert digit_classifier.accuracy(training_data, training_targets) > 94\n\n@pktest_graph\ndef test_relu_nesterov():\n # Load dataset\n training_data, training_targets, testing_data, testing_targets = mnist.load()\n \n # Create a new neural network\n digit_classifier = pk.NeuralNetwork([784, 100, 10], config='relu-softmax-cross_entropy')\n\n # Train it\n digit_classifier.train(\n training_data=training_data,\n targets=training_targets, \n batch_size=50, \n epochs=1200, \n optimizer=pk.Nesterov(learning_rate=0.1, decay_rate=0.99), \n testing_data=testing_data, \n testing_targets=testing_targets,\n testing_freq=30,\n decay_freq=10\n )\n \n # Save it\n pk.save(digit_classifier, 'digit_classifier_network.pkl')\n\n # Show performance\n accuracy = digit_classifier.accuracy(training_data, training_targets)\n print('Train Accuracy:', accuracy) \n accuracy = digit_classifier.accuracy(testing_data, testing_targets)\n print('Test Accuracy:', accuracy)\n \n # Plot performance graph\n digit_classifier.plot_performance()\n\n # Show confusion matrix\n digit_classifier.confusion_matrix(training_data, 
training_targets)\n\n # Assert if it has enough accuracy\n assert digit_classifier.accuracy(training_data, training_targets) > 94\n\n@pktest_graph\ndef test_momentum():\n # Load dataset\n training_data, training_targets, testing_data, testing_targets = mnist.load()\n \n # Create a new neural network\n digit_classifier = pk.NeuralNetwork([784, 100, 10])\n \n # Train it\n digit_classifier.train(\n training_data=training_data,\n targets=training_targets, \n batch_size=50, \n epochs=1200, \n optimizer=pk.Momentum(learning_rate=0.1, decay_rate=0.95), \n testing_data=testing_data, \n testing_targets=testing_targets,\n testing_freq=30,\n decay_freq=20\n )\n \n # Save it\n pk.save(digit_classifier, 'digit_classifier_network.pkl')\n\n # Show performance\n accuracy = digit_classifier.accuracy(training_data, training_targets)\n print('Train Accuracy:', accuracy) \n accuracy = digit_classifier.accuracy(testing_data, testing_targets)\n print('Test Accuracy:', accuracy)\n \n # Plot performance graph\n digit_classifier.plot_performance()\n\n # Show confusion matrix\n digit_classifier.confusion_matrix(training_data, training_targets)\n\n # Assert if it has enough accuracy\n assert digit_classifier.accuracy(training_data, training_targets) > 94\n\n@pktest_graph\ndef test_gradient_descent():\n # Load dataset\n training_data, training_targets, testing_data, testing_targets = mnist.load()\n \n # Create a new neural network\n digit_classifier = pk.NeuralNetwork([784, 100, 10])\n \n # Train it\n digit_classifier.train(\n training_data=training_data,\n targets=training_targets, \n batch_size=50, \n epochs=1200, \n optimizer=pk.GradientDescent(learning_rate=0.2, decay_rate=0.99), \n testing_data=testing_data, \n testing_targets=testing_targets,\n testing_freq=30,\n decay_freq=20\n )\n \n # Save it\n pk.save(digit_classifier, 'digit_classifier_network.pkl')\n\n # Show performance\n accuracy = digit_classifier.accuracy(training_data, training_targets)\n print('Train Accuracy:', accuracy) \n accuracy = digit_classifier.accuracy(testing_data, testing_targets)\n print('Test Accuracy:', accuracy)\n \n # Plot performance graph\n digit_classifier.plot_performance()\n\n # Show confusion matrix\n digit_classifier.confusion_matrix(training_data, training_targets)\n\n # Assert if it has enough accuracy\n assert digit_classifier.accuracy(training_data, training_targets) > 92\n\n@pktest_graph\ndef test_RMSprop():\n # Load dataset\n training_data, training_targets, testing_data, testing_targets = mnist.load()\n \n # Create a new neural network\n digit_classifier = pk.NeuralNetwork([784, 100, 10])\n \n # Train it\n digit_classifier.train(\n training_data=training_data,\n targets=training_targets, \n batch_size=50, \n epochs=1200, \n optimizer=pk.RMSprop(learning_rate=0.012, decay_rate=0.95), \n testing_data=testing_data, \n testing_targets=testing_targets,\n testing_freq=30,\n decay_freq=15\n )\n \n # Save it\n pk.save(digit_classifier, 'digit_classifier_network.pkl')\n\n # Show performance\n accuracy = digit_classifier.accuracy(training_data, training_targets)\n print('Train Accuracy:', accuracy) \n accuracy = digit_classifier.accuracy(testing_data, testing_targets)\n print('Test Accuracy:', accuracy)\n \n # Plot performance graph\n digit_classifier.plot_performance()\n\n # Show confusion matrix\n digit_classifier.confusion_matrix(training_data, training_targets)\n\n # Assert if it has enough accuracy\n assert digit_classifier.accuracy(training_data, training_targets) > 95\n\n@pktest_graph\ndef test_adam():\n import os.path\n\n 
import numpy as np\n import pykitml as pk\n from pykitml.datasets import mnist\n \n # Download dataset\n if(not os.path.exists('mnist.pkl')): mnist.get()\n\n # Load dataset\n training_data, training_targets, testing_data, testing_targets = mnist.load()\n \n # Create a new neural network\n digit_classifier = pk.NeuralNetwork([784, 100, 10])\n \n # Train it\n digit_classifier.train(\n training_data=training_data,\n targets=training_targets, \n batch_size=50, \n epochs=1200, \n optimizer=pk.Adam(learning_rate=0.012, decay_rate=0.95), \n testing_data=testing_data, \n testing_targets=testing_targets,\n testing_freq=30,\n decay_freq=15\n )\n \n # Save it\n pk.save(digit_classifier, 'digit_classifier_network.pkl')\n\n # Show performance\n accuracy = digit_classifier.accuracy(training_data, training_targets)\n print('Train Accuracy:', accuracy) \n accuracy = digit_classifier.accuracy(testing_data, testing_targets)\n print('Test Accuracy:', accuracy)\n \n # Plot performance graph\n digit_classifier.plot_performance()\n\n # Show confusion matrix\n digit_classifier.confusion_matrix(training_data, training_targets)\n\n # Assert if it has enough accuracy\n assert digit_classifier.accuracy(training_data, training_targets) > 95\n\n@pktest_graph\ndef test_predict_mnist_adam():\n import random\n\n import numpy as np\n import matplotlib.pyplot as plt\n import pykitml as pk\n from pykitml.datasets import mnist\n\n # Load dataset\n training_data, training_targets, testing_data, testing_targets = mnist.load()\n\n # Load the trained network\n digit_classifier = pk.load('digit_classifier_network.pkl')\n\n # Pick a random example from testing data\n index = random.randint(0, 9999)\n\n # Show the test data and the label\n plt.imshow(testing_data[index].reshape(28, 28))\n plt.show()\n print('Label: ', testing_targets[index])\n\n # Show prediction\n digit_classifier.feed(testing_data[index])\n model_output = digit_classifier.get_output_onehot()\n print('Predicted: ', model_output)\n\nif __name__ == '__main__':\n # List of optimizers\n optimizers = [\n 'gradient_descent', 'momentum', 'nesterov',\n 'adagrad', 'RMSprop', 'adam' \n ]\n # Check if the arguments passed to the script are correct\n if(len(sys.argv) != 2 or sys.argv[1] not in optimizers):\n print('Usage: python3 test_mnist.py OPTIMIZER')\n print('List of available optimizers:')\n print(str(optimizers))\n exit()\n \n # If the dataset is not available then download it\n if(not os.path.exists('mnist.pkl')): mnist.get()\n\n # Run the requested optimizer test function\n try:\n locals()['test_'+sys.argv[1]].__wrapped__()\n test_predict_mnist_adam.__wrapped__()\n except AssertionError:\n pass\n"
] | [
[
"matplotlib.pyplot.show"
]
] |
testinground/Proctoring-AI | [
"27b04739fa8f126e3c796ea5e9a21bdfbf48debf"
] | [
"face_detector.py"
] | [
"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Jul 29 17:52:00 2020\n\n@author: hp\n\"\"\"\n\nimport cv2\nimport numpy as np\n\ndef get_face_detector(modelFile = \"models/res10_300x300_ssd_iter_140000.caffemodel\",\n configFile = \"models/deploy.prototxt\"):\n \"\"\"\n Get the face detection caffe model of OpenCV's DNN module\n \n Parameters\n ----------\n modelFile : string, optional\n Path to model file. The default is \"models/res10_300x300_ssd_iter_140000.caffemodel\".\n configFile : string, optional\n Path to config file. The default is \"models/deploy.prototxt\".\n\n Returns\n -------\n model : dnn_Net\n\n \"\"\"\n modelFile = \"models/res10_300x300_ssd_iter_140000.caffemodel\"\n configFile = \"models/deploy.prototxt\"\n model = cv2.dnn.readNetFromCaffe(configFile, modelFile)\n return model\n\ndef find_faces(img, model):\n \"\"\"\n Find the faces in an image\n \n Parameters\n ----------\n img : np.uint8\n Image to find faces from\n model : dnn_Net\n Face detection model\n\n Returns\n -------\n faces : list\n List of coordinates of the faces detected in the image\n\n \"\"\"\n h, w = img.shape[:2]\n blob = cv2.dnn.blobFromImage(cv2.resize(img, (300, 300)), 1.0,\n\t(300, 300), (104.0, 177.0, 123.0))\n model.setInput(blob)\n res = model.forward()\n faces = []\n for i in range(res.shape[2]):\n confidence = res[0, 0, i, 2]\n if confidence > 0.5:\n box = res[0, 0, i, 3:7] * np.array([w, h, w, h])\n (x, y, x1, y1) = box.astype(\"int\")\n faces.append([x, y, x1, y1])\n return faces\n\ndef draw_faces(img, faces):\n \"\"\"\n Draw faces on image\n\n Parameters\n ----------\n img : np.uint8\n Image to draw faces on\n faces : List of face coordinates\n Coordinates of faces to draw\n\n Returns\n -------\n None.\n\n \"\"\"\n for x, y, x1, y1 in faces:\n cv2.rectangle(img, (x, y), (x1, y1), (0, 0, 255), 3)\n "
] | [
[
"numpy.array"
]
] |
heikoschmidt1187/CarND-Advanced-Lane-Lines | [
"671c8d9a08853b4a9c00995a2ace6d25eb478e8f"
] | [
"threshold_par.py"
] | [
"import numpy as np\nimport cv2\nimport matplotlib.pyplot as plt\nimport matplotlib.image as mpimg\nimport glob\n\ndef abs_sobel_threshold(img, orientation='x', kernel_size=3, threshold=(0, 255)):\n \"\"\"\n `orientation` Input for setting the sobel operator gradient orientation (x, y)\n `kernel_size` Input for kernel size of sobel operator\n `threshold` Input tuple for lower and upper threshold\n\n This function calculates a binary image mask according to the absolute\n sobel operation on a given gradient, based on a lower and upper\n threshold.\n\n returns a binary image\n \"\"\"\n gray = cv2.GaussianBlur(img, (5, 5), 0)\n\n # calculate the sobel depending on the orientation\n if orientation == 'x':\n abs_sobel = np.absolute(cv2.Sobel(gray, cv2.CV_64F, 1, 0, \\\n ksize=kernel_size))\n elif orientation == 'y':\n abs_sobel = np.absolute(cv2.Sobel(gray, cv2.CV_64F, 0, 1, \\\n ksize=kernel_size))\n else:\n abs_sobel = np.zeros_like(gray)\n print(\"None\")\n\n # rescale the sobel to uint8 type\n scaled_sobel = np.uint8(255 * abs_sobel / np.max(abs_sobel))\n\n # calculate the binary output with respect to thresholds\n binary_output = np.zeros_like(scaled_sobel)\n binary_output[(scaled_sobel >= threshold[0]) & (scaled_sobel <= threshold[1])] = 1\n\n return binary_output\n\ndef direction_sobel_threshold(img, kernel_size=3, threshold=(0, np.pi / 2)):\n \"\"\"\n `kernel_size` Input for kernel size of sobel operator\n `threshold` Input tuple for lower and upper threshold in rad\n\n This function calculates the gradients and thresholds the direction based\n on given angles\n\n returns a binary image based on the given thresholds\n \"\"\"\n gray = cv2.GaussianBlur(img, (5, 5), 0)\n\n # calculate the sobel\n sobelx = cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=kernel_size)\n sobely = cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize=kernel_size)\n\n # calculate the gradient direction\n absgraddir = np.arctan2(np.absolute(sobely), np.absolute(sobelx))\n\n # calculate the binary output with respect to thresholds\n binary_output = np.zeros_like(absgraddir)\n binary_output[(absgraddir >= threshold[0]) & (absgraddir <= threshold[1])] = 1\n\n # Return the binary image\n return binary_output\n\ndef mag_sobel_threshold(img, kernel_size=3, threshold=(0, 255)):\n \"\"\"\n `kernel_size` Input for kernel size of sobel operator\n `threshold` Input tuple for lower and upper threshold\n\n This function calculates the magnitude of the gradients detected by the\n sobel operator in X and Y direction.\n\n returns a binary image based on the given thresholds\n \"\"\"\n gray = cv2.GaussianBlur(img, (5, 5), 0)\n\n # calculate the sobel\n sobelx = cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=kernel_size)\n sobely = cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize=kernel_size)\n\n # calculate the gradient magnitude\n magnitude = np.sqrt(sobelx**2 + sobely**2)\n\n # rescale to 8 bit\n scale = np.max(magnitude)/255\n magnitude = (magnitude / scale).astype(np.uint8)\n\n # calculate the binary output with respect to thresholds\n binary_output = np.zeros_like(magnitude)\n binary_output[(magnitude >= threshold[0]) & (magnitude <= threshold[1])] = 1\n return binary_output\n\ndef nothing(x):\n pass\n\ncv2.namedWindow('image')\n\"\"\"\ncv2.createTrackbar('Low', 'image', 0, 255, nothing)\ncv2.createTrackbar('High', 'image', 0, 255, nothing)\n\"\"\"\ncv2.createTrackbar('Low', 'image', 0, 255, nothing)\ncv2.createTrackbar('High', 'image', 0, 255, nothing)\n\n#testimages = glob.glob('test_images/*.jpg')\ntestimages = glob.glob('output_images/debug/*.png')\n\nfor 
curImage in testimages:\n\n print(curImage)\n\n img = cv2.imread(curImage)\n img = img[:,:,:3]\n img = cv2.pyrDown(img)\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n hls = cv2.cvtColor(img, cv2.COLOR_BGR2HLS)[:,:,1]\n\n lab = cv2.cvtColor(img, cv2.COLOR_BGR2LAB)[:,:,2]\n\n \"\"\"\n f, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2, 2, figsize=(24, 9))\n ax1.imshow(img)\n ax1.set_title(\"RGB\")\n ax2.imshow(lab[:,:,0], cmap='gray')\n ax2.set_title(\"L\")\n ax3.imshow(lab[:,:,1], cmap='gray')\n ax3.set_title(\"A\")\n ax4.imshow(lab[:,:,2], cmap='gray')\n ax4.set_title(\"B\")\n plt.show()\n \"\"\"\n\n debug_image = np.zeros((360, 640 * 2, 3), dtype=np.uint8)\n debug_image[0:img.shape[0], 0:img.shape[1]] = img\n\n gray = cv2.equalizeHist(gray)\n\n\n while(1):\n\n k = cv2.waitKey(1) & 0xFF\n if k == 27:\n break\n\n low = cv2.getTrackbarPos('Low', 'image')\n high = cv2.getTrackbarPos('High', 'image')\n\n #binary = abs_sobel_threshold(gray, 'y', kernel_size=3, threshold=(low, high))\n #binary = mag_sobel_threshold(gray, kernel_size=3, threshold=(low, high))\n \"\"\"\n binary = np.zeros_like(hls)\n binary[(lab > low) & (lab < high)] = 1\n \"\"\"\n ret, binary = cv2.threshold(gray, thresh=low, maxval=high, type=cv2.THRESH_BINARY)\n bin = np.dstack((binary, binary, binary))\n debug_image[0:bin.shape[0], img.shape[1]:] = bin\n\n cv2.imshow('window', debug_image)\n\n\ncv2.destroyAllWindows()\n"
] | [
[
"numpy.zeros_like",
"numpy.zeros",
"numpy.dstack",
"numpy.max",
"numpy.absolute",
"numpy.sqrt"
]
] |
clebouteiller/landlab | [
"e6f47db76ea0814c4c5a24e695bbafb74c722ff7"
] | [
"landlab/components/overland_flow/generate_overland_flow_deAlmeida.py"
] | [
"\"\"\"Landlab component that simulates overland flow.\n\nThis component simulates overland flow using the 2-D numerical model of\nshallow-water flow over topography using the de Almeida et al., 2012\nalgorithm for storage-cell inundation modeling.\n\n.. codeauthor:: Jordan Adams\n\nExamples\n--------\n>>> import numpy as np\n>>> from landlab import RasterModelGrid\n>>> from landlab.components.overland_flow import OverlandFlow\n\nCreate a grid on which to calculate overland flow.\n\n>>> grid = RasterModelGrid((4, 5))\n\nThe grid will need some data to provide the overland flow component. To\ncheck the names of the fields that provide input to the overland flow\ncomponent use the *input_var_names* class property.\n\n>>> OverlandFlow.input_var_names\n('surface_water__depth', 'topographic__elevation')\n\nCreate fields of data for each of these input variables.\n\n>>> grid.at_node['topographic__elevation'] = np.array([\n... 0., 0., 0., 0., 0.,\n... 1., 1., 1., 1., 1.,\n... 2., 2., 2., 2., 2.,\n... 3., 3., 3., 3., 3.])\n>>> grid.at_node['surface_water__depth'] = np.array([\n... 0. , 0. , 0. , 0. , 0. ,\n... 0. , 0. , 0. , 0. , 0. ,\n... 0. , 0. , 0. , 0. , 0. ,\n... 0.1, 0.1, 0.1, 0.1, 0.1])\n\nInstantiate the `OverlandFlow` component to work on this grid, and run it.\n\n>>> of = OverlandFlow(grid, steep_slopes=True)\n>>> of.run_one_step()\n\nAfter calculating the overland flow, new fields have been added to the\ngrid. Use the *output_var_names* property to see the names of the fields that\nhave been changed.\n\n>>> of.output_var_names\n('surface_water__depth', 'surface_water__discharge', 'water_surface__gradient')\n\nThe `surface_water__depth` field is defined at nodes.\n\n>>> of.var_loc('surface_water__depth')\n'node'\n>>> grid.at_node['surface_water__depth'] # doctest: +NORMALIZE_WHITESPACE\narray([ 1.00000000e-05, 1.00000000e-05, 1.00000000e-05,\n 1.00000000e-05, 1.00000000e-05, 1.00000000e-05,\n 1.00000000e-05, 1.00000000e-05, 1.00000000e-05,\n 1.00000000e-05, 1.00000000e-05, 2.00100000e-02,\n 2.00100000e-02, 2.00100000e-02, 1.00000000e-05,\n 1.00010000e-01, 1.00010000e-01, 1.00010000e-01,\n 1.00010000e-01, 1.00010000e-01])\n\nThe `surface_water__discharge` field is defined at links. Because our initial\ntopography was a dipping plane, there is no water discharge in the horizontal\ndirection, only toward the bottom of the grid.\n\n>>> of.var_loc('surface_water__discharge')\n'link'\n>>> q = grid.at_link['surface_water__discharge'] # doctest: +NORMALIZE_WHITESPACE\n>>> np.all(q[grid.horizontal_links] == 0.)\nTrue\n>>> np.all(q[grid.vertical_links] <= 0.)\nTrue\n\nThe *water_surface__gradient* is also defined at links.\n\n>>> of.var_loc('water_surface__gradient')\n'link'\n>>> grid.at_link['water_surface__gradient'] # doctest: +NORMALIZE_WHITESPACE\narray([ 0. , 0. , 0. , 0. ,\n 0. , 1. , 1. , 1. , 0. ,\n 0. , 0. , 0. , 0. ,\n 0. , 1. , 1. , 1. , 0. ,\n 0. , 0. , 0. , 0. ,\n 0. , 1.1, 1.1, 1.1, 0. ,\n 0. , 0. , 0. , 0. ])\n\"\"\"\nimport numpy as np\nimport scipy.constants\n\nfrom landlab import Component, FieldError\n\nfrom . import _links as links\n\n_SEVEN_OVER_THREE = 7.0 / 3.0\n\n\ndef _active_links_at_node(grid, *args):\n \"\"\"_active_links_at_node([node_ids]) Active links of a node.\n\n .. 
note::\n\n This function returns links that are in *clockwise* order,\n rather than the standard *counterclockwise* ordering that\n landlab uses everywhere else.\n\n Parameters\n ----------\n grid : RasterModelGrid\n A grid.\n node_ids : int or list of ints\n ID(s) of node(s) for which to find connected active links\n\n Returns\n -------\n (4, N) ndarray\n The ids of active links attached to grid nodes with\n *node_ids*. If *node_ids* is not given, return links for all of the\n nodes in the grid. Link ids are listed in clockwise order starting\n with the south link. Diagonal links are never returned.\n\n Examples\n --------\n >>> from landlab import RasterModelGrid\n >>> from landlab.components.overland_flow.generate_overland_flow_deAlmeida import _active_links_at_node\n >>> grid = RasterModelGrid((3, 4))\n >>> grid.links_at_node[5]\n array([ 8, 11, 7, 4])\n >>> _active_links_at_node(grid, (5, 6))\n array([[ 4, 5],\n [ 7, 8],\n [11, 12],\n [ 8, 9]])\n >>> _active_links_at_node(grid)\n array([[-1, -1, -1, -1, -1, 4, 5, -1, -1, 11, 12, -1],\n [-1, -1, -1, -1, -1, 7, 8, 9, -1, -1, -1, -1],\n [-1, 4, 5, -1, -1, 11, 12, -1, -1, -1, -1, -1],\n [-1, -1, -1, -1, 7, 8, 9, -1, -1, -1, -1, -1]])\n\n LLCATS: DEPR LINF NINF\n \"\"\"\n active_links_at_node = grid.links_at_node.copy()\n active_links_at_node[grid.active_link_dirs_at_node == 0] = -1\n active_links_at_node = active_links_at_node[:, (3, 2, 1, 0)]\n\n if len(args) == 0:\n return active_links_at_node.T\n elif len(args) == 1:\n node_ids = np.broadcast_arrays(args[0])[0]\n return active_links_at_node[node_ids, :].T\n else:\n raise ValueError(\"only zero or one arguments accepted\")\n\n\nclass OverlandFlow(Component):\n\n \"\"\"Simulate overland flow using de Almeida approximations.\n\n Landlab component that simulates overland flow using the de Almeida\n et al., 2012 approximations of the 1D shallow water equations to be used\n for 2D flood inundation modeling.\n\n This component calculates discharge, depth and shear stress after some\n precipitation event across any raster grid. Default input file is named\n \"overland_flow_input.txt' and is contained in the\n landlab.components.overland_flow folder.\n\n The primary method of this class is :func:`run_one_step`.\n\n References\n ----------\n **Required Software Citation(s) Specific to this Component**\n\n Adams, J., Gasparini, N., Hobley, D., Tucker, G., Hutton, E., Nudurupati,\n S., Istanbulluoglu, E. (2017). The Landlab v1. 0 OverlandFlow component:\n a Python tool for computing shallow-water flow across watersheds.\n Geoscientific Model Development 10(4), 1645.\n https://dx.doi.org/10.5194/gmd-10-1645-2017\n\n **Additional References**\n\n de Almeida, G., Bates, P., Freer, J., Souvignet, M. (2012). Improving the\n stability of a simple formulation of the shallow water equations for 2-D\n flood modeling. Water Resources Research 48(5)\n https://dx.doi.org/10.1029/2011wr011570\n\n \"\"\"\n\n _name = \"OverlandFlow\"\n\n _unit_agnostic = False\n\n _cite_as = \"\"\"@article{adams2017landlab,\n title={The Landlab v1. 
0 OverlandFlow component: a Python\n tool for computing shallow-water flow across watersheds},\n author={Adams, Jordan M and Gasparini, Nicole M and\n Hobley, Daniel EJ and Tucker, Gregory E and\n Hutton, Eric WH and Nudurupati, Sai S and\n Istanbulluoglu, Erkan},\n journal={Geoscientific Model Development},\n volume={10},\n number={4},\n pages={1645},\n year={2017},\n publisher={Copernicus GmbH}\n }\n \"\"\"\n\n _info = {\n \"surface_water__depth\": {\n \"dtype\": float,\n \"intent\": \"inout\",\n \"optional\": False,\n \"units\": \"m\",\n \"mapping\": \"node\",\n \"doc\": \"Depth of water on the surface\",\n },\n \"surface_water__discharge\": {\n \"dtype\": float,\n \"intent\": \"out\",\n \"optional\": False,\n \"units\": \"m3/s\",\n \"mapping\": \"link\",\n \"doc\": \"Volumetric discharge of surface water\",\n },\n \"topographic__elevation\": {\n \"dtype\": float,\n \"intent\": \"in\",\n \"optional\": False,\n \"units\": \"m\",\n \"mapping\": \"node\",\n \"doc\": \"Land surface topographic elevation\",\n },\n \"water_surface__gradient\": {\n \"dtype\": float,\n \"intent\": \"out\",\n \"optional\": False,\n \"units\": \"-\",\n \"mapping\": \"link\",\n \"doc\": \"Downstream gradient of the water surface.\",\n },\n }\n\n def __init__(\n self,\n grid,\n default_fixed_links=False,\n h_init=0.00001,\n alpha=0.7,\n mannings_n=0.03,\n g=scipy.constants.g,\n theta=0.8,\n rainfall_intensity=0.0,\n steep_slopes=False,\n ):\n \"\"\"Create an overland flow component.\n\n Parameters\n ----------\n grid : RasterModelGrid\n A landlab grid.\n h_init : float, optional\n Thickness of initial thin layer of water to prevent divide by zero\n errors (m).\n alpha : float, optional\n Time step coefficient, described in Bates et al., 2010 and\n de Almeida et al., 2012.\n mannings_n : float, optional\n Manning's roughness coefficient.\n g : float, optional\n Acceleration due to gravity (m/s^2).\n theta : float, optional\n Weighting factor from de Almeida et al., 2012.\n rainfall_intensity : float, optional\n Rainfall intensity. Default is zero.\n steep_slopes : bool, optional\n Modify the algorithm to handle steeper slopes at the expense of\n speed. 
If model runs become unstable, consider setting to True.\n \"\"\"\n super().__init__(grid)\n\n # First we copy our grid\n\n self._h_init = h_init\n self._alpha = alpha\n\n if isinstance(mannings_n, str):\n self._mannings_n = self._grid.at_link[mannings_n]\n else:\n self._mannings_n = mannings_n\n\n self._g = g\n self._theta = theta\n self.rainfall_intensity = rainfall_intensity\n self._steep_slopes = steep_slopes\n\n # Now setting up fields at the links...\n # For water discharge\n try:\n self._q = grid.add_zeros(\n \"surface_water__discharge\",\n at=\"link\",\n units=self._info[\"surface_water__discharge\"][\"units\"],\n )\n\n except FieldError:\n # Field was already set; still, fill it with zeros\n self._q = grid.at_link[\"surface_water__discharge\"]\n self._q.fill(0.0)\n\n # For water depths calculated at links\n try:\n self._h_links = grid.add_zeros(\n \"surface_water__depth\",\n at=\"link\",\n units=self._info[\"surface_water__depth\"][\"units\"],\n )\n except FieldError:\n self._h_links = grid.at_link[\"surface_water__depth\"]\n self._h_links.fill(0.0)\n self._h_links += self._h_init\n\n self._h = grid.at_node[\"surface_water__depth\"]\n self._h += self._h_init\n\n # For water surface slopes at links\n try:\n self._water_surface_slope = grid.add_zeros(\n \"water_surface__gradient\", at=\"link\"\n )\n except FieldError:\n self._water_surface_slope = grid.at_link[\"water_surface__gradient\"]\n self._water_surface_slope.fill(0.0)\n\n # Start time of simulation is at 1.0 s\n self._elapsed_time = 1.0\n\n self._dt = None\n self._dhdt = grid.zeros()\n\n # When we instantiate the class we recognize that neighbors have not\n # been found. After the user either calls self.set_up_neighbor_array\n # or self.overland_flow this will be set to True. This is done so\n # that every iteration of self.overland_flow does NOT need to\n # reinitalize the neighbors and saves computation time.\n self._neighbor_flag = False\n\n # When looking for neighbors, we automatically ignore inactive links\n # by default. However, what about when we want to look at fixed links\n # too? 
By default, we ignore these, but if they are important to your\n # model and will be updated in your driver loop, they can be used by\n # setting the flag in the initialization of the class to 'True'\n self._default_fixed_links = default_fixed_links\n\n # Assiging a class variable to the elevation field.\n self._z = self._grid.at_node[\"topographic__elevation\"]\n\n @property\n def h(self):\n \"\"\"The depth of water at each node.\"\"\"\n return self._h\n\n @property\n def dt(self):\n \"\"\"dt: Component timestep.\"\"\"\n return self._dt\n\n @dt.setter\n def dt(self, dt):\n assert dt > 0\n self._dt = dt\n\n @property\n def rainfall_intensity(self):\n \"\"\"rainfall_intensity: the rainfall rate [m/s]\n\n Must be positive.\n \"\"\"\n return self._rainfall_intensity\n\n @rainfall_intensity.setter\n def rainfall_intensity(self, rainfall_intensity):\n if rainfall_intensity >= 0:\n self._rainfall_intensity = rainfall_intensity\n else:\n raise ValueError(\"Rainfall intensity must be positive\")\n\n def calc_time_step(self):\n \"\"\"Calculate time step.\n\n Adaptive time stepper from Bates et al., 2010 and de Almeida et\n al., 2012\n \"\"\"\n self._dt = (\n self._alpha\n * self._grid.dx\n / np.sqrt(self._g * np.amax(self._grid.at_node[\"surface_water__depth\"]))\n )\n\n return self._dt\n\n def set_up_neighbor_arrays(self):\n \"\"\"Create and initialize link neighbor arrays.\n\n Set up arrays of neighboring horizontal and vertical links that\n are needed for the de Almeida solution.\n \"\"\"\n # First we identify all active links\n\n self._active_ids = links.active_link_ids(\n self._grid.shape, self._grid.status_at_node\n )\n\n self._active_links_at_open_bdy = _active_links_at_node(\n self.grid, self.grid.open_boundary_nodes\n ).transpose()\n\n self._active_links_at_open_bdy = self._active_links_at_open_bdy[\n np.where(self._active_links_at_open_bdy > -1)\n ]\n\n # And then find all horizontal link IDs (Active and Inactive)\n self._horizontal_ids = links.horizontal_link_ids(self._grid.shape)\n\n # And make the array 1-D\n self._horizontal_ids = self._horizontal_ids.flatten()\n\n # Find all horizontal active link ids\n self._horizontal_active_link_ids = links.horizontal_active_link_ids(\n self._grid.shape, self._active_ids\n )\n\n # Now we repeat this process for the vertical links.\n # First find the vertical link ids and reshape it into a 1-D array\n self._vertical_ids = links.vertical_link_ids(self._grid.shape).flatten()\n\n # Find the *active* verical link ids\n self._vertical_active_link_ids = links.vertical_active_link_ids(\n self._grid.shape, self._active_ids\n )\n\n if self._default_fixed_links is True:\n fixed_link_ids = links.fixed_link_ids(\n self._grid.shape, self._grid.status_at_node\n )\n fixed_horizontal_links = links.horizontal_fixed_link_ids(\n self._grid.shape, fixed_link_ids\n )\n fixed_vertical_links = links.vertical_fixed_link_ids(\n self._grid.shape, fixed_link_ids\n )\n self._horizontal_active_link_ids = np.maximum(\n self._horizontal_active_link_ids, fixed_horizontal_links\n )\n self._vertical_active_link_ids = np.maximum(\n self._vertical_active_link_ids, fixed_vertical_links\n )\n self._active_neighbors = find_active_neighbors_for_fixed_links(self._grid)\n\n self._vert_bdy_ids = self._active_links_at_open_bdy[\n links.is_vertical_link(self._grid.shape, self._active_links_at_open_bdy)\n ]\n\n self._vert_bdy_ids = links.nth_vertical_link(\n self._grid.shape, self._vert_bdy_ids\n )\n\n self._horiz_bdy_ids = self._active_links_at_open_bdy[\n 
links.is_horizontal_link(self._grid.shape, self._active_links_at_open_bdy)\n ]\n\n self._horiz_bdy_ids = links.nth_horizontal_link(\n self._grid.shape, self._horiz_bdy_ids\n )\n\n # Using the active vertical link ids we can find the north\n # and south vertical neighbors\n self._north_neighbors = links.vertical_north_link_neighbor(\n self._grid.shape, self._vertical_active_link_ids\n )\n self._south_neighbors = links.vertical_south_link_neighbor(\n self._grid.shape, self._vertical_active_link_ids\n )\n\n # Using the horizontal active link ids, we can find the west and\n # east neighbors\n self._west_neighbors = links.horizontal_west_link_neighbor(\n self._grid.shape, self._horizontal_active_link_ids\n )\n self._east_neighbors = links.horizontal_east_link_neighbor(\n self._grid.shape, self._horizontal_active_link_ids\n )\n\n # replace bdy condition links\n (ids,) = np.where(self._west_neighbors[self._horiz_bdy_ids] == -1)\n ids = self._horiz_bdy_ids[ids]\n self._west_neighbors[ids] = self._horizontal_active_link_ids[ids]\n\n (ids,) = np.where(self._east_neighbors[self._horiz_bdy_ids] == -1)\n ids = self._horiz_bdy_ids[ids]\n self._east_neighbors[ids] = self._horizontal_active_link_ids[ids]\n\n (ids,) = np.where(self._north_neighbors[self._vert_bdy_ids] == -1)\n ids = self._vert_bdy_ids[ids]\n self._north_neighbors[ids] = self._vertical_active_link_ids[ids]\n\n (ids,) = np.where(self._south_neighbors[self._vert_bdy_ids] == -1)\n ids = self._vert_bdy_ids[ids]\n self._south_neighbors[ids] = self._vertical_active_link_ids[ids]\n\n # Set up arrays for discharge in the horizontal & vertical directions.\n self._q_horizontal = np.zeros(\n links.number_of_horizontal_links(self._grid.shape)\n )\n self._q_vertical = np.zeros(links.number_of_vertical_links(self._grid.shape))\n\n # Once the neighbor arrays are set up, we change the flag to True!\n self._neighbor_flag = True\n\n def overland_flow(self, dt=None):\n \"\"\"Generate overland flow across a grid.\n\n For one time step, this generates 'overland flow' across a given grid\n by calculating discharge at each node.\n\n Using the depth slope product, shear stress is calculated at every\n node.\n\n Outputs water depth, discharge and shear stress values through time at\n every point in the input grid.\n \"\"\"\n # DH adds a loop to enable an imposed tstep while maintaining stability\n local_elapsed_time = 0.0\n if dt is None:\n dt = np.inf # to allow the loop to begin\n while local_elapsed_time < dt:\n dt_local = self.calc_time_step()\n # Can really get into trouble if nothing happens but we still run:\n if not dt_local < np.inf:\n break\n if local_elapsed_time + dt_local > dt:\n dt_local = dt - local_elapsed_time\n self._dt = dt_local\n\n # First, we check and see if the neighbor arrays have been\n # initialized\n if self._neighbor_flag is False:\n self.set_up_neighbor_arrays()\n\n # In case another component has added data to the fields, we just\n # reset our water depths, topographic elevations and water\n # discharge variables to the fields.\n self._h = self._grid[\"node\"][\"surface_water__depth\"]\n self._z = self._grid[\"node\"][\"topographic__elevation\"]\n self._q = self._grid[\"link\"][\"surface_water__discharge\"]\n self._h_links = self._grid[\"link\"][\"surface_water__depth\"]\n\n # Here we identify the core nodes and active links for later use.\n self._core_nodes = self._grid.core_nodes\n self._active_links = self._grid.active_links\n\n # Per Bates et al., 2010, this solution needs to find difference\n # between the highest water surface 
in the two cells and the\n # highest bed elevation\n zmax = self._grid.map_max_of_link_nodes_to_link(self._z)\n w = self._h + self._z\n wmax = self._grid.map_max_of_link_nodes_to_link(w)\n hflow = wmax[self._grid.active_links] - zmax[self._grid.active_links]\n\n # Insert this water depth into an array of water depths at the\n # links.\n self._h_links[self._active_links] = hflow\n\n # Now we calculate the slope of the water surface elevation at\n # active links\n self._water_surface__gradient = self._grid.calc_grad_at_link(w)[\n self._grid.active_links\n ]\n\n # And insert these values into an array of all links\n self._water_surface_slope[\n self._active_links\n ] = self._water_surface__gradient\n # If the user chooses to set boundary links to the neighbor value,\n # we set the discharge array to have the boundary links set to\n # their neighbor value\n if self._default_fixed_links is True:\n self._q[self._grid.fixed_links] = self._q[self._active_neighbors]\n\n # Now we can calculate discharge. To handle links with neighbors\n # that do not exist, we will do a fancy indexing trick. Non-\n # existent links or inactive links have an index of '-1', which in\n # Python, looks to the end of a list or array. To accommodate these\n # '-1' indices, we will simply insert an value of 0.0 discharge (in\n # units of L^2/T) to the end of the discharge array.\n self._q = np.append(self._q, [0])\n\n horiz = self._horizontal_ids\n vert = self._vertical_ids\n # Now we calculate discharge in the horizontal direction\n try:\n self._q[horiz] = (\n self._theta * self._q[horiz]\n + (1.0 - self._theta)\n / 2.0\n * (self._q[self._west_neighbors] + self._q[self._east_neighbors])\n - self._g\n * self._h_links[horiz]\n * self._dt\n * self._water_surface_slope[horiz]\n ) / (\n 1\n + self._g\n * self._dt\n * self._mannings_n**2.0\n * abs(self._q[horiz])\n / self._h_links[horiz] ** _SEVEN_OVER_THREE\n )\n\n # ... and in the vertical direction\n self._q[vert] = (\n self._theta * self._q[vert]\n + (1 - self._theta)\n / 2.0\n * (self._q[self._north_neighbors] + self._q[self._south_neighbors])\n - self._g\n * self._h_links[vert]\n * self._dt\n * self._water_surface_slope[vert]\n ) / (\n 1\n + self._g\n * self._dt\n * self._mannings_n**2.0\n * abs(self._q[vert])\n / self._h_links[vert] ** _SEVEN_OVER_THREE\n )\n\n except ValueError:\n self._mannings_n = self._grid[\"link\"][\"mannings_n\"]\n # if manning's n in a field\n # calc discharge in horizontal\n self._q[horiz] = (\n self._theta * self._q[horiz]\n + (1.0 - self._theta)\n / 2.0\n * (self._q[self._west_neighbors] + self._q[self._east_neighbors])\n - self._g\n * self._h_links[horiz]\n * self._dt\n * self._water_surface_slope[horiz]\n ) / (\n 1\n + self._g\n * self._dt\n * self._mannings_n[horiz] ** 2.0\n * abs(self._q[horiz])\n / self._h_links[horiz] ** _SEVEN_OVER_THREE\n )\n\n # ... 
and in the vertical direction\n self._q[vert] = (\n self._theta * self._q[vert]\n + (1 - self._theta)\n / 2.0\n * (self._q[self._north_neighbors] + self._q[self._south_neighbors])\n - self._g\n * self._h_links[vert]\n * self._dt\n * self._water_surface_slope[self._vertical_ids]\n ) / (\n 1\n + self._g\n * self._dt\n * self._mannings_n[vert] ** 2.0\n * abs(self._q[vert])\n / self._h_links[vert] ** _SEVEN_OVER_THREE\n )\n\n # Now to return the array to its original length (length of number\n # of all links), we delete the extra 0.0 value from the end of the\n # array.\n self._q = np.delete(self._q, len(self._q) - 1)\n\n # Updating the discharge array to have the boundary links set to\n # their neighbor\n if self._default_fixed_links is True:\n self._q[self._grid.fixed_links] = self._q[self._active_neighbors]\n\n if self._steep_slopes is True:\n # To prevent water from draining too fast for our time steps...\n # Our Froude number.\n Fr = 1.0\n # Our two limiting factors, the froude number and courant\n # number.\n # Looking a calculated q to be compared to our Fr number.\n calculated_q = (self._q / self._h_links) / np.sqrt(\n self._g * self._h_links\n )\n\n # Looking at our calculated q and comparing it to Courant no.,\n q_courant = self._q * self._dt / self._grid.dx\n\n # Water depth split equally between four links..\n water_div_4 = self._h_links / 4.0\n\n # IDs where water discharge is positive...\n (positive_q,) = np.where(self._q > 0)\n\n # ... and negative.\n (negative_q,) = np.where(self._q < 0)\n\n # Where does our calculated q exceed the Froude number? If q\n # does exceed the Froude number, we are getting supercritical\n # flow and discharge needs to be reduced to maintain stability.\n (Froude_logical,) = np.where((calculated_q) > Fr)\n (Froude_abs_logical,) = np.where(abs(calculated_q) > Fr)\n\n # Where does our calculated q exceed the Courant number and\n # water depth divided amongst 4 links? If the calculated q\n # exceeds the Courant number and is greater than the water\n # depth divided by 4 links, we reduce discharge to maintain\n # stability.\n (water_logical,) = np.where(q_courant > water_div_4)\n (water_abs_logical,) = np.where(abs(q_courant) > water_div_4)\n\n # Where are these conditions met? For positive and negative q,\n # there are specific rules to reduce q. 
This step finds where\n # the discharge values are positive or negative and where\n # discharge exceeds the Froude or Courant number.\n self._if_statement_1 = np.intersect1d(positive_q, Froude_logical)\n self._if_statement_2 = np.intersect1d(negative_q, Froude_abs_logical)\n self._if_statement_3 = np.intersect1d(positive_q, water_logical)\n self._if_statement_4 = np.intersect1d(negative_q, water_abs_logical)\n\n # Rules 1 and 2 reduce discharge by the Froude number.\n self._q[self._if_statement_1] = self._h_links[self._if_statement_1] * (\n np.sqrt(self._g * self._h_links[self._if_statement_1]) * Fr\n )\n\n self._q[self._if_statement_2] = 0.0 - (\n self._h_links[self._if_statement_2]\n * np.sqrt(self._g * self._h_links[self._if_statement_2])\n * Fr\n )\n\n # Rules 3 and 4 reduce discharge by the Courant number.\n self._q[self._if_statement_3] = (\n (self._h_links[self._if_statement_3] * self._grid.dx) / 5.0\n ) / self._dt\n\n self._q[self._if_statement_4] = (\n 0.0\n - (self._h_links[self._if_statement_4] * self._grid.dx / 5.0)\n / self._dt\n )\n\n # Once stability has been restored, we calculate the change in\n # water depths on all core nodes by finding the difference between\n # inputs (rainfall) and the inputs/outputs (flux divergence of\n # discharge)\n self._dhdt = self._rainfall_intensity - self._grid.calc_flux_div_at_node(\n self._q\n )\n\n # Updating our water depths...\n self._h[self._core_nodes] = (\n self._h[self._core_nodes] + self._dhdt[self._core_nodes] * self._dt\n )\n\n # To prevent divide by zero errors, a minimum threshold water depth\n # must be maintained. To reduce mass imbalances, this is set to\n # find locations where water depth is smaller than h_init (default\n # is 0.001) and the new value is self._h_init * 10^-3. This was set\n # as it showed the smallest amount of mass creation in the grid\n # during testing.\n if self._steep_slopes is True:\n self._h[self._h < self._h_init] = self._h_init * 10.0**-3\n\n # And reset our field values with the newest water depth and\n # discharge.\n self._grid.at_node[\"surface_water__depth\"] = self._h\n self._grid.at_link[\"surface_water__discharge\"] = self._q\n #\n #\n # self._helper_q = self._grid.map_upwind_node_link_max_to_node(self._q)\n # self._helper_s = self._grid.map_upwind_node_link_max_to_node(\n # self._water_surface_slope)\n #\n # self._helper_q = self._grid.map_max_of_link_nodes_to_link(self._helper_q)\n # self._helper_s = self._grid.map_max_of_link_nodes_to_link(self._helper_s)\n #\n # self._grid['link']['surface_water__discharge'][\n # self._active_links_at_open_bdy] = self._helper_q[\n # self._active_links_at_open_bdy]\n #\n # self._grid['link']['water_surface__gradient'][\n # self._active_links_at_open_bdy] = self._helper_s[\n # self._active_links_at_open_bdy]\n # Update nodes near boundary locations - nodes adjacent to\n # boundaries may have discharge and water surface slopes\n # artifically reduced due to boundary effects. 
This step removes\n # those errors.\n\n if dt is np.inf:\n break\n local_elapsed_time += self._dt\n\n def run_one_step(self, dt=None):\n \"\"\"Generate overland flow across a grid.\n\n For one time step, this generates 'overland flow' across a given grid\n by calculating discharge at each node.\n\n Using the depth slope product, shear stress is calculated at every\n node.\n\n Outputs water depth, discharge and shear stress values through time at\n every point in the input grid.\n \"\"\"\n self.overland_flow(dt=dt)\n\n def discharge_mapper(self, input_discharge, convert_to_volume=False):\n \"\"\"Maps discharge value from links onto nodes.\n\n This method takes the discharge values on links and determines the\n links that are flowing INTO a given node. The fluxes moving INTO a\n given node are summed.\n\n This method ignores all flow moving OUT of a given node.\n\n This takes values from the OverlandFlow component (by default) in\n units of [L^2/T]. If the convert_to_volume flag is raised as True, this\n method converts discharge to units [L^3/T] - as of Aug 2016, only\n operates for square RasterModelGrid instances.\n\n The output array is of length grid.number_of_nodes and can be used\n with the Landlab imshow_grid plotter.\n\n Returns a numpy array (discharge_vals)\n \"\"\"\n\n discharge_vals = np.zeros(self._grid.number_of_links)\n discharge_vals[:] = input_discharge[:]\n\n if convert_to_volume:\n discharge_vals *= self._grid.dx\n\n discharge_vals = (\n discharge_vals[self._grid.links_at_node] * self._grid.link_dirs_at_node\n )\n\n discharge_vals = discharge_vals.flatten()\n\n discharge_vals[np.where(discharge_vals < 0)] = 0.0\n\n discharge_vals = discharge_vals.reshape(self._grid.number_of_nodes, 4)\n\n discharge_vals = discharge_vals.sum(axis=1)\n\n return discharge_vals\n\n\ndef find_active_neighbors_for_fixed_links(grid):\n \"\"\"Find active link neighbors for every fixed link.\n\n Specialized link ID function used to ID the active links that neighbor\n fixed links in the vertical and horizontal directions.\n\n If the user wants to assign fixed gradients or values to the fixed\n links dynamically, this function identifies the nearest active_link\n neighbor.\n\n Each fixed link can either have 0 or 1 active neighbor. 
This function\n finds if and where that active neighbor is and stores those IDs in\n an array.\n\n Parameters\n ----------\n grid : RasterModelGrid\n A landlab grid.\n\n Returns\n -------\n ndarray of int, shape `(*, )`\n Flat array of links.\n\n\n Examples\n --------\n >>> from landlab import NodeStatus, RasterModelGrid\n >>> from landlab.components.overland_flow.generate_overland_flow_deAlmeida import find_active_neighbors_for_fixed_links\n\n >>> grid = RasterModelGrid((4, 5))\n >>> grid.status_at_node[:5] = NodeStatus.FIXED_GRADIENT\n >>> grid.status_at_node[::5] = NodeStatus.FIXED_GRADIENT\n >>> grid.status_at_node # doctest: +NORMALIZE_WHITESPACE\n array([2, 2, 2, 2, 2,\n 2, 0, 0, 0, 1,\n 2, 0, 0, 0, 1,\n 2, 1, 1, 1, 1], dtype=uint8)\n\n >>> grid.fixed_links\n array([ 5, 6, 7, 9, 18])\n >>> grid.active_links\n array([10, 11, 12, 14, 15, 16, 19, 20, 21, 23, 24, 25])\n\n >>> find_active_neighbors_for_fixed_links(grid)\n array([14, 15, 16, 10, 19])\n\n >>> rmg = RasterModelGrid((4, 7))\n\n >>> rmg.at_node['topographic__elevation'] = rmg.zeros(at='node')\n >>> rmg.at_link['topographic__slope'] = rmg.zeros(at='link')\n >>> rmg.status_at_node[rmg.perimeter_nodes] = rmg.BC_NODE_IS_FIXED_GRADIENT\n >>> find_active_neighbors_for_fixed_links(rmg)\n array([20, 21, 22, 23, 24, 14, 17, 27, 30, 20, 21, 22, 23, 24])\n \"\"\"\n neighbors = links.neighbors_at_link(grid.shape, grid.fixed_links).flat\n return neighbors[np.in1d(neighbors, grid.active_links)]\n"
] | [
[
"numpy.sqrt",
"numpy.append",
"numpy.zeros",
"numpy.broadcast_arrays",
"numpy.intersect1d",
"numpy.in1d",
"numpy.amax",
"numpy.maximum",
"numpy.where"
]
] |
MouvementMondial/OccupancyGridSLAM | [
"6473c2c33025933b937a8ed5b04fb1bcb563ebe0"
] | [
"lib/mapping.py"
] | [
"# -*- coding: utf-8 -*-\n\"\"\"\n@author: Thorsten\n\"\"\"\n\nimport numpy as np\nfrom numba import jit\n\nimport os\nimport sys\nnb_dir = os.path.split(os.getcwd())[0]\nif nb_dir not in sys.path:\n sys.path.append(nb_dir)\n\nfrom lib import bresenham\n\n@jit\ndef addMeasurement(grid, x, y, pos_sensor, offset, resolution, l_occupied, l_free, l_min, l_max):\n \n for i in range(x.size):\n # round points to cells \n xi=int( (x[i,0]-offset[0]) / resolution )\n yi=int( (y[i,0]-offset[1]) / resolution )\n\n # set beam endpoint-cells as occupied\n grid[xi,yi] += l_occupied\n \n # value > threshold? -> clamping \n if grid[xi,yi] > l_max:\n grid[xi,yi] = l_max\n\n # calculate cells between sensor and endpoint as free\n path = bresenham.bresenham2D( ((pos_sensor-offset)/resolution).astype(int), np.array([[xi,yi]]))\n \n # set cells between sensor and endpoint as free\n updateFree(path,grid,l_free,l_min)\n \n@jit(nopython=True)\ndef updateFree(path,grid,l_free,l_min):\n for nr in range(path.shape[0]):\n path_x = int(path[nr,0])\n path_y = int(path[nr,1])\n \n grid[path_x, path_y] += l_free\n \n # value < threshold? -> clamping\n if grid[path_x, path_y] < l_min:\n grid[path_x, path_y] = l_min\n \n@jit(nopython=True)\ndef scan2mapDistance(grid,pcl,offset,resolution):\n distance = 0;\n for i in range(pcl.shape[0]):\n # round points to cells\n xi = int ( (pcl[i,0]-offset[0]) / resolution )\n yi = int ( (pcl[i,1]-offset[1]) / resolution ) \n distance += grid[xi,yi]\n return distance"
] | [
[
"numpy.array"
]
] |
i-aki-y/librosa | [
"a464b336c23a94e00943fc50e936180f503367eb"
] | [
"tests/test_onset.py"
] | [
"#!/usr/bin/env python\n# CREATED:2013-03-11 18:14:30 by Brian McFee <[email protected]>\n# unit tests for librosa.onset\n\nfrom __future__ import print_function\nimport pytest\nfrom contextlib2 import nullcontext as dnr\n\n# Disable cache\nimport os\n\ntry:\n os.environ.pop(\"LIBROSA_CACHE_DIR\")\nexcept:\n pass\n\n\nimport warnings\n\nimport numpy as np\nimport librosa\n\nfrom test_core import srand\n\n__EXAMPLE_FILE = os.path.join(\"tests\", \"data\", \"test1_22050.wav\")\n\n\[email protected](scope=\"module\")\ndef ysr():\n return librosa.load(__EXAMPLE_FILE)\n\n\[email protected](\n \"feature\", [None, librosa.feature.melspectrogram, librosa.feature.chroma_stft]\n)\[email protected](\"n_fft\", [512, 2048])\[email protected](\"hop_length\", [256, 512])\[email protected](\"lag\", [1, 2])\[email protected](\"max_size\", [1, 2])\[email protected](\"detrend\", [False, True])\[email protected](\"center\", [False, True])\[email protected](\"aggregate\", [None, np.mean, np.max])\ndef test_onset_strength_audio(\n ysr, feature, n_fft, hop_length, lag, max_size, detrend, center, aggregate\n):\n\n y, sr = ysr\n oenv = librosa.onset.onset_strength(\n y=y,\n sr=sr,\n S=None,\n detrend=detrend,\n center=center,\n aggregate=aggregate,\n feature=feature,\n n_fft=n_fft,\n hop_length=hop_length,\n lag=lag,\n max_size=max_size,\n )\n\n assert oenv.ndim == 1\n\n S = librosa.feature.melspectrogram(y=y, n_fft=n_fft, hop_length=hop_length)\n\n target_shape = S.shape[-1]\n\n if not detrend:\n assert np.all(oenv >= 0)\n\n assert oenv.shape[-1] == target_shape\n\n\[email protected](raises=librosa.ParameterError)\ndef test_onset_strength_badlag(ysr):\n y, sr = ysr\n librosa.onset.onset_strength(y=y, sr=sr, lag=0)\n\n\[email protected](raises=librosa.ParameterError)\ndef test_onset_strength_badmax(ysr):\n y, sr = ysr\n librosa.onset.onset_strength(y=y, sr=sr, max_size=0)\n\n\[email protected](raises=librosa.ParameterError)\ndef test_onset_strength_noinput():\n librosa.onset.onset_strength(y=None, S=None)\n\n\[email protected](scope=\"module\")\ndef melspec_sr(ysr):\n y, sr = ysr\n S = librosa.feature.melspectrogram(y=y, sr=sr)\n return S, sr\n\n\[email protected](\n \"feature\", [None, librosa.feature.melspectrogram, librosa.feature.chroma_stft]\n)\[email protected](\"n_fft\", [512, 2048])\[email protected](\"hop_length\", [256, 512])\[email protected](\"detrend\", [False, True])\[email protected](\"center\", [False, True])\[email protected](\"aggregate\", [None, np.mean, np.max])\ndef test_onset_strength_spectrogram(\n melspec_sr, feature, n_fft, hop_length, detrend, center, aggregate\n):\n S, sr = melspec_sr\n oenv = librosa.onset.onset_strength(\n y=None,\n sr=sr,\n S=S,\n detrend=detrend,\n center=center,\n aggregate=aggregate,\n feature=feature,\n n_fft=n_fft,\n hop_length=hop_length,\n )\n\n assert oenv.ndim == 1\n\n target_shape = S.shape[-1]\n\n if not detrend:\n assert np.all(oenv >= 0)\n\n assert oenv.shape[-1] == target_shape\n\n\[email protected](\"lag\", [1, 2, 3])\[email protected](\"aggregate\", [np.mean, np.max])\ndef test_onset_strength_multi_noagg(melspec_sr, lag, aggregate):\n\n S, sr = melspec_sr\n # We only test with max_size=1 here to make the sub-band slicing test simple\n odf_multi = librosa.onset.onset_strength_multi(\n S=S, lag=lag, max_size=1, aggregate=False\n )\n odf_mean = librosa.onset.onset_strength_multi(\n S=S, lag=lag, max_size=1, aggregate=aggregate\n )\n\n # With no aggregation, output shape should = input shape\n assert odf_multi.shape == S.shape\n\n # Result should average 
out to the same as mean aggregation\n assert np.allclose(odf_mean, aggregate(odf_multi, axis=0))\n\n\[email protected](scope=\"module\")\ndef channels(melspec_sr):\n S, _ = melspec_sr\n return np.linspace(0, S.shape[0], num=5, dtype=int)\n\n\[email protected](\"lag\", [1, 2, 3])\ndef test_onset_strength_multi(melspec_sr, lag, channels):\n\n S, sr = melspec_sr\n # We only test with max_size=1 here to make the sub-band slicing test simple\n odf_multi = librosa.onset.onset_strength_multi(\n S=S, lag=lag, max_size=1, channels=channels\n )\n\n assert len(odf_multi) == len(channels) - 1\n\n for i, (s, t) in enumerate(zip(channels, channels[1:])):\n odf_single = librosa.onset.onset_strength(S=S[s:t], lag=lag, max_size=1)\n assert np.allclose(odf_single, odf_multi[i])\n\n\[email protected](scope=\"module\", params=[64, 512, 2048])\ndef hop(request):\n return request.param\n\n\[email protected](scope=\"module\", params=[False, True], ids=[\"audio\", \"oenv\"])\ndef oenv(ysr, hop, request):\n\n if request.param:\n y, sr = ysr\n return librosa.onset.onset_strength(y=y, sr=sr, hop_length=hop)\n else:\n return None\n\n\[email protected](\"bt\", [False, True])\[email protected](\"normalize\", [False, True])\ndef test_onset_detect_real(ysr, oenv, hop, bt, normalize):\n\n y, sr = ysr\n onsets = librosa.onset.onset_detect(\n y=y,\n sr=sr,\n onset_envelope=oenv,\n hop_length=hop,\n backtrack=bt,\n normalize=normalize,\n )\n if bt:\n assert np.all(onsets >= 0)\n else:\n assert np.all(onsets > 0)\n\n assert np.all(onsets < len(y) * sr // hop)\n if oenv is not None:\n assert np.all(onsets < len(oenv))\n\n\[email protected](raises=librosa.ParameterError)\ndef test_onset_detect_nosignal():\n librosa.onset.onset_detect(y=None, onset_envelope=None)\n\n\[email protected](\"sr\", [4000])\[email protected](\"y\", [np.zeros(4000), np.ones(4000), -np.ones(4000)])\[email protected](\"hop_length\", [64, 512, 2048])\ndef test_onset_detect_const(y, sr, hop_length):\n\n # Disable padding here\n onsets = librosa.onset.onset_detect(\n y=y, sr=sr, onset_envelope=None, hop_length=hop_length,\n )\n\n # We'll allow one onset at the start of the signal for these examples\n # when y is all-ones, zero-padding induces an onset at the beginning of the\n # signal\n assert len(onsets) == 0 or (y[0] != 0 and len(onsets) == 1)\n\n\[email protected](\n \"units, ctx\",\n [\n (\"frames\", dnr()),\n (\"time\", dnr()),\n (\"samples\", dnr()),\n (\"bad units\", pytest.raises(librosa.ParameterError)),\n ],\n)\[email protected](\"hop_length\", [512, 1024])\ndef test_onset_units(ysr, hop_length, units, ctx):\n\n y, sr = ysr\n\n with ctx:\n b1 = librosa.onset.onset_detect(y=y, sr=sr, hop_length=hop_length)\n b2 = librosa.onset.onset_detect(y=y, sr=sr, hop_length=hop_length, units=units)\n\n t1 = librosa.frames_to_time(b1, sr=sr, hop_length=hop_length)\n\n if units == \"time\":\n t2 = b2\n\n elif units == \"samples\":\n t2 = librosa.samples_to_time(b2, sr=sr)\n\n elif units == \"frames\":\n t2 = librosa.frames_to_time(b2, sr=sr, hop_length=hop_length)\n\n assert np.allclose(t1, t2)\n\n\[email protected](scope=\"module\", params=[False, True], ids=[\"oenv\", \"rms\"])\ndef energy(ysr, hop, request):\n y, sr = ysr\n if request.param:\n return librosa.onset.onset_strength(y=y, sr=sr, hop_length=hop)\n else:\n return librosa.feature.rms(y=y, hop_length=hop)\n\n\ndef test_onset_backtrack(ysr, oenv, hop, energy):\n y, sr = ysr\n\n onsets = librosa.onset.onset_detect(\n y=y, sr=sr, onset_envelope=oenv, hop_length=hop, backtrack=False\n )\n\n # Test 
backtracking\n onsets_bt = librosa.onset.onset_backtrack(onsets, energy)\n\n # Make sure there are no negatives\n assert np.all(onsets_bt >= 0)\n\n # And that we never roll forward\n assert np.all(onsets_bt <= onsets)\n\n # And that the detected peaks are actually minima\n assert np.all(energy[onsets_bt] <= energy[np.maximum(0, onsets_bt - 1)])\n\n\[email protected](raises=librosa.ParameterError)\ndef test_onset_strength_noagg():\n S = np.zeros((3, 3))\n librosa.onset.onset_strength(S=S, aggregate=False)\n\n\[email protected](raises=librosa.ParameterError)\ndef test_onset_strength_badref():\n S = np.zeros((3, 3))\n librosa.onset.onset_strength(S=S, ref=S[:, :2])\n\n\ndef test_onset_strength_multi_ref():\n srand()\n\n # Make a random positive spectrum\n S = 1 + np.abs(np.random.randn(1025, 10))\n\n # Test with a null reference\n null_ref = np.zeros_like(S)\n\n onsets = librosa.onset.onset_strength_multi(\n S=S, ref=null_ref, aggregate=False, center=False\n )\n\n # since the reference is zero everywhere, S - ref = S\n # past the setup phase (first frame)\n assert np.allclose(onsets[:, 1:], S[:, 1:])\n\n\ndef test_onset_detect_inplace_normalize():\n\n # This test will fail if the in-place normalization modifies\n # the input onset envelope\n oenv_in = np.ones(50)\n oenv_in[10] = 2\n oenv_orig = oenv_in.copy()\n\n librosa.onset.onset_detect(onset_envelope=oenv_in, normalize=True)\n\n assert np.allclose(oenv_in, oenv_orig) and oenv_in is not oenv_orig\n"
] | [
[
"numpy.zeros_like",
"numpy.allclose",
"numpy.ones",
"numpy.zeros",
"numpy.random.randn",
"numpy.all",
"numpy.maximum",
"numpy.linspace"
]
] |
littlejgogo/MDCPE-co-training-method-for-hyperspectral-image-classification | [
"b7d367abd97ada77adc45a1120149cf247f9713c"
] | [
"training code/paviau/rnn/test/logitsmulti.py"
] | [
"\nimport tensorflow as tf\nimport cnn_indices\n\ndata = cnn_indices.read_data_sets()\nimport final_index\nimport numpy as np\nsaver = tf.train.import_meta_graph('/home/asdf/Documents/juyan/paper/salinas/cnn/model/NEW/'\n 'CNN0507.ckpt.meta')\nbatch_size = data.valid._num_examples\nwith tf.Session() as sess:\n saver.restore(sess, '/home/asdf/Documents/juyan/paper/salinas/cnn/model/NEW/'\n 'CNN0507.ckpt')\n y = sess.graph.get_tensor_by_name('Softmax:0')\n X = sess.graph.get_operation_by_name('X').outputs[0]\n keep_prob = sess.graph.get_operation_by_name('keep_prob').outputs[0]\n\n batch, Y = data.valid.next_batch_test(batch_size)\n predict_label = sess.run(y, feed_dict={X: batch, keep_prob: 1.0})\npredict_label = np.argmax(predict_label, 1) + 1\ntrue_label = np.argmax(Y, 1) + 1\nevery_class, confusion_mat = final_index.test_data_index(true_label, predict_label, 16)\nnp.savez('/home/asdf/Documents/juyan/paper/salinas/cnn/test/zhibiao0513.npz',\n every_class=every_class, confusion_mat=confusion_mat)\nprint(\"ok\")\n\n\n"
] | [
[
"tensorflow.Session",
"numpy.savez",
"numpy.argmax",
"tensorflow.train.import_meta_graph"
]
] |
galipremsagar/dask | [
"134182e05009dbb20bd8e59ccf8bf771e5d4399a"
] | [
"dask/dataframe/io/parquet/utils.py"
] | [
"import re\n\nimport pandas as pd\n\nfrom ....core import flatten\nfrom ....utils import natural_sort_key\n\n\nclass Engine:\n \"\"\" The API necessary to provide a new Parquet reader/writer \"\"\"\n\n @classmethod\n def read_metadata(\n cls,\n fs,\n paths,\n categories=None,\n index=None,\n gather_statistics=None,\n filters=None,\n **kwargs\n ):\n \"\"\"Gather metadata about a Parquet Dataset to prepare for a read\n\n This function is called once in the user's Python session to gather\n important metadata about the parquet dataset.\n\n Parameters\n ----------\n fs: FileSystem\n paths: List[str]\n A list of paths to files (or their equivalents)\n categories: list, dict or None\n Column(s) containing categorical data.\n index: str, List[str], or False\n The column name(s) to be used as the index.\n If set to ``None``, pandas metadata (if available) can be used\n to reset the value in this function\n gather_statistics: bool\n Whether or not to gather statistics data. If ``None``, we only\n gather statistics data if there is a _metadata file available to\n query (cheaply)\n filters: list\n List of filters to apply, like ``[('x', '>', 0), ...]``.\n **kwargs: dict (of dicts)\n User-specified arguments to pass on to backend.\n Top level key can be used by engine to select appropriate dict.\n\n Returns\n -------\n meta: pandas.DataFrame\n An empty DataFrame object to use for metadata.\n Should have appropriate column names and dtypes but need not have\n any actual data\n statistics: Optional[List[Dict]]\n Either None, if no statistics were found, or a list of dictionaries\n of statistics data, one dict for every partition (see the next\n return value). The statistics should look like the following:\n\n [\n {'num-rows': 1000, 'columns': [\n {'name': 'id', 'min': 0, 'max': 100},\n {'name': 'x', 'min': 0.0, 'max': 1.0},\n ]},\n ...\n ]\n parts: List[object]\n A list of objects to be passed to ``Engine.read_partition``.\n Each object should represent a piece of data (usually a row-group).\n The type of each object can be anything, as long as the\n engine's read_partition function knows how to interpret it.\n \"\"\"\n raise NotImplementedError()\n\n @classmethod\n def read_partition(cls, fs, piece, columns, index, **kwargs):\n \"\"\"Read a single piece of a Parquet dataset into a Pandas DataFrame\n\n This function is called many times in individual tasks\n\n Parameters\n ----------\n fs: FileSystem\n piece: object\n This is some token that is returned by Engine.read_metadata.\n Typically it represents a row group in a Parquet dataset\n columns: List[str]\n List of column names to pull out of that row group\n index: str, List[str], or False\n The index name(s).\n **kwargs:\n Includes `\"kwargs\"` values stored within the `parts` output\n of `engine.read_metadata`. May also include arguments to be\n passed to the backend (if stored under a top-level `\"read\"` key).\n\n Returns\n -------\n A Pandas DataFrame\n \"\"\"\n raise NotImplementedError()\n\n @classmethod\n def initialize_write(\n cls,\n df,\n fs,\n path,\n append=False,\n partition_on=None,\n ignore_divisions=False,\n division_info=None,\n **kwargs\n ):\n \"\"\"Perform engine-specific initialization steps for this dataset\n\n Parameters\n ----------\n df: dask.dataframe.DataFrame\n fs: FileSystem\n path: str\n Destination directory for data. 
Prepend with protocol like ``s3://``\n or ``hdfs://`` for remote data.\n append: bool\n If True, may use existing metadata (if any) and perform checks\n against the new data being stored.\n partition_on: List(str)\n Column(s) to use for dataset partitioning in parquet.\n ignore_divisions: bool\n Whether or not to ignore old divisions when appending. Otherwise,\n overlapping divisions will lead to an error being raised.\n division_info: dict\n Dictionary containing the divisions and corresponding column name.\n **kwargs: dict\n Other keyword arguments (including `index_cols`)\n\n Returns\n -------\n tuple:\n engine-specific instance\n list of filenames, one per partition\n \"\"\"\n raise NotImplementedError\n\n @classmethod\n def write_partition(\n cls, df, path, fs, filename, partition_on, return_metadata, **kwargs\n ):\n \"\"\"\n Output a partition of a dask.DataFrame. This will correspond to\n one output file, unless partition_on is set, in which case, it will\n correspond to up to one file in each sub-directory.\n\n Parameters\n ----------\n df: dask.dataframe.DataFrame\n path: str\n Destination directory for data. Prepend with protocol like ``s3://``\n or ``hdfs://`` for remote data.\n fs: FileSystem\n filename: str\n partition_on: List(str)\n Column(s) to use for dataset partitioning in parquet.\n return_metadata : bool\n Whether to return list of instances from this write, one for each\n output file. These will be passed to write_metadata if an output\n metadata file is requested.\n **kwargs: dict\n Other keyword arguments (including `fmd` and `index_cols`)\n\n Returns\n -------\n List of metadata-containing instances (if `return_metadata` is `True`)\n or empty list\n \"\"\"\n raise NotImplementedError\n\n @classmethod\n def write_metadata(cls, parts, meta, fs, path, append=False, **kwargs):\n \"\"\"\n Write the shared metadata file for a parquet dataset.\n\n Parameters\n ----------\n parts: List\n Contains metadata objects to write, of the type understood by the\n specific implementation\n meta: non-chunk metadata\n Details that do not depend on the specifics of each chunk write,\n typically the schema and pandas metadata, in a format the writer\n can use.\n fs: FileSystem\n path: str\n Output file to write to, usually ``\"_metadata\"`` in the root of\n the output dataset\n append: boolean\n Whether or not to consolidate new metadata with existing (True)\n or start from scratch (False)\n **kwargs: dict\n Other keyword arguments (including `compression`)\n \"\"\"\n raise NotImplementedError()\n\n @classmethod\n def collect_file_metadata(cls, path, fs, file_path):\n \"\"\"\n Collect parquet metadata from a file and set the file_path.\n\n Parameters\n ----------\n path: str\n Parquet-file path to extract metadata from.\n fs: FileSystem\n file_path: str\n Relative path to set as `file_path` in the metadata.\n\n Returns\n -------\n A metadata object. The specific type should be recognized\n by the aggregate_metadata method.\n \"\"\"\n raise NotImplementedError()\n\n @classmethod\n def aggregate_metadata(cls, meta_list, fs, out_path):\n \"\"\"\n Aggregate a list of metadata objects and optionally\n write out the final result as a _metadata file.\n\n Parameters\n ----------\n meta_list: list\n List of metadata objects to be aggregated into a single\n metadata object, and optionally written to disk. The\n specific element type can be engine specific.\n fs: FileSystem\n out_path: str or None\n Directory to write the final _metadata file. 
If None\n is specified, the aggregated metadata will be returned,\n and nothing will be written to disk.\n\n Returns\n -------\n If out_path is None, an aggregate metadata object is returned.\n Otherwise, None is returned.\n \"\"\"\n raise NotImplementedError()\n\n\ndef _parse_pandas_metadata(pandas_metadata):\n \"\"\"Get the set of names from the pandas metadata section\n\n Parameters\n ----------\n pandas_metadata : dict\n Should conform to the pandas parquet metadata spec\n\n Returns\n -------\n index_names : list\n List of strings indicating the actual index names\n column_names : list\n List of strings indicating the actual column names\n storage_name_mapping : dict\n Pairs of storage names (e.g. the field names for\n PyArrow) and actual names. The storage and field names will\n differ for index names for certain writers (pyarrow > 0.8).\n column_indexes_names : list\n The names for ``df.columns.name`` or ``df.columns.names`` for\n a MultiIndex in the columns\n\n Notes\n -----\n This should support metadata written by at least\n\n * fastparquet>=0.1.3\n * pyarrow>=0.7.0\n \"\"\"\n index_storage_names = [\n n[\"name\"] if isinstance(n, dict) else n\n for n in pandas_metadata[\"index_columns\"]\n ]\n index_name_xpr = re.compile(r\"__index_level_\\d+__\")\n\n # older metadatas will not have a 'field_name' field so we fall back\n # to the 'name' field\n pairs = [\n (x.get(\"field_name\", x[\"name\"]), x[\"name\"]) for x in pandas_metadata[\"columns\"]\n ]\n\n # Need to reconcile storage and real names. These will differ for\n # pyarrow, which uses __index_level_\\d+__ for the storage name of indexes.\n # The real name may be None (e.g. `df.index.name` is None).\n pairs2 = []\n for storage_name, real_name in pairs:\n if real_name and index_name_xpr.match(real_name):\n real_name = None\n pairs2.append((storage_name, real_name))\n index_names = [name for (storage_name, name) in pairs2 if name != storage_name]\n\n # column_indexes represents df.columns.name\n # It was added to the spec in pandas 0.21.0+, and implemented\n # in PyArrow 0.8. It was added to fastparquet in 0.3.1.\n column_index_names = pandas_metadata.get(\"column_indexes\", [{\"name\": None}])\n column_index_names = [x[\"name\"] for x in column_index_names]\n\n # Now we need to disambiguate between columns and index names. PyArrow\n # 0.8.0+ allows for duplicates between df.index.names and df.columns\n if not index_names:\n # For PyArrow < 0.8 and any fastparquet version. This relies on the facts that\n # 1. Those versions used the real index name as the index storage name\n # 2. Those versions did not allow for duplicate index / column names\n # So we know that if a name is in index_storage_names, it must be an\n # index name\n if index_storage_names and isinstance(index_storage_names[0], dict):\n # Cannot handle dictionary case\n index_storage_names = []\n index_names = list(index_storage_names) # make a copy\n index_storage_names2 = set(index_storage_names)\n column_names = [\n name for (storage_name, name) in pairs if name not in index_storage_names2\n ]\n else:\n # For newer PyArrows the storage names differ from the index names\n # iff it's an index level. 
Though this is a fragile assumption for\n # other systems...\n column_names = [name for (storage_name, name) in pairs2 if name == storage_name]\n\n storage_name_mapping = dict(pairs2) # TODO: handle duplicates gracefully\n\n return index_names, column_names, storage_name_mapping, column_index_names\n\n\ndef _normalize_index_columns(user_columns, data_columns, user_index, data_index):\n \"\"\"Normalize user and file-provided column and index names\n\n Parameters\n ----------\n user_columns : None, str or list of str\n data_columns : list of str\n user_index : None, str, or list of str\n data_index : list of str\n\n Returns\n -------\n column_names : list of str\n index_names : list of str\n \"\"\"\n specified_columns = user_columns is not None\n specified_index = user_index is not None\n\n if user_columns is None:\n user_columns = list(data_columns)\n elif isinstance(user_columns, str):\n user_columns = [user_columns]\n else:\n user_columns = list(user_columns)\n\n if user_index is None:\n user_index = data_index\n elif user_index is False:\n # When index is False, use no index and all fields should be treated as\n # columns (unless `columns` provided).\n user_index = []\n data_columns = data_index + data_columns\n elif isinstance(user_index, str):\n user_index = [user_index]\n else:\n user_index = list(user_index)\n\n if specified_index and not specified_columns:\n # Only `index` provided. Use specified index, and all column fields\n # that weren't specified as indices\n index_names = user_index\n column_names = [x for x in data_columns if x not in index_names]\n elif specified_columns and not specified_index:\n # Only `columns` provided. Use specified columns, and all index fields\n # that weren't specified as columns\n column_names = user_columns\n index_names = [x for x in data_index if x not in column_names]\n elif specified_index and specified_columns:\n # Both `index` and `columns` provided. 
Use as specified, but error if\n # they intersect.\n column_names = user_columns\n index_names = user_index\n if set(column_names).intersection(index_names):\n raise ValueError(\"Specified index and column names must not intersect\")\n else:\n # Use default columns and index from the metadata\n column_names = data_columns\n index_names = data_index\n\n return column_names, index_names\n\n\ndef _sort_and_analyze_paths(file_list, fs, root=False):\n file_list = sorted(file_list, key=natural_sort_key)\n base, fns = _analyze_paths(file_list, fs, root=root)\n return file_list, base, fns\n\n\ndef _analyze_paths(file_list, fs, root=False):\n \"\"\"Consolidate list of file-paths into parquet relative paths\n\n Note: This function was mostly copied from dask/fastparquet to\n use in both `FastParquetEngine` and `ArrowEngine`.\"\"\"\n\n def _join_path(*path):\n def _scrub(i, p):\n # Convert path to standard form\n # this means windows path separators are converted to linux\n p = p.replace(fs.sep, \"/\")\n if p == \"\": # empty path is assumed to be a relative path\n return \".\"\n if p[-1] == \"/\": # trailing slashes are not allowed\n p = p[:-1]\n if i > 0 and p[0] == \"/\": # only the first path can start with /\n p = p[1:]\n return p\n\n abs_prefix = \"\"\n if path and path[0]:\n if path[0][0] == \"/\":\n abs_prefix = \"/\"\n path = list(path)\n path[0] = path[0][1:]\n elif fs.sep == \"\\\\\" and path[0][1:].startswith(\":/\"):\n # If windows, then look for the \"c:/\" prefix\n abs_prefix = path[0][0:3]\n path = list(path)\n path[0] = path[0][3:]\n\n _scrubbed = []\n for i, p in enumerate(path):\n _scrubbed.extend(_scrub(i, p).split(\"/\"))\n simpler = []\n for s in _scrubbed:\n if s == \".\":\n pass\n elif s == \"..\":\n if simpler:\n if simpler[-1] == \"..\":\n simpler.append(s)\n else:\n simpler.pop()\n elif abs_prefix:\n raise Exception(\"can not get parent of root\")\n else:\n simpler.append(s)\n else:\n simpler.append(s)\n\n if not simpler:\n if abs_prefix:\n joined = abs_prefix\n else:\n joined = \".\"\n else:\n joined = abs_prefix + (\"/\".join(simpler))\n return joined\n\n path_parts_list = [_join_path(fn).split(\"/\") for fn in file_list]\n if root is False:\n basepath = path_parts_list[0][:-1]\n for i, path_parts in enumerate(path_parts_list):\n j = len(path_parts) - 1\n for k, (base_part, path_part) in enumerate(zip(basepath, path_parts)):\n if base_part != path_part:\n j = k\n break\n basepath = basepath[:j]\n l = len(basepath)\n else:\n basepath = _join_path(root).split(\"/\")\n l = len(basepath)\n assert all(\n p[:l] == basepath for p in path_parts_list\n ), \"All paths must begin with the given root\"\n out_list = []\n for path_parts in path_parts_list:\n out_list.append(\n \"/\".join(path_parts[l:])\n ) # use '/'.join() instead of _join_path to be consistent with split('/')\n\n return (\n \"/\".join(basepath),\n out_list,\n ) # use '/'.join() instead of _join_path to be consistent with split('/')\n\n\ndef _flatten_filters(filters):\n \"\"\"Flatten DNF-formatted filters (list of tuples)\"\"\"\n return (\n set(flatten(tuple(flatten(filters, container=list)), container=tuple))\n if filters\n else []\n )\n\n\ndef _aggregate_stats(\n file_path,\n file_row_group_stats,\n file_row_group_column_stats,\n stat_col_indices,\n):\n \"\"\"Utility to aggregate the statistics for N row-groups\n into a single dictionary.\n\n Used by `Engine._construct_parts`\n \"\"\"\n if len(file_row_group_stats) < 1:\n # Empty statistics\n return {}\n elif len(file_row_group_column_stats) == 0:\n assert 
len(file_row_group_stats) == 1\n return file_row_group_stats[0]\n else:\n # Note: It would be better to avoid df_rgs and df_cols\n # construction altogether. It makes it fast to aggregate\n # the statistics for many row groups, but isn't\n # worthwhile for a small number of row groups.\n if len(file_row_group_stats) > 1:\n df_rgs = pd.DataFrame(file_row_group_stats)\n s = {\n \"file_path_0\": file_path,\n \"num-rows\": df_rgs[\"num-rows\"].sum(),\n \"total_byte_size\": df_rgs[\"total_byte_size\"].sum(),\n \"columns\": [],\n }\n else:\n s = {\n \"file_path_0\": file_path,\n \"num-rows\": file_row_group_stats[0][\"num-rows\"],\n \"total_byte_size\": file_row_group_stats[0][\"total_byte_size\"],\n \"columns\": [],\n }\n\n df_cols = None\n if len(file_row_group_column_stats) > 1:\n df_cols = pd.DataFrame(file_row_group_column_stats)\n for ind, name in enumerate(stat_col_indices):\n i = ind * 2\n if df_cols is None:\n s[\"columns\"].append(\n {\n \"name\": name,\n \"min\": file_row_group_column_stats[0][i],\n \"max\": file_row_group_column_stats[0][i + 1],\n }\n )\n else:\n s[\"columns\"].append(\n {\n \"name\": name,\n \"min\": df_cols.iloc[:, i].min(),\n \"max\": df_cols.iloc[:, i + 1].max(),\n }\n )\n return s\n\n\ndef _row_groups_to_parts(\n gather_statistics,\n split_row_groups,\n file_row_groups,\n file_row_group_stats,\n file_row_group_column_stats,\n stat_col_indices,\n make_part_func,\n make_part_kwargs,\n):\n\n # Construct `parts` and `stats`\n parts = []\n stats = []\n if split_row_groups:\n # Create parts from each file,\n # limiting the number of row_groups in each piece\n split_row_groups = int(split_row_groups)\n for filename, row_groups in file_row_groups.items():\n row_group_count = len(row_groups)\n for i in range(0, row_group_count, split_row_groups):\n i_end = i + split_row_groups\n rg_list = row_groups[i:i_end]\n\n part = make_part_func(\n filename,\n rg_list,\n **make_part_kwargs,\n )\n if part is None:\n continue\n\n parts.append(part)\n if gather_statistics:\n stat = _aggregate_stats(\n filename,\n file_row_group_stats[filename][i:i_end],\n file_row_group_column_stats[filename][i:i_end],\n stat_col_indices,\n )\n stats.append(stat)\n else:\n for filename, row_groups in file_row_groups.items():\n\n part = make_part_func(\n filename,\n row_groups,\n **make_part_kwargs,\n )\n if part is None:\n continue\n\n parts.append(part)\n if gather_statistics:\n stat = _aggregate_stats(\n filename,\n file_row_group_stats[filename],\n file_row_group_column_stats[filename],\n stat_col_indices,\n )\n stats.append(stat)\n\n return parts, stats\n"
] | [
[
"pandas.DataFrame"
]
] |
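The `Engine` class in the dask row above is an abstract contract: `read_metadata` runs once and returns `(meta, statistics, parts)`, and `read_partition` is then called per task with one element of `parts`. A minimal sketch of a toy engine satisfying that contract, using CSV files instead of Parquet purely for illustration — the class name and file paths here are hypothetical and not part of dask:

```python
import pandas as pd


class ToyCSVEngine:
    # Illustrative only: treats each CSV file as one partition, mirroring
    # the (meta, statistics, parts) contract documented in Engine.read_metadata.
    @classmethod
    def read_metadata(cls, fs, paths, categories=None, index=None,
                      gather_statistics=None, filters=None, **kwargs):
        meta = pd.read_csv(paths[0], nrows=0)   # empty frame: schema only
        statistics = None                       # no cheap statistics for CSV
        parts = [{"piece": p} for p in paths]   # one "piece" per file
        return meta, statistics, parts

    @classmethod
    def read_partition(cls, fs, piece, columns, index, **kwargs):
        df = pd.read_csv(piece["piece"], usecols=columns or None)
        if index:
            df = df.set_index(index)
        return df


# Exercising the contract by hand (no dask scheduler involved):
# meta, stats, parts = ToyCSVEngine.read_metadata(None, ["a.csv", "b.csv"])
# df0 = ToyCSVEngine.read_partition(None, parts[0], columns=None, index=False)
```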
isaiahnixon/python-google-trends | [
"7d8535885bf4e39c0954172bfe0dae1451c8007a"
] | [
"plot-data.py"
] | [
"import matplotlib.pyplot as plt\nimport matplotlib.dates as mdates\nfrom datetime import datetime as dt\nimport pandas as pd \nimport numpy as np\n\n# Load the csv\ndf1 = pd.read_csv('data/aggregate-daily-values.csv')\ndf2 = pd.read_csv('data/aggregate-daily-values-covid-19.csv')\ndf3 = pd.read_csv('data/aggregate-daily-values-cats.csv')\n\ntime_values1 = []\ninterest_values1 = []\n\ntime_values2 = []\ninterest_values2 = []\n\ntime_values3 = []\ninterest_values3 = []\n\nfor i, row in df1.iterrows():\n\ttime_values1.append(mdates.date2num(dt.strptime(row['date'], '%Y-%m-%d'))) #dates\n\t# time_values.append(mdates.date2num(dt.strptime(row['date'], '%Y-%m-%d %H:%M:%S'))) #datetime\n\tinterest_values1.append(row['interest'])\n\nfor i, row in df2.iterrows():\n\ttime_values2.append(mdates.date2num(dt.strptime(row['date'], '%Y-%m-%d'))) #dates\n\t# time_values.append(mdates.date2num(dt.strptime(row['date'], '%Y-%m-%d %H:%M:%S'))) #datetime\n\tinterest_values2.append(row['interest'])\n\nfor i, row in df3.iterrows():\n\ttime_values3.append(mdates.date2num(dt.strptime(row['date'], '%Y-%m-%d'))) #dates\n\t# time_values.append(mdates.date2num(dt.strptime(row['date'], '%Y-%m-%d %H:%M:%S'))) #datetime\n\tinterest_values3.append(row['interest'])\n\nfig, ax = plt.subplots()\n\nax.set_title('Interest in COVID-19 (Google Trends)')\nax.set_ylabel('Interest')\nax.plot_date(time_values1, interest_values1, linestyle='-', marker='o', color='blue', label='Toilet Paper')\nax.plot_date(time_values2, interest_values2, linestyle='-', marker='o', color='black', label='COVID-19')\nax.plot_date(time_values3, interest_values3, linestyle='-', marker='o', color='red', label='Cats')\n\n\n# For data within the same day.\n#hfmt = mdates.DateFormatter('%H:%M:%S')\n#ax.set_xlabel('Time')\n\n# For larger data sets.\nhfmt = mdates.DateFormatter('%b %d %Y')\nax.set_xlabel('Date')\n\nax.xaxis.set_major_formatter(hfmt)\nplt.gcf().autofmt_xdate()\nplt.legend()\nplt.show()\n"
] | [
[
"matplotlib.pyplot.legend",
"pandas.read_csv",
"matplotlib.pyplot.gcf",
"matplotlib.dates.DateFormatter",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.show"
]
] |
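The plot-data.py row above converts dates with three identical `strptime`/`date2num` loops. As a hedged alternative sketch (same files and column names assumed; nothing here is from the original repo), pandas can parse the dates at load time, and matplotlib accepts `datetime64` values directly, which collapses the three loops into one:

```python
import matplotlib.pyplot as plt
import pandas as pd

# Same CSVs and 'date'/'interest' columns as plot-data.py above.
series = [
    ('data/aggregate-daily-values.csv', 'Toilet Paper', 'blue'),
    ('data/aggregate-daily-values-covid-19.csv', 'COVID-19', 'black'),
    ('data/aggregate-daily-values-cats.csv', 'Cats', 'red'),
]

fig, ax = plt.subplots()
for path, label, color in series:
    df = pd.read_csv(path, parse_dates=['date'])
    # matplotlib understands datetime64 directly; no date2num loop needed
    ax.plot(df['date'], df['interest'], linestyle='-', marker='o',
            color=color, label=label)

ax.set_title('Interest in COVID-19 (Google Trends)')
ax.set_xlabel('Date')
ax.set_ylabel('Interest')
fig.autofmt_xdate()
ax.legend()
plt.show()
```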
slettner/jina | [
"4140961c62359e3acd540a6d88931665c6313824"
] | [
"tests/unit/drivers/test_kv_search_driver.py"
] | [
"from typing import Optional, Iterable\n\nimport numpy as np\nimport pytest\n\nfrom jina import Document, DocumentArray\nfrom jina.drivers.search import KVSearchDriver\nfrom jina.executors.indexers import BaseKVIndexer\nfrom jina.types.ndarray.generic import NdArray\n\n\nclass MockIndexer(BaseKVIndexer):\n def add(\n self, keys: Iterable[str], values: Iterable[bytes], *args, **kwargs\n ) -> None:\n pass\n\n def query(self, keys: Iterable[str]) -> Optional[bytes]:\n values = []\n for k in keys:\n values.append(self.db.get(k, None))\n return values\n\n def get_query_handler(self):\n pass\n\n def get_add_handler(self):\n pass\n\n def get_create_handler(self):\n pass\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.db = {}\n doc_ids = ['1', '2', '3', '4']\n doc_ids = [item * 16 for item in doc_ids]\n for doc_id in doc_ids:\n with Document() as doc:\n doc.id = doc_id\n doc.embedding = np.array([doc.id])\n self.db[doc.id] = doc.SerializeToString()\n\n\nclass SimpleKVSearchDriver(KVSearchDriver):\n def __init__(self, docs=None, traversal_paths=['r'], *args, **kwargs):\n super().__init__(traversal_paths=traversal_paths, *args, **kwargs)\n self._docs = docs\n\n @property\n def docs(self):\n return self._docs\n\n @property\n def exec_fn(self):\n return self._exec_fn\n\n\[email protected](scope='function')\ndef document():\n # 1-D embedding\n # doc: 0\n # - chunk: 1\n # - chunk: 2\n # - chunk: 3\n # - chunk: 4\n # - chunk: 5 - will be missing from KV indexer\n doc = Document()\n doc.id = '0' * 16\n for c in range(5):\n with Document() as chunk:\n chunk.id = str(c + 1) * 16\n doc.chunks.add(chunk)\n return doc\n\n\[email protected](scope='function')\ndef document_with_matches_on_chunks():\n # 1-D embedding\n # doc: 0\n # - chunk: 1\n # - match: 2\n # - match: 3\n # - match: 4\n # - match: 5 - will be missing from KV indexer\n # - match: 6 - will be missing from KV indexer\n with Document() as doc:\n doc.id = '0' * 16\n with Document() as chunk:\n chunk.id = '1' * 16\n for m in range(5):\n with Document() as match:\n match.id = str(m + 2) * 16\n match.score.value = 1.0\n chunk.matches.append(match)\n doc.chunks.append(chunk)\n return doc\n\n\ndef test_vectorsearch_driver_mock_indexer_apply_all(document):\n driver = SimpleKVSearchDriver()\n\n executor = MockIndexer()\n driver.attach(executor=executor, runtime=None)\n\n dcs = list(document.chunks)\n assert len(dcs) == 5\n for chunk in dcs:\n assert chunk.embedding is None\n\n driver._apply_all([DocumentArray(document.chunks)])\n\n dcs = list(document.chunks)\n\n # chunk idx: 5 had no matched and is removed as missing idx\n assert len(dcs) == 4\n for chunk in dcs:\n assert chunk.embedding is not None\n embedding_array = chunk.embedding\n np.testing.assert_equal(embedding_array, np.array([chunk.id]))\n\n\ndef test_vectorsearch_driver_mock_indexer(document):\n dcs = list(document.chunks)\n assert len(dcs) == 5\n for chunk in dcs:\n assert chunk.embedding is None\n\n driver = SimpleKVSearchDriver(\n docs=DocumentArray([document]), traversal_paths=('c',)\n )\n\n executor = MockIndexer()\n driver.attach(executor=executor, runtime=None)\n\n driver()\n\n # chunk idx: 5 had no matched and is removed as missing idx\n dcs = list(document.chunks)\n assert len(dcs) == 4\n for chunk in dcs:\n assert chunk.embedding is not None\n embedding_array = chunk.embedding\n np.testing.assert_equal(embedding_array, np.array([chunk.id]))\n\n\ndef test_vectorsearch_driver_mock_indexer_with_matches_on_chunks(\n 
document_with_matches_on_chunks,\n):\n driver = SimpleKVSearchDriver(\n docs=DocumentArray([document_with_matches_on_chunks]), traversal_paths=('cm',)\n )\n executor = MockIndexer()\n driver.attach(executor=executor, runtime=None)\n\n driver()\n\n dcs = list(document_with_matches_on_chunks.chunks)\n assert len(dcs) == 1\n chunk = dcs[0]\n matches = list(chunk.matches)\n assert len(matches) == 3\n for match in matches:\n assert NdArray(match.embedding).value is not None\n embedding_array = NdArray(match.embedding).value\n np.testing.assert_equal(embedding_array, np.array([match.id]))\n"
] | [
[
"numpy.array"
]
] |
shkolnick-kun/kalman_h_infinity_filters | [
"4e76c38d91d5cb44e5f43f675aced4b917a5dbfd"
] | [
"EKHFPost.py"
] | [
"# -*- coding: utf-8 -*-\n# pylint: disable=invalid-name,too-many-instance-attributes, too-many-arguments\n\"\"\"\nCopyright 2019 Paul A Beltyukov\nCopyright 2015 Roger R Labbe Jr.\n\nFilterPy library.\nhttp://github.com/rlabbe/filterpy\n\nDocumentation at:\nhttps://filterpy.readthedocs.org\n\nSupporting book at:\nhttps://github.com/rlabbe/Kalman-and-Bayesian-Filters-in-Python\n\nThis is licensed under an MIT license. See the readme.MD file\nfor more information.\n\"\"\"\nfrom copy import deepcopy\nimport numpy as np\nfrom numpy import dot, eye, outer\nfrom scipy.stats import chi2\nimport scipy.linalg as linalg\nfrom filterpy.kalman import ExtendedKalmanFilter\n\nclass ExtendedKalmanHinfFilterPosterior(ExtendedKalmanFilter):\n def __init__(self, dim_x, dim_z, dim_u=0, alpha = 0.01, eps_mul=1.0):\n ExtendedKalmanFilter.__init__(self, dim_x, dim_z, dim_u)\n self.beta_n = chi2.ppf(1.0 - alpha, dim_z)\n self._eps_mul = eps_mul\n \n def update(self, z, HJacobian, Hx, R=None, args=(), hx_args=(),\n residual=np.subtract):\n \"\"\" Performs the update innovation of the extended Kalman/Hinfinity \n filter with posterior residuals used for Hinfinity correction.\n\n Parameters\n ----------\n\n z : np.array\n measurement for this step.\n If `None`, posterior is not computed\n\n HJacobian : function\n function which computes the Jacobian of the H matrix (measurement\n function). Takes state variable (self.x) as input, returns H.\n\n Hx : function\n function which takes as input the state variable (self.x) along\n with the optional arguments in hx_args, and returns the measurement\n that would correspond to that state.\n\n R : np.array, scalar, or None\n Optionally provide R to override the measurement noise for this\n one call, otherwise self.R will be used.\n\n args : tuple, optional, default (,)\n arguments to be passed into HJacobian after the required state\n variable. for robot localization you might need to pass in\n information about the map and time of day, so you might have\n `args=(map_data, time)`, where the signature of HCacobian will\n be `def HJacobian(x, map, t)`\n\n hx_args : tuple, optional, default (,)\n arguments to be passed into Hx function after the required state\n variable.\n\n residual : function (z, z2), optional\n Optional function that computes the residual (difference) between\n the two measurement vectors. If you do not provide this, then the\n built in minus operator will be used. 
You will normally want to use\n the built in unless your residual computation is nonlinear (for\n example, if they are angles)\n \"\"\"\n if z is None:\n self.z = np.array([[None]*self.dim_z]).T\n self.x_post = self.x.copy()\n self.P_post = self.P.copy()\n return\n\n if not isinstance(args, tuple):\n args = (args,)\n\n if not isinstance(hx_args, tuple):\n hx_args = (hx_args,)\n\n if R is None:\n R = self.R\n elif np.isscalar(R):\n R = eye(self.dim_z) * R\n\n if np.isscalar(z) and self.dim_z == 1:\n z = np.asarray([z], float)\n \n H = HJacobian(self.x, *args)\n hx = Hx(self.x, *hx_args)\n self.y = residual(z, hx)\n\n PHT = self.P.dot(H.T)\n self.S = H.dot(PHT) + R\n self.SI = linalg.inv(self.S)\n \n #Now we may update self.K, self.P, self.y, self.x\n self.K = PHT.dot(self.SI)\n x = self.x + dot(self.K, self.y)\n \n # P = (I-KH)P(I-KH)' + KRK' is more numerically stable\n # and works for non-optimal K vs the equation\n # P = (I-KH)P usually seen in the literature.\n I_KH = self._I - dot(self.K, H)\n self.P = dot(I_KH, self.P).dot(I_KH.T) + dot(self.K, R).dot(self.K.T)\n \n #Will test for filter divergence\n H_hat = HJacobian(x, *args)\n hx_hat = Hx(x, *hx_args)\n eta = residual(z, hx_hat)\n \n PHT = self.P.dot(H_hat.T)\n S = H_hat.dot(PHT) + R\n SI = linalg.inv(S)\n \n thr = self.beta_n\n if dot(eta.T, dot(SI, eta)) > thr: \n #Divergence detected, H-infinity correction needed\n A = outer(eta, eta.T)/thr - S\n \n H_tilde = dot(H_hat, I_KH)\n PHT = dot(self.P, H_tilde.T)\n C = dot(H_hat, PHT)\n \n D = PHT.dot(linalg.pinv(C))\n newP = self.P + D.dot(A.dot(D.T))\n PHT = newP.dot(H.T)\n newS = H.dot(PHT) + R\n #Check H-infinity correction quality\n ev = np.linalg.eigvals(newS)\n eps = np.finfo(ev.dtype).eps * self._eps_mul\n if np.all(ev > eps * np.max(ev)):\n self.P = newP\n #Recompute self.S and self.SI for debug purposes\n self.S = newS\n self.SI = linalg.inv(self.S)\n #Need to recompute self.K and self.x\n self.K = dot(dot(self.P, H.T), linalg.inv(R))\n x = self.x + dot(self.K, self.y)\n\n self.x = x\n \n # set to None to force recompute\n self._log_likelihood = None\n self._likelihood = None\n self._mahalanobis = None\n\n # save measurement and posterior state\n self.z = deepcopy(z)\n self.x_post = self.x.copy()\n self.P_post = self.P.copy()\n \n def predict_update(self, z, HJacobian, Hx, args=(), hx_args=(), u=0):\n \"\"\" Performs the predict/update innovation of the extended Kalman\n filter.\n\n Parameters\n ----------\n\n z : np.array\n measurement for this step.\n If `None`, only predict step is performed.\n\n HJacobian : function\n function which computes the Jacobian of the H matrix (measurement\n function). Takes state variable (self.x) as input, along with the\n optional arguments in args, and returns H.\n\n Hx : function\n function which takes as input the state variable (self.x) along\n with the optional arguments in hx_args, and returns the measurement\n that would correspond to that state.\n\n args : tuple, optional, default (,)\n arguments to be passed into HJacobian after the required state\n variable.\n\n hx_args : tuple, optional, default (,)\n arguments to be passed into Hx after the required state\n variable.\n\n u : np.array or scalar\n optional control vector input to the filter.\n \"\"\"\n self.predict(u)\n self.update(z, HJacobian, Hx, self.R, args, hx_args, residual=np.subtract) \n"
] | [
[
"numpy.eye",
"scipy.stats.chi2.ppf",
"numpy.finfo",
"numpy.asarray",
"scipy.linalg.inv",
"numpy.linalg.eigvals",
"scipy.linalg.pinv",
"numpy.max",
"numpy.array",
"numpy.dot",
"numpy.isscalar",
"numpy.outer"
]
] |
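EKHFPost.py above gates the H-infinity correction on a chi-square test: `beta_n = chi2.ppf(1 - alpha, dim_z)` bounds the normalized posterior residual `eta' S^-1 eta`, and the covariance is only inflated when that bound is exceeded. A hedged usage sketch on a toy constant-velocity model — the model, noise values, and measurement sequence are invented for illustration; it assumes numpy, scipy, and filterpy are installed and the class is importable from the EKHFPost module above:

```python
import numpy as np
from EKHFPost import ExtendedKalmanHinfFilterPosterior

# Toy model: state x = [position, velocity]^T, measuring position only.
def HJacobian(x):
    return np.array([[1.0, 0.0]])       # d(measurement)/d(state)

def Hx(x):
    return np.array([[x[0, 0]]])        # predicted measurement, kept 2-D

ekhf = ExtendedKalmanHinfFilterPosterior(dim_x=2, dim_z=1, alpha=0.01)
ekhf.x = np.array([[0.0], [1.0]])                # start at 0 with unit velocity
ekhf.F = np.array([[1.0, 1.0], [0.0, 1.0]])      # constant-velocity transition
ekhf.P *= 10.0                                   # loose initial covariance
ekhf.R *= 0.5                                    # measurement noise
ekhf.Q *= 0.01                                   # process noise

for z in [1.1, 2.0, 2.9, 12.0]:                  # last value is an outlier
    ekhf.predict_update(np.array([[z]]), HJacobian, Hx)
    print(ekhf.x.ravel())                        # the outlier should trip the chi2 test
```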
Ronak1958/blog | [
"b477bda7641970ed1f1438994aa7a084c921b898"
] | [
"docs/downloads/code/digitize-graph/digitize-data.py"
] | [
"from pynput import mouse\n\nclass MyException(Exception):pass\n\nX = []\nY = []\nNumberOfMouseClicks = 0\nprint('Click Origin')\n\ndef on_click(x, y, button, pressed):\n button = str(button)\n global NumberOfMouseClicks\n\n NumberOfMouseClicks = NumberOfMouseClicks + 1\n if NumberOfMouseClicks==1:\n print('Click Top Right') \n if NumberOfMouseClicks==3:\n print('Click data points. Right-click to end.')\n \n X.append(x)\n Y.append(y)\n \n if button!='Button.left':\n raise MyException(button)\n\n\ndef plot_data(X, Y, Xmin, Xmax, Ymin, Ymax):\n import matplotlib.pyplot as plt\n\n plt.plot(X,Y,'b-')\n plt.xlim((Xmin, Xmax))\n plt.ylim((Ymin, Ymax))\n plt.show() \n\n\ndef main(X,Y):\n with mouse.Listener(on_click=on_click) as listener:\n try:\n listener.join()\n except MyException as e:\n pass\n\n # drop duplicates\n X = X[::2]\n Y = Y[::2]\n\n # input boundaries\n Xmin = float(input('Input X-min: '))\n Xmax = float(input('Input X-max: '))\n Ymin = float(input('Input Y-min: '))\n Ymax = float(input('Input Y-max: '))\n\n # define scales from data\n origin = [X[0],Y[0]]\n topRight = [X[1],Y[1]]\n XminScale = origin[0]\n XmaxScale = topRight[0]\n YminScale = origin[1]\n YmaxScale = topRight[1]\n\n # drop extras\n X = X[2:-1]\n Y = Y[2:-1]\n\n # scale\n ## (old_value - old_min) / (old_max - old_min) * (new_max - new_min) + new_min\n Xplot = [(i - XminScale) / (XmaxScale - XminScale) * (Xmax - Xmin) + Xmin for i in X]\n Yplot = [(i - YminScale) / (YmaxScale - YminScale) * (Ymax - Ymin) + Ymin for i in Y]\n\n # print outputs\n print('Origin: {}'.format([round(i, 2) for i in origin]))\n print('Top Right: {}'.format([round(i, 2) for i in topRight]))\n print('X: {}'.format([round(i, 2) for i in Xplot]))\n print('Y: {}'.format([round(i, 2) for i in Yplot]))\n\n # plot\n plot_data(Xplot, Yplot, Xmin, Xmax, Ymin, Ymax)\n\n\nif __name__ == '__main__':\n main(X,Y)"
] | [
[
"matplotlib.pyplot.plot",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.xlim"
]
] |
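The only arithmetic in digitize-data.py is the linear rescaling written in its comment, `(old_value - old_min) / (old_max - old_min) * (new_max - new_min) + new_min`. A small non-interactive sketch (the helper name is hypothetical) that factors it out and checks one case:

```python
def rescale(value, old_min, old_max, new_min, new_max):
    # Linear map used in digitize-data.py to turn pixel coordinates
    # into axis coordinates.
    return (value - old_min) / (old_max - old_min) * (new_max - new_min) + new_min

# A click at pixel x=150, between an origin pixel at 100 and a top-right
# pixel at 300, on an x-axis labelled 0..10, digitizes to 2.5:
assert rescale(150, 100, 300, 0.0, 10.0) == 2.5
```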
Komutsou/StructPy | [
"6b25b369ff14b31dbff4eb2cb4d6c43963ec7b3b"
] | [
"Examples/In progress/CEE213 CP4.py"
] | [
"import numpy as np\nimport cross_sections as xs\n\nxs1 = xs.IBeam(1, 1, 0.1, 0.1)\n\nL = 10\np = 1\nE = 29000\n\ndef constant(x, **kwargs):\n\treturn 1\n\ndef linearup(s, **kwargs):\n\treturn x\n\nload = constant\n\ndef simpsons(f, a, b, n): #function, start, stop, intervals\n\tif n % 2 == 0:\n\t\th = (b-a)/n\n\t\tk = 0.0\n\t\tx = a + h\n\t\t\n\t\tfor i in range(1, int(n/2) + 1):\n\t\t\tk += 4*f(x)\n\t\t\tx += 2*h\n\t\t\n\t\tx = a + 2*h\n\t\tfor i in range(1, n//2):\n\t\t\tk += 2*f(x)\n\t\t\tx += 2*h\n\t\treturn (h/3)*(f(a) + f(b) + k)\n\telse:\n\t\tprint('n must be even')\n\nI0 = lambda x: p * load(x, L=L)\nI1 = lambda x: p * (L-x) * load(x, L=L)\nI2 = lambda x: p * (L-x)**2 * load(x, L=L)\nI3 = lambda x: p * (L-x)**3 * load(x, L=L)\n\nInt0 = simpsons(I0, 0, L, 100)\nInt1 = simpsons(I0, 0, L, 100)\nInt2 = simpsons(I0, 0, L, 100)\nInt3 = -simpsons(I0, 0, L, 100)\n\nz = np.array([Int0, Int1, Int2, Int3])\n\na = L\nb = L/(E * xs1.Ix)\nc = L**2/(2 * E * xs1.Ix)\nd = L**3/(6 * E * xs1.Ix)\n\nB = np.matrix([[1, 0, 0, 0, -1, 0, 0, 0], \n\t\t\t\t\t\t\t [a, 1, 0, 0, 0, -1, 0, 0],\n\t\t\t\t\t\t\t [c, b, 1, 0, 0, 0, -1, 0],\n\t\t\t\t\t\t\t [-d, -c, -a, 1, 0, 0, 0, -1]])\n\t\t\t\t\t\t\t \nfixed = [1, 1, 0, 0]\nfree = [0, 0, 1, 1]\n\nBC = np.array(fixed + free)\n\nC = B[:, BC==1]\n\ns = np.linalg.solve(C, z)\n"
] | [
[
"numpy.array",
"numpy.matrix",
"numpy.linalg.solve"
]
] |
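The `simpsons` routine in CEE213 CP4.py is the standard composite Simpson's rule, which is exact for polynomials up to degree three. A self-contained sanity check follows — a lightly restructured copy, so it runs without the script's `cross_sections` import, and with the odd-`n` case raised as an error rather than printed:

```python
def simpsons(f, a, b, n):
    # Composite Simpson's rule, as in CEE213 CP4.py; n must be even.
    if n % 2 != 0:
        raise ValueError('n must be even')
    h = (b - a) / n
    k = 0.0
    x = a + h
    for _ in range(1, n // 2 + 1):   # odd interior points, weight 4
        k += 4 * f(x)
        x += 2 * h
    x = a + 2 * h
    for _ in range(1, n // 2):       # even interior points, weight 2
        k += 2 * f(x)
        x += 2 * h
    return (h / 3) * (f(a) + f(b) + k)

# Simpson's rule is exact for cubics: integral of x**3 over [0, 2] is 4.0
print(simpsons(lambda x: x**3, 0.0, 2.0, 100))  # ~4.0
```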
kiminh/tequila | [
"464085265e125222c63e65446861e9c0a2428bab"
] | [
"src/tequila/quantumchemistry/qc_base.py"
] | [
"from dataclasses import dataclass\nfrom tequila import TequilaException, BitString, TequilaWarning\nfrom tequila.hamiltonian import QubitHamiltonian\n\nfrom tequila.circuit import QCircuit, gates\nfrom tequila.objective.objective import Variable, Variables, ExpectationValue\n\nfrom tequila.simulators.simulator_api import simulate\nfrom tequila.utils import to_float\n\nimport typing, numpy, numbers\nfrom itertools import product\n\nimport openfermion\nfrom openfermion.hamiltonians import MolecularData\n\nimport warnings\n\n\ndef prepare_product_state(state: BitString) -> QCircuit:\n \"\"\"Small convenience function\n\n Parameters\n ----------\n state :\n product state encoded into a bitstring\n state: BitString :\n \n\n Returns\n -------\n type\n unitary circuit which prepares the product state\n\n \"\"\"\n result = QCircuit()\n for i, v in enumerate(state.array):\n if v == 1:\n result += gates.X(target=i)\n return result\n\n\n@dataclass\nclass ParametersQC:\n \"\"\"Specialization of ParametersHamiltonian\"\"\"\n basis_set: str = '' # Quantum chemistry basis set\n geometry: str = '' # geometry of the underlying molecule (units: Angstrom!),\n # this can be a filename leading to an .xyz file or the geometry given as a string\n description: str = ''\n multiplicity: int = 1\n charge: int = 0\n closed_shell: bool = True\n name: str = \"molecule\"\n\n @property\n def filename(self):\n \"\"\" \"\"\"\n return \"{}_{}\".format(self.name, self.basis_set)\n\n @property\n def molecular_data_param(self) -> dict:\n \"\"\":return: Give back all parameters for the MolecularData format from openfermion as dictionary\"\"\"\n return {'basis': self.basis_set, 'geometry': self.get_geometry(), 'description': self.description,\n 'charge': self.charge, 'multiplicity': self.multiplicity, 'filename': self.filename\n }\n\n @staticmethod\n def format_element_name(string):\n \"\"\"OpenFermion uses case sensitive hash tables for chemical elements\n I.e. you need to name Lithium: 'Li' and 'li' or 'LI' will not work\n this convenience function does the naming\n :return: first letter converted to upper rest to lower\n\n Parameters\n ----------\n string :\n \n\n Returns\n -------\n\n \"\"\"\n assert (len(string) > 0)\n assert (isinstance(string, str))\n fstring = string[0].upper() + string[1:].lower()\n return fstring\n\n @staticmethod\n def convert_to_list(geometry):\n \"\"\"Convert a molecular structure given as a string into a list suitable for openfermion\n\n Parameters\n ----------\n geometry :\n a string specifying a mol. structure. E.g. 
geometry=\"h 0.0 0.0 0.0\\n h 0.0 0.0 1.0\"\n\n Returns\n -------\n type\n A list with the correct format for openfermion E.g return [ ['h',[0.0,0.0,0.0], [..]]\n\n \"\"\"\n result = []\n for line in geometry.split('\\n'):\n words = line.split()\n if len(words) != 4: break\n try:\n tmp = (ParametersQC.format_element_name(words[0]),\n (float(words[1]), float(words[2]), float(words[3])))\n result.append(tmp)\n except ValueError:\n print(\"get_geometry list unknown line:\\n \", line, \"\\n proceed with caution!\")\n return result\n\n def get_geometry_string(self) -> str:\n \"\"\"returns the geometry as a string\n :return: geometry string\n\n Parameters\n ----------\n\n Returns\n -------\n\n \"\"\"\n if self.geometry.split('.')[-1] == 'xyz':\n geomstring, comment = self.read_xyz_from_file(self.geometry)\n if comment is not None:\n self.description = comment\n return geomstring\n else:\n return self.geometry\n\n def get_geometry(self):\n \"\"\"Returns the geometry\n If a xyz filename was given the file is read out\n otherwise it is assumed that the geometry was given as string\n which is then reformatted as a list usable as input for openfermion\n :return: geometry as list\n e.g. [(h,(0.0,0.0,0.35)),(h,(0.0,0.0,-0.35))]\n Units: Angstrom!\n\n Parameters\n ----------\n\n Returns\n -------\n\n \"\"\"\n if self.geometry.split('.')[-1] == 'xyz':\n geomstring, comment = self.read_xyz_from_file(self.geometry)\n if self.description == '':\n self.description = comment\n if self.name == \"molecule\":\n self.name = self.geometry.split('.')[0]\n return self.convert_to_list(geomstring)\n elif self.geometry is not None:\n return self.convert_to_list(self.geometry)\n else:\n raise Exception(\"Parameters.qc.geometry is None\")\n\n @staticmethod\n def read_xyz_from_file(filename):\n \"\"\"Read XYZ filetype for molecular structures\n https://en.wikipedia.org/wiki/XYZ_file_format\n Units: Angstrom!\n\n Parameters\n ----------\n filename :\n return:\n\n Returns\n -------\n\n \"\"\"\n with open(filename, 'r') as file:\n content = file.readlines()\n natoms = int(content[0])\n comment = str(content[1]).strip('\\n')\n coord = ''\n for i in range(natoms):\n coord += content[2 + i]\n return coord, comment\n\n\n@dataclass\nclass ClosedShellAmplitudes:\n \"\"\" \"\"\"\n tIjAb: numpy.ndarray = None\n tIA: numpy.ndarray = None\n\n def make_parameter_dictionary(self, threshold=1.e-8):\n \"\"\"\n\n Parameters\n ----------\n threshold :\n (Default value = 1.e-8)\n\n Returns\n -------\n\n \"\"\"\n variables = {}\n if self.tIjAb is not None:\n nvirt = self.tIjAb.shape[2]\n nocc = self.tIjAb.shape[0]\n assert (self.tIjAb.shape[1] == nocc and self.tIjAb.shape[3] == nvirt)\n for (I, J, A, B), value in numpy.ndenumerate(self.tIjAb):\n if not numpy.isclose(value, 0.0, atol=threshold):\n variables[(nocc + A, I, nocc + B, J)] = value\n if self.tIA is not None:\n nocc = self.tIA.shape[0]\n for (I, A), value, in numpy.ndenumerate(self.tIA):\n if not numpy.isclose(value, 0.0, atol=threshold):\n variables[(A + nocc, I)] = value\n\n return dict(sorted(variables.items(), key=lambda x: numpy.abs(x[1]), reverse=True))\n\n\n@dataclass\nclass Amplitudes:\n \"\"\"Coupled-Cluster Amplitudes\n We adopt the Psi4 notation for consistency\n I,A for alpha\n i,a for beta\n\n Parameters\n ----------\n\n Returns\n -------\n\n \"\"\"\n\n @classmethod\n def from_closed_shell(cls, cs: ClosedShellAmplitudes):\n \"\"\"\n Initialize from closed-shell Amplitude structure\n\n Parameters\n ----------\n cs: ClosedShellAmplitudes :\n \n\n Returns\n -------\n\n 
\"\"\"\n tijab = cs.tIjAb - numpy.einsum(\"ijab -> ijba\", cs.tIjAb, optimize='greedy')\n return cls(tIjAb=cs.tIjAb, tIA=cs.tIA, tiJaB=cs.tIjAb, tia=cs.tIA, tijab=tijab, tIJAB=tijab)\n\n tIjAb: numpy.ndarray = None\n tIA: numpy.ndarray = None\n tiJaB: numpy.ndarray = None\n tijab: numpy.ndarray = None\n tIJAB: numpy.ndarray = None\n tia: numpy.ndarray = None\n\n def make_parameter_dictionary(self, threshold=1.e-8):\n \"\"\"\n\n Parameters\n ----------\n threshold :\n (Default value = 1.e-8)\n Neglect amplitudes below the threshold\n\n Returns\n -------\n Dictionary of tequila variables (hash is in the style of (a,i,b,j))\n\n \"\"\"\n variables = {}\n if self.tIjAb is not None:\n nvirt = self.tIjAb.shape[2]\n nocc = self.tIjAb.shape[0]\n assert (self.tIjAb.shape[1] == nocc and self.tIjAb.shape[3] == nvirt)\n\n for (I, j, A, b), value in numpy.ndenumerate(self.tIjAb):\n if not numpy.isclose(value, 0.0, atol=threshold):\n variables[(2 * (nocc + A), 2 * I, 2 * (nocc + b) + 1, j + 1)] = value\n for (i, J, a, B), value in numpy.ndenumerate(self.tiJaB):\n if not numpy.isclose(value, 0.0, atol=threshold):\n variables[(2 * (nocc + a) + 1, 2 * i + 1, 2 * (nocc + B), J)] = value\n for (i, j, a, b), value in numpy.ndenumerate(self.tijab):\n if not numpy.isclose(value, 0.0, atol=threshold):\n variables[(2 * (nocc + a) + 1, 2 * i + 1, 2 * (nocc + b) + 1, j + 1)] = value\n for (I, J, A, B), value in numpy.ndenumerate(self.tijab):\n if not numpy.isclose(value, 0.0, atol=threshold):\n variables[(2 * (nocc + A), 2 * I, 2 * (nocc + B), J)] = value\n\n if self.tIA is not None:\n nocc = self.tIjAb.shape[0]\n assert (self.tia.shape[0] == nocc)\n for (I, A), value, in numpy.ndenumerate(self.tIA):\n if not numpy.isclose(value, 0.0, atol=threshold):\n variables[(2 * (A + nocc), 2 * I)] = value\n for (i, a), value, in numpy.ndenumerate(self.tIA):\n if not numpy.isclose(value, 0.0, atol=threshold):\n variables[(2 * (a + nocc) + 1, 2 * i + 1)] = value\n\n return variables\n\n\nclass NBodyTensor:\n \"\"\" Convenience class for handling N-body tensors \"\"\"\n\n def __init__(self, elems: numpy.ndarray = None, active_indices: list = None, scheme: str = None,\n size_full: int = None):\n\n # Set elements\n self.elems = elems\n # Active indices only as list of indices (e.g. 
spatial orbital indices), not as a dictionary of irreducible\n # representations\n if active_indices is not None:\n self.active_indices = active_indices\n self._passive_indices = None\n self._full_indices = None\n self._indices_set: bool = False\n\n # Determine order of tensor\n # Assume, that tensor is entered in desired shape, not as flat array.\n self.order = len(self.elems.shape)\n # Can use size_full < self.elems.shape[0] -> 'full' space is to be considered a subspace as well\n if size_full is None:\n self._size_full = self.elems.shape[0]\n else:\n self._size_full = size_full\n # 2-body tensors (<=> order 4) currently allow reordering\n if self.order == 4:\n if scheme is None:\n self.scheme = 'chem'\n else:\n self.scheme = scheme.lower()\n else:\n if scheme is not None:\n raise Exception(\"Ordering only implemented for tensors of order 4 / 2-body tensors.\")\n self.scheme = None\n\n def sub_lists(self, idx_lists: list = None) -> numpy.ndarray:\n \"\"\"\n Get subspace of tensor by a set of index lists\n according to hPQ.sub_lists(idx_lists=[p, q]) = [hPQ for P in p and Q in q]\n\n This essentially is an implementation of a non-contiguous slicing using numpy.take\n\n Parameters\n ----------\n idx_lists :\n List of lists, each defining the desired subspace per axis\n Size needs to match order of tensor, and lists successively correspond to axis=0,1,2,...,N\n\n Returns\n -------\n out :\n Sliced tensor as numpy.ndarray\n \"\"\"\n # Check if index list has correct size\n if len(idx_lists) != self.order:\n raise Exception(\"Need to pass an index list for each dimension!\" +\n \" Length of idx_lists needs to match order of tensor.\")\n\n # Perform slicing via numpy.take\n out = self.elems\n for ax in range(self.order):\n if idx_lists[ax] is not None: # None means, we want the full space in this direction\n out = numpy.take(out, idx_lists[ax], axis=ax)\n\n return out\n\n def set_index_lists(self):\n \"\"\" Set passive and full index lists based on class inputs \"\"\"\n tmp_size = self._size_full\n if self._size_full is None:\n tmp_size = self.elems.shape[0]\n\n self._passive_indices = [i for i in range(tmp_size)\n if i not in self.active_indices]\n self._full_indices = [i for i in range(tmp_size)]\n\n def sub_str(self, name: str) -> numpy.ndarray:\n \"\"\"\n Get subspace of tensor by a string\n Currently is able to resolve an active space, named 'a', full space 'f', and the complement 'p' = 'f' - 'a'.\n Full space in this context may also be smaller than actual tensor dimension.\n\n The specification of active space in this context only allows to pick a set from a list of orbitals, and\n is not able to resolve an active space from irreducible representations.\n\n Example for one-body tensor:\n hPQ.sub_lists(name='ap') = [hPQ for P in active_indices and Q in _passive_indices]\n\n Parameters\n ----------\n name :\n String specifying the desired subspace, elements need to be a (active), f (full), p (full - active)\n\n Returns\n -------\n out :\n Sliced tensor as numpy.ndarray\n \"\"\"\n if not self._indices_set:\n self.set_index_lists()\n self._indices_set = True\n\n if name is None:\n raise Exception(\"No name specified.\")\n if len(name) != self.order:\n raise Exception(\"Name does not match order of the tensor.\")\n if self.active_indices is None:\n raise Exception(\"Need to set an active space in order to call this function.\")\n\n idx_lists = []\n # Parse name as string of space indices\n for char in name:\n if char.lower() == 'a':\n idx_lists.append(self.active_indices)\n elif char.lower() 
== 'p':\n idx_lists.append(self._passive_indices)\n elif char.lower() == 'f':\n if self._size_full is None:\n idx_lists.append(None)\n else:\n idx_lists.append(self._full_indices)\n else:\n raise Exception(\"Need to specify a valid letter (a,p,f).\")\n\n out = self.sub_lists(idx_lists)\n\n return out\n\n def is_openfermion(self) -> bool:\n \"\"\"\n Checks whether current ordering scheme is 'openfermion'\n \"\"\"\n if self.scheme == 'openfermion' or self.scheme == 'of':\n return True\n else:\n return False\n\n def is_chem(self) -> bool:\n \"\"\"\n Checks whether current ordering scheme is 'chem'\n \"\"\"\n if self.scheme == 'chem' or self.scheme == 'c':\n return True\n else:\n return False\n\n def is_phys(self) -> bool:\n \"\"\"\n Checks whether current ordering scheme is 'phys'\n \"\"\"\n if self.scheme == 'phys' or self.scheme == 'p':\n return True\n else:\n return False\n\n def reorder(self, to: str = 'of'):\n \"\"\"\n Function to reorder tensors according to some convention.\n\n Parameters\n ----------\n to :\n Ordering scheme of choice.\n 'openfermion', 'of' (default) :\n openfermion - ordering, corresponds to integrals of the type\n h^pq_rs = int p(1)* q(2)* O(1,2) r(2) s(1) (O(1,2)\n with operators a^pq_rs = a^p a^q a_r a_s (a^p == a^dagger_p)\n currently needed for dependencies on openfermion-library\n 'chem', 'c' :\n quantum chemistry ordering, collect particle terms,\n more convenient for real-space methods\n h^pq_rs = int p(1) q(1) O(1,2) r(2) s(2)\n This is output by psi4\n 'phys', 'p' :\n typical physics ordering, integrals of type\n h^pq_rs = int p(1)* q(2)* O(1,2) r(1) s(2)\n with operators a^pq_rs = a^p a^q a_s a_r\n\n Returns\n -------\n \"\"\"\n if self.order != 4:\n raise Exception('Reordering currently only implemented for two-body tensors.')\n to = to.lower()\n\n if self.is_chem():\n if to == 'chem' or to == 'c':\n pass\n elif to == 'openfermion' or to == 'of':\n self.elems = numpy.einsum(\"psqr -> pqrs\", self.elems, optimize='greedy')\n self.scheme = 'openfermion'\n elif to == 'phys' or to == 'p':\n self.elems = numpy.einsum(\"prqs -> pqrs\", self.elems, optimize='greedy')\n self.scheme = 'phys'\n elif self.is_openfermion():\n if to == 'chem' or to == 'c':\n self.elems = numpy.einsum(\"pqrs -> psqr\", self.elems, optimize='greedy')\n self.scheme = 'chem'\n elif to == 'openfermion' or to == 'of':\n pass\n elif to == 'phys' or to == 'p':\n self.elems = numpy.einsum(\"pqrs -> pqsr\", self.elems, optimize='greedy')\n self.scheme = 'phys'\n elif self.is_phys():\n if to == 'chem' or to == 'c':\n self.elems = numpy.einsum(\"pqrs -> prqs\", self.elems, optimize='greedy')\n self.scheme = 'chem'\n elif to == 'openfermion' or to == 'of':\n self.elems = numpy.einsum(\"pqsr -> pqrs\", self.elems, optimize='greedy')\n self.scheme = 'openfermion'\n elif to == 'phys' or to == 'p':\n pass\n\n\nclass QuantumChemistryBase:\n \"\"\" \"\"\"\n\n class _QubitEncoding:\n \"\"\"\n Small wrapper class for the Qubit Transformation\n Provides more controlled output and handles special cases\n \"\"\"\n\n def __init__(self, transformation: typing.Callable, **kwargs):\n self._trafo = transformation\n self._kwargs = kwargs\n\n def __call__(self, op):\n errlog = \"\"\n try:\n try:\n # return self._trafo(op, **self._kwargs)\n return self._trafo(op, **self._kwargs)\n except TypeError as E:\n print(\"converting to interaction operator\")\n errlog += \"\\n\" + str(E)\n return self._trafo(openfermion.get_interaction_operator(op), **self._kwargs)\n except Exception as E:\n errlog += \"\\n\" + str(E)\n 
raise TequilaException(\"Error in QubitEncoding \" + str(self) + errlog)\n\n def __repr__(self):\n if len(self._kwargs) > 0:\n return \"transformation=\" + str(self._trafo) + \"\\nadditional keys: \" + str(self._kwargs)\n else:\n return \"transformation=\" + str(self._trafo)\n\n def __str__(self):\n return self.__repr__()\n\n def __init__(self, parameters: ParametersQC,\n transformation: typing.Union[str, typing.Callable] = None,\n active_orbitals: list = None,\n reference: list = None,\n *args,\n **kwargs):\n\n self.parameters = parameters\n\n if \"molecule\" in kwargs:\n self.molecule = kwargs[\"molecule\"]\n else:\n self.molecule = self.make_molecule(*args, **kwargs)\n\n assert (parameters.basis_set.lower() == self.molecule.basis.lower())\n assert (parameters.multiplicity == self.molecule.multiplicity)\n assert (parameters.charge == self.molecule.charge)\n\n self.active_space = None\n if active_orbitals is not None:\n self.active_space = self._make_active_space_data(active_orbitals=active_orbitals, reference=reference)\n\n if reference is None:\n self.reference = [i for i in range(self.n_electrons // 2)]\n else:\n self.reference = reference\n\n self.transformation = self._initialize_transformation(transformation=transformation, *args, **kwargs)\n\n self._rdm1 = None\n self._rdm2 = None\n\n def _initialize_transformation(self, transformation, *args, **kwargs):\n # filter out arguments to the transformation\n trafo_args = {k.split(\"__\")[1]: v for k, v in kwargs.items() if\n (hasattr(k, \"lower\") and \"transformation__\" in k.lower())}\n\n if transformation is None:\n trafo = openfermion.jordan_wigner\n elif hasattr(transformation, \"lower\") and transformation.lower() in [\"jordan-wigner\", \"jw\", \"j-w\",\n \"jordanwigner\"]:\n trafo = openfermion.jordan_wigner\n elif hasattr(transformation, \"lower\") and transformation.lower() in [\"bravyi-kitaev\", \"bk\", \"b-k\",\n \"bravyikitaev\"]:\n trafo = openfermion.bravyi_kitaev\n elif hasattr(transformation, \"lower\") and transformation.lower() in [\"bravyi-kitaev-tree\", \"bkt\",\n \"bravykitaevtree\", \"b-k-t\"]:\n trafo = openfermion.bravyi_kitaev_tree\n elif hasattr(transformation, \"lower\") and transformation.lower() in [\"tapered_bravyi_kitaev\", \"tbk\", \"t-b-k\",\n \"symmetry_conserving_bravyi_kitaev\"]:\n if \"active_orbitals\" not in trafo_args:\n trafo_args[\"active_orbitals\"] = self.n_orbitals * 2\n if \"active_fermions\" not in trafo_args:\n trafo_args[\"active_fermions\"] = self.n_electrons\n print(\"trafo_args = \", trafo_args)\n # trafo = openfermion.symmetry_conserving_bravyi_kitaev\n # Current hotfix, to be changed once it works again straightforward with OpenFermion\n from ._openfermion_symmetry_conserving_bk_hotfix import symmetry_conserving_bravyi_kitaev_HOTFIX\n trafo = symmetry_conserving_bravyi_kitaev_HOTFIX\n elif hasattr(transformation, \"lower\"):\n trafo = getattr(openfermion, transformation.lower())\n else:\n assert (callable(transformation))\n trafo = transformation\n\n return self._QubitEncoding(transformation=trafo, **trafo_args)\n\n def _make_active_space_data(self, active_orbitals, reference=None):\n \"\"\"\n Small helper function\n Internal use only\n Parameters\n ----------\n active_orbitals: dictionary :\n list: Give a list of spatial orbital indices\n i.e. 
occ = [0,1,3] means that spatial orbital 0, 1 and 3 are used\n reference: (Default value=None)\n List of orbitals which form the reference\n Can be given in the same format as active_orbitals\n If given as None then the first N_electron/2 orbitals are taken\n for closed-shell systems.\n\n Returns\n -------\n Dataclass with active indices and reference indices (in spatial notation)\n\n \"\"\"\n\n if active_orbitals is None:\n return None\n\n @dataclass\n class ActiveSpaceData:\n active_orbitals: list # active orbitals (spatial, c1)\n reference_orbitals: list # reference orbitals (spatial, c1)\n\n def __str__(self):\n result = \"Active Space Data:\\n\"\n result += \"{key:15} : {value:15} \\n\".format(key=\"active_orbitals\", value=str(self.active_orbitals))\n result += \"{key:15} : {value:15} \\n\".format(key=\"reference_orbitals\",\n value=str(self.reference_orbitals))\n result += \"{key:15} : {value:15} \\n\".format(key=\"frozen_docc\", value=str(self.frozen_docc))\n result += \"{key:15} : {value:15} \\n\".format(key=\"frozen_uocc\", value=str(self.frozen_uocc))\n return result\n\n @property\n def frozen_reference_orbitals(self):\n return [i for i in self.reference_orbitals if i not in self.active_orbitals]\n\n @property\n def active_reference_orbitals(self):\n return [i for i in self.reference_orbitals if i in self.active_orbitals]\n\n if reference is None:\n # auto assignment only for closed-shell\n assert (self.n_electrons % 2 == 0)\n reference = sorted([i for i in range(self.n_electrons // 2)])\n\n return ActiveSpaceData(active_orbitals=sorted(active_orbitals),\n reference_orbitals=sorted(reference))\n\n @classmethod\n def from_openfermion(cls, molecule: openfermion.MolecularData,\n transformation: typing.Union[str, typing.Callable] = None,\n *args,\n **kwargs):\n \"\"\"\n Initialize directly from openfermion MolecularData object\n\n Parameters\n ----------\n molecule\n The openfermion molecule\n Returns\n -------\n The Tequila molecule\n \"\"\"\n parameters = ParametersQC(basis_set=molecule.basis, geometry=molecule.geometry,\n description=molecule.description, multiplicity=molecule.multiplicity,\n charge=molecule.charge)\n return cls(parameters=parameters, transformation=transformation, molecule=molecule, *args, **kwargs)\n\n def make_excitation_generator(self, indices: typing.Iterable[typing.Tuple[int, int]]) -> QubitHamiltonian:\n \"\"\"\n Notes\n ----------\n Creates the transformed hermitian generator of UCC type unitaries:\n M(a^\\dagger_{a_0} a_{i_0} a^\\dagger_{a_1}a_{i_1} ... - h.c.)\n where the qubit map M depends on self.transformation\n\n Parameters\n ----------\n indices : typing.Iterable[typing.Tuple[int, int]] :\n List of tuples [(a_0, i_0), (a_1, i_1), ... 
] - recommended format, in spin-orbital notation (alpha odd numbers, beta even numbers)\n can also be given as one big list: [a_0, i_0, a_1, i_1 ...]\n Returns\n -------\n type\n 1j*Transformed qubit excitation operator, depends on self.transformation\n \"\"\"\n\n if self.transformation._trafo == openfermion.bravyi_kitaev_fast:\n raise TequilaException(\n \"The Bravyi-Kitaev-Superfast transformation does not support general FermionOperators yet\")\n\n # check indices and convert to list of tuples if necessary\n if len(indices) == 0:\n raise TequilaException(\"make_excitation_operator: no indices given\")\n elif not isinstance(indices[0], typing.Iterable):\n if len(indices) % 2 != 0:\n raise TequilaException(\"make_excitation_generator: unexpected input format of indices\\n\"\n \"use list of tuples as [(a_0, i_0),(a_1, i_1) ...]\\n\"\n \"or list as [a_0, i_0, a_1, i_1, ... ]\\n\"\n \"you gave: {}\".format(indices))\n converted = [(indices[2 * i], indices[2 * i + 1]) for i in range(len(indices) // 2)]\n else:\n converted = indices\n\n # convert to openfermion input format\n ofi = []\n dag = []\n for pair in converted:\n assert (len(pair) == 2)\n ofi += [(int(pair[0]), 1),\n (int(pair[1]), 0)] # openfermion does not take other types of integers like numpy.int64\n dag += [(int(pair[0]), 0), (int(pair[1]), 1)]\n\n op = openfermion.FermionOperator(tuple(ofi), 1.j) # 1j makes it hermitian\n op += openfermion.FermionOperator(tuple(reversed(dag)), -1.j)\n qop = QubitHamiltonian(qubit_hamiltonian=self.transformation(op))\n # check if the operator is hermitian and cast coefficients to floats\n # in order to avoid trouble with the simulation backends\n assert qop.is_hermitian()\n for k, v in qop.qubit_operator.terms.items():\n qop.qubit_operator.terms[k] = to_float(v)\n\n qop = qop.simplify()\n\n if len(qop) == 0:\n warnings.warn(\"Excitation generator is a unit operator.\\n\"\n \"Non-standard transformations might not work with general fermionic operators\\n\"\n \"indices = \" + str(indices), category=TequilaWarning)\n return qop\n\n def reference_state(self, reference_orbitals: list = None, n_qubits: int = None) -> BitString:\n \"\"\"Does a really lazy workaround ... 
but it works\n :return: Hartree-Fock Reference as binary-number\n\n Parameters\n ----------\n reference_orbitals: list:\n give list of doubly occupied orbitals\n default is None which leads to automatic list of the\n first n_electron/2 orbitals\n\n Returns\n -------\n\n \"\"\"\n\n if n_qubits is None:\n n_qubits = 2 * self.n_orbitals\n\n if self.transformation._trafo == openfermion.symmetry_conserving_bravyi_kitaev:\n def tapering(fop):\n fermion_hamiltonian_reorder = openfermion.utils.reorder(fop, openfermion.utils.up_then_down,\n num_modes=n_qubits)\n qubit_hamiltonian = openfermion.bravyi_kitaev_tree(fermion_hamiltonian_reorder, n_qubits=n_qubits)\n qubit_hamiltonian.compress()\n return qubit_hamiltonian\n\n transformation = tapering\n elif self.transformation._trafo == openfermion.bravyi_kitaev_fast:\n raise TequilaException(\n \"The Bravyi-Kitaev-Superfast transformation does not support general FermionOperators yet\")\n\n else:\n transformation = self.transformation\n\n if reference_orbitals is None:\n reference_orbitals = self.reference\n\n spin_orbitals = sorted([2 * i for i in reference_orbitals] + [2 * i + 1 for i in reference_orbitals])\n\n string = \"1.0 [\"\n for i in spin_orbitals:\n string += str(i) + \"^ \"\n string += \"]\"\n\n fop = openfermion.FermionOperator(string, 1.0)\n op = QubitHamiltonian(qubit_hamiltonian=transformation(fop))\n from tequila.wavefunction.qubit_wavefunction import QubitWaveFunction\n wfn = QubitWaveFunction.from_int(0, n_qubits=n_qubits)\n wfn = wfn.apply_qubitoperator(operator=op)\n assert (len(wfn.keys()) == 1)\n key = list(wfn.keys())[0]\n if self.transformation._trafo == openfermion.symmetry_conserving_bravyi_kitaev:\n active_qubits = [i for i in range(n_qubits) if i not in [n_qubits - 1, n_qubits // 2 - 1]]\n array = [key.array[i] for i in active_qubits]\n key = BitString.from_array(array=array)\n return key\n\n def make_molecule(self, *args, **kwargs) -> MolecularData:\n \"\"\"Creates a molecule in openfermion format by running psi4 and extracting the data\n Will check for previous outputfiles before running\n Will not recompute if a file was found\n\n Parameters\n ----------\n parameters :\n An instance of ParametersQC, which also holds an instance of ParametersPsi4 via parameters.psi4\n The molecule will be saved in parameters.filename, if this file exists before the call the molecule will be imported from the file\n\n Returns\n -------\n type\n the molecule in openfermion.MolecularData format\n\n \"\"\"\n molecule = MolecularData(**self.parameters.molecular_data_param)\n # try to load\n\n do_compute = True\n try:\n import os\n if os.path.exists(self.parameters.filename):\n molecule.load()\n do_compute = False\n except OSError:\n do_compute = True\n\n if do_compute:\n molecule = self.do_make_molecule(*args, **kwargs)\n\n molecule.save()\n return molecule\n\n def do_make_molecule(self, *args, **kwargs):\n \"\"\"\n\n Parameters\n ----------\n args\n kwargs\n\n Returns\n -------\n\n \"\"\"\n # integrals need to be passed in base class\n assert (\"one_body_integrals\" in kwargs)\n assert (\"two_body_integrals\" in kwargs)\n one_body_integrals = kwargs[\"one_body_integrals\"]\n two_body_integrals = kwargs[\"two_body_integrals\"]\n if \"nuclear_repulsion\" in kwargs:\n nuclear_repulsion = kwargs[\"nuclear_repulsion\"]\n else:\n nuclear_repulsion = 0.0\n warnings.warn(\"No nuclear_repulsion given for custom molecule, setting to zero\", category=TequilaWarning)\n\n\n if (\"n_orbitals\" in kwargs):\n n_orbitals = kwargs[\"n_orbitals\"]\n else:\n 
n_orbitals = one_body_integrals.shape[0]\n for i in [0,1,2,3]:\n assert n_orbitals == two_body_integrals.shape[i]\n \n molecule = MolecularData(**self.parameters.molecular_data_param)\n\n molecule.one_body_integrals = one_body_integrals\n molecule.two_body_integrals = two_body_integrals\n molecule.nuclear_repulsion = nuclear_repulsion\n molecule.n_orbitals = n_orbitals\n molecule.save()\n return molecule\n\n @property\n def n_orbitals(self) -> int:\n \"\"\" \"\"\"\n if self.active_space is None:\n return self.molecule.n_orbitals\n else:\n return len(self.active_space.active_orbitals)\n\n @property\n def n_electrons(self) -> int:\n \"\"\" \"\"\"\n if self.active_space is None:\n return self.molecule.n_electrons\n else:\n return 2 * len(self.active_space.active_reference_orbitals)\n\n def make_hamiltonian(self, occupied_indices=None, active_indices=None) -> QubitHamiltonian:\n \"\"\" \"\"\"\n if occupied_indices is None and self.active_space is not None:\n occupied_indices = self.active_space.frozen_reference_orbitals\n if active_indices is None and self.active_space is not None:\n active_indices = self.active_space.active_orbitals\n\n fop = openfermion.transforms.get_fermion_operator(\n self.molecule.get_molecular_hamiltonian(occupied_indices, active_indices))\n try:\n qop = self.transformation(fop)\n except TypeError:\n qop = self.transformation(openfermion.transforms.get_interaction_operator(fop))\n return QubitHamiltonian(qubit_hamiltonian=qop)\n\n def compute_one_body_integrals(self):\n \"\"\" \"\"\"\n if hasattr(self, \"molecule\"):\n return self.molecule.one_body_integrals\n\n def compute_two_body_integrals(self):\n \"\"\" \"\"\"\n if hasattr(self, \"molecule\"):\n return self.molecule.two_body_integrals\n\n def compute_ccsd_amplitudes(self) -> ClosedShellAmplitudes:\n \"\"\" \"\"\"\n raise Exception(\"BaseClass Method\")\n\n def prepare_reference(self, *args, **kwargs):\n \"\"\"\n\n Returns\n -------\n A tequila circuit object which prepares the reference of this molecule in the chosen transformation\n \"\"\"\n\n return prepare_product_state(self.reference_state(*args, **kwargs))\n\n def make_upccgsd_ansatz(self,\n include_singles:bool=True,\n include_reference:bool=True,\n indices:list=None,\n label: str=None,\n order:int =1,\n *args, **kwargs):\n \"\"\"\n UpGCCSD Ansatz similar to the one described by Lee et al.\n\n Parameters\n ----------\n include_singles\n include singles excitations\n include_reference\n include the HF reference state as initial state\n indices\n pass custom defined set of indices from which the ansatz will be created\n List of tuples of tuples spin-indices e.g. 
[((2*p,2*q),(2*p+1,2*q+1)), ...]\n label\n An additional label that is set with the variables\n default is None and no label will be set: variables names will be\n (x, (p,q)) for x in range(order)\n with a label the variables will be named\n (label, (x, (p,q))) \n order\n Order of the ansatz (default is 1)\n determines how often the ordering gets repeated\n parameters of repeating layers are independent\n Returns\n -------\n UpGCCSD ansatz\n \"\"\"\n\n # indices defining the UpCCD ansatz\n if indices is None:\n indices = []\n for i in range(self.n_orbitals):\n for a in range(i + 1, self.n_orbitals):\n indices.append(((2 * i, 2 * a), (2 * i + 1, 2 * a + 1)))\n if include_singles:\n indices.append(((2 * i, 2 * a)))\n indices.append(((2 * i + 1, 2 * a + 1)))\n\n U = QCircuit()\n if include_reference:\n U = self.prepare_reference()\n\n generators = [self.make_excitation_generator(i, *args, **kwargs) for i in indices]\n\n for k in range(order):\n idx = [(k,i) for i in indices]\n prefix = order\n if label is not None:\n prefix = (label, order)\n names = [(prefix, i) for i in idx]\n U += gates.Trotterized(generators=generators, angles=names, steps=1)\n return U\n\n def make_uccsd_ansatz(self, trotter_steps: int,\n initial_amplitudes: typing.Union[str, Amplitudes, ClosedShellAmplitudes] = \"mp2\",\n include_reference_ansatz=True,\n parametrized=True,\n threshold=1.e-8,\n trotter_parameters: gates.TrotterParameters = None) -> QCircuit:\n \"\"\"\n\n Parameters\n ----------\n initial_amplitudes :\n initial amplitudes given as ManyBodyAmplitudes structure or as string\n where 'mp2', 'cc2' or 'ccsd' are possible initializations\n include_reference_ansatz :\n Also do the reference ansatz (prepare closed-shell Hartree-Fock) (Default value = True)\n parametrized :\n Initialize with variables, otherwise with static numbers (Default value = True)\n trotter_steps: int :\n\n initial_amplitudes: typing.Union[str :\n\n Amplitudes :\n\n ClosedShellAmplitudes] :\n (Default value = \"mp2\")\n trotter_parameters: gates.TrotterParameters :\n (Default value = None)\n\n Returns\n -------\n type\n Parametrized QCircuit\n\n \"\"\"\n\n if self.n_electrons % 2 != 0:\n raise TequilaException(\"make_uccsd_ansatz currently only for closed shell systems\")\n\n nocc = self.n_electrons // 2\n nvirt = self.n_orbitals // 2 - nocc\n\n Uref = QCircuit()\n if include_reference_ansatz:\n Uref = self.prepare_reference()\n\n amplitudes = initial_amplitudes\n if hasattr(initial_amplitudes, \"lower\"):\n if initial_amplitudes.lower() == \"mp2\":\n amplitudes = self.compute_mp2_amplitudes()\n elif initial_amplitudes.lower() == \"ccsd\":\n amplitudes = self.compute_ccsd_amplitudes()\n else:\n try:\n amplitudes = self.compute_amplitudes(method=initial_amplitudes.lower())\n except Exception as exc:\n raise TequilaException(\n \"{}\\nDon't know how to initialize \\'{}\\' amplitudes\".format(exc, initial_amplitudes))\n\n if amplitudes is None:\n amplitudes = ClosedShellAmplitudes(\n tIjAb=numpy.zeros(shape=[nocc, nocc, nvirt, nvirt]),\n tIA=numpy.zeros(shape=[nocc, nvirt]))\n\n closed_shell = isinstance(amplitudes, ClosedShellAmplitudes)\n generators = []\n variables = []\n\n if not isinstance(amplitudes, dict):\n amplitudes = amplitudes.make_parameter_dictionary(threshold=threshold)\n amplitudes = dict(sorted(amplitudes.items(), key=lambda x: x[1]))\n\n for key, t in amplitudes.items():\n assert (len(key) % 2 == 0)\n if not numpy.isclose(t, 0.0, atol=threshold):\n\n if closed_shell:\n spin_indices = []\n if len(key) == 2:\n spin_indices = [[2 
* key[0], 2 * key[1]], [2 * key[0] + 1, 2 * key[1] + 1]]\n partner = None\n else:\n spin_indices.append([2 * key[0] + 1, 2 * key[1] + 1, 2 * key[2], 2 * key[3]])\n spin_indices.append([2 * key[0], 2 * key[1], 2 * key[2] + 1, 2 * key[3] + 1])\n if key[0] != key[2] and key[1] != key[3]:\n spin_indices.append([2 * key[0], 2 * key[1], 2 * key[2], 2 * key[3]])\n spin_indices.append([2 * key[0] + 1, 2 * key[1] + 1, 2 * key[2] + 1, 2 * key[3] + 1])\n partner = tuple([key[2], key[1], key[0], key[3]]) # taibj -> tbiaj\n for idx in spin_indices:\n idx = [(idx[2 * i], idx[2 * i + 1]) for i in range(len(idx) // 2)]\n generators.append(self.make_excitation_generator(indices=idx))\n\n if parametrized:\n variables.append(Variable(name=key)) # abab\n variables.append(Variable(name=key)) # baba\n if partner is not None and key[0] != key[1] and key[2] != key[3]:\n variables.append(Variable(name=key) - Variable(partner)) # aaaa\n variables.append(Variable(name=key) - Variable(partner)) # bbbb\n else:\n variables.append(t)\n variables.append(t)\n if partner is not None and key[0] != key[1] and key[2] != key[3]:\n variables.append(t - amplitudes[partner])\n variables.append(t - amplitudes[partner])\n else:\n generators.append(self.make_excitation_operator(indices=spin_indices))\n if parametrized:\n variables.append(Variable(name=key))\n else:\n variables.append(t)\n\n return Uref + gates.Trotterized(generators=generators, angles=variables, steps=trotter_steps,\n parameters=trotter_parameters)\n\n def compute_amplitudes(self, method: str, *args, **kwargs):\n \"\"\"\n Compute closed-shell CC amplitudes\n\n Parameters\n ----------\n method :\n coupled-cluster methods like cc2, ccsd, cc3, ccsd(t)\n Success might depend on backend\n got an extra function for MP2\n *args :\n\n **kwargs :\n\n\n Returns\n -------\n\n \"\"\"\n raise TequilaException(\"compute amplitudes: Needs to be overwritten by backend\")\n\n def compute_mp2_amplitudes(self) -> ClosedShellAmplitudes:\n \"\"\"\n\n Compute closed-shell mp2 amplitudes\n\n .. 
math::\n t(a,i,b,j) = 0.25 * g(a,i,b,j)/(e(i) + e(j) -a(i) - b(j) )\n\n :return:\n\n Parameters\n ----------\n\n Returns\n -------\n\n \"\"\"\n assert self.parameters.closed_shell\n g = self.molecule.two_body_integrals\n fij = self.molecule.orbital_energies\n nocc = self.molecule.n_electrons // 2 # this is never the active space\n ei = fij[:nocc]\n ai = fij[nocc:]\n abgij = g[nocc:, nocc:, :nocc, :nocc]\n amplitudes = abgij * 1.0 / (\n ei.reshape(1, 1, -1, 1) + ei.reshape(1, 1, 1, -1) - ai.reshape(-1, 1, 1, 1) - ai.reshape(1, -1, 1, 1))\n E = 2.0 * numpy.einsum('abij,abij->', amplitudes, abgij) - numpy.einsum('abji,abij', amplitudes, abgij,\n optimize='greedy')\n\n self.molecule.mp2_energy = E + self.molecule.hf_energy\n return ClosedShellAmplitudes(tIjAb=numpy.einsum('abij -> ijab', amplitudes, optimize='greedy'))\n\n def compute_cis_amplitudes(self):\n \"\"\"\n Compute the CIS amplitudes of the molecule\n \"\"\"\n\n @dataclass\n class ResultCIS:\n \"\"\" \"\"\"\n omegas: typing.List[numbers.Real] # excitation energies [omega0, ...]\n amplitudes: typing.List[ClosedShellAmplitudes] # corresponding amplitudes [x_{ai}_0, ...]\n\n def __getitem__(self, item):\n return (self.omegas[item], self.amplitudes[item])\n\n def __len__(self):\n return len(self.omegas)\n\n g = self.molecule.two_body_integrals\n fij = self.molecule.orbital_energies\n\n nocc = self.n_alpha_electrons\n nvirt = self.n_orbitals - nocc\n\n pairs = []\n for i in range(nocc):\n for a in range(nocc, nocc + nvirt):\n pairs.append((a, i))\n M = numpy.ndarray(shape=[len(pairs), len(pairs)])\n\n for xx, x in enumerate(pairs):\n eia = fij[x[0]] - fij[x[1]]\n a, i = x\n for yy, y in enumerate(pairs):\n b, j = y\n delta = float(y == x)\n gpart = 2.0 * g[a, i, b, j] - g[a, i, j, b]\n M[xx, yy] = eia * delta + gpart\n\n omega, xvecs = numpy.linalg.eigh(M)\n\n # convert amplitudes to ndarray sorted by excitation energy\n nex = len(omega)\n amplitudes = []\n for ex in range(nex):\n t = numpy.ndarray(shape=[nvirt, nocc])\n exvec = xvecs[ex]\n for xx, x in enumerate(pairs):\n a, i = x\n t[a - nocc, i] = exvec[xx]\n amplitudes.append(ClosedShellAmplitudes(tIA=t))\n\n return ResultCIS(omegas=list(omega), amplitudes=amplitudes)\n\n @property\n def rdm1(self):\n \"\"\" \"\"\"\n if self._rdm1 is not None:\n return self._rdm1\n else:\n print(\"1-RDM has not been computed. Return None for 1-RDM.\")\n return None\n\n @property\n def rdm2(self):\n \"\"\" \"\"\"\n if self._rdm2 is not None:\n return self._rdm2\n else:\n print(\"2-RDM has not been computed. Return None for 2-RDM.\")\n return None\n\n def compute_rdms(self, U: QCircuit = None, variables: Variables = None, spin_free: bool = True,\n get_rdm1: bool = True, get_rdm2: bool = True):\n \"\"\"\n Computes the one- and two-particle reduced density matrices (rdm1 and rdm2) given\n a unitary U. This method uses the standard ordering in physics as denoted below.\n Note, that the representation of the density matrices depends on the qubit transformation\n used. The Jordan-Wigner encoding corresponds to 'classical' second quantized density\n matrices in the occupation picture.\n\n We only consider real orbitals and thus real-valued RDMs.\n The matrices are set as private members _rdm1, _rdm2 and can be accessed via the properties rdm1, rdm2.\n\n .. 
math :\n \\\\text{rdm1: } \\\\gamma^p_q = \\\\langle \\\\psi | a^p a_q | \\\\psi \\\\rangle\n = \\\\langle U 0 | a^p a_q | U 0 \\\\rangle\n \\\\text{rdm2: } \\\\gamma^{pq}_{rs} = \\\\langle \\\\psi | a^p a^q a_s a_r | \\\\psi \\\\rangle\n = \\\\langle U 0 | a^p a^q a_s a_r | U 0 \\\\rangle\n\n Parameters\n ----------\n U :\n Quantum Circuit to achieve the desired state \\\\psi = U |0\\\\rangle, non-optional\n variables :\n If U is parametrized, then need to hand over a set of fixed variables\n spin_free :\n Set whether matrices should be spin-free (summation over spin) or defined by spin-orbitals\n get_rdm1, get_rdm2 :\n Set whether either one or both rdm1, rdm2 should be computed. If both are needed at some point,\n it is recommended to compute them at once.\n\n Returns\n -------\n \"\"\"\n # Check whether unitary circuit is not 0\n if U is None:\n raise TequilaException('Need to specify a Quantum Circuit.')\n\n # Check whether transformation is BKSF.\n # Issue here: when a single operator acts only on a subset of qubits, BKSF might not yield the correct\n # transformation, because it computes the number of qubits incorrectly in this case.\n # A hotfix such as for symmetry_conserving_bravyi_kitaev would require deeper changes, thus omitted for now\n if self.transformation._trafo == openfermion.bravyi_kitaev_fast:\n raise TequilaException(\n \"The Bravyi-Kitaev-Superfast transformation does not support general FermionOperators yet.\")\n\n # Set up number of spin-orbitals and molecular orbitals respectively\n n_SOs = 2 * self.n_orbitals\n n_MOs = self.n_orbitals\n\n # Check whether unitary circuit is not 0\n if U is None:\n raise TequilaException('Need to specify a Quantum Circuit.')\n\n def _get_of_op(operator_tuple):\n \"\"\" Returns operator given by a operator tuple as OpenFermion - Fermion operator \"\"\"\n op = openfermion.FermionOperator(operator_tuple)\n return op\n\n def _get_qop_hermitian(of_operator) -> QubitHamiltonian:\n \"\"\" Returns Hermitian part of Fermion operator as QubitHamiltonian \"\"\"\n qop = QubitHamiltonian(self.transformation(of_operator))\n real, imag = qop.split(hermitian=True)\n if real:\n return real\n elif not real:\n print(of_operator)\n raise TequilaException(\"Qubit Hamiltonian does not have a Hermitian part. 
Check this...\")\n\n def _build_1bdy_operators_spinful() -> list:\n \"\"\" Returns spinful one-body operators as a symmetry-reduced list of QubitHamiltonians \"\"\"\n # Exploit symmetry pq = qp\n ops = []\n for p in range(n_SOs):\n for q in range(p + 1):\n op_tuple = ((p, 1), (q, 0))\n op = _get_of_op(op_tuple)\n ops += [op]\n\n return ops\n\n def _build_2bdy_operators_spinful() -> list:\n \"\"\" Returns spinful two-body operators as a symmetry-reduced list of QubitHamiltonians \"\"\"\n # Exploit symmetries pqrs = -pqsr = -qprs = qpsr\n # and = rspq\n ops = []\n for p in range(n_SOs):\n for q in range(p):\n for r in range(n_SOs):\n for s in range(r):\n if p * n_SOs + q >= r * n_SOs + s:\n op_tuple = ((p, 1), (q, 1), (s, 0), (r, 0))\n op = _get_of_op(op_tuple)\n ops += [op]\n\n return ops\n\n def _build_1bdy_operators_spinfree() -> list:\n \"\"\" Returns spinfree one-body operators as a symmetry-reduced list of QubitHamiltonians \"\"\"\n # Exploit symmetry pq = qp (not changed by spin-summation)\n ops = []\n for p in range(n_MOs):\n for q in range(p + 1):\n # Spin aa\n op_tuple = ((2 * p, 1), (2 * q, 0))\n op = _get_of_op(op_tuple)\n # Spin bb\n op_tuple = ((2 * p + 1, 1), (2 * q + 1, 0))\n op += _get_of_op(op_tuple)\n ops += [op]\n\n return ops\n\n def _build_2bdy_operators_spinfree() -> list:\n \"\"\" Returns spinfree two-body operators as a symmetry-reduced list of QubitHamiltonians \"\"\"\n # Exploit symmetries pqrs = qpsr (due to spin summation, '-pqsr = -qprs' drops out)\n # and = rspq\n ops = []\n for p, q, r, s in product(range(n_MOs), repeat=4):\n if p * n_MOs + q >= r * n_MOs + s and (p >= q or r >= s):\n # Spin aaaa\n op_tuple = ((2 * p, 1), (2 * q, 1), (2 * s, 0), (2 * r, 0)) if (p!=q and r!=s) else '0.0 []'\n op = _get_of_op(op_tuple)\n # Spin abab\n op_tuple = ((2 * p, 1), (2 * q + 1, 1), (2 * s + 1, 0), (2 * r, 0)) if (2*p!=2*q+1 and 2*r!=2*s+1) else '0.0 []'\n op += _get_of_op(op_tuple)\n # Spin baba\n op_tuple = ((2 * p + 1, 1), (2 * q, 1), (2 * s, 0), (2 * r + 1, 0)) if (2*p+1!=2*q and 2*r+1!=2*s) else '0.0 []'\n op += _get_of_op(op_tuple)\n # Spin bbbb\n op_tuple = ((2 * p + 1, 1), (2 * q + 1, 1), (2 * s + 1, 0), (2 * r + 1, 0)) if (p!=q and r!=s) else '0.0 []'\n op += _get_of_op(op_tuple)\n\n ops += [op]\n\n return ops\n\n def _assemble_rdm1(evals) -> numpy.ndarray:\n \"\"\"\n Returns spin-ful or spin-free one-particle RDM built by symmetry conditions\n Same symmetry with or without spin, so we can use the same function\n \"\"\"\n N = n_MOs if spin_free else n_SOs\n rdm1 = numpy.zeros([N, N])\n ctr: int = 0\n for p in range(N):\n for q in range(p + 1):\n rdm1[p, q] = evals[ctr]\n # Symmetry pq = qp\n rdm1[q, p] = rdm1[p, q]\n ctr += 1\n\n return rdm1\n\n def _assemble_rdm2_spinful(evals) -> numpy.ndarray:\n \"\"\" Returns spin-ful two-particle RDM built by symmetry conditions \"\"\"\n ctr: int = 0\n rdm2 = numpy.zeros([n_SOs, n_SOs, n_SOs, n_SOs])\n for p in range(n_SOs):\n for q in range(p):\n for r in range(n_SOs):\n for s in range(r):\n if p * n_SOs + q >= r * n_SOs + s:\n rdm2[p, q, r, s] = evals[ctr]\n # Symmetry pqrs = rspq\n rdm2[r, s, p, q] = rdm2[p, q, r, s]\n ctr += 1\n\n # Further permutational symmetries due to anticommutation relations\n for p in range(n_SOs):\n for q in range(p):\n for r in range(n_SOs):\n for s in range(r):\n rdm2[p, q, s, r] = -1 * rdm2[p, q, r, s] # pqrs = -pqsr\n rdm2[q, p, r, s] = -1 * rdm2[p, q, r, s] # pqrs = -qprs\n rdm2[q, p, s, r] = rdm2[p, q, r, s] # pqrs = qpsr\n\n return rdm2\n\n def _assemble_rdm2_spinfree(evals) -> 
numpy.ndarray:\n \"\"\" Returns spin-free two-particle RDM built by symmetry conditions \"\"\"\n ctr: int = 0\n rdm2 = numpy.zeros([n_MOs, n_MOs, n_MOs, n_MOs])\n for p, q, r, s in product(range(n_MOs), repeat=4):\n if p * n_MOs + q >= r * n_MOs + s and (p >= q or r >= s):\n rdm2[p, q, r, s] = evals[ctr]\n # Symmetry pqrs = rspq\n rdm2[r, s, p, q] = rdm2[p, q, r, s]\n ctr += 1\n\n # Further permutational symmetry: pqrs = qpsr\n for p, q, r, s in product(range(n_MOs), repeat=4):\n if p >= q or r >= s:\n rdm2[q, p, s, r] = rdm2[p, q, r, s]\n\n return rdm2\n\n # Build operator lists\n qops = []\n if spin_free:\n qops += _build_1bdy_operators_spinfree() if get_rdm1 else []\n qops += _build_2bdy_operators_spinfree() if get_rdm2 else []\n else:\n qops += _build_1bdy_operators_spinful() if get_rdm1 else []\n qops += _build_2bdy_operators_spinful() if get_rdm2 else []\n\n # Transform operator lists to QubitHamiltonians\n qops = [_get_qop_hermitian(op) for op in qops]\n # Compute expected values\n evals = simulate(ExpectationValue(H=qops, U=U, shape=[len(qops)]), variables=variables)\n\n # Assemble density matrices\n # If self._rdm1, self._rdm2 exist, reset them if they are of the other spin-type\n def _reset_rdm(rdm):\n if rdm is not None:\n if spin_free and rdm.shape[0] != n_MOs:\n return None\n if not spin_free and rdm.shape[0] != n_SOs:\n return None\n return rdm\n\n self._rdm1 = _reset_rdm(self._rdm1)\n self._rdm2 = _reset_rdm(self._rdm2)\n # Split expectation values in 1- and 2-particle expectation values\n if get_rdm1:\n len_1 = n_MOs * (n_MOs + 1) // 2 if spin_free else n_SOs * (n_SOs + 1) // 2\n else:\n len_1 = 0\n evals_1, evals_2 = evals[:len_1], evals[len_1:]\n # Build matrices using the expectation values\n self._rdm1 = _assemble_rdm1(evals_1) if get_rdm1 else self._rdm1\n if spin_free:\n self._rdm2 = _assemble_rdm2_spinfree(evals_2) if get_rdm2 else self._rdm2\n else:\n self._rdm2 = _assemble_rdm2_spinful(evals_2) if get_rdm2 else self._rdm2\n\n def rdm_spinsum(self, sum_rdm1: bool = True, sum_rdm2: bool = True) -> tuple:\n \"\"\"\n Given the spin-ful 1- and 2-particle reduced density matrices, compute the spin-free RDMs by spin summation.\n\n Parameters\n ----------\n sum_rdm1, sum_rdm2 :\n If set to true, perform spin summation on rdm1, rdm2\n\n Returns\n -------\n rdm1_spinsum, rdm2_spinsum :\n The desired spin-free matrices\n \"\"\"\n n_MOs = self.n_orbitals\n rdm1_spinsum = None\n rdm2_spinsum = None\n\n # Spin summation on rdm1\n if sum_rdm1:\n # Check whether spin-rdm2 exists\n if self._rdm1 is None:\n raise TequilaException(\"The spin-RDM for the 1-RDM does not exist!\")\n # Check whether existing rdm1 is in spin-orbital basis\n if self._rdm1.shape[0] != 2 * n_MOs:\n raise TequilaException(\"The existing RDM needs to be in spin-orbital basis, it is already spin-free!\")\n # Do summation\n rdm1_spinsum = numpy.zeros([n_MOs, n_MOs])\n for p in range(n_MOs):\n for q in range(p + 1):\n rdm1_spinsum[p, q] += self._rdm1[2 * p, 2 * q]\n rdm1_spinsum[p, q] += self._rdm1[2 * p + 1, 2 * q + 1]\n for p in range(n_MOs):\n for q in range(p):\n rdm1_spinsum[q, p] = rdm1_spinsum[p, q]\n\n # Spin summation on rdm2\n if sum_rdm2:\n # Check whether spin-rdm2 exists\n if self._rdm2 is None:\n raise TequilaException(\"The spin-RDM for the 2-RDM does not exist!\")\n # Check whether existing rdm2 is in spin-orbital basis\n if self._rdm2.shape[0] != 2 * n_MOs:\n raise TequilaException(\"The existing RDM needs to be in spin-orbital basis, it is already spin-free!\")\n # Do summation\n rdm2_spinsum = 
numpy.zeros([n_MOs, n_MOs, n_MOs, n_MOs])\n for p, q, r, s in product(range(n_MOs), repeat=4):\n rdm2_spinsum[p, q, r, s] += self._rdm2[2 * p, 2 * q, 2 * r, 2 * s]\n rdm2_spinsum[p, q, r, s] += self._rdm2[2 * p + 1, 2 * q, 2 * r + 1, 2 * s]\n rdm2_spinsum[p, q, r, s] += self._rdm2[2 * p, 2 * q + 1, 2 * r, 2 * s + 1]\n rdm2_spinsum[p, q, r, s] += self._rdm2[2 * p + 1, 2 * q + 1, 2 * r + 1, 2 * s + 1]\n\n return rdm1_spinsum, rdm2_spinsum\n\n def __str__(self) -> str:\n result = str(type(self)) + \"\\n\"\n result += \"Qubit Encoding\\n\"\n result += str(self.transformation) + \"\\n\"\n for k, v in self.parameters.__dict__.items():\n result += \"{key:15} : {value:15} \\n\".format(key=str(k), value=str(v))\n return result\n"
] | [
[
"numpy.zeros",
"numpy.take",
"numpy.linalg.eigh",
"numpy.isclose",
"numpy.abs",
"numpy.ndenumerate",
"numpy.ndarray",
"numpy.einsum"
]
] |
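The tequila quantum-chemistry sample above builds closed-shell MP2 amplitudes as t(a,b,i,j) = g(a,b,i,j) / (e(i) + e(j) - e(a) - e(b)); the docstring's `-a(i) - b(j)` is presumably shorthand for the virtual orbital energies e(a), e(b). A minimal self-contained sketch of that construction, using random placeholder energies and integrals rather than a real molecule:

```python
# Sketch of the closed-shell MP2 amplitude construction used in
# compute_mp2_amplitudes above. Orbital energies and the two-body
# integral tensor are random stand-ins, not data from a molecule.
import numpy

nocc, nvirt = 2, 3
n = nocc + nvirt
rng = numpy.random.default_rng(0)

eps = numpy.sort(rng.normal(size=n))        # placeholder orbital energies
g = rng.normal(size=(n, n, n, n))           # placeholder integrals g[p,q,r,s]

ei = eps[:nocc]                             # occupied energies e(i)
ea = eps[nocc:]                             # virtual energies e(a)
abgij = g[nocc:, nocc:, :nocc, :nocc]       # block g(a, b, i, j)

# t(a,b,i,j) = g(a,b,i,j) / (e(i) + e(j) - e(a) - e(b))
denom = (ei.reshape(1, 1, -1, 1) + ei.reshape(1, 1, 1, -1)
         - ea.reshape(-1, 1, 1, 1) - ea.reshape(1, -1, 1, 1))
t = abgij / denom

# MP2 correlation energy, same contraction pattern as the source:
E = (2.0 * numpy.einsum('abij,abij->', t, abgij)
     - numpy.einsum('abji,abij->', t, abgij))
print("toy MP2 correlation energy:", E)
```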
justindujardin/allennlp | [
"c4559f3751775aa8bc018db417edc119d29d8051"
] | [
"allennlp/modules/elmo_lstm.py"
] | [
"\"\"\"\nA stacked bidirectional LSTM with skip connections between layers.\n\"\"\"\nfrom typing import Optional, Tuple, List\nimport warnings\n\nimport torch\nfrom torch.nn.utils.rnn import PackedSequence, pad_packed_sequence\n\nwith warnings.catch_warnings():\n warnings.filterwarnings(\"ignore\", category=FutureWarning)\n import h5py\nimport numpy\n\nfrom allennlp.modules.lstm_cell_with_projection import LstmCellWithProjection\nfrom allennlp.common.checks import ConfigurationError\nfrom allennlp.modules.encoder_base import _EncoderBase\nfrom allennlp.common.file_utils import cached_path\n\n\nclass ElmoLstm(_EncoderBase):\n \"\"\"\n A stacked, bidirectional LSTM which uses\n [`LstmCellWithProjection`'s](./lstm_cell_with_projection.md)\n with highway layers between the inputs to layers.\n The inputs to the forward and backward directions are independent - forward and backward\n states are not concatenated between layers.\n\n Additionally, this LSTM maintains its `own` state, which is updated every time\n `forward` is called. It is dynamically resized for different batch sizes and is\n designed for use with non-continuous inputs (i.e inputs which aren't formatted as a stream,\n such as text used for a language modeling task, which is how stateful RNNs are typically used).\n This is non-standard, but can be thought of as having an \"end of sentence\" state, which is\n carried across different sentences.\n\n # Parameters\n\n input_size : `int`, required\n The dimension of the inputs to the LSTM.\n hidden_size : `int`, required\n The dimension of the outputs of the LSTM.\n cell_size : `int`, required.\n The dimension of the memory cell of the `LstmCellWithProjection`.\n num_layers : `int`, required\n The number of bidirectional LSTMs to use.\n requires_grad : `bool`, optional\n If True, compute gradient of ELMo parameters for fine tuning.\n recurrent_dropout_probability : `float`, optional (default = 0.0)\n The dropout probability to be used in a dropout scheme as stated in\n [A Theoretically Grounded Application of Dropout in Recurrent Neural Networks]\n (https://arxiv.org/abs/1512.05287).\n state_projection_clip_value : `float`, optional, (default = None)\n The magnitude with which to clip the hidden_state after projecting it.\n memory_cell_clip_value : `float`, optional, (default = None)\n The magnitude with which to clip the memory cell.\n \"\"\"\n\n def __init__(\n self,\n input_size: int,\n hidden_size: int,\n cell_size: int,\n num_layers: int,\n requires_grad: bool = False,\n recurrent_dropout_probability: float = 0.0,\n memory_cell_clip_value: Optional[float] = None,\n state_projection_clip_value: Optional[float] = None,\n ) -> None:\n super().__init__(stateful=True)\n\n # Required to be wrapped with a `PytorchSeq2SeqWrapper`.\n self.input_size = input_size\n self.hidden_size = hidden_size\n self.num_layers = num_layers\n self.cell_size = cell_size\n self.requires_grad = requires_grad\n\n forward_layers = []\n backward_layers = []\n\n lstm_input_size = input_size\n go_forward = True\n for layer_index in range(num_layers):\n forward_layer = LstmCellWithProjection(\n lstm_input_size,\n hidden_size,\n cell_size,\n go_forward,\n recurrent_dropout_probability,\n memory_cell_clip_value,\n state_projection_clip_value,\n )\n backward_layer = LstmCellWithProjection(\n lstm_input_size,\n hidden_size,\n cell_size,\n not go_forward,\n recurrent_dropout_probability,\n memory_cell_clip_value,\n state_projection_clip_value,\n )\n lstm_input_size = hidden_size\n\n 
self.add_module(\"forward_layer_{}\".format(layer_index), forward_layer)\n self.add_module(\"backward_layer_{}\".format(layer_index), backward_layer)\n forward_layers.append(forward_layer)\n backward_layers.append(backward_layer)\n self.forward_layers = forward_layers\n self.backward_layers = backward_layers\n\n def forward(self, inputs: torch.Tensor, mask: torch.LongTensor) -> torch.Tensor:\n \"\"\"\n # Parameters\n\n inputs : `torch.Tensor`, required.\n A Tensor of shape `(batch_size, sequence_length, hidden_size)`.\n mask : `torch.LongTensor`, required.\n A binary mask of shape `(batch_size, sequence_length)` representing the\n non-padded elements in each sequence in the batch.\n\n # Returns\n\n A `torch.Tensor` of shape (num_layers, batch_size, sequence_length, hidden_size),\n where the num_layers dimension represents the LSTM output from that layer.\n \"\"\"\n batch_size, total_sequence_length = mask.size()\n stacked_sequence_output, final_states, restoration_indices = self.sort_and_run_forward(\n self._lstm_forward, inputs, mask\n )\n\n num_layers, num_valid, returned_timesteps, encoder_dim = stacked_sequence_output.size()\n # Add back invalid rows which were removed in the call to sort_and_run_forward.\n if num_valid < batch_size:\n zeros = stacked_sequence_output.new_zeros(\n num_layers, batch_size - num_valid, returned_timesteps, encoder_dim\n )\n stacked_sequence_output = torch.cat([stacked_sequence_output, zeros], 1)\n\n # The states also need to have invalid rows added back.\n new_states = []\n for state in final_states:\n state_dim = state.size(-1)\n zeros = state.new_zeros(num_layers, batch_size - num_valid, state_dim)\n new_states.append(torch.cat([state, zeros], 1))\n final_states = new_states\n\n # It's possible to need to pass sequences which are padded to longer than the\n # max length of the sequence to a Seq2StackEncoder. However, packing and unpacking\n # the sequences mean that the returned tensor won't include these dimensions, because\n # the RNN did not need to process them. 
We add them back on in the form of zeros here.\n sequence_length_difference = total_sequence_length - returned_timesteps\n if sequence_length_difference > 0:\n zeros = stacked_sequence_output.new_zeros(\n num_layers,\n batch_size,\n sequence_length_difference,\n stacked_sequence_output[0].size(-1),\n )\n stacked_sequence_output = torch.cat([stacked_sequence_output, zeros], 2)\n\n self._update_states(final_states, restoration_indices)\n\n # Restore the original indices and return the sequence.\n # Has shape (num_layers, batch_size, sequence_length, hidden_size)\n return stacked_sequence_output.index_select(1, restoration_indices)\n\n def _lstm_forward(\n self,\n inputs: PackedSequence,\n initial_state: Optional[Tuple[torch.Tensor, torch.Tensor]] = None,\n ) -> Tuple[torch.Tensor, Tuple[torch.Tensor, torch.Tensor]]:\n \"\"\"\n # Parameters\n\n inputs : `PackedSequence`, required.\n A batch first `PackedSequence` to run the stacked LSTM over.\n initial_state : `Tuple[torch.Tensor, torch.Tensor]`, optional, (default = None)\n A tuple (state, memory) representing the initial hidden state and memory\n of the LSTM, with shape (num_layers, batch_size, 2 * hidden_size) and\n (num_layers, batch_size, 2 * cell_size) respectively.\n\n # Returns\n\n output_sequence : `torch.FloatTensor`\n The encoded sequence of shape (num_layers, batch_size, sequence_length, hidden_size)\n final_states : `Tuple[torch.FloatTensor, torch.FloatTensor]`\n The per-layer final (state, memory) states of the LSTM, with shape\n (num_layers, batch_size, 2 * hidden_size) and (num_layers, batch_size, 2 * cell_size)\n respectively. The last dimension is duplicated because it contains the state/memory\n for both the forward and backward layers.\n \"\"\"\n if initial_state is None:\n hidden_states: List[Optional[Tuple[torch.Tensor, torch.Tensor]]] = [None] * len(\n self.forward_layers\n )\n elif initial_state[0].size()[0] != len(self.forward_layers):\n raise ConfigurationError(\n \"Initial states were passed to forward() but the number of \"\n \"initial states does not match the number of layers.\"\n )\n else:\n hidden_states = list(zip(initial_state[0].split(1, 0), initial_state[1].split(1, 0)))\n\n inputs, batch_lengths = pad_packed_sequence(inputs, batch_first=True)\n forward_output_sequence = inputs\n backward_output_sequence = inputs\n\n final_states = []\n sequence_outputs = []\n for layer_index, state in enumerate(hidden_states):\n forward_layer = getattr(self, \"forward_layer_{}\".format(layer_index))\n backward_layer = getattr(self, \"backward_layer_{}\".format(layer_index))\n\n forward_cache = forward_output_sequence\n backward_cache = backward_output_sequence\n\n if state is not None:\n forward_hidden_state, backward_hidden_state = state[0].split(self.hidden_size, 2)\n forward_memory_state, backward_memory_state = state[1].split(self.cell_size, 2)\n forward_state = (forward_hidden_state, forward_memory_state)\n backward_state = (backward_hidden_state, backward_memory_state)\n else:\n forward_state = None\n backward_state = None\n\n forward_output_sequence, forward_state = forward_layer(\n forward_output_sequence, batch_lengths, forward_state\n )\n backward_output_sequence, backward_state = backward_layer(\n backward_output_sequence, batch_lengths, backward_state\n )\n # Skip connections, just adding the input to the output.\n if layer_index != 0:\n forward_output_sequence += forward_cache\n backward_output_sequence += backward_cache\n\n sequence_outputs.append(\n torch.cat([forward_output_sequence, 
backward_output_sequence], -1)\n )\n # Append the state tuples in a list, so that we can return\n # the final states for all the layers.\n final_states.append(\n (\n torch.cat([forward_state[0], backward_state[0]], -1),\n torch.cat([forward_state[1], backward_state[1]], -1),\n )\n )\n\n stacked_sequence_outputs: torch.FloatTensor = torch.stack(sequence_outputs)\n # Stack the hidden state and memory for each layer into 2 tensors of shape\n # (num_layers, batch_size, hidden_size) and (num_layers, batch_size, cell_size)\n # respectively.\n final_hidden_states, final_memory_states = zip(*final_states)\n final_state_tuple: Tuple[torch.FloatTensor, torch.FloatTensor] = (\n torch.cat(final_hidden_states, 0),\n torch.cat(final_memory_states, 0),\n )\n return stacked_sequence_outputs, final_state_tuple\n\n def load_weights(self, weight_file: str) -> None:\n \"\"\"\n Load the pre-trained weights from the file.\n \"\"\"\n requires_grad = self.requires_grad\n\n with h5py.File(cached_path(weight_file), \"r\") as fin:\n for i_layer, lstms in enumerate(zip(self.forward_layers, self.backward_layers)):\n for j_direction, lstm in enumerate(lstms):\n # lstm is an instance of LSTMCellWithProjection\n cell_size = lstm.cell_size\n\n dataset = fin[\"RNN_%s\" % j_direction][\"RNN\"][\"MultiRNNCell\"][\n \"Cell%s\" % i_layer\n ][\"LSTMCell\"]\n\n # tensorflow packs together both W and U matrices into one matrix,\n # but pytorch maintains individual matrices. In addition, tensorflow\n # packs the gates as input, memory, forget, output but pytorch\n # uses input, forget, memory, output. So we need to modify the weights.\n tf_weights = numpy.transpose(dataset[\"W_0\"][...])\n torch_weights = tf_weights.copy()\n\n # split the W from U matrices\n input_size = lstm.input_size\n input_weights = torch_weights[:, :input_size]\n recurrent_weights = torch_weights[:, input_size:]\n tf_input_weights = tf_weights[:, :input_size]\n tf_recurrent_weights = tf_weights[:, input_size:]\n\n # handle the different gate order convention\n for torch_w, tf_w in [\n [input_weights, tf_input_weights],\n [recurrent_weights, tf_recurrent_weights],\n ]:\n torch_w[(1 * cell_size) : (2 * cell_size), :] = tf_w[\n (2 * cell_size) : (3 * cell_size), :\n ]\n torch_w[(2 * cell_size) : (3 * cell_size), :] = tf_w[\n (1 * cell_size) : (2 * cell_size), :\n ]\n\n lstm.input_linearity.weight.data.copy_(torch.FloatTensor(input_weights))\n lstm.state_linearity.weight.data.copy_(torch.FloatTensor(recurrent_weights))\n lstm.input_linearity.weight.requires_grad = requires_grad\n lstm.state_linearity.weight.requires_grad = requires_grad\n\n # the bias weights\n tf_bias = dataset[\"B\"][...]\n # tensorflow adds 1.0 to forget gate bias instead of modifying the\n # parameters...\n tf_bias[(2 * cell_size) : (3 * cell_size)] += 1\n torch_bias = tf_bias.copy()\n torch_bias[(1 * cell_size) : (2 * cell_size)] = tf_bias[\n (2 * cell_size) : (3 * cell_size)\n ]\n torch_bias[(2 * cell_size) : (3 * cell_size)] = tf_bias[\n (1 * cell_size) : (2 * cell_size)\n ]\n lstm.state_linearity.bias.data.copy_(torch.FloatTensor(torch_bias))\n lstm.state_linearity.bias.requires_grad = requires_grad\n\n # the projection weights\n proj_weights = numpy.transpose(dataset[\"W_P_0\"][...])\n lstm.state_projection.weight.data.copy_(torch.FloatTensor(proj_weights))\n lstm.state_projection.weight.requires_grad = requires_grad\n"
] | [
[
"torch.FloatTensor",
"torch.stack",
"numpy.transpose",
"torch.cat",
"torch.nn.utils.rnn.pad_packed_sequence"
]
] |
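The `load_weights` routine in the ElmoLstm sample above hinges on one detail: TensorFlow packs the four LSTM gate blocks as (input, memory, forget, output) while this PyTorch cell expects (input, forget, memory, output), so the middle two row blocks of each packed matrix are swapped. A tiny numpy sketch of just that reordering, with placeholder shapes:

```python
# Sketch of the gate-reordering trick from ElmoLstm.load_weights:
# swap the memory and forget blocks of a gate-packed weight matrix.
# Sizes here are tiny placeholders.
import numpy

cell_size = 2
# hypothetical packed matrix: 4 gate blocks of `cell_size` rows each
tf_weights = numpy.arange(4 * cell_size * 3).reshape(4 * cell_size, 3)

torch_weights = tf_weights.copy()
# block 1 (TF: memory) <- block 2 (TF: forget), and vice versa
torch_weights[1 * cell_size:2 * cell_size, :] = tf_weights[2 * cell_size:3 * cell_size, :]
torch_weights[2 * cell_size:3 * cell_size, :] = tf_weights[1 * cell_size:2 * cell_size, :]

# blocks 0 and 3 (input, output gates) stay where they are
assert (torch_weights[:cell_size] == tf_weights[:cell_size]).all()
assert (torch_weights[3 * cell_size:] == tf_weights[3 * cell_size:]).all()
```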
chukren/seisflows | [
"c4a5a8a9411b365c9bba818f6ed3ba03f24e681b"
] | [
"seisflows/postprocess/total_variation.py"
] | [
"\nimport numpy as np\n\nfrom seisflows.tools import unix\nfrom seisflows.tools.array import loadnpy, savenpy\nfrom seisflows.tools.array import grid2mesh, mesh2grid, stack\nfrom seisflows.tools.code import exists\nfrom seisflows.tools.config import SeisflowsParameters, SeisflowsPaths, \\\n ParameterError, custom_import\nfrom seisflows.tools.math import nabla, tv\n\n\nPAR = SeisflowsParameters()\nPATH = SeisflowsPaths()\n\nimport system\nimport solver\n\n\nclass total_variation(custom_import('postprocess', 'regularize')):\n \"\"\" Adds regularization options to base class\n\n So far, can only be used for 2D inversion, because the required spatial\n derivative operator \"nabla\" is not yet available for 3D grids.\n \"\"\"\n\n def check(self):\n \"\"\" Checks parameters and paths\n \"\"\"\n super(total_variation, self).check()\n\n if not PAR.LAMBDA:\n raise ValueError\n\n if not hasattr(PAR, 'EPSILON'):\n setattr(PAR, 'EPSILON', 0.)\n\n\n def nabla(self, mesh, m, g):\n M, grid = mesh2grid(g, mesh)\n DM = tv(M, epsilon=PAR.EPSILON)\n dm = grid2mesh(DM, grid, mesh)\n return dm/np.mean(m)\n\n"
] | [
[
"numpy.mean"
]
] |
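The seisflows `total_variation` sample above delegates the actual operator to `seisflows.tools.math.tv`, which is not shown in this row. Below is a minimal sketch of a standard epsilon-smoothed total-variation magnitude on a regular 2D grid, offered as an assumed stand-in for illustration, not as the actual seisflows implementation:

```python
# Assumed sketch of an epsilon-smoothed TV term on a 2D grid; the real
# seisflows.tools.math.tv may differ. Epsilon keeps the gradient
# magnitude differentiable where the model is locally flat.
import numpy as np

def tv_sketch(Z, epsilon=1e-3):
    # forward differences along both grid axes (last row/column repeated
    # so the output keeps the input's shape)
    dx = np.diff(Z, axis=0, append=Z[-1:, :])
    dy = np.diff(Z, axis=1, append=Z[:, -1:])
    return np.sqrt(dx ** 2 + dy ** 2 + epsilon ** 2)

# toy model grid standing in for the output of mesh2grid
M = np.outer(np.linspace(0.0, 1.0, 8), np.linspace(1.0, 2.0, 8))
DM = tv_sketch(M, epsilon=0.01)
print(DM.shape)  # same grid shape as the input model
```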
zhenlohuang/tvm | [
"fd2e6d17120a79533852c6bb705429d9c7bc286b",
"fd2e6d17120a79533852c6bb705429d9c7bc286b"
] | [
"vta/tests/python/integration/test_benchmark_topi_conv2d_transpose.py",
"tests/python/frontend/pytorch/test_forward.py"
] | [
"# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\n\"\"\"Testing topi conv2d_transpose operator for VTA\"\"\"\n\nimport json\nimport os\n\nimport pytest\nimport numpy as np\nfrom collections import namedtuple\n\nimport tvm\nfrom tvm import te\nfrom tvm import relay\nfrom tvm import autotvm\nfrom tvm.contrib import utils\nfrom tvm.contrib.pickle_memoize import memoize\nfrom tvm import topi\nimport tvm.topi.testing\nimport vta\nfrom vta import program_fpga, reconfig_runtime\nimport vta.testing\nfrom vta.testing import simulator\n\n\nWorkload = namedtuple(\n \"Conv2DTransposeWorkload\",\n [\n \"batch\",\n \"height\",\n \"width\",\n \"in_filter\",\n \"out_filter\",\n \"hkernel\",\n \"wkernel\",\n \"hpad\",\n \"wpad\",\n \"hstride\",\n \"wstride\",\n \"o_hpad\",\n \"o_wpad\",\n ],\n)\n\n# Get batch info from env\nenv = vta.get_env()\n\n# DCGAN workloads\ndcgan_wklds = [\n # dcgan\n (\"DCGAN.CT1\", Workload(env.BATCH, 4, 4, 1024, 512, 4, 4, 1, 1, 2, 2, 0, 0)),\n (\"DCGAN.CT2\", Workload(env.BATCH, 8, 8, 512, 256, 4, 4, 1, 1, 2, 2, 0, 0)),\n (\"DCGAN.CT3\", Workload(env.BATCH, 16, 16, 256, 128, 4, 4, 1, 1, 2, 2, 0, 0)),\n]\n\n# FIXME: we need a custom clip operator to circumvent a pattern detection limitation\[email protected]_scope(tag=topi.tag.ELEMWISE)\ndef my_clip(x, a_min, a_max):\n \"\"\"Unlike topi's current clip, put min and max into two stages.\"\"\"\n const_min = tvm.tir.const(a_min, x.dtype)\n const_max = tvm.tir.const(a_max, x.dtype)\n x = te.compute(x.shape, lambda *i: tvm.te.min(x(*i), const_max), name=\"clipA\")\n x = te.compute(x.shape, lambda *i: tvm.te.max(x(*i), const_min), name=\"clipB\")\n return x\n\n\n# Helper function to get factors\ndef _find_factors(n):\n factors = []\n for f in range(1, n + 1):\n if n % f == 0:\n factors.append(f)\n return factors\n\n\ndef run_conv2d_transpose(\n env, remote, wl, target, check_correctness=True, print_ir=False, samples=4\n):\n\n # Workload assertions\n assert wl.hpad == wl.wpad\n\n # Perform packing only if we are targeting the accelerator\n if \"arm_cpu\" in target.keys:\n data_pack = False\n layout = \"NCHW\"\n fcompute = topi.arm_cpu.conv2d_transpose_nchw\n fschedule = topi.arm_cpu.schedule_conv2d_transpose_nchw\n elif \"vta\" in target.keys:\n data_pack = True\n layout = \"NCHW%dn%dc\" % (env.BATCH, env.BLOCK_IN)\n fcompute = vta.top.conv2d_transpose_packed\n fschedule = vta.top.schedule_conv2d_transpose_packed\n\n # Derive shapes depending upon packing\n\n a_shape = (wl.batch, wl.in_filter, wl.height, wl.width)\n w_shape = (wl.in_filter, wl.out_filter, wl.hkernel, wl.wkernel)\n if data_pack:\n data_shape = (\n wl.batch // env.BATCH,\n wl.in_filter // env.BLOCK_IN,\n wl.height,\n wl.width,\n env.BATCH,\n env.BLOCK_IN,\n )\n kernel_shape = (\n wl.out_filter // env.BLOCK_OUT,\n wl.in_filter // 
env.BLOCK_IN,\n wl.hkernel,\n wl.wkernel,\n env.BLOCK_OUT,\n env.BLOCK_IN,\n )\n else:\n data_shape = a_shape\n kernel_shape = w_shape\n data = te.placeholder(data_shape, name=\"data\", dtype=env.inp_dtype)\n kernel = te.placeholder(kernel_shape, name=\"kernel\", dtype=env.wgt_dtype)\n padding = relay.nn.get_pad_tuple2d((wl.hpad, wl.wpad))\n\n # Define base computation schedule\n with target:\n\n res = fcompute(\n data, kernel, (wl.hstride, wl.wstride), padding, env.acc_dtype, (wl.o_hpad, wl.o_wpad)\n )\n res = topi.right_shift(res, env.WGT_WIDTH)\n res = my_clip(res, 0, (1 << env.OUT_WIDTH - 1) - 1)\n res = topi.cast(res, env.out_dtype)\n # Derive base schedule\n s = fschedule([res])\n if print_ir:\n print(vta.lower(s, [data, kernel, res], simple_mode=True))\n\n # Derive number of ops\n fout_height = (wl.height - 1) * wl.hstride - 2 * wl.hpad + wl.hkernel + wl.o_hpad\n fout_width = (wl.width - 1) * wl.wstride - 2 * wl.wpad + wl.wkernel + wl.o_wpad\n num_ops = (\n 2\n * wl.batch\n * fout_height\n * fout_width\n * wl.hkernel\n * wl.wkernel\n * wl.out_filter\n * wl.in_filter\n )\n\n # @memoize(\"vta.tests.test_benchmark_topi.conv2d.verify_nhwc\")\n def get_ref_data():\n # derive min max for act and wgt types (max non inclusive)\n a_min, a_max = 0 - (1 << (env.INP_WIDTH - 1)), (1 << (env.INP_WIDTH - 1))\n w_min, w_max = 0 - (1 << (env.WGT_WIDTH - 1)), (1 << (env.WGT_WIDTH - 1))\n a_np = np.random.randint(a_min, a_max, size=a_shape).astype(data.dtype)\n w_np = np.random.randint(\n w_min, w_max, size=(wl.in_filter, wl.out_filter, wl.hkernel, wl.wkernel)\n ).astype(kernel.dtype)\n r_np = tvm.topi.testing.conv2d_transpose_nchw_python(\n a_np.astype(env.acc_dtype),\n w_np.astype(env.acc_dtype),\n (wl.hstride, wl.wstride),\n wl.hpad,\n (wl.o_hpad, wl.o_wpad),\n ).astype(env.acc_dtype)\n return a_np, w_np, r_np\n\n # Data in original format\n data_np, kernel_np, res_ref = get_ref_data()\n if data_pack:\n data_np = data_np.reshape(\n wl.batch // env.BATCH,\n env.BATCH,\n wl.in_filter // env.BLOCK_IN,\n env.BLOCK_IN,\n wl.height,\n wl.width,\n ).transpose((0, 2, 4, 5, 1, 3))\n kernel_np = kernel_np.reshape(\n wl.in_filter // env.BLOCK_IN,\n env.BLOCK_IN,\n wl.out_filter // env.BLOCK_OUT,\n env.BLOCK_OUT,\n wl.hkernel,\n wl.wkernel,\n ).transpose((2, 0, 4, 5, 3, 1))\n kernel_np = np.flip(kernel_np, 2)\n kernel_np = np.flip(kernel_np, 3)\n\n # Build\n if \"vta\" in target.keys:\n mod = vta.build(\n s,\n [data, kernel, res],\n target=target,\n target_host=env.target_host,\n name=\"conv2d_transpose\",\n )\n else:\n mod = tvm.build(\n s,\n [data, kernel, res],\n target=target,\n target_host=env.target_host,\n name=\"conv2d_transpose\",\n )\n temp = utils.tempdir()\n mod.save(temp.relpath(\"conv2d_transpose.o\"))\n remote.upload(temp.relpath(\"conv2d_transpose.o\"))\n f = remote.load_module(\"conv2d_transpose.o\")\n ctx = remote.context(str(target))\n\n res_np = np.zeros(topi.utils.get_const_tuple(res.shape)).astype(res.dtype)\n data_arr = tvm.nd.array(data_np, ctx)\n kernel_arr = tvm.nd.array(kernel_np, ctx)\n res_arr = tvm.nd.array(res_np, ctx)\n time_f = f.time_evaluator(\"conv2d_transpose\", ctx, number=samples)\n\n # In vta sim mode, collect simulator runtime statistics\n stats = {}\n cost = None\n if env.TARGET in [\"sim\", \"tsim\"]:\n # Check if we're in local RPC mode (allows us to rebuild the\n # runtime on the fly when varying the VTA designs)\n local_rpc = int(os.environ.get(\"VTA_LOCAL_SIM_RPC\", \"0\"))\n if local_rpc:\n if env.TARGET == \"sim\":\n 
remote.get_function(\"vta.simulator.profiler_clear\")()\n else:\n remote.get_function(\"vta.tsim.profiler_clear\")()\n cost = time_f(data_arr, kernel_arr, res_arr)\n if env.TARGET == \"sim\":\n stats = json.loads(remote.get_function(\"vta.simulator.profiler_status\")())\n else:\n stats = json.loads(remote.get_function(\"vta.tsim.profiler_status\")())\n else:\n simulator.clear_stats()\n cost = time_f(data_arr, kernel_arr, res_arr)\n stats = simulator.stats()\n else:\n cost = time_f(data_arr, kernel_arr, res_arr)\n\n # Check correctness\n correct = False\n if check_correctness:\n res_orig = res_arr.asnumpy()\n if data_pack:\n res_orig = res_orig.transpose((0, 4, 1, 5, 2, 3)).reshape(\n wl.batch, wl.out_filter, fout_height, fout_width\n )\n res_ref = res_ref >> env.WGT_WIDTH\n res_ref = np.clip(res_ref, 0, (1 << env.OUT_WIDTH - 1) - 1)\n res_ref = res_ref.astype(env.out_dtype)\n correct = np.allclose(res_orig, res_ref)\n\n gops = (num_ops / cost.mean) / float(10 ** 9)\n status = \"PASSED\" if correct else \"FAILED\"\n if \"arm_cpu\" in target.keys:\n device = \"CPU\"\n elif \"vta\" in target.keys:\n device = \"VTA\"\n print(\"%s CONV2D TEST %s: Time cost = %g sec/op, %g GOPS\" % (device, status, cost.mean, gops))\n\n return correct, cost, stats\n\n\[email protected](\"device\", [\"vta\", \"arm_cpu\"])\ndef test_conv2d_transpose(device):\n def _run(env, remote):\n if device == \"vta\":\n target = env.target\n if env.TARGET not in [\"sim\", \"tsim\"]:\n assert tvm.runtime.enabled(\"rpc\")\n program_fpga(remote, bitstream=None)\n reconfig_runtime(remote)\n elif device == \"arm_cpu\":\n target = env.target_vta_cpu\n with autotvm.tophub.context(target): # load pre-tuned schedule parameters\n for _, wl in dcgan_wklds:\n print(wl)\n run_conv2d_transpose(env, remote, wl, target)\n\n vta.testing.run(_run)\n\n\nif __name__ == \"__main__\":\n test_conv2d_transpose(device=\"arm_cpu\")\n test_conv2d_transpose(device=\"vta\")\n",
"# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n# pylint: disable=import-self, invalid-name, unused-argument\n\"\"\"Unit tests for various models and operators\"\"\"\nfrom time import time\nimport os\nimport sys\nfrom scipy.stats import t as tdistr\nimport numpy as np\nimport torch\nimport torchvision\nfrom torch.nn import Module\nimport tvm\nfrom tvm import relay\nfrom tvm.contrib import graph_runtime\nfrom tvm.contrib.nvcc import have_fp16\nimport tvm.testing\nfrom packaging import version as package_version\n\nsys.setrecursionlimit(10000)\n\n\ndef list_ops(expr):\n class OpLister(tvm.relay.ExprVisitor):\n def visit_op(self, expr):\n if expr not in self.node_set:\n self.node_list.append(expr)\n return super().visit_op(expr)\n\n def list_nodes(self, expr):\n self.node_set = {}\n self.node_list = []\n self.visit(expr)\n return self.node_list\n\n return OpLister().list_nodes(expr)\n\n\ndef assert_shapes_match(tru, est):\n if tru.shape != est.shape:\n msg = \"Output shapes {} and {} don't match\"\n raise AssertionError(msg.format(tru.shape, est.shape))\n\n\ndef load_torchvision(model_name):\n \"\"\"Given a model name, returns a Torchvision model in eval mode as well\n as an example input.\"\"\"\n with torch.no_grad():\n if model_name.startswith(\"inception\"):\n height = width = 299\n mean = [0.5, 0.5, 0.5]\n std = [0.5, 0.5, 0.5]\n else:\n height = width = 224\n mean = [0.485, 0.456, 0.406]\n std = [0.229, 0.224, 0.225]\n input_shape = [1, 3, height, width]\n input_data = torch.randn(input_shape).float()\n for channel in range(3):\n input_data[:, channel] -= mean[channel]\n input_data[:, channel] /= std[channel]\n\n if model_name.startswith(\"googlenet\"):\n model = getattr(torchvision.models, model_name)(pretrained=True, aux_logits=True)\n else:\n model = getattr(torchvision.models, model_name)(pretrained=True)\n model = model.float().eval()\n return model, [input_data]\n\n\ndef load_pretrainedmodels(model_name):\n \"\"\"Given a model name, returns a pretrainedmodels.pytorch model in eval\n mode as well as an example input.\"\"\"\n import pretrainedmodels # https://github.com/Cadene/pretrained-models.pytorch\n\n model = getattr(pretrainedmodels, model_name)().float().eval()\n input_shape = [1, *model.input_size]\n input_data = torch.rand(input_shape).float() * 256\n for channel in range(3):\n input_data[:, channel] -= model.mean[channel]\n input_data[:, channel] /= model.std[channel]\n return model, [input_data]\n\n\ndef load_model(model_name):\n \"\"\"Given a model name, returns a model as well as an example input.\"\"\"\n if hasattr(torchvision.models, model_name):\n return load_torchvision(model_name)\n try:\n import pretrainedmodels\n\n if hasattr(pretrainedmodels, model_name):\n return load_pretrainedmodels(model_name)\n except 
ModuleNotFoundError:\n raise ModuleNotFoundError(\"Please install pretrainedmodels.pytorch\")\n raise RuntimeError(\"Model not supported\")\n\n\ndef confidence_interval(mean, stdev, count, alpha=0.01):\n \"\"\"Returns the lower and upper bounds of the confidence interval of a random\n variable. Confidence is 1 - alpha (default confidence is 99%).\"\"\"\n stdval = tdistr.ppf(1 - alpha / 2, count - 1)\n lower, upper = mean + np.array([-1, 1]) * stdval * stdev / np.sqrt(count)\n return lower, upper\n\n\ndef measure_latency(model, input_shapes, output_shapes, thresh, dryruns=40):\n \"\"\"Compute the latency of the given model\"\"\"\n latencies = []\n count = 0\n while True:\n if isinstance(model, Module):\n input_data = [torch.rand(shape).float() for shape in input_shapes]\n if torch.cuda.is_available():\n input_data = list(map(lambda x: x.cuda(), input_data))\n model = model.cuda()\n t_start = time()\n with torch.no_grad():\n model(*input_data)\n t_end = time()\n latencies.append(t_end - t_start)\n else:\n input_data = {}\n for i, shape in enumerate(input_shapes):\n name = \"input\" + str(i)\n arr = np.random.random(shape).astype(\"float32\")\n input_data[name] = tvm.nd.array(arr)\n t_start = time()\n model.set_input(**input_data)\n model.run()\n for i, shape in enumerate(output_shapes):\n arr = np.zeros(shape).astype(\"float32\")\n model.get_output(i, tvm.nd.array(arr))\n t_end = time()\n count += 1\n if count < dryruns:\n continue\n latencies.append(t_end - t_start)\n mean = np.mean(latencies)\n stdev = np.std(latencies)\n sample_size = len(latencies)\n if sample_size > dryruns:\n lower, upper = confidence_interval(mean, stdev, sample_size)\n est = (upper + lower) / 2\n err = (upper - lower) / 2\n if err < thresh:\n return est\n\n\ndef verify_model(model_name, input_data=[], custom_convert_map={}, rtol=1e-5, atol=1e-5):\n \"\"\"Assert that the output of a compiled model matches with that of its\n baseline.\"\"\"\n if isinstance(model_name, str):\n baseline_model, baseline_input = load_model(model_name)\n elif isinstance(input_data, list):\n baseline_model = model_name\n baseline_input = input_data\n elif isinstance(input_data, torch.Tensor) or len(input_data.shape) == 0:\n baseline_model = model_name\n baseline_input = [input_data]\n else:\n assert False, \"Unexpected input format\"\n\n if torch.cuda.is_available():\n if isinstance(baseline_model, torch.nn.Module):\n baseline_model = baseline_model.cuda()\n baseline_input = [inp.cuda() for inp in baseline_input]\n\n with torch.no_grad():\n baseline_outputs = baseline_model(*[input.clone() for input in baseline_input])\n\n if isinstance(baseline_outputs, tuple):\n baseline_outputs = tuple(out.cpu().numpy() for out in baseline_outputs)\n else:\n baseline_outputs = (baseline_outputs.cpu().numpy(),)\n\n trace = torch.jit.trace(baseline_model, [input.clone() for input in baseline_input])\n if isinstance(baseline_model, torch.nn.Module):\n trace = trace.float().eval()\n\n if torch.cuda.is_available():\n trace = trace.cuda()\n else:\n trace = trace.cpu()\n\n input_names = [\"input{}\".format(idx) for idx, inp in enumerate(baseline_input)]\n input_shapes = list(zip(input_names, [inp.shape for inp in baseline_input]))\n mod, params = relay.frontend.from_pytorch(trace, input_shapes, custom_convert_map)\n compiled_input = dict(zip(input_names, [inp.clone().cpu().numpy() for inp in baseline_input]))\n\n with tvm.transform.PassContext(opt_level=3):\n for target, ctx in tvm.testing.enabled_targets():\n relay_graph, relay_lib, relay_params = 
relay.build(mod, target=target, params=params)\n relay_model = graph_runtime.create(relay_graph, relay_lib, ctx)\n relay_model.set_input(**relay_params)\n for name, inp in compiled_input.items():\n relay_model.set_input(name, inp)\n relay_model.run()\n\n for i, baseline_output in enumerate(baseline_outputs):\n compiled_output = relay_model.get_output(i).asnumpy()\n\n assert_shapes_match(baseline_output, compiled_output)\n tvm.testing.assert_allclose(baseline_output, compiled_output, rtol=rtol, atol=atol)\n\n del model_name\n del baseline_model\n torch.cuda.empty_cache()\n\n\n# Single operator tests\[email protected]_gpu\ndef test_forward_pixel_shuffle():\n torch.set_grad_enabled(False)\n input_shape = [1, 144, 16, 16]\n\n input_data = torch.rand(input_shape).float()\n verify_model(torch.nn.PixelShuffle(2).float().eval(), input_data=input_data)\n verify_model(torch.nn.PixelShuffle(3).float().eval(), input_data=input_data)\n verify_model(torch.nn.PixelShuffle(4).float().eval(), input_data=input_data)\n\n\[email protected]_gpu\ndef test_forward_add():\n torch.set_grad_enabled(False)\n input_shape = [10]\n\n class Add1(Module):\n def forward(self, *args):\n return args[0] + args[0]\n\n class Add2(Module):\n def forward(self, *args):\n return args[0] + 1\n\n class Add3(Module):\n def forward(self, *args):\n ones = torch.ones(input_shape, dtype=torch.float)\n if torch.cuda.is_available():\n ones = ones.cuda()\n return args[0] + ones\n\n class Add4(Module):\n def forward(self, *args):\n ones = torch.ones([], dtype=torch.float)\n if torch.cuda.is_available():\n ones = ones.cuda()\n return args[0] + ones\n\n input_data = torch.rand(input_shape).float()\n verify_model(Add1().float().eval(), input_data=input_data)\n verify_model(Add2().float().eval(), input_data=input_data)\n verify_model(Add3().float().eval(), input_data=input_data)\n verify_model(Add4().float().eval(), input_data=input_data)\n\n\[email protected]_gpu\ndef test_forward_subtract():\n torch.set_grad_enabled(False)\n input_shape = [10]\n\n class Subtract1(Module):\n def forward(self, *args):\n return args[0] - args[0]\n\n class Subtract2(Module):\n def forward(self, *args):\n return args[0] - 1\n\n class Subtract3(Module):\n def forward(self, *args):\n ones = torch.ones(input_shape)\n if torch.cuda.is_available():\n ones = ones.cuda()\n return args[0] - ones\n\n class Subtract4(Module):\n def forward(self, *args):\n ones = torch.ones([])\n if torch.cuda.is_available():\n ones = ones.cuda()\n return args[0] - ones\n\n input_data = torch.rand(input_shape).float()\n verify_model(Subtract1().float().eval(), input_data=input_data)\n verify_model(Subtract2().float().eval(), input_data=input_data)\n verify_model(Subtract3().float().eval(), input_data=input_data)\n verify_model(Subtract4().float().eval(), input_data=input_data)\n\n\[email protected]_gpu\ndef test_forward_multiply():\n torch.set_grad_enabled(False)\n input_shape = [10]\n\n class Multiply1(Module):\n def forward(self, *args):\n return args[0] * args[0]\n\n class Multiply2(Module):\n def forward(self, *args):\n return args[0] * 1.0\n\n class Multiply3(Module):\n def forward(self, *args):\n ones = torch.ones(input_shape)\n if torch.cuda.is_available():\n ones = ones.cuda()\n return args[0] * ones\n\n class Multiply4(Module):\n def forward(self, *args):\n ones = torch.ones([])\n if torch.cuda.is_available():\n ones = ones.cuda()\n return args[0] * ones\n\n input_data = torch.rand(input_shape).float()\n verify_model(Multiply1().float().eval(), input_data=input_data)\n 
verify_model(Multiply2().float().eval(), input_data=input_data)\n verify_model(Multiply3().float().eval(), input_data=input_data)\n verify_model(Multiply4().float().eval(), input_data=input_data)\n\n\[email protected]_gpu\ndef test_min_max():\n class Max(Module):\n def forward(self, inp):\n return torch.max(inp)\n\n class Min(Module):\n def forward(self, inp):\n return torch.min(inp)\n\n class Max2(Module):\n def forward(self, inp):\n out, _ = torch.max(inp, 1, keepdim=True)\n return out\n\n class Min2(Module):\n def forward(self, inp):\n out, _ = torch.min(inp, 0, keepdim=False)\n return out\n\n class Max3(Module):\n def forward(self, lhs, rhs):\n return torch.max(lhs, rhs)\n\n class Min3(Module):\n def forward(self, lhs, rhs):\n return torch.min(lhs, rhs)\n\n input_data = [torch.rand((10, 10)), torch.rand((10, 10))]\n\n verify_model(Max(), input_data=input_data[0])\n verify_model(Min(), input_data=input_data[0])\n verify_model(Max2(), input_data=input_data[0])\n verify_model(Min2(), input_data=input_data[0])\n verify_model(Max3(), input_data=input_data)\n verify_model(Min3(), input_data=input_data)\n\n\[email protected]_gpu\ndef test_forward_reciprocal():\n torch.set_grad_enabled(False)\n input_shape = [2, 1, 10, 1, 10]\n\n class Reciprocal1(Module):\n def forward(self, *args):\n return args[0].reciprocal()\n\n input_data = torch.rand(input_shape).float()\n verify_model(Reciprocal1().float().eval(), input_data=input_data)\n\n\[email protected]_gpu\ndef test_forward_repeat():\n torch.set_grad_enabled(False)\n input_shape = [1, 3]\n\n class Repeat1(Module):\n def forward(self, *args):\n return args[0].repeat(1, 1)\n\n class Repeat2(Module):\n def forward(self, *args):\n return args[0].repeat(4, 2)\n\n class Repeat3(Module):\n def forward(self, *args):\n return args[0].repeat(4, 2, 1)\n\n input_data = torch.rand(input_shape).float()\n verify_model(Repeat1().float().eval(), input_data=input_data)\n verify_model(Repeat2().float().eval(), input_data=input_data)\n verify_model(Repeat3().float().eval(), input_data=input_data)\n\n\[email protected]_gpu\ndef test_forward_repeat_interleave():\n torch.set_grad_enabled(False)\n input_shape = [2, 2, 3]\n\n class RepeatInterleave1(Module):\n def forward(self, *args):\n return args[0].repeat_interleave(2)\n\n class RepeatInterleave2(Module):\n def forward(self, *args):\n return args[0].repeat_interleave(3, dim=0)\n\n class RepeatInterleave3(Module):\n def forward(self, *args):\n return args[0].repeat_interleave(2, dim=1)\n\n class RepeatInterleave4(Module):\n def forward(self, *args):\n return args[0].repeat_interleave(4, dim=2)\n\n input_data = torch.rand(input_shape).float()\n verify_model(RepeatInterleave1().float().eval(), input_data=input_data)\n verify_model(RepeatInterleave2().float().eval(), input_data=input_data)\n verify_model(RepeatInterleave3().float().eval(), input_data=input_data)\n verify_model(RepeatInterleave4().float().eval(), input_data=input_data)\n\n\[email protected]_gpu\ndef test_forward_unsqueeze():\n torch.set_grad_enabled(False)\n input_shape = [10, 10]\n\n class Unsqueeze1(Module):\n def forward(self, *args):\n return args[0].unsqueeze(2)\n\n input_data = torch.rand(input_shape).float()\n verify_model(Unsqueeze1().float().eval(), input_data=input_data)\n\n\[email protected]_gpu\ndef test_forward_squeeze():\n torch.set_grad_enabled(False)\n input_shape = [2, 1, 10, 1, 10]\n\n class Squeeze1(Module):\n def forward(self, *args):\n return args[0].squeeze()\n\n class Squeeze2(Module):\n def forward(self, *args):\n return 
args[0].squeeze(1)\n\n input_data = torch.rand(input_shape).float()\n verify_model(Squeeze1().float().eval(), input_data=input_data)\n verify_model(Squeeze2().float().eval(), input_data=input_data)\n\n\[email protected]_gpu\ndef test_forward_arange():\n torch.set_grad_enabled(False)\n\n class Arange1(Module):\n def forward(self, *args):\n return torch.arange(5)\n\n class Arange2(Module):\n def forward(self, *args):\n return torch.arange(2.5)\n\n class Arange3(Module):\n def forward(self, *args):\n return torch.arange(1, 4)\n\n class Arange4(Module):\n def forward(self, *args):\n return torch.arange(1, 2.5, 0.5)\n\n class Arange5(Module):\n def forward(self, *args):\n return torch.arange(1, 2, 1, dtype=torch.int32)\n\n class Arange6(Module):\n def forward(self, *args):\n return torch.arange(start=1, end=6, step=2)\n\n class Arange7(Module):\n def forward(self, *args):\n return torch.arange(1, 4, dtype=torch.float32)\n\n class Arange8(Module):\n def forward(self, *args):\n return torch.arange(1, 2, 1, dtype=torch.int16)\n\n class Arange9(Module):\n def forward(self, *args):\n end = torch.add(torch.tensor(4), 1)\n return torch.arange(end) + torch.ones((5,), dtype=torch.int64)\n\n class Arange10(Module):\n def forward(self, *args):\n end = torch.add(torch.tensor(4.0), torch.tensor(1.0))\n return torch.arange(end) + torch.ones((5,), dtype=torch.float)\n\n class Arange11(Module):\n def forward(self, *args):\n start = torch.add(torch.tensor(1), 1)\n end = torch.add(torch.tensor(4), 1)\n step = torch.add(torch.tensor(2), 1)\n out = torch.arange(start, end, step)\n return out + torch.ones((3,), dtype=torch.int64)\n\n class Arange12(Module):\n def forward(self, *args):\n start = torch.add(torch.tensor(1), 1)\n end = torch.add(torch.tensor(4), 1)\n step = torch.add(torch.tensor(2.5), torch.tensor(4.1))\n out = torch.arange(start, end, step)\n return out + torch.ones((3,), dtype=torch.float)\n\n verify_model(Arange1().float().eval())\n verify_model(Arange2().float().eval())\n verify_model(Arange3().float().eval())\n verify_model(Arange4().float().eval())\n verify_model(Arange5().float().eval())\n verify_model(Arange6().float().eval())\n verify_model(Arange7().float().eval())\n verify_model(Arange8().float().eval())\n verify_model(Arange9().float().eval())\n verify_model(Arange10().float().eval())\n verify_model(Arange11().float().eval())\n verify_model(Arange12().float().eval())\n\n\[email protected]_gpu\ndef test_forward_mesh_grid():\n torch.set_grad_enabled(False)\n\n class MeshGrid1(Module):\n def forward(self, *args):\n x = torch.tensor([1, 2, 3])\n y = torch.tensor([4, 5, 6])\n grid_x, grid_y = torch.meshgrid([x, y])\n return grid_x, grid_y\n\n class MeshGrid2(Module):\n def forward(self, *args):\n x = torch.tensor([1, 2, 3], dtype=torch.float32)\n y = torch.add(torch.tensor(5, dtype=torch.float32), 1)\n grid_x, grid_y = torch.meshgrid([x, y])\n return grid_x, grid_y\n\n verify_model(MeshGrid1().float().eval())\n verify_model(MeshGrid2().float().eval())\n\n\[email protected]_gpu\ndef test_forward_abs():\n torch.set_grad_enabled(False)\n input_shape = [2, 1, 10, 1, 10]\n\n class Abs1(Module):\n def forward(self, *args):\n return args[0].abs()\n\n input_data = torch.rand(input_shape).float()\n verify_model(Abs1().float().eval(), input_data=input_data)\n\n\[email protected]_gpu\ndef test_forward_concatenate():\n torch.set_grad_enabled(False)\n input_shape = [1, 3, 10, 10]\n\n class Concatenate1(Module):\n def forward(self, *args):\n return torch.cat([args[0][:, 0].unsqueeze(1), args[0][:, 
1].unsqueeze(1)], 1)\n\n    class Concatenate2(Module):\n        def forward(self, *args):\n            a = (args[0][:, :, 0] + 2) * 7\n            b = (args[0][:, :, 1] + 3) * 11\n            c = (args[0][:, :, 2] + 5) * 13\n            return torch.cat([t.unsqueeze(2) for t in [a, b, c]], 2)\n\n    input_data = torch.rand(input_shape).float()\n    verify_model(Concatenate1().float().eval(), input_data=input_data)\n    verify_model(Concatenate2().float().eval(), input_data=input_data)\n\n\n@tvm.testing.uses_gpu\ndef test_forward_relu():\n    torch.set_grad_enabled(False)\n    input_shape = [10, 10]\n    input_data = torch.rand(input_shape).float()\n    verify_model(torch.nn.ReLU().eval(), input_data=input_data)\n\n\n@tvm.testing.uses_gpu\ndef test_forward_prelu():\n    torch.set_grad_enabled(False)\n    input_shape = [1, 3, 10, 10]\n    input_data = torch.rand(input_shape).float()\n    verify_model(torch.nn.PReLU(num_parameters=3).eval(), input_data=input_data)\n\n\n@tvm.testing.uses_gpu\ndef test_forward_leakyrelu():\n    torch.set_grad_enabled(False)\n    input_shape = [1, 3, 10, 10]\n    input_data = torch.rand(input_shape).float()\n    verify_model(torch.nn.LeakyReLU().eval(), input_data=input_data)\n    verify_model(torch.nn.LeakyReLU(negative_slope=0.05).eval(), input_data=input_data)\n    verify_model(torch.nn.LeakyReLU(negative_slope=1.0, inplace=True).eval(), input_data=input_data)\n    verify_model(\n        torch.nn.LeakyReLU(negative_slope=1.25, inplace=True).eval(), input_data=input_data\n    )\n\n\n@tvm.testing.uses_gpu\ndef test_forward_elu():\n    torch.set_grad_enabled(False)\n    input_shape = [1, 3, 10, 10]\n    input_data = torch.rand(input_shape).float()\n    verify_model(torch.nn.ELU().eval(), input_data=input_data)\n    verify_model(torch.nn.ELU(alpha=0.3).eval(), input_data=input_data)\n    verify_model(torch.nn.ELU(alpha=1.0).eval(), input_data=input_data)\n    verify_model(torch.nn.ELU(alpha=1.3).eval(), input_data=input_data)\n\n\n@tvm.testing.uses_gpu\ndef test_forward_celu():\n    torch.set_grad_enabled(False)\n    input_shape = [1, 3, 10, 10]\n    input_data = torch.rand(input_shape).float()\n    verify_model(torch.nn.CELU().eval(), input_data=input_data)\n    verify_model(torch.nn.CELU(alpha=0.3).eval(), input_data=input_data)\n    verify_model(torch.nn.CELU(alpha=1.0).eval(), input_data=input_data)\n    verify_model(torch.nn.CELU(alpha=1.3).eval(), input_data=input_data)\n\n\n@tvm.testing.uses_gpu\ndef test_forward_gelu():\n    torch.set_grad_enabled(False)\n    input_shape = [1, 3, 10, 10]\n    input_data = torch.rand(input_shape).float()\n    verify_model(torch.nn.GELU().eval(), input_data=input_data)\n\n\n@tvm.testing.uses_gpu\ndef test_forward_selu():\n    torch.set_grad_enabled(False)\n    input_shape = [1, 3, 10, 10]\n    input_data = torch.rand(input_shape).float()\n    verify_model(torch.nn.SELU().eval(), input_data=input_data)\n\n\n@tvm.testing.uses_gpu\ndef test_forward_softplus():\n    torch.set_grad_enabled(False)\n    input_shape = [1, 3, 10, 10]\n    input_data = torch.rand(input_shape).float()\n    verify_model(torch.nn.Softplus().eval(), input_data=input_data)\n    verify_model(torch.nn.Softplus(beta=1.5, threshold=20).eval(), input_data=input_data)\n    verify_model(torch.nn.Softplus(beta=5, threshold=10).eval(), input_data=input_data)\n\n\n@tvm.testing.uses_gpu\ndef test_forward_softsign():\n    torch.set_grad_enabled(False)\n    input_shape = [1, 3, 10, 10]\n    input_data = torch.rand(input_shape).float()\n    verify_model(torch.nn.Softsign().eval(), input_data=input_data)\n\n\n@tvm.testing.uses_gpu\ndef test_forward_log_sigmoid():\n    torch.set_grad_enabled(False)\n    input_shape = [10, 10]\n    input_data = torch.rand(input_shape).float()\n    
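# LogSigmoid computes log(sigmoid(x)); this checks the frontend reproduces\n    # PyTorch's numerics for it (presumably built from existing log/sigmoid ops).\n    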
verify_model(torch.nn.LogSigmoid().eval(), input_data=input_data)\n\n\n@tvm.testing.uses_gpu\ndef test_forward_adaptiveavgpool():\n    torch.set_grad_enabled(False)\n    input_shape = [1, 3, 10, 10]\n    input_data = torch.rand(input_shape).float()\n    verify_model(torch.nn.AdaptiveAvgPool2d([1, 1]).eval(), input_data=input_data)\n    verify_model(torch.nn.AdaptiveAvgPool2d([10, 10]).eval(), input_data=input_data)\n\n\n@tvm.testing.uses_gpu\ndef test_forward_maxpool2d():\n    torch.set_grad_enabled(False)\n    input_shape = [1, 3, 10, 10]\n    input_data = torch.rand(input_shape).float()\n\n    verify_model(torch.nn.MaxPool2d(kernel_size=[1, 1]).eval(), input_data)\n    verify_model(torch.nn.MaxPool2d(kernel_size=[10, 10]).eval(), input_data)\n    verify_model(torch.nn.MaxPool2d(kernel_size=[4, 4], padding=2, stride=2).eval(), input_data)\n\n    # A functional variant (default strides = None case)\n    class MaxPool2D(Module):\n        def forward(self, *args):\n            return torch.nn.functional.max_pool2d(args[0], kernel_size=[10, 10])\n\n    verify_model(MaxPool2D(), input_data=input_data)\n\n    class MaxPool2DWithIndices(Module):\n        def __init__(self):\n            super(MaxPool2DWithIndices, self).__init__()\n            self.pool = torch.nn.MaxPool2d(kernel_size=[1, 1], return_indices=True)\n\n        def forward(self, *args):\n            output, indices = self.pool(args[0])\n            return output\n\n    verify_model(MaxPool2DWithIndices().float().eval(), input_data=input_data)\n\n\n@tvm.testing.uses_gpu\ndef test_forward_maxpool1d():\n    torch.set_grad_enabled(False)\n    input_shape = [1, 3, 10]\n    input_data = torch.rand(input_shape).float()\n\n    verify_model(torch.nn.MaxPool1d(kernel_size=1).eval(), input_data)\n    verify_model(torch.nn.MaxPool1d(kernel_size=10).eval(), input_data)\n    verify_model(torch.nn.MaxPool1d(kernel_size=4, padding=2, stride=2).eval(), input_data)\n\n    # A functional variant (default strides = None case)\n    class MaxPool1D(Module):\n        def forward(self, *args):\n            return torch.nn.functional.max_pool1d(args[0], kernel_size=10)\n\n    verify_model(MaxPool1D(), input_data=input_data)\n\n\n@tvm.testing.uses_gpu\ndef test_forward_maxpool3d():\n    torch.set_grad_enabled(False)\n    input_shape = [1, 3, 10, 10, 10]\n    input_data = torch.rand(input_shape).float()\n\n    verify_model(torch.nn.MaxPool3d(kernel_size=[1, 1, 1]).eval(), input_data)\n    verify_model(torch.nn.MaxPool3d(kernel_size=[10, 10, 10]).eval(), input_data)\n    verify_model(torch.nn.MaxPool3d(kernel_size=[4, 4, 4], padding=2, stride=2).eval(), input_data)\n\n    # A functional variant (default strides = None case)\n    class MaxPool3D(Module):\n        def forward(self, *args):\n            return torch.nn.functional.max_pool3d(args[0], kernel_size=[10, 10, 10])\n\n    verify_model(MaxPool3D(), input_data=input_data)\n\n\n@tvm.testing.uses_gpu\ndef test_forward_split():\n    torch.set_grad_enabled(False)\n    input_shape = [4, 10]\n\n    class Split(Module):\n        def __init__(self, split_size_or_sections, dim):\n            super(Split, self).__init__()\n            self.split_size_or_sections = split_size_or_sections\n            self.dim = dim\n\n        def forward(self, *args):\n            return torch.split(args[0], self.split_size_or_sections, self.dim)\n\n    input_data = torch.rand(input_shape).float()\n    verify_model(Split(2, 0).float().eval(), input_data=input_data)\n    verify_model(Split(3, 1).float().eval(), input_data=input_data)\n    verify_model(Split(4, 1).float().eval(), input_data=input_data)\n    verify_model(Split([2, 3, 5], 1).float().eval(), input_data=input_data)\n\n\n@tvm.testing.uses_gpu\ndef test_forward_avgpool():\n    torch.set_grad_enabled(False)\n    input_shape = [1, 3, 10, 10]\n\n    class AvgPool2D2(Module):\n        def 
forward(self, *args):\n            return torch.nn.functional.avg_pool2d(args[0], kernel_size=[10, 10])\n\n    input_data = torch.rand(input_shape).float()\n    verify_model(torch.nn.AvgPool2d(kernel_size=[10, 10]).eval(), input_data=input_data)\n    verify_model(AvgPool2D2().float().eval(), input_data=input_data)\n\n\n@tvm.testing.uses_gpu\ndef test_forward_avgpool3d():\n    torch.set_grad_enabled(False)\n    input_shape = [1, 3, 10, 10, 10]\n\n    class AvgPool3D1(Module):\n        def forward(self, *args):\n            return torch.nn.functional.avg_pool3d(args[0], kernel_size=[10, 10, 10])\n\n    input_data = torch.rand(input_shape).float()\n    verify_model(torch.nn.AvgPool3d(kernel_size=[10, 10, 10]).eval(), input_data=input_data)\n    verify_model(AvgPool3D1().float().eval(), input_data=input_data)\n\n\n@tvm.testing.uses_gpu\ndef test_forward_hardtanh():\n    torch.set_grad_enabled(False)\n    input_shape = [10]\n    input_data = torch.rand(input_shape).float()\n    verify_model(torch.nn.Hardtanh().eval(), input_data=input_data)\n\n\n@tvm.testing.uses_gpu\ndef test_forward_conv():\n    torch.set_grad_enabled(False)\n    conv1d_input_shape = [1, 3, 10]\n    conv2d_input_shape = [1, 3, 10, 10]\n\n    class Conv2D1(Module):\n        def __init__(self):\n            super(Conv2D1, self).__init__()\n            self.conv = torch.nn.Conv2d(3, 6, 7, bias=True)\n            self.softmax = torch.nn.Softmax()\n\n        def forward(self, *args):\n            return self.softmax(self.conv(args[0]))\n\n    class Conv2D2(Module):\n        def __init__(self):\n            super(Conv2D2, self).__init__()\n            self.conv = torch.nn.Conv2d(3, 6, 7, bias=False)\n            self.softmax = torch.nn.Softmax()\n\n        def forward(self, *args):\n            return self.softmax(self.conv(args[0]))\n\n    class Conv2D3(Module):\n        def __init__(self):\n            super(Conv2D3, self).__init__()\n            self.conv = torch.nn.Conv2d(3, 6, 7, groups=3, bias=False)\n            self.softmax = torch.nn.Softmax()\n\n        def forward(self, *args):\n            return self.softmax(self.conv(args[0]))\n\n    class Conv1D1(Module):\n        def __init__(self):\n            super(Conv1D1, self).__init__()\n            self.conv = torch.nn.Conv1d(3, 6, 7)\n            self.softmax = torch.nn.Softmax()\n\n        def forward(self, *args):\n            return self.softmax(self.conv(args[0]))\n\n    class Conv1D2(Module):\n        def __init__(self):\n            super(Conv1D2, self).__init__()\n            self.conv = torch.nn.Conv1d(3, 6, 7, bias=False)\n            self.softmax = torch.nn.Softmax()\n\n        def forward(self, *args):\n            return self.softmax(self.conv(args[0]))\n\n    class Conv1D3(Module):\n        def __init__(self):\n            super(Conv1D3, self).__init__()\n            self.conv = torch.nn.Conv1d(3, 6, 7, groups=3, bias=False)\n            self.softmax = torch.nn.Softmax()\n\n        def forward(self, *args):\n            return self.softmax(self.conv(args[0]))\n\n    conv2d_input_data = torch.rand(conv2d_input_shape).float()\n    verify_model(Conv2D1().float().eval(), input_data=conv2d_input_data)\n    verify_model(Conv2D2().float().eval(), input_data=conv2d_input_data)\n    # depthwise conv with channel multiplier 2\n    verify_model(Conv2D3().float().eval(), input_data=conv2d_input_data)\n    # group conv\n    verify_model(\n        torch.nn.Conv2d(8, 8, kernel_size=(3, 3), stride=(1, 1), groups=2).eval(),\n        input_data=torch.randn((1, 8, 16, 16)),\n    )\n\n    conv1d_input_data = torch.rand(conv1d_input_shape).float()\n    verify_model(Conv1D1().float().eval(), input_data=conv1d_input_data)\n    verify_model(Conv1D2().float().eval(), input_data=conv1d_input_data)\n    verify_model(Conv1D3().float().eval(), input_data=conv1d_input_data)\n\n\n@tvm.testing.uses_gpu\ndef test_forward_conv_transpose():\n    torch.set_grad_enabled(False)\n    conv2d_input_shape = [1, 3, 10, 10]\n    conv2d_input_data = torch.rand(conv2d_input_shape).float()\n    
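# With stride 1, no padding and no dilation, ConvTranspose2d output size is\n    # in + kernel_size - 1, so the 7x7 kernel below maps 10x10 inputs to 16x16.\n    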
verify_model(torch.nn.ConvTranspose2d(3, 6, 7, bias=True), input_data=conv2d_input_data)\n    verify_model(torch.nn.ConvTranspose2d(3, 12, 3, bias=False), input_data=conv2d_input_data)\n\n    conv1d_input_shape = [1, 3, 10]\n    conv1d_input_data = torch.rand(conv1d_input_shape).float()\n    verify_model(torch.nn.ConvTranspose1d(3, 6, 7, bias=True), input_data=conv1d_input_data)\n    verify_model(torch.nn.ConvTranspose1d(3, 12, 3, bias=False), input_data=conv1d_input_data)\n\n\n@tvm.testing.uses_gpu\ndef test_forward_threshold():\n    torch.set_grad_enabled(False)\n    input_shape = [1, 3]\n    input_data = torch.rand(input_shape).float()\n    verify_model(torch.nn.Threshold(0, 0).float().eval(), input_data=input_data)\n\n\n@tvm.testing.uses_gpu\ndef test_forward_contiguous():\n    torch.set_grad_enabled(False)\n    input_shape = [10]\n\n    class Contiguous1(Module):\n        def forward(self, *args):\n            return args[0].contiguous()\n\n    input_data = torch.rand(input_shape).float()\n    verify_model(Contiguous1().float().eval(), input_data=input_data)\n\n\n@tvm.testing.uses_gpu\ndef test_forward_batchnorm():\n    def init_weight(m):\n        torch.nn.init.normal_(m.weight, 0, 0.01)\n        torch.nn.init.normal_(m.bias)\n\n    inp_2d = torch.rand((1, 16, 10, 10))\n    inp_3d = torch.rand((1, 16, 10, 10, 10))\n\n    for bn, inp in [(torch.nn.BatchNorm2d(16), inp_2d), (torch.nn.BatchNorm3d(16), inp_3d)]:\n        init_weight(bn.eval())\n        verify_model(bn.eval(), input_data=inp)\n\n\n@tvm.testing.uses_gpu\ndef test_forward_instancenorm():\n    inp_2d = torch.rand((1, 16, 10, 10))\n    inp_3d = torch.rand((1, 16, 10, 10, 10))\n\n    for ins_norm, inp in [\n        (torch.nn.InstanceNorm2d(16), inp_2d),\n        (torch.nn.InstanceNorm3d(16), inp_3d),\n    ]:\n        verify_model(ins_norm.eval(), input_data=inp)\n\n\n@tvm.testing.uses_gpu\ndef test_forward_layernorm():\n    def init_weight(m):\n        torch.nn.init.normal_(m.weight, 0, 0.01)\n        torch.nn.init.normal_(m.bias, 0.02)\n\n    inp_2d = torch.rand((1, 16, 10, 10))\n    inp_3d = torch.rand((1, 16, 10, 10, 10))\n    for ln, inp in [(torch.nn.LayerNorm(10), inp_2d), (torch.nn.LayerNorm(10), inp_3d)]:\n        init_weight(ln.eval())\n        verify_model(ln.eval(), input_data=inp)\n\n\n@tvm.testing.uses_gpu\ndef test_forward_groupnorm():\n    input_shape = [10, 6, 5, 5]\n    input_data = torch.rand(input_shape).float()\n\n    # Separate 6 channels into 3 groups\n    verify_model(torch.nn.GroupNorm(3, 6).eval(), input_data=input_data)\n\n    # Put all 6 channels into a single group (equivalent to LayerNorm)\n    verify_model(torch.nn.GroupNorm(1, 6).eval(), input_data=input_data)\n\n    # Separate 6 channels into 6 groups (equivalent to InstanceNorm)\n    verify_model(torch.nn.GroupNorm(6, 6).eval(), input_data=input_data)\n\n    input_shape = [1, 10, 4, 7]\n    input_data = torch.rand(input_shape).float()\n    verify_model(torch.nn.GroupNorm(1, 10).eval(), input_data=input_data)\n    verify_model(torch.nn.GroupNorm(2, 10).eval(), input_data=input_data)\n    verify_model(torch.nn.GroupNorm(5, 10).eval(), input_data=input_data)\n    verify_model(torch.nn.GroupNorm(10, 10).eval(), input_data=input_data)\n\n\n@tvm.testing.uses_gpu\ndef test_forward_reshape():\n    torch.set_grad_enabled(False)\n    input_shape = [2, 1, 10, 1, 10]\n    new_shape = [2, 1, 10, 10]\n\n    class Reshape1(Module):\n        def forward(self, *args):\n            return args[0].reshape(new_shape)\n\n    class Reshape2(Module):\n        def forward(self, *args):\n            return args[0].reshape([-1])\n\n    class Reshape3(torch.nn.Module):\n        def forward(self, x):\n            x_shape = x.shape\n            return x.reshape((x_shape[0] * x_shape[1], x_shape[2]))\n\n    input_data = torch.rand(input_shape).float()\n    
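# Reshape3 computes its target shape from x.shape at runtime, so it covers\n    # shape values coming out of the traced graph, not just Python constants.\n    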
verify_model(Reshape1(), input_data=input_data)\n    verify_model(Reshape2(), input_data=input_data)\n    verify_model(Reshape3(), input_data=torch.randn(2, 3, 4))\n\n\n@tvm.testing.uses_gpu\ndef test_flatten():\n    class Flatten(Module):\n        def forward(self, x):\n            return torch.flatten(x)\n\n    class BatchFlatten(Module):\n        def forward(self, x):\n            return torch.flatten(x, start_dim=1)\n\n    inp = torch.rand((5, 2, 2))\n    verify_model(Flatten(), input_data=inp)\n    verify_model(BatchFlatten(), input_data=inp)\n\n\n@tvm.testing.uses_gpu\ndef test_forward_transpose():\n    torch.set_grad_enabled(False)\n    input_shape = [1, 3, 10, 10]\n\n    class Transpose1(Module):\n        def forward(self, *args):\n            return args[0].transpose(2, 3)\n\n    class Transpose2(Module):\n        def forward(self, *args):\n            return args[0].transpose(-2, -1)\n\n    class Transpose3(Module):\n        def forward(self, *args):\n            return args[0].permute(0, 2, 3, 1)\n\n    input_data = torch.rand(input_shape).float()\n    verify_model(Transpose1().float().eval(), input_data=input_data)\n    verify_model(Transpose2().float().eval(), input_data=input_data)\n    verify_model(Transpose3().float().eval(), input_data=input_data)\n\n\n@tvm.testing.uses_gpu\ndef test_forward_size():\n    torch.set_grad_enabled(False)\n    input_shape = [1, 3]\n\n    class Size1(Module):\n        def forward(self, *args):\n            return float(args[0].size(0)) * args[0]\n\n    input_data = torch.rand(input_shape).float()\n    verify_model(Size1().float().eval(), input_data=input_data)\n\n\n@tvm.testing.uses_gpu\ndef test_type_as():\n    torch.set_grad_enabled(False)\n    input_shape = [1, 3]\n\n    def _create_module(dtype):\n        class TypeAs(Module):\n            def forward(self, *args):\n                expected_type_tensor = torch.zeros(1, 3, dtype=dtype)\n                return args[0].type_as(expected_type_tensor)\n\n        return TypeAs()\n\n    input_data = torch.randn(input_shape).float()\n    verify_model(_create_module(torch.float64), input_data=input_data)\n    verify_model(_create_module(torch.float32), input_data=input_data)\n    verify_model(_create_module(torch.int64), input_data=input_data)\n    verify_model(_create_module(torch.int32), input_data=input_data)\n    verify_model(_create_module(torch.int16), input_data=input_data)\n    verify_model(_create_module(torch.int8), input_data=input_data)\n\n    if torch.cuda.is_available():\n        check_fp16 = False\n        try:\n            # Only check half precision on supported hardware.\n            if have_fp16(tvm.gpu(0).compute_version):\n                check_fp16 = True\n        except Exception:\n            # If GPU is not enabled in TVM, skip the fp16 test.\n            pass\n\n        # Temporarily disable the fp16 test\n        check_fp16 = False\n\n        if check_fp16:\n            verify_model(_create_module(torch.float16), input_data=input_data)\n\n\n@tvm.testing.uses_gpu\ndef test_forward_view():\n    torch.set_grad_enabled(False)\n    input_shape = [1, 3, 10, 10]\n\n    class View1(Module):\n        def forward(self, *args):\n            return args[0].view((1, 3 * 10 * 10))\n\n    class View2(Module):\n        def forward(self, *args):\n            return args[0].view(args[0].shape[0], -1)\n\n    class View3(Module):\n        def forward(self, *args):\n            d1 = torch.tensor(3) * torch.tensor(10) * torch.tensor(10)\n            return args[0].view(args[0].shape[0], d1)\n\n    input_data = torch.rand(input_shape).float()\n    verify_model(View1().float().eval(), input_data=input_data)\n    verify_model(View2().float().eval(), input_data=input_data)\n    verify_model(View3().float().eval(), input_data=input_data)\n\n\n@tvm.testing.uses_gpu\ndef test_forward_select():\n    torch.set_grad_enabled(False)\n    input_shape = [1, 3, 10, 10]\n\n    class Select1(Module):\n        def forward(self, *args):\n            return args[0].select(1, 1)\n\n    class 
IndexedSelect(Module):\n        def __init__(self, inp, dim):\n            super().__init__()\n            self.inp = inp\n            self.dim = dim\n            if torch.cuda.is_available():\n                self.inp = self.inp.cuda()\n\n        def forward(self, index):\n            return torch.index_select(self.inp, self.dim, index)\n\n    input_data = torch.rand(input_shape).float()\n    verify_model(Select1().float().eval(), input_data=input_data)\n\n    x = torch.randn(3, 4)\n    indices = torch.tensor([0, 2])\n    verify_model(IndexedSelect(x, 0).eval(), input_data=indices)\n    verify_model(IndexedSelect(x, 1).eval(), input_data=indices)\n\n\n@tvm.testing.uses_gpu\ndef test_forward_clone():\n    torch.set_grad_enabled(False)\n    input_shape = [10]\n\n    class Clone1(Module):\n        def forward(self, *args):\n            return args[0].clone()\n\n    input_data = torch.rand(input_shape).float()\n    verify_model(Clone1().float().eval(), input_data=input_data)\n\n\n@tvm.testing.uses_gpu\ndef test_forward_gather():\n    torch.set_grad_enabled(False)\n\n    class Gather1(Module):\n        def forward(self, *args):\n            return torch.gather(args[0], 0, args[1])\n\n    class Gather2(Module):\n        def forward(self, *args):\n            return torch.gather(args[0], 1, args[1])\n\n    class Gather3(Module):\n        def forward(self, *args):\n            return torch.gather(args[0], 2, args[1])\n\n    input_data = torch.rand((4,)).float()\n    index = torch.tensor([1])\n    verify_model(Gather1().float().eval(), input_data=[input_data, index])\n\n    input_data = torch.rand((2, 2)).float()\n    index = torch.tensor([[1, 0], [0, 1]])\n    verify_model(Gather1().float().eval(), input_data=[input_data, index])\n\n    input_data = torch.tensor([[1, 2], [3, 4]])\n    index = torch.tensor([[0, 0], [1, 0]])\n    verify_model(Gather2().float().eval(), input_data=[input_data, index])\n\n    input_data = torch.rand((2, 2)).float()\n    index = torch.tensor([[1, 0], [0, 1]])\n    verify_model(Gather2().float().eval(), input_data=[input_data, index])\n\n    input_data = torch.rand((3, 3, 3)).float()\n    index = torch.tensor(\n        [\n            [[1, 0, 0], [1, 0, 1], [0, 1, 1]],\n            [[1, 1, 1], [1, 2, 1], [1, 0, 1]],\n            [[1, 2, 1], [1, 2, 1], [1, 2, 1]],\n        ]\n    )\n    verify_model(Gather3().float().eval(), input_data=[input_data, index])\n\n\n@tvm.testing.uses_gpu\ndef test_forward_logsoftmax():\n    torch.set_grad_enabled(False)\n    input_shape = [1, 3, 10, 10]\n\n    class LogSoftmax1(Module):\n        def forward(self, *args):\n            return torch.nn.LogSoftmax(dim=1)(args[0][0, 0])\n\n    input_data = torch.rand(input_shape).float()\n    verify_model(LogSoftmax1().float().eval(), input_data=input_data)\n\n\n@tvm.testing.uses_gpu\ndef test_forward_norm():\n    torch.set_grad_enabled(False)\n    input_shape = [1, 3, 10, 10]\n\n    class Norm1(Module):\n        def forward(self, *args):\n            return torch.norm(args[0], p=float(\"inf\"), dim=None, keepdim=False)\n\n    class Norm2(Module):\n        def forward(self, *args):\n            return torch.norm(args[0], p=float(\"-inf\"), dim=None, keepdim=False)\n\n    class Norm3(Module):\n        def forward(self, *args):\n            return torch.norm(args[0], p=float(\"-inf\"), dim=None, keepdim=True)\n\n    class Norm4(Module):\n        def forward(self, *args):\n            return torch.norm(args[0], p=float(\"inf\"), dim=(1, 2), keepdim=False)\n\n    class Norm5(Module):\n        def forward(self, *args):\n            return torch.norm(args[0], p=float(\"inf\"), dim=(1), keepdim=True)\n\n    class Norm6(Module):\n        def forward(self, *args):\n            return torch.norm(args[0], p=float(0.5), dim=(1), keepdim=True)\n\n    class Norm7(Module):\n        def forward(self, *args):\n            return torch.norm(args[0], p=float(1), dim=None, keepdim=False)\n\n    class Norm8(Module):\n        def forward(self, *args):\n            return torch.norm(args[0], p=float(2.0), dim=(1), 
keepdim=True)\n\n    class Norm9(Module):\n        def forward(self, *args):\n            return torch.norm(args[0], p=float(-0.5), dim=(1, 2), keepdim=True)\n\n    class Norm10(Module):\n        def forward(self, *args):\n            return torch.norm(args[0], p=float(-2), dim=(1), keepdim=False)\n\n    input_data = torch.rand(input_shape).float()\n    verify_model(Norm1().float().eval(), input_data=input_data)\n    verify_model(Norm2().float().eval(), input_data=input_data)\n    verify_model(Norm3().float().eval(), input_data=input_data)\n    verify_model(Norm4().float().eval(), input_data=input_data)\n    verify_model(Norm5().float().eval(), input_data=input_data)\n    verify_model(Norm6().float().eval(), input_data=input_data)\n    verify_model(Norm7().float().eval(), input_data=input_data)\n    verify_model(Norm8().float().eval(), input_data=input_data)\n    verify_model(Norm9().float().eval(), input_data=input_data)\n    verify_model(Norm10().float().eval(), input_data=input_data)\n\n\n@tvm.testing.uses_gpu\ndef test_forward_frobenius_norm():\n    torch.set_grad_enabled(False)\n    input_shape = [1, 3, 10, 10]\n\n    class FroNorm1(Module):\n        def forward(self, *args):\n            return torch.norm(args[0])\n\n    class FroNorm2(Module):\n        def forward(self, *args):\n            return torch.norm(args[0], p=\"fro\", dim=None, keepdim=True)\n\n    class FroNorm3(Module):\n        def forward(self, *args):\n            return torch.norm(args[0], p=\"fro\", dim=(1), keepdim=True)\n\n    class FroNorm4(Module):\n        def forward(self, *args):\n            return torch.norm(args[0], dim=None, keepdim=False)\n\n    input_data = torch.rand(input_shape).float()\n    verify_model(FroNorm1().float().eval(), input_data=input_data)\n    verify_model(FroNorm2().float().eval(), input_data=input_data)\n    verify_model(FroNorm3().float().eval(), input_data=input_data)\n    verify_model(FroNorm4().float().eval(), input_data=input_data)\n\n\n@tvm.testing.uses_gpu\ndef test_forward_sigmoid():\n    torch.set_grad_enabled(False)\n    input_shape = [1, 3, 10, 10]\n    input_data = torch.rand(input_shape).float()\n    verify_model(torch.nn.Sigmoid().eval(), input_data=input_data)\n\n\n@tvm.testing.uses_gpu\ndef test_forward_dense():\n    torch.set_grad_enabled(False)\n    input_shape = [1, 3, 10, 10]\n\n    class Dense1(Module):\n        def __init__(self):\n            super(Dense1, self).__init__()\n            self.linear = torch.nn.Linear(10, 7, bias=True)\n\n        def forward(self, *args):\n            return self.linear(args[0][0, 0])\n\n    class Dense2(Module):\n        def __init__(self):\n            super(Dense2, self).__init__()\n            self.linear = torch.nn.Linear(10, 7, bias=False)\n\n        def forward(self, *args):\n            return self.linear(args[0][0, 0])\n\n    input_data = torch.rand(input_shape).float()\n    verify_model(Dense1().float().eval(), input_data=input_data)\n    verify_model(Dense2().float().eval(), input_data=input_data)\n\n    trace = torch.jit.trace(Dense1(), [input_data])\n    mod, params = relay.frontend.from_pytorch(\n        trace,\n        [(\"input\", input_shape)],\n    )\n    assert not any([op.name == \"multiply\" for op in list_ops(mod[\"main\"])])\n\n\n@tvm.testing.uses_gpu\ndef test_forward_dropout():\n    torch.set_grad_enabled(False)\n    input_shape = [1, 3, 10, 10]\n    input_data = torch.rand(input_shape).float()\n    verify_model(torch.nn.Dropout(p=0.5).eval(), input_data=input_data[0, 0])\n    verify_model(torch.nn.Dropout2d(p=0.5).eval(), input_data=input_data[0])\n    verify_model(torch.nn.Dropout3d(p=0.5).eval(), input_data=input_data)\n    verify_model(torch.nn.AlphaDropout(p=0.5).eval(), input_data=input_data[0, 0])\n\n\n@tvm.testing.uses_gpu\ndef test_forward_slice():\n    torch.set_grad_enabled(False)\n    input_shape = [1, 3, 10, 10]\n\n    class Slice1(Module):\n        def 
forward(self, *args):\n            return args[0][:, :, :, :3]\n\n    class Slice2(Module):\n        def forward(self, *args):\n            return args[0][0, :, :-3, :]\n\n    class Slice3(Module):\n        def forward(self, *args):\n            x0 = torch.tensor(2) - torch.tensor(1)\n            x1 = torch.tensor(3) + torch.tensor(1)\n            return args[0][:, x0:, 1:x1, :]\n\n    class SliceWithStride(torch.nn.Module):\n        def forward(self, x):\n            return x[..., 0::2] + x[..., 1::2]\n\n    class SliceWithStride2(torch.nn.Module):\n        def forward(self, x):\n            return x[0::2, 0::2] + x[1::2, 1::2]\n\n    input_data = torch.rand(input_shape).float()\n    verify_model(Slice1(), input_data=input_data)\n    verify_model(Slice2(), input_data=input_data)\n    verify_model(Slice3(), input_data=input_data)\n    verify_model(SliceWithStride(), input_data=torch.randn(1, 4))\n    verify_model(SliceWithStride2(), input_data=torch.randn(4, 4))\n\n\n@tvm.testing.uses_gpu\ndef test_forward_mean():\n    torch.set_grad_enabled(False)\n    input_shape = [1, 3, 10, 10]\n\n    class Mean1(Module):\n        def forward(self, *args):\n            return args[0].mean(2)\n\n    input_data = torch.rand(input_shape).float()\n    verify_model(Mean1().float().eval(), input_data=input_data)\n\n\n@tvm.testing.uses_gpu\ndef test_forward_expand():\n    torch.set_grad_enabled(False)\n\n    class Expand1(Module):\n        def forward(self, *args):\n            return args[0].expand((3, -1, -1, -1))\n\n    input_shape = [1, 3, 10, 10]\n    input_data = torch.rand(input_shape).float()\n    verify_model(Expand1().float().eval(), input_data=input_data)\n\n    class Expand2(Module):\n        def forward(self, *args):\n            return args[0].expand((3, 3, 3, 1))\n\n    input_shape = [3, 1]\n    input_data = torch.rand(input_shape).float()\n    verify_model(Expand2().float().eval(), input_data=input_data)\n\n\n@tvm.testing.uses_gpu\ndef test_forward_pow():\n    torch.set_grad_enabled(False)\n    input_shape = [1, 3, 10, 10]\n\n    class Pow1(Module):\n        def forward(self, *args):\n            return args[0] ** 2\n\n    input_data = torch.rand(input_shape).float()\n    verify_model(Pow1().float().eval(), input_data=input_data)\n\n\n@tvm.testing.uses_gpu\ndef test_forward_chunk():\n    torch.set_grad_enabled(False)\n    input_shape = [1, 3, 14, 14]\n\n    class Chunk1(Module):\n        def forward(self, *args):\n            chunks = args[0].chunk(7, 2)\n            return torch.cat(chunks, 2)\n\n    input_data = torch.rand(input_shape).float()\n    verify_model(Chunk1().float().eval(), input_data=input_data)\n\n\n@tvm.testing.uses_gpu\ndef test_upsample():\n    class Upsample(Module):\n        def __init__(self, size=None, scale=None, mode=\"nearest\", align_corners=None):\n            super().__init__()\n            self.size = size\n            self.scale = scale\n            self.mode = mode\n            self.align_corners = align_corners\n\n        def forward(self, x):\n            return torch.nn.functional.interpolate(\n                x,\n                size=self.size,\n                scale_factor=self.scale,\n                mode=self.mode,\n                align_corners=self.align_corners,\n            )\n\n    inp = torch.rand((1, 3, 32, 32))\n    verify_model(Upsample(size=(64, 64), mode=\"nearest\"), inp)\n    verify_model(Upsample(scale=2, mode=\"nearest\"), inp)\n    verify_model(Upsample(size=(50, 50), mode=\"nearest\"), inp)\n    verify_model(Upsample(size=(64, 64), mode=\"bilinear\", align_corners=True), inp)\n    verify_model(Upsample(scale=2, mode=\"bilinear\", align_corners=True), inp)\n    verify_model(Upsample(size=(50, 50), mode=\"bilinear\", align_corners=True), inp)\n\n\n@tvm.testing.uses_gpu\ndef test_to():\n    \"\"\"Test for aten::to(...).
\"\"\"\n\n class ToCPU(Module):\n def forward(self, x):\n return x.to(\"cpu\")\n\n class ToFloat(Module):\n def forward(self, x):\n return x.float()\n\n class ToInt(Module):\n def forward(self, x):\n return x.int()\n\n class ToLong(Module):\n def forward(self, x):\n return x.long()\n\n class ToDouble(Module):\n def forward(self, x):\n return x.double()\n\n class ToFloat16(Module):\n def forward(self, x):\n return x.to(torch.float16)\n\n verify_model(ToCPU().eval(), torch.rand((1, 3, 32, 32)))\n verify_model(ToFloat().eval(), torch.zeros((1, 3, 32, 32), dtype=torch.int))\n verify_model(ToFloat().eval(), torch.tensor(2, dtype=torch.int))\n verify_model(ToInt().eval(), torch.zeros((1, 3, 32, 32)))\n verify_model(ToInt().eval(), torch.tensor(0.8))\n verify_model(ToLong().eval(), torch.tensor(0.8))\n verify_model(ToDouble().eval(), torch.tensor(0.8))\n verify_model(ToFloat16().eval(), torch.tensor(2, dtype=torch.float32))\n verify_model(ToFloat16().eval(), torch.zeros((1, 3, 32, 32), dtype=torch.int))\n\n\[email protected]_gpu\ndef test_adaptive_pool3d():\n for ishape in [(1, 32, 16, 16, 16), (1, 32, 9, 15, 15), (1, 32, 13, 7, 7)]:\n inp = torch.rand(ishape)\n verify_model(torch.nn.AdaptiveMaxPool3d((1, 1, 1)).eval(), inp)\n verify_model(torch.nn.AdaptiveMaxPool3d((2, 2, 2)).eval(), inp)\n verify_model(torch.nn.AdaptiveAvgPool3d((1, 1, 1)).eval(), inp)\n verify_model(torch.nn.AdaptiveAvgPool3d((2, 2, 2)).eval(), inp)\n verify_model(torch.nn.AdaptiveAvgPool3d((4, 8, 8)).eval(), inp)\n verify_model(torch.nn.AdaptiveMaxPool3d((7, 8, 9)).eval(), inp)\n\n\[email protected]_gpu\ndef test_forward_functional_pad():\n torch.set_grad_enabled(False)\n pad = (0, 0)\n\n class Pad1(Module):\n def forward(self, *args):\n return torch.nn.functional.pad(args[0], pad, \"constant\", 0)\n\n input_data = torch.rand((3, 3, 4, 2))\n pad = (1, 1)\n verify_model(Pad1().float().eval(), input_data=input_data)\n\n pad = (1, 1, 2, 2)\n verify_model(Pad1().float().eval(), input_data=input_data)\n\n pad = (0, 1, 2, 1, 3, 3)\n verify_model(Pad1().float().eval(), input_data=input_data)\n\n\[email protected]_gpu\ndef test_forward_zero_pad2d():\n inp = torch.rand((1, 1, 3, 3))\n verify_model(torch.nn.ZeroPad2d(2).eval(), inp)\n verify_model(torch.nn.ZeroPad2d((1, 1, 2, 0)).eval(), inp)\n\n\[email protected]_gpu\ndef test_forward_constant_pad1d():\n inp = torch.rand((1, 2, 4))\n verify_model(torch.nn.ConstantPad2d(2, 3.5).eval(), inp)\n\n inp = torch.rand((1, 2, 3))\n verify_model(torch.nn.ConstantPad2d((3, 1), 3.5).eval(), inp)\n\n\[email protected]_gpu\ndef test_forward_constant_pad2d():\n inp = torch.rand((1, 2, 2, 2))\n verify_model(torch.nn.ConstantPad2d(2, 3.5).eval(), inp)\n verify_model(torch.nn.ConstantPad2d((3, 0, 2, 1), 3.5).eval(), inp)\n\n\[email protected]_gpu\ndef test_forward_constant_pad3d():\n inp = torch.rand((1, 3, 2, 2, 2))\n verify_model(torch.nn.ConstantPad3d(3, 3.5).eval(), inp)\n verify_model(torch.nn.ConstantPad3d((3, 4, 5, 6, 0, 1), 3.5).eval(), inp)\n\n\[email protected]_gpu\ndef test_forward_reflection_pad1d():\n inp = torch.rand((1, 2, 4))\n verify_model(torch.nn.ReflectionPad1d(2).eval(), inp)\n verify_model(torch.nn.ReflectionPad1d((3, 1)).eval(), inp)\n\n inp = torch.rand((2, 4, 5))\n verify_model(torch.nn.ReflectionPad1d((2, 3)).eval(), inp)\n\n\[email protected]_gpu\ndef test_forward_reflection_pad2d():\n inp = torch.rand((1, 1, 3, 3))\n verify_model(torch.nn.ReflectionPad2d(2).eval(), inp)\n verify_model(torch.nn.ReflectionPad2d((1, 1, 2, 0)).eval(), inp)\n\n inp = torch.rand((2, 4, 5, 6))\n 
verify_model(torch.nn.ReflectionPad2d((1, 3, 2, 4)).eval(), inp)\n\n\n@tvm.testing.uses_gpu\ndef test_forward_replication_pad1d():\n    inp = torch.rand((1, 2, 4))\n    verify_model(torch.nn.ReplicationPad1d(2).eval(), inp)\n    verify_model(torch.nn.ReplicationPad1d((3, 1)).eval(), inp)\n\n    inp = torch.rand((2, 4, 5))\n    verify_model(torch.nn.ReplicationPad1d((2, 3)).eval(), inp)\n\n\n@tvm.testing.uses_gpu\ndef test_forward_replication_pad2d():\n    inp = torch.rand((1, 1, 3, 3))\n    verify_model(torch.nn.ReplicationPad2d(2).eval(), inp)\n    verify_model(torch.nn.ReplicationPad2d((1, 1, 2, 0)).eval(), inp)\n\n    inp = torch.rand((2, 4, 5, 6))\n    verify_model(torch.nn.ReplicationPad2d((1, 3, 2, 4)).eval(), inp)\n\n\n@tvm.testing.uses_gpu\ndef test_forward_replication_pad3d():\n    inp = torch.rand((1, 1, 3, 3, 3))\n    verify_model(torch.nn.ReplicationPad3d(3).eval(), inp)\n    verify_model(torch.nn.ReplicationPad3d((1, 1, 2, 2, 1, 1)).eval(), inp)\n\n    inp = torch.rand((7, 5, 4, 5, 6))\n    verify_model(torch.nn.ReplicationPad3d((2, 3, 2, 5, 1, 4)).eval(), inp)\n\n\n@tvm.testing.uses_gpu\ndef test_forward_upsample3d():\n    inp = torch.arange(1, 9, dtype=torch.float32).view(1, 1, 2, 2, 2)\n    verify_model(torch.nn.Upsample(scale_factor=2, mode=\"nearest\").eval(), inp)\n    verify_model(torch.nn.Upsample(scale_factor=2, mode=\"trilinear\").eval(), inp)\n    verify_model(\n        torch.nn.Upsample(scale_factor=2, mode=\"trilinear\", align_corners=True).eval(), inp\n    )\n\n\ndef test_forward_nms():\n    \"\"\"Dynamic Non-Maximum Suppression\"\"\"\n    torch.set_grad_enabled(False)\n\n    class NonMaxSuppression(Module):\n        def __init__(self, iou_thres):\n            super().__init__()\n            self.iou_threshold = iou_thres\n\n        def forward(self, *args):\n            return torchvision.ops.nms(args[0], args[1], self.iou_threshold)\n\n    # Generate random input data\n    def _gen_rand_inputs(num_boxes):\n        box_len = 4\n        boxes = torch.rand(num_boxes, box_len, dtype=torch.float) * 0.5\n        boxes[:, 2] += boxes[:, 0]\n        boxes[:, 3] += boxes[:, 1]\n        scores = torch.from_numpy(np.random.uniform(-1, 1, size=(num_boxes,)).astype(np.float32))\n        return boxes, scores\n\n    targets = [\"llvm\", \"cuda\"]\n\n    for num_boxes, iou_thres in [(10, 0.3), (100, 0.5), (500, 0.9)]:\n        in_boxes, in_scores = _gen_rand_inputs(num_boxes)\n        verify_trace_model(NonMaxSuppression(iou_thres), [in_boxes, in_scores], targets)\n\n\ndef test_forward_roi_align():\n    \"\"\"ROI align\"\"\"\n    torch.set_grad_enabled(False)\n\n    class ROIAlign(Module):\n        def __init__(self, output_sizes, spatial_scale=1.0, sampling_ratio=-1):\n            super().__init__()\n            self.spatial_scale = spatial_scale\n            self.sampling_ratio = sampling_ratio\n            self.output_sizes = output_sizes\n\n        def forward(self, *args):\n            return torchvision.ops.roi_align(\n                args[0],\n                args[1],\n                self.output_sizes,\n                self.spatial_scale,\n                self.sampling_ratio,\n            )\n\n    in_data = torch.Tensor(np.random.uniform(size=(1, 8, 100, 100)))\n    in_boxes = torch.Tensor(np.random.uniform(0.0, 100.0, size=(35, 4)))\n    in_batch = torch.zeros((35, 1), dtype=torch.float)\n    in_boxes = torch.cat([in_batch, in_boxes], dim=1)\n\n    verify_model(ROIAlign(7), [in_data, in_boxes])\n    verify_model(ROIAlign((10, 10), 0.7, 5), [in_data, in_boxes])\n    verify_model(ROIAlign(15, 0.9, 3), [in_data, in_boxes])\n\n\n@tvm.testing.uses_gpu\ndef test_conv3d():\n    for ishape in [(1, 32, 16, 16, 16), (1, 32, 9, 15, 15), (1, 32, 13, 7, 7)]:\n        inp = torch.rand(ishape)\n        verify_model(torch.nn.Conv3d(32, 16, (3, 3, 3), padding=(1, 1, 1)).eval(), inp)\n        verify_model(torch.nn.Conv3d(32, 16, (5, 5, 5), padding=(2, 2, 2)).eval(), inp)\n        
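# kernel_size=1 below is a pointwise conv: it only mixes channels and leaves\n        # the depth/height/width extent unchanged.\n        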
verify_model(torch.nn.Conv3d(32, 16, kernel_size=1).eval(), inp)\n        # downsample\n        verify_model(torch.nn.Conv3d(32, 16, kernel_size=1, stride=2).eval(), inp)\n\n\n@tvm.testing.uses_gpu\ndef test_conv3d_transpose():\n    for ishape in [(1, 8, 10, 5, 10), (1, 8, 5, 8, 8), (1, 8, 13, 7, 7)]:\n        inp = torch.rand(ishape)\n        verify_model(\n            torch.nn.ConvTranspose3d(\n                in_channels=8, out_channels=33, kernel_size=3, stride=2\n            ).eval(),\n            inp,\n        )\n        verify_model(\n            torch.nn.ConvTranspose3d(\n                in_channels=8,\n                out_channels=20,\n                kernel_size=(3, 5, 2),\n                stride=(2, 1, 1),\n                padding=(0, 4, 2),\n            ).eval(),\n            inp,\n        )\n        verify_model(\n            torch.nn.ConvTranspose3d(in_channels=8, out_channels=20, kernel_size=1).eval(), inp\n        )\n        verify_model(\n            torch.nn.ConvTranspose3d(in_channels=8, out_channels=5, kernel_size=1, stride=2).eval(),\n            inp,\n        )\n\n\n# Model tests\n@tvm.testing.uses_gpu\ndef test_resnet18():\n    torch.set_grad_enabled(False)\n    verify_model(\"resnet18\", atol=1e-4, rtol=1e-4)\n\n\n@tvm.testing.uses_gpu\ndef test_squeezenet1_0():\n    torch.set_grad_enabled(False)\n    verify_model(\"squeezenet1_0\", atol=1e-4, rtol=1e-4)\n\n\n@tvm.testing.uses_gpu\ndef test_squeezenet1_1():\n    torch.set_grad_enabled(False)\n    verify_model(\"squeezenet1_1\", atol=1e-4, rtol=1e-4)\n\n\n@tvm.testing.uses_gpu\ndef test_densenet121():\n    torch.set_grad_enabled(False)\n    verify_model(\"densenet121\", atol=1e-4, rtol=1e-4)\n\n\n@tvm.testing.uses_gpu\ndef test_inception_v3():\n    torch.set_grad_enabled(False)\n    verify_model(\"inception_v3\", atol=1e-4, rtol=1e-4)\n\n\n@tvm.testing.uses_gpu\ndef test_googlenet():\n    torch.set_grad_enabled(False)\n    verify_model(\"googlenet\", atol=1e-4, rtol=1e-4)\n\n\n@tvm.testing.uses_gpu\ndef test_mnasnet0_5():\n    torch.set_grad_enabled(False)\n    verify_model(\"mnasnet0_5\", atol=1e-4, rtol=1e-4)\n\n\n@tvm.testing.uses_gpu\ndef test_mobilenet_v2():\n    torch.set_grad_enabled(False)\n    verify_model(\"mobilenet_v2\", atol=1e-4, rtol=1e-4)\n\n\n\"\"\"\n# TODO: Fix VGG and AlexNet issues (probably due to pooling)\n@tvm.testing.uses_gpu\ndef test_alexnet():\n    torch.set_grad_enabled(False)\n    verify_model(\"alexnet\")\n\n@tvm.testing.uses_gpu\ndef test_vgg11():\n    torch.set_grad_enabled(False)\n    verify_model(\"vgg11\")\n\n@tvm.testing.uses_gpu\ndef test_vgg11_bn():\n    torch.set_grad_enabled(False)\n    verify_model(\"vgg11_bn\")\n\"\"\"\n\n\n@tvm.testing.uses_gpu\ndef test_custom_conversion_map():\n    def get_roi_align():\n        pool_size = 5\n        n_channels = 2 * (pool_size ** 2)\n        x = torch.rand(2, n_channels, 10, 10)\n        rois = torch.tensor(\n            [\n                [0, 0, 0, 9, 9],  # format is (xyxy)\n                [0, 0, 5, 4, 9],\n                [0, 5, 5, 9, 9],\n                [1, 0, 0, 9, 9],\n            ],\n            dtype=torch.float,\n        )\n        roi_align = torchvision.ops.RoIAlign(pool_size, spatial_scale=1, sampling_ratio=-1)\n        return roi_align.eval(), [x, rois]\n\n    def convert_roi_align():\n        def _impl(inputs, input_types):\n            spatial_scale = inputs[2]\n            pooled_size = (inputs[3], inputs[4])\n            sampling_ratio = inputs[5]\n            return relay.op.vision.roi_align(\n                inputs[0], inputs[1], pooled_size, spatial_scale, sampling_ratio\n            )\n\n        return _impl\n\n    custom_map = {\"torchvision::roi_align\": convert_roi_align()}\n    model, inputs = get_roi_align()\n\n    verify_model(model, inputs, custom_map)\n\n\n@tvm.testing.uses_gpu\ndef test_segmentation_models():\n    class SegmentationModelWrapper(Module):\n        def __init__(self, model):\n            super().__init__()\n            self.model = model\n\n        def forward(self, inp):\n            out = self.model(inp)\n            return out[\"out\"]\n\n    fcn = torchvision.models.segmentation.fcn_resnet101(pretrained=True)\n    deeplab = 
torchvision.models.segmentation.deeplabv3_resnet101(pretrained=True)\n\n    inp = [torch.rand((1, 3, 300, 300), dtype=torch.float)]\n\n    verify_model(SegmentationModelWrapper(fcn.eval()), inp, atol=1e-4, rtol=1e-4)\n    verify_model(SegmentationModelWrapper(deeplab.eval()), inp, atol=1e-4, rtol=1e-4)\n\n\n@tvm.testing.uses_gpu\ndef test_3d_models():\n    input_shape = (1, 3, 4, 56, 56)\n    resnet3d = torchvision.models.video.r3d_18(pretrained=True).eval()\n    verify_model(resnet3d, [torch.rand(input_shape)], atol=1e-4, rtol=1e-4)\n\n\ndef _get_default_vm_targets():\n    return [tgt for (tgt, _) in tvm.testing.enabled_targets()]\n\n\ndef verify_script_model(pt_model, ishapes, targets, idtype=None):\n    script_module = torch.jit.script(pt_model)\n\n    verify_model_vm(script_module, ishapes, idtype=idtype, targets=targets)\n\n\ndef verify_trace_model(pt_model, idata, targets):\n    traced_model = torch.jit.trace(pt_model, idata)\n    ishapes = [data.shape for data in idata]\n    verify_model_vm(traced_model, ishapes, idata=idata, targets=targets)\n\n\ndef convert_pt_to_tvm_type(idtype):\n    \"\"\"Accept a PyTorch dtype and return the corresponding TVM dtype string.\"\"\"\n    # TVM does not support PyTorch complex dtypes\n    if idtype == torch.float64:\n        curr_dtype = \"float64\"\n    elif idtype == torch.float32:\n        curr_dtype = \"float32\"\n    elif idtype == torch.float16:\n        curr_dtype = \"float16\"\n    elif idtype == torch.bfloat16:\n        curr_dtype = \"bfloat16\"\n    elif idtype == torch.int64:\n        curr_dtype = \"int64\"\n    elif idtype == torch.int32:\n        curr_dtype = \"int32\"\n    elif idtype == torch.int16:\n        curr_dtype = \"int16\"\n    elif idtype == torch.int8:\n        curr_dtype = \"int8\"\n    elif idtype == torch.uint8:\n        curr_dtype = \"uint8\"\n    elif idtype == torch.bool:\n        curr_dtype = \"bool\"\n    else:\n        raise NotImplementedError(\"Unsupported dtype: {}\".format(idtype))\n    return curr_dtype\n\n\ndef verify_model_vm(input_model, ishapes, idtype=None, idata=None, targets=[\"llvm\"]):\n    if not idtype:\n        idtype = torch.float\n\n    input_names = [\"i{}\".format(idx) for idx, ish in enumerate(ishapes)]\n    tvm_dtype = convert_pt_to_tvm_type(idtype)\n    input_dtypes = [tvm_dtype] * len(input_names)\n    input_shapes = list(zip(input_names, list(zip(ishapes, input_dtypes))))\n\n    if idata:\n        input_data = idata\n    # If no input_data provided, generate random data of specified dtype\n    else:\n        if idtype == torch.bool:\n            input_data = [\n                torch.Tensor.bool(torch.randint(low=0, high=2, size=shape)) for shape in ishapes\n            ]\n        # Torch dtypes can be float, complex, int, or bool. 
Complex is not supported, so if the dtype is not float or bool,\n        # it must be int.\n        elif not idtype.is_floating_point:\n            input_data = [\n                torch.randint(low=0, high=10, size=shape, dtype=idtype) for shape in ishapes\n            ]\n        else:\n            input_data = [torch.randn(shape, dtype=idtype) for shape in ishapes]\n\n    # Compile via VM\n    mod, params = relay.frontend.from_pytorch(input_model, input_shapes)\n\n    for tgt in targets:\n        print(\"Running on target\", tgt)\n        ctx = tvm.context(tgt, 0)\n\n        executor = relay.create_executor(\"vm\", mod=mod, ctx=ctx, target=tgt)\n        evaluator = executor.evaluate()\n\n        # Inference\n        for name, inp in zip(input_names, input_data):\n            params[name] = inp.numpy()\n        vm_res = evaluator(**params)\n\n        # Baseline result\n        with torch.no_grad():\n            pt_result = input_model(*input_data)\n\n        # Verify the accuracy\n        if not isinstance(pt_result, torch.Tensor):\n            tvm_res = vm_res.asnumpy().item()\n            assert pt_result == tvm_res\n        else:\n            tvm.testing.assert_allclose(vm_res.asnumpy(), pt_result.numpy(), rtol=1e-5, atol=1e-5)\n\n\n@tvm.testing.uses_gpu\ndef test_control_flow():\n    class SimpleIf(torch.nn.Module):\n        def __init__(self, N, M):\n            super().__init__()\n            self.weight = torch.nn.Parameter(torch.rand(N, M))\n\n        def forward(self, inp):\n            if inp.sum() > 0.0:\n                output = self.weight + inp\n            else:\n                output = self.weight - inp\n            return output\n\n    class NestedIf(torch.nn.Module):\n        def __init__(self, N, M):\n            super().__init__()\n            self.weight = torch.nn.Parameter(torch.rand(N, M))\n\n        def forward(self, inp):\n            if inp.sum() > 0.0:\n                if inp.mean() > 0.0:\n                    output = self.weight + inp\n                else:\n                    output = self.weight - inp\n            else:\n                if inp.mean() >= 0.0:\n                    output = self.weight * inp\n                else:\n                    output = self.weight / inp\n\n            return output\n\n    class ScalarLoop(torch.nn.Module):\n        def forward(self, inp):\n            a = 0\n            for i in range(inp.size(0)):\n                b = i * i\n                b = b + 1\n                a += b\n            if a != 0:\n                a += 1\n            else:\n                a += 2\n            return a\n\n    class SimpleLoop(torch.nn.Module):\n        def forward(self, inp):\n            a = inp\n            for i in range(inp.size(0)):\n                b = a * 2.0\n                c = a + b\n                a += c\n            return a\n\n    class LoopWithIf(torch.nn.Module):\n        def forward(self, inp):\n            a = inp\n            for i in range(inp.size(0)):\n                b = a * 2.0\n                b = a + b\n                if b.sum() > 0.0:\n                    a += b\n                else:\n                    a -= b\n            return a\n\n    class NestedLoop(torch.nn.Module):\n        def forward(self, inp):\n            a = inp\n            for i in range(inp.size(0)):\n                b = a * float(i)\n                for j in range(inp.size(1)):\n                    a += b * float(j)\n            return a\n\n    class SimpleScalarWhileLoop(torch.nn.Module):\n        def forward(self, inp):\n            a = 1\n            i = 0\n            while i <= inp.size(0):\n                a += i\n                i += 2\n            i = 0\n            # also test constant init cond\n            while i < 10:\n                a += i\n                i += 3\n            return a\n\n    class SimpleWhileLoop(torch.nn.Module):\n        def forward(self, inp):\n            a = inp\n            i = 0\n            while i < inp.size(0):\n                a += a * float(i) * 2.0\n                i += 1\n            return a\n\n    models = [\n        SimpleIf(10, 20),\n        NestedIf(10, 20),\n        ScalarLoop(),\n        SimpleLoop(),\n        LoopWithIf(),\n        SimpleScalarWhileLoop(),\n        SimpleWhileLoop(),\n        NestedLoop(),\n    ]\n\n    for pt_model in models:\n        verify_script_model(pt_model.eval(), [(10, 20)], _get_default_vm_targets())\n\n\n@tvm.testing.uses_gpu\ndef test_simple_rnn():\n    # The mixed tracing and scripting example from\n    # https://pytorch.org/tutorials/beginner/Intro_to_TorchScript_tutorial.html#mixing-scripting-and-tracing\n    class DecisionGate(torch.nn.Module):\n        def forward(self, x):\n            if x.sum() > 0:\n                return x\n            else:\n                return -x\n\n    class Cell(torch.nn.Module):\n        def __init__(self, dg):\n            super(Cell, self).__init__()\n            self.dg = dg\n            self.linear = 
torch.nn.Linear(4, 4)\n\n        def forward(self, x, h):\n            new_h = torch.tanh(self.dg(self.linear(x)) + h)\n            return new_h, new_h\n\n    class RNNLoop(torch.nn.Module):\n        def __init__(self):\n            super().__init__()\n            x = torch.rand(10, 4, dtype=torch.float)\n            h = torch.rand(10, 4, dtype=torch.float)\n            self.cell = torch.jit.trace(Cell(DecisionGate()), (x, h))\n\n        def forward(self, xs):\n            h = torch.zeros(10, 4, dtype=torch.float)\n            y = torch.zeros(10, 4, dtype=torch.float)\n            for i in range(xs.size(0)):\n                y, h = self.cell(xs[i], h)\n            return y\n\n    verify_script_model(RNNLoop().eval(), [(10, 10, 4)], _get_default_vm_targets())\n\n\n@tvm.testing.uses_gpu\ndef test_forward_reduce_sum():\n    torch.set_grad_enabled(False)\n    input_shape = [1, 3, 10, 10]\n\n    class ReduceSum1(Module):\n        def forward(self, *args):\n            return args[0].sum(1)\n\n    class ReduceSum2(Module):\n        def forward(self, *args):\n            return args[0].sum(dim=1, keepdim=False)\n\n    class ReduceSum3(Module):\n        def forward(self, *args):\n            return args[0].sum(dim=2, keepdim=True)\n\n    class ReduceSum4(Module):\n        def forward(self, *args):\n            return args[0].sum(dim=(2, 3), keepdim=True)\n\n    class ReduceSum5(Module):\n        def forward(self, *args):\n            return args[0].sum(dim=(2, 3), keepdim=False)\n\n    input_data = torch.rand(input_shape).float()\n    verify_model(ReduceSum1().float().eval(), input_data=input_data)\n    verify_model(ReduceSum2().float().eval(), input_data=input_data)\n    verify_model(ReduceSum3().float().eval(), input_data=input_data)\n    verify_model(ReduceSum4().float().eval(), input_data=input_data)\n    verify_model(ReduceSum5().float().eval(), input_data=input_data)\n\n\n@tvm.testing.uses_gpu\ndef test_forward_reduce_prod():\n    torch.set_grad_enabled(False)\n    input_shape = [1, 3, 10, 10]\n\n    class ReduceProd1(Module):\n        def forward(self, *args):\n            return args[0].prod(1)\n\n    class ReduceProd2(Module):\n        def forward(self, *args):\n            return args[0].prod(dim=1, keepdim=False)\n\n    class ReduceProd3(Module):\n        def forward(self, *args):\n            return args[0].prod(dim=2, keepdim=True)\n\n    input_data = torch.rand(input_shape).float()\n    verify_model(ReduceProd1().float().eval(), input_data=input_data)\n    verify_model(ReduceProd2().float().eval(), input_data=input_data)\n    verify_model(ReduceProd3().float().eval(), input_data=input_data)\n\n\n@tvm.testing.uses_gpu\ndef test_forward_argmin():\n    torch.set_grad_enabled(False)\n    input_shape = [1, 3, 10, 10]\n\n    class ArgMin1(Module):\n        def forward(self, *args):\n            return args[0].argmin(1)\n\n    class ArgMin2(Module):\n        def forward(self, *args):\n            return args[0].argmin(dim=1, keepdim=False)\n\n    class ArgMin3(Module):\n        def forward(self, *args):\n            return args[0].argmin(dim=2, keepdim=True)\n\n    input_data = torch.rand(input_shape).float()\n    verify_model(ArgMin1().float().eval(), input_data=input_data)\n    verify_model(ArgMin2().float().eval(), input_data=input_data)\n    verify_model(ArgMin3().float().eval(), input_data=input_data)\n\n\n@tvm.testing.uses_gpu\ndef test_forward_argmax():\n    torch.set_grad_enabled(False)\n    input_shape = [1, 3, 10, 10]\n\n    class ArgMax1(Module):\n        def forward(self, *args):\n            return args[0].argmax(1)\n\n    class ArgMax2(Module):\n        def forward(self, *args):\n            return args[0].argmax(dim=1, keepdim=False)\n\n    class ArgMax3(Module):\n        def forward(self, *args):\n            return args[0].argmax(dim=2, keepdim=True)\n\n    input_data = torch.rand(input_shape).float()\n    verify_model(ArgMax1().float().eval(), input_data=input_data)\n    verify_model(ArgMax2().float().eval(), input_data=input_data)\n    verify_model(ArgMax3().float().eval(), 
input_data=input_data)\n\n\n@tvm.testing.uses_gpu\ndef test_forward_std():\n    torch.set_grad_enabled(False)\n    input_shape = [1, 3, 10, 10]\n\n    class Std1(Module):\n        def forward(self, *args):\n            return args[0].std(1, unbiased=False)\n\n    class Std2(Module):\n        def forward(self, *args):\n            return args[0].std(dim=1, keepdim=False, unbiased=False)\n\n    class Std3(Module):\n        def forward(self, *args):\n            return args[0].std(dim=2, keepdim=True, unbiased=False)\n\n    class Std4(Module):\n        def forward(self, *args):\n            return args[0].std(dim=(2, 3), keepdim=True, unbiased=False)\n\n    class Std5(Module):\n        def forward(self, *args):\n            return args[0].std(dim=(2, 3), keepdim=False, unbiased=False)\n\n    class Std6(Module):\n        def forward(self, *args):\n            return args[0].std(unbiased=False)\n\n    class Std7(Module):\n        def forward(self, *args):\n            return args[0].std(dim=1, keepdim=False, unbiased=True)\n\n    class Std8(Module):\n        def forward(self, *args):\n            return args[0].std(dim=(2, 3), keepdim=True, unbiased=True)\n\n    class Std9(Module):\n        def forward(self, *args):\n            return args[0].std(unbiased=True)\n\n    input_data = torch.rand(input_shape).float()\n    verify_model(Std1().float().eval(), input_data=input_data)\n    verify_model(Std2().float().eval(), input_data=input_data)\n    verify_model(Std3().float().eval(), input_data=input_data)\n    verify_model(Std4().float().eval(), input_data=input_data)\n    verify_model(Std5().float().eval(), input_data=input_data)\n    verify_model(Std6().float().eval(), input_data=input_data)\n    verify_model(Std7().float().eval(), input_data=input_data)\n    verify_model(Std8().float().eval(), input_data=input_data)\n    verify_model(Std9().float().eval(), input_data=input_data)\n\n\n@tvm.testing.uses_gpu\ndef test_forward_variance():\n    torch.set_grad_enabled(False)\n    input_shape = [1, 3, 10, 10]\n\n    class Variance1(Module):\n        def forward(self, *args):\n            return args[0].var(1, unbiased=False)\n\n    class Variance2(Module):\n        def forward(self, *args):\n            return args[0].var(dim=1, keepdim=False, unbiased=False)\n\n    class Variance3(Module):\n        def forward(self, *args):\n            return args[0].var(dim=2, keepdim=True, unbiased=False)\n\n    class Variance4(Module):\n        def forward(self, *args):\n            return args[0].var(dim=(2, 3), keepdim=True, unbiased=False)\n\n    class Variance5(Module):\n        def forward(self, *args):\n            return args[0].var(dim=(2, 3), keepdim=False, unbiased=False)\n\n    class Variance6(Module):\n        def forward(self, *args):\n            return args[0].var(unbiased=False)\n\n    class Variance7(Module):\n        def forward(self, *args):\n            return args[0].var(dim=1, keepdim=False, unbiased=True)\n\n    class Variance8(Module):\n        def forward(self, *args):\n            return args[0].var(dim=(2, 3), keepdim=True, unbiased=True)\n\n    class Variance9(Module):\n        def forward(self, *args):\n            return args[0].var(unbiased=True)\n\n    input_data = torch.rand(input_shape).float()\n    verify_model(Variance1().float().eval(), input_data=input_data)\n    verify_model(Variance2().float().eval(), input_data=input_data)\n    verify_model(Variance3().float().eval(), input_data=input_data)\n    verify_model(Variance4().float().eval(), input_data=input_data)\n    verify_model(Variance5().float().eval(), input_data=input_data)\n    verify_model(Variance6().float().eval(), input_data=input_data)\n    verify_model(Variance7().float().eval(), input_data=input_data)\n    verify_model(Variance8().float().eval(), input_data=input_data)\n    verify_model(Variance9().float().eval(), input_data=input_data)\n\n\n@tvm.testing.uses_gpu\ndef test_forward_rsub():\n    torch.set_grad_enabled(False)\n\n    class Rsub1(Module):\n        def 
forward(self, *args):\n            return torch.rsub(args[0], args[1])\n\n    class Rsub2(Module):\n        def forward(self, *args):\n            return torch.rsub(args[0], args[1], alpha=0.5)\n\n    d1 = torch.rand([1, 3]).float()\n    d2 = torch.rand([1, 3]).float()\n    d3 = torch.rand([1, 3]).int()\n    verify_model(Rsub1().float().eval(), input_data=[d1, d2])\n    verify_model(Rsub1().float().eval(), input_data=[d1, d3])\n    verify_model(Rsub2().float().eval(), input_data=[d1, d2])\n    verify_model(Rsub2().float().eval(), input_data=[d1, d3])\n\n\n@tvm.testing.uses_gpu\ndef test_forward_embedding():\n    torch.set_grad_enabled(False)\n\n    input_data = torch.randint(0, 10, [2, 4]).long()\n    verify_model(torch.nn.Embedding(10, 3).float().eval(), input_data=input_data)\n\n    input_data = torch.randint(0, 4, [2, 3, 4]).long()\n    verify_model(torch.nn.Embedding(4, 5, sparse=False).float().eval(), input_data=input_data)\n\n    input_data = torch.randint(0, 4, [2, 3, 4]).long()\n    verify_model(torch.nn.Embedding(4, 5, sparse=True).float().eval(), input_data=input_data)\n\n\n@tvm.testing.uses_gpu\ndef test_forward_onehot():\n    torch.set_grad_enabled(False)\n\n    class OneHot1(Module):\n        def forward(self, *args):\n            return torch.nn.functional.one_hot(args[0], num_classes=3)\n\n    class OneHot2(Module):\n        def forward(self, *args):\n            return torch.nn.functional.one_hot(args[0], num_classes=5)\n\n    input_data = torch.arange(0, 5) % 3\n    verify_model(OneHot1().float().eval(), input_data=input_data)\n\n    input_data = torch.arange(0, 5) % 4\n    verify_model(OneHot2().float().eval(), input_data=input_data)\n\n\n@tvm.testing.uses_gpu\ndef test_forward_isfinite():\n    torch.set_grad_enabled(False)\n\n    class IsFinite1(Module):\n        def forward(self, *args):\n            return torch.isfinite(args[0])\n\n    input_data = torch.tensor([1, float(\"inf\"), 2, float(\"-inf\"), float(\"nan\")]).float()\n    verify_model(IsFinite1().float().eval(), input_data=input_data)\n\n\n@tvm.testing.uses_gpu\ndef test_forward_isnan():\n    torch.set_grad_enabled(False)\n\n    class IsNan1(Module):\n        def forward(self, *args):\n            return torch.isnan(args[0])\n\n    input_data = torch.tensor([1, float(\"inf\"), 2, float(\"-inf\"), float(\"nan\")]).float()\n    verify_model(IsNan1().float().eval(), input_data=input_data)\n\n\n@tvm.testing.uses_gpu\ndef test_forward_isinf():\n    torch.set_grad_enabled(False)\n\n    class IsInf1(Module):\n        def forward(self, *args):\n            return torch.isinf(args[0])\n\n    input_data = torch.tensor([1, float(\"inf\"), 2, float(\"-inf\"), float(\"nan\")]).float()\n    verify_model(IsInf1().float().eval(), input_data=input_data)\n\n\n@tvm.testing.uses_gpu\ndef test_forward_clamp():\n    torch.set_grad_enabled(False)\n    input_shape = [1, 3, 10, 10]\n\n    class Clamp1(Module):\n        def forward(self, *args):\n            return torch.clamp(args[0], min=-0.5, max=0.5)\n\n    class Clamp2(Module):\n        def forward(self, *args):\n            return torch.clamp(args[0], min=-0.3)\n\n    class Clamp3(Module):\n        def forward(self, *args):\n            return torch.clamp(args[0], max=1.0)\n\n    input_data = torch.rand(input_shape).float()\n    verify_model(Clamp1().float().eval(), input_data=input_data)\n    verify_model(Clamp2().float().eval(), input_data=input_data)\n    verify_model(Clamp3().float().eval(), input_data=input_data)\n\n\n@tvm.testing.uses_gpu\ndef test_forward_clamp_():\n    torch.set_grad_enabled(False)\n\n    class ClampInPlace(Module):\n        def __init__(self, min_val, max_val):\n            super(ClampInPlace, self).__init__()\n            self.min = min_val\n            self.max = max_val\n\n        def forward(self, *args):\n            return torch.clamp_(args[0], self.min, self.max)\n\n    for ishape, min_val, max_val in (([4, 8], 0.1, 0.9), ([7, 6], 0.2, 
0.5)):\n        input_data = torch.rand(ishape).float()\n        verify_model(ClampInPlace(min_val, max_val).float().eval(), input_data=input_data)\n\n\n@tvm.testing.uses_gpu\ndef test_forward_ones():\n    torch.set_grad_enabled(False)\n\n    class Ones1(Module):\n        def forward(self, *args):\n            return torch.ones(2, 3)\n\n    verify_model(Ones1().float().eval(), input_data=[])\n\n\n@tvm.testing.uses_gpu\ndef test_forward_ones_like():\n    torch.set_grad_enabled(False)\n    input_shape = [1, 3, 10, 10]\n\n    class OnesLike1(Module):\n        def forward(self, *args):\n            return torch.ones_like(args[0])\n\n    class OnesLike2(Module):\n        def forward(self, *args):\n            return torch.ones_like(args[0], dtype=torch.int8)\n\n    class OnesLike3(Module):\n        def forward(self, *args):\n            return torch.ones_like(args[0], dtype=torch.float)\n\n    input_data = torch.rand(input_shape).float()\n    verify_model(OnesLike1().float().eval(), input_data=input_data)\n    verify_model(OnesLike2().float().eval(), input_data=input_data)\n    verify_model(OnesLike3().float().eval(), input_data=input_data)\n\n\n@tvm.testing.uses_gpu\ndef test_forward_zeros():\n    torch.set_grad_enabled(False)\n\n    class Zeros1(Module):\n        def forward(self, *args):\n            return torch.zeros(2, 3)\n\n    verify_model(Zeros1().float().eval(), input_data=[])\n\n\n@tvm.testing.uses_gpu\ndef test_forward_zeros_like():\n    torch.set_grad_enabled(False)\n    input_shape = [1, 3, 10, 10]\n\n    class ZerosLike1(Module):\n        def forward(self, *args):\n            return torch.zeros_like(args[0])\n\n    class ZerosLike2(Module):\n        def forward(self, *args):\n            return torch.zeros_like(args[0], dtype=torch.int32)\n\n    class ZerosLike3(Module):\n        def forward(self, *args):\n            return torch.zeros_like(args[0], dtype=torch.float)\n\n    input_data = torch.rand(input_shape).float()\n    verify_model(ZerosLike1().float().eval(), input_data=input_data)\n    verify_model(ZerosLike2().float().eval(), input_data=input_data)\n    verify_model(ZerosLike3().float().eval(), input_data=input_data)\n\n\n@tvm.testing.uses_gpu\ndef test_forward_full():\n    torch.set_grad_enabled(False)\n\n    class Full1(Module):\n        def forward(self, *args):\n            return torch.full((2, 3), 3.14)\n\n    class Full2(Module):\n        def forward(self, *args):\n            return torch.full((1, 2, 3), 1.0, dtype=torch.int32)\n\n    verify_model(Full1().float().eval(), input_data=[])\n    verify_model(Full2().float().eval(), input_data=[])\n\n\n@tvm.testing.uses_gpu\ndef test_forward_full_like():\n    torch.set_grad_enabled(False)\n    input_shape = [1, 3, 10, 10]\n\n    class FullLike1(Module):\n        def forward(self, *args):\n            return torch.full_like(args[0], 3.14)\n\n    class FullLike2(Module):\n        def forward(self, *args):\n            return torch.full_like(args[0], 22.22, dtype=torch.int32)\n\n    class FullLike3(Module):\n        def forward(self, *args):\n            return torch.full_like(args[0], 1.4, dtype=torch.float)\n\n    input_data = torch.rand(input_shape).float()\n    verify_model(FullLike1().float().eval(), input_data=input_data)\n    verify_model(FullLike2().float().eval(), input_data=input_data)\n    verify_model(FullLike3().float().eval(), input_data=input_data)\n\n\n@tvm.testing.uses_gpu\ndef test_forward_linspace():\n    torch.set_grad_enabled(False)\n\n    class Linspace1(Module):\n        def forward(self, *args):\n            return torch.linspace(5, 10, steps=100)\n\n    class Linspace2(Module):\n        def forward(self, *args):\n            return torch.linspace(-10, 10, steps=5)\n\n    class Linspace3(Module):\n        def forward(self, *args):\n            return torch.linspace(start=-10, end=10, steps=5)\n\n    class Linspace4(Module):\n        def forward(self, *args):\n            return torch.linspace(start=-10, end=10, steps=1)\n\n    class Linspace5(Module):\n        
def forward(self, *args):\n return torch.linspace(1, 2, 1, dtype=torch.int32)\n\n class Linspace6(Module):\n def forward(self, *args):\n return torch.linspace(start=1, end=6, steps=2)\n\n class Linspace7(Module):\n def forward(self, *args):\n return torch.linspace(1, 4, steps=100, dtype=torch.float32)\n\n class Linspace8(Module):\n def forward(self, *args):\n return torch.linspace(1, 2, 1, dtype=torch.int16)\n\n verify_model(Linspace1().float().eval())\n verify_model(Linspace2().float().eval())\n verify_model(Linspace3().float().eval())\n verify_model(Linspace4().float().eval())\n verify_model(Linspace5().float().eval())\n verify_model(Linspace6().float().eval())\n verify_model(Linspace7().float().eval())\n verify_model(Linspace8().float().eval())\n\n\[email protected]_gpu\ndef test_forward_take():\n torch.set_grad_enabled(False)\n\n class Take1(Module):\n def forward(self, *args):\n indices = torch.tensor([[0, 0], [1, 0]])\n if torch.cuda.is_available():\n indices = indices.cuda()\n return torch.take(args[0], indices)\n\n class Take2(Module):\n def forward(self, *args):\n return torch.take(args[0], args[1])\n\n input_data = torch.tensor([[1, 2], [3, 4]])\n verify_model(Take1().float().eval(), input_data=input_data)\n indices = torch.tensor([[0, 0], [1, 0]])\n verify_model(Take2().float().eval(), input_data=[input_data, indices])\n\n\[email protected]_gpu\ndef test_forward_topk():\n torch.set_grad_enabled(False)\n\n class Topk1(Module):\n def forward(self, *args):\n return torch.topk(args[0], k=3)\n\n class Topk2(Module):\n def forward(self, *args):\n return torch.topk(args[0], k=3, dim=-2)\n\n class Topk3(Module):\n def forward(self, *args):\n return torch.topk(args[0], k=3, dim=3)\n\n class Topk4(Module):\n def forward(self, *args):\n return torch.topk(args[0], k=3, largest=True)\n\n class Topk5(Module):\n def forward(self, *args):\n return torch.topk(args[0], k=3, largest=False)\n\n class Topk6(Module):\n def forward(self, *args):\n return torch.topk(args[0], k=3, sorted=True)\n\n input_shape = [1, 3, 10, 10]\n input_data = torch.rand(input_shape).float()\n verify_model(Topk1().float().eval(), input_data=input_data)\n verify_model(Topk2().float().eval(), input_data=input_data)\n verify_model(Topk3().float().eval(), input_data=input_data)\n verify_model(Topk4().float().eval(), input_data=input_data)\n verify_model(Topk5().float().eval(), input_data=input_data)\n verify_model(Topk6().float().eval(), input_data=input_data)\n\n\[email protected]_gpu\ndef test_forward_logical_not():\n torch.set_grad_enabled(False)\n\n class LogicalNot1(Module):\n def forward(self, *args):\n return torch.logical_not(args[0])\n\n input_data = torch.tensor([True, False])\n verify_model(LogicalNot1().float().eval(), input_data=input_data)\n\n input_data = torch.tensor([0, 1, -10], dtype=torch.int8)\n verify_model(LogicalNot1().float().eval(), input_data=input_data)\n\n input_data = torch.tensor([0.0, 1.5, -10.0], dtype=torch.double)\n verify_model(LogicalNot1().float().eval(), input_data=input_data)\n\n input_data = torch.tensor([0.0, 1.0, -10.0], dtype=torch.int32)\n verify_model(LogicalNot1().float().eval(), input_data=input_data)\n\n\[email protected]_gpu\ndef test_forward_bitwise_not():\n torch.set_grad_enabled(False)\n\n class BitwiseNot1(Module):\n def forward(self, *args):\n return torch.bitwise_not(args[0])\n\n input_data = torch.tensor([0, 1, -10], dtype=torch.int8)\n verify_model(BitwiseNot1().float().eval(), input_data=input_data)\n\n input_data = torch.tensor([0.0, 1.0, -10.0], dtype=torch.int32)\n 
verify_model(BitwiseNot1().float().eval(), input_data=input_data)\n\n input_data = torch.tensor([True, False])\n verify_model(BitwiseNot1().float().eval(), input_data=input_data)\n\n\[email protected]_gpu\ndef test_forward_bitwise_xor():\n torch.set_grad_enabled(False)\n\n class BitwiseXor1(Module):\n def forward(self, *args):\n return torch.bitwise_xor(args[0], args[1])\n\n class BitwiseXor2(Module):\n def forward(self, *args):\n rhs = torch.tensor([1, 0, 3], dtype=torch.int8)\n if torch.cuda.is_available():\n rhs = rhs.cuda()\n return torch.bitwise_xor(args[0], rhs)\n\n lhs = torch.tensor([-1, -2, 3], dtype=torch.int8)\n rhs = torch.tensor([1, 0, 3], dtype=torch.int8)\n verify_model(BitwiseXor1().float().eval(), input_data=[lhs, rhs])\n\n lhs = torch.tensor([True, True, False])\n rhs = torch.tensor([False, True, False])\n verify_model(BitwiseXor1().float().eval(), input_data=[lhs, rhs])\n\n lhs = torch.tensor([-1, -2, 3], dtype=torch.int8)\n verify_model(BitwiseXor2().float().eval(), input_data=[lhs])\n\n\[email protected]_gpu\ndef test_forward_logical_xor():\n torch.set_grad_enabled(False)\n\n class LogicalXor1(Module):\n def forward(self, *args):\n return torch.logical_xor(args[0], args[1])\n\n class LogicalXor2(Module):\n def forward(self, *args):\n rhs = torch.tensor([1, 0, 3], dtype=torch.int8)\n if torch.cuda.is_available():\n rhs = rhs.cuda()\n return torch.logical_xor(args[0], rhs)\n\n lhs = torch.tensor([-1, -2, 3], dtype=torch.int8)\n rhs = torch.tensor([1, 0, 3], dtype=torch.int8)\n verify_model(LogicalXor1().float().eval(), input_data=[lhs, rhs])\n\n lhs = torch.tensor([True, True, False])\n rhs = torch.tensor([False, True, False])\n verify_model(LogicalXor1().float().eval(), input_data=[lhs, rhs])\n\n lhs = torch.tensor([-1, -2, 3], dtype=torch.int8)\n verify_model(LogicalXor2().float().eval(), input_data=[lhs])\n\n\[email protected]_gpu\ndef test_forward_unary():\n torch.set_grad_enabled(False)\n\n class Sqrt1(Module):\n def forward(self, *args):\n return torch.sqrt(args[0])\n\n class RSqrt1(Module):\n def forward(self, *args):\n return torch.rsqrt(args[0])\n\n class Ceil1(Module):\n def forward(self, *args):\n return torch.ceil(args[0])\n\n class Floor1(Module):\n def forward(self, *args):\n return torch.floor(args[0])\n\n class Round1(Module):\n def forward(self, *args):\n return torch.round(args[0])\n\n class Cos1(Module):\n def forward(self, *args):\n return torch.cos(args[0])\n\n class Sin1(Module):\n def forward(self, *args):\n return torch.sin(args[0])\n\n class Tan1(Module):\n def forward(self, *args):\n return torch.tan(args[0])\n\n class Tanh1(Module):\n def forward(self, *args):\n return torch.tanh(args[0])\n\n class Acos1(Module):\n def forward(self, *args):\n return torch.acos(args[0])\n\n class Asin1(Module):\n def forward(self, *args):\n return torch.asin(args[0])\n\n class Atan1(Module):\n def forward(self, *args):\n return torch.atan(args[0])\n\n class Log1(Module):\n def forward(self, *args):\n return torch.log(args[0])\n\n class Exp1(Module):\n def forward(self, *args):\n return torch.exp(args[0])\n\n class Erf1(Module):\n def forward(self, *args):\n return torch.erf(args[0])\n\n class Trunc1(Module):\n def forward(self, *args):\n return torch.trunc(args[0])\n\n class Sign1(Module):\n def forward(self, *args):\n return torch.sign(args[0])\n\n class Neg1(Module):\n def forward(self, *args):\n return torch.neg(args[0])\n\n class Sinh1(Module):\n def forward(self, *args):\n return torch.sinh(args[0])\n\n class Cosh1(Module):\n def forward(self, *args):\n 
return torch.cosh(args[0])\n\n class Log2_1(Module):\n def forward(self, *args):\n return torch.log2(args[0])\n\n class Log10_1(Module):\n def forward(self, *args):\n return torch.log10(args[0])\n\n class Log1p_1(Module):\n def forward(self, *args):\n return torch.log1p(args[0])\n\n input_shape = [1, 3, 10, 10]\n input_data = torch.rand(input_shape).float()\n verify_model(Sqrt1().float().eval(), input_data=input_data)\n verify_model(RSqrt1().float().eval(), input_data=input_data)\n verify_model(Ceil1().float().eval(), input_data=input_data)\n verify_model(Floor1().float().eval(), input_data=input_data)\n verify_model(Round1().float().eval(), input_data=input_data)\n verify_model(Cos1().float().eval(), input_data=input_data)\n verify_model(Cosh1().float().eval(), input_data=input_data)\n verify_model(Sin1().float().eval(), input_data=input_data)\n verify_model(Sinh1().float().eval(), input_data=input_data)\n verify_model(Tan1().float().eval(), input_data=input_data)\n verify_model(Tanh1().float().eval(), input_data=input_data)\n verify_model(Acos1().float().eval(), input_data=input_data)\n verify_model(Asin1().float().eval(), input_data=input_data)\n verify_model(Atan1().float().eval(), input_data=input_data)\n verify_model(Log1().float().eval(), input_data=input_data)\n verify_model(Log2_1().float().eval(), input_data=input_data)\n verify_model(Log10_1().float().eval(), input_data=input_data)\n verify_model(Log1p_1().float().eval(), input_data=input_data)\n verify_model(Exp1().float().eval(), input_data=input_data)\n verify_model(Erf1().float().eval(), input_data=input_data)\n verify_model(Trunc1().float().eval(), input_data=input_data)\n verify_model(Sign1().float().eval(), input_data=input_data)\n verify_model(Neg1().float().eval(), input_data=input_data)\n\n\[email protected]_gpu\ndef test_forward_where():\n torch.set_grad_enabled(False)\n\n class Where1(Module):\n def forward(self, *args):\n y = torch.ones([3, 2])\n if torch.cuda.is_available():\n y = y.cuda()\n return torch.where(args[0] > 0, args[0], y)\n\n class Where2(Module):\n def forward(self, *args):\n return torch.where(args[0] > 0, args[0], args[1])\n\n class Where3(Module):\n def forward(self, *args):\n return torch.where(args[0])[0]\n\n x = torch.rand([3, 2]).float()\n verify_model(Where1(), input_data=[x])\n y = torch.rand([3, 2])\n verify_model(Where2(), input_data=[x, y])\n\n # a single argument variant, equivalent to torch.nonzero(..., as_tuple=True)\n inp = torch.rand([10])\n inp[3:8] = 0\n verify_trace_model(Where3(), [inp], [\"llvm\"])\n\n\[email protected]_gpu\ndef test_forward_addcdiv():\n torch.set_grad_enabled(False)\n\n class Addcdiv1(Module):\n def forward(self, *args):\n t1 = torch.ones([3, 1])\n t2 = torch.ones([1, 3])\n if torch.cuda.is_available():\n t1 = t1.cuda()\n t2 = t2.cuda()\n return torch.addcdiv(args[0], 0.1, t1, t2)\n\n class Addcdiv2(Module):\n def forward(self, *args):\n return torch.addcdiv(args[0], 0.5, args[1], args[2])\n\n input_data = torch.rand([1, 3]).float()\n verify_model(Addcdiv1().float().eval(), input_data=input_data)\n t1 = torch.rand([3, 1]).float()\n t2 = torch.rand([1, 3]).float()\n verify_model(Addcdiv2().float().eval(), input_data=[input_data, t1, t2])\n\n\[email protected]_gpu\ndef test_forward_addcmul():\n torch.set_grad_enabled(False)\n\n class Addcmul1(Module):\n def forward(self, *args):\n t1 = torch.ones([3, 1])\n t2 = torch.ones([1, 3])\n if torch.cuda.is_available():\n t1 = t1.cuda()\n t2 = t2.cuda()\n return torch.addcmul(args[0], 0.1, t1, t2)\n\n class 
Addcmul2(Module):\n def forward(self, *args):\n return torch.addcmul(args[0], 0.5, args[1], args[2])\n\n input_data = torch.rand([1, 3]).float()\n verify_model(Addcmul1().float().eval(), input_data=input_data)\n t1 = torch.rand([3, 1]).float()\n t2 = torch.rand([1, 3]).float()\n verify_model(Addcmul2().float().eval(), input_data=[input_data, t1, t2])\n\n\[email protected]_gpu\ndef test_forward_true_divide():\n if package_version.parse(torch.__version__) < package_version.parse(\"1.5.0\"):\n return\n torch.set_grad_enabled(False)\n\n class TrueDivide(Module):\n def forward(self, *args):\n return torch.true_divide(args[0], args[1])\n\n dividend = torch.rand([5, 3]).float()\n # divisor could be either tensor or scalar\n divisor_tensor = torch.rand([5, 3]).float() + 0.5\n divisor_scalar = torch.tensor(1.0, dtype=torch.float32)\n verify_model(\n TrueDivide().float().eval(), input_data=[dividend, divisor_tensor], atol=1e-4, rtol=1e-4\n )\n verify_model(\n TrueDivide().float().eval(), input_data=[dividend, divisor_scalar], atol=1e-4, rtol=1e-4\n )\n\n\[email protected]_gpu\ndef test_forward_is_floating_point():\n torch.set_grad_enabled(False)\n\n class IsFloatingPoint(Module):\n def forward(self, arg):\n # `torch.jit.trace` cannot accept something that outputs\n # a Bool, so `torch.jit.script` will be used instead\n return torch.is_floating_point(arg)\n\n targets = _get_default_vm_targets()\n verify_script_model(IsFloatingPoint(), [(1, 1)], targets, idtype=torch.float64)\n verify_script_model(IsFloatingPoint(), [(1, 1)], targets, idtype=torch.float32)\n verify_script_model(IsFloatingPoint(), [(1, 1)], targets, idtype=torch.float16)\n # todo(dvisnty): Run the test for bfloat16 when full bfloat16 support is implemented\n # verify_script_model(IsFloatingPoint(), [(1,1)], targets, idtype=torch.bfloat16)\n verify_script_model(IsFloatingPoint(), [(1, 1)], targets, idtype=torch.int64)\n verify_script_model(IsFloatingPoint(), [(1, 1)], targets, idtype=torch.int32)\n verify_script_model(IsFloatingPoint(), [(1, 1)], targets, idtype=torch.int16)\n verify_script_model(IsFloatingPoint(), [(1, 1)], targets, idtype=torch.int8)\n verify_script_model(IsFloatingPoint(), [(1, 1)], targets, idtype=torch.uint8)\n\n\[email protected]_gpu\ndef test_forward_traced_function():\n def fn(t1, t2):\n return t1 + t2\n\n tensor1 = torch.randn(3, 4)\n tensor2 = torch.randn(3, 4)\n verify_model(fn, input_data=[tensor1, tensor2])\n\n\[email protected]_gpu\ndef test_forward_dtypes():\n def fn(t1, t2):\n return 2.5 * t1 + t2\n\n for dt in [torch.int32, torch.int64, torch.double]:\n tensor1 = torch.randn(3, 4).to(dtype=dt)\n tensor2 = torch.randn(3, 4).to(dtype=dt)\n verify_model(fn, input_data=[tensor1, tensor2])\n\n class ModuleWithIntParameters(Module):\n def __init__(self, arr):\n super().__init__()\n self.param = torch.nn.Parameter(torch.LongTensor(arr), requires_grad=False)\n\n def forward(self, x):\n return x.long() + self.param\n\n shape = (10, 10)\n param = torch.ones(shape, dtype=torch.long)\n inp = torch.ones(shape, dtype=torch.int)\n verify_model(ModuleWithIntParameters(param), input_data=inp)\n\n\[email protected]_gpu\ndef test_weight_names():\n tm = torch.jit.trace(torch.nn.Linear(3, 4), [torch.randn(2, 3)])\n mod, params = relay.frontend.from_pytorch(tm, [(\"input\", (2, 3))])\n assert set(params.keys()) == set(n for n, p in tm.named_parameters())\n\n\[email protected]_gpu\ndef test_duplicate_weight_use():\n # The test cases doesn't make any sense as a neural network,\n # the issue popped up in shared input/output 
embeddings of bert,\n # but this is quicker\n class Test(Module):\n def __init__(self):\n super().__init__()\n self.lin = torch.nn.Linear(5, 3)\n\n def forward(self, x):\n x = self.lin(x)\n x = x @ self.lin.weight\n return x\n\n verify_model(Test(), input_data=[torch.randn(5, 5)])\n\n\[email protected]_gpu\ndef test_forward_matmul():\n torch.set_grad_enabled(False)\n\n class MatMul1(Module):\n def forward(self, *args):\n return torch.matmul(args[0], args[1])\n\n # matrix x vector\n tensor1 = torch.randn(3, 4)\n tensor2 = torch.randn(4)\n verify_model(MatMul1().float().eval(), input_data=[tensor1, tensor2])\n\n # matrix x matrix\n tensor1 = torch.randn(10, 4)\n tensor2 = torch.randn(4, 10)\n verify_model(MatMul1().float().eval(), input_data=[tensor1, tensor2])\n\n # batched matrix x batched matrix\n tensor1 = torch.randn(10, 3, 4)\n tensor2 = torch.randn(10, 4, 5)\n verify_model(MatMul1().float().eval(), input_data=[tensor1, tensor2])\n\n # batched matrix x broadcasted matrix\n tensor1 = torch.randn(10, 3, 4)\n tensor2 = torch.randn(4, 5)\n verify_model(MatMul1().float().eval(), input_data=[tensor1, tensor2])\n\n # batched matrix x batched matrix\n tensor1 = torch.randn(1, 12, 14, 64)\n tensor2 = torch.randn(1, 12, 64, 14)\n verify_model(MatMul1().float().eval(), input_data=[tensor1, tensor2])\n\n\ndef test_forward_index():\n torch.set_grad_enabled(False)\n input_shape = [3, 4, 5, 6]\n\n class Index0(Module):\n def forward(self, x):\n return x[[0, 1], [0, 2], :2, 4]\n\n input_data = torch.rand(input_shape).float()\n verify_model(Index0().eval(), input_data=input_data)\n\n class Index1(Module):\n def forward(self, x):\n return x[[0], [1, 2, 3, 0], [3, 1, 2, 2], [4, 2, 1, 0]]\n\n input_data = torch.rand(input_shape).float()\n verify_model(Index1().eval(), input_data=input_data)\n\n\ndef test_logsumexp():\n class Logsumexp(Module):\n def __init__(self, dim, keepdim=False):\n super().__init__()\n self.dim = dim\n self.keepdim = keepdim\n\n def forward(self, x):\n return torch.logsumexp(x, self.dim, self.keepdim)\n\n input_shape = (100, 100)\n input_data = torch.rand(input_shape)\n\n verify_model(Logsumexp(0), input_data=input_data)\n verify_model(Logsumexp(0, keepdim=True), input_data=input_data)\n # Also test on double\n verify_model(Logsumexp(1, keepdim=True), input_data=input_data.double())\n\n\ndef test_stack():\n class Stack(torch.nn.Module):\n def __init__(self, axis=0):\n super().__init__()\n self.axis = axis\n\n def forward(self, x):\n return torch.stack((x, x), dim=self.axis)\n\n inp = torch.randn(8, 8, 8)\n verify_model(Stack(), input_data=inp)\n verify_model(Stack(axis=-1), input_data=inp)\n verify_model(Stack(axis=3), input_data=inp)\n verify_model(Stack(axis=-4), input_data=inp)\n\n\ndef test_stack_dynamic():\n class Stack(torch.nn.Module):\n def forward(self, x):\n tensor_list = []\n for i in range(x.size(0)):\n # this is a workaround to avoid generating impure aten::append op\n tensor_list += [x[i]]\n # relay tensor array only supports stacking on the first axis\n return torch.stack(tensor_list, dim=0)\n\n verify_script_model(Stack(), [(8, 8, 8)], _get_default_vm_targets())\n\n\ndef test_forward_unbind():\n class Unbind(torch.nn.Module):\n def __init__(self, axis=0):\n super().__init__()\n self.axis = axis\n\n def forward(self, x):\n return torch.unbind(x, self.axis)\n\n inp = torch.randn(8, 8, 8)\n verify_model(Unbind(0), input_data=inp)\n verify_model(Unbind(1), input_data=inp)\n verify_model(Unbind(2), input_data=inp)\n\n\ndef test_forward_nonzero():\n class Nonzero(Module):\n 
def __init__(self, as_tuple=False):\n super().__init__()\n self.as_tuple = as_tuple\n\n def forward(self, data):\n return torch.nonzero(data, as_tuple=self.as_tuple)\n\n inp = torch.Tensor(np.array([[0, 1, 0], [2, 0, 9], [-1, -1, 0]]).astype(\"float32\"))\n verify_trace_model(Nonzero(), [inp], [\"llvm\"])\n\n\ndef test_forward_scatter():\n # integer cannot be traced\n def test_fn_scatter(dim):\n return lambda data, index, src: torch.scatter(data, dim=dim, index=index, src=src)\n\n def test_fn_scatter_add(dim):\n return lambda data, index, src: torch.scatter_add(data, dim=dim, index=index, src=src)\n\n in_data = torch.zeros(3, 5)\n in_index = torch.tensor([[0, 1, 2, 0, 0], [2, 0, 0, 1, 2]])\n in_src = torch.rand(2, 5)\n\n targets = [\"llvm\", \"cuda\"]\n verify_trace_model(test_fn_scatter(0), [in_data, in_index, in_src], targets)\n verify_trace_model(test_fn_scatter_add(0), [in_data, in_index, in_src], targets)\n\n in_data = torch.zeros(2, 4)\n in_index = torch.tensor([[2], [3]])\n in_src = torch.rand(2, 1)\n\n verify_trace_model(test_fn_scatter(1), [in_data, in_index, in_src], targets)\n verify_trace_model(test_fn_scatter_add(1), [in_data, in_index, in_src], targets)\n\n\ndef test_numel():\n class Numel(Module):\n def forward(self, data):\n return torch.tensor(torch.numel(data))\n\n targets = _get_default_vm_targets()\n verify_script_model(Numel(), [(1,)], targets)\n verify_script_model(Numel(), [(3, 5)], targets)\n verify_script_model(Numel(), [(3, 5, 8)], targets)\n\n\ndef test_forward_pretrained_bert_base_uncased():\n ######################################################################\n # This is an example how to run BERT models using TVM\n # ---------------------------------------------------\n \"\"\"\n Refer the bert example given in https://pypi.org/project/pytorch-pretrained-bert\n\n # To get started, pretrained bert package needs to be installed as prerequisite.\n\n .. code-block:: bash\n\n # install bert package\n pip install pytorch_pretrained_bert==0.6.2 --user\n \"\"\"\n\n try:\n from pytorch_pretrained_bert import BertTokenizer, BertForMaskedLM\n except:\n print(\"Torch pretrained bert package must be installed to run this script.\")\n return\n\n ######################################################################\n # Load the tokenizer and tokenize the input\n # -----------------------------------------\n\n # Load pre-trained model tokenizer (vocabulary)\n tokenizer = BertTokenizer.from_pretrained(\"bert-base-uncased\")\n\n # Tokenized input\n text = \"[CLS] Who was Jim Henson ? 
[SEP] Jim Henson was a puppeteer [SEP]\"\n tokenized_text = tokenizer.tokenize(text)\n\n # Mask a token that we will try to predict back with `BertForMaskedLM`\n masked_index = 8\n tokenized_text[masked_index] = \"[MASK]\"\n assert tokenized_text == [\n \"[CLS]\",\n \"who\",\n \"was\",\n \"jim\",\n \"henson\",\n \"?\",\n \"[SEP]\",\n \"jim\",\n \"[MASK]\",\n \"was\",\n \"a\",\n \"puppet\",\n \"##eer\",\n \"[SEP]\",\n ]\n\n # Convert token to vocabulary indices\n indexed_tokens = tokenizer.convert_tokens_to_ids(tokenized_text)\n # Define sentence A and B indices associated to 1st and 2nd sentences (see paper)\n segments_ids = [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1]\n\n # Convert inputs to PyTorch tensors\n tokens_tensor = torch.tensor([indexed_tokens])\n segments_tensors = torch.tensor([segments_ids])\n\n ######################################################################\n # Load a pretrained PyTorch model bert-base-uncased\n # -------------------------------------------------\n\n # Bert Model with a language modeling\n model = BertForMaskedLM.from_pretrained(\"bert-base-uncased\")\n model.eval()\n\n ######################################################################\n # Predict all tokens with pytorch\n # -------------------------------\n\n with torch.no_grad():\n torch_preds = model(tokens_tensor, segments_tensors)\n\n ######################################################################\n # Make TorchScripted model via jit trace\n # --------------------------------------\n\n scripted_model = torch.jit.trace(model, (tokens_tensor, segments_tensors)).eval()\n\n ######################################################################\n # Import the graph to Relay\n # -------------------------\n # Convert PyTorch graph to Relay graph. The input name can be arbitrary.\n\n input_1 = \"input_ids\"\n input_2 = \"input.2\"\n shape_list = [(input_1, list(tokens_tensor.shape)), (input_2, list(segments_tensors.shape))]\n\n mod, params = relay.frontend.from_pytorch(scripted_model, shape_list)\n\n ######################################################################\n # Compile the model with relay\n # ----------------------------\n\n target = \"llvm\"\n with tvm.transform.PassContext(opt_level=3):\n relay_graph, relay_lib, relay_params = relay.build(mod, target=target, params=params)\n\n ######################################################################\n # Execute on TVM\n # --------------\n\n ctx = tvm.context(target, 0)\n relay_model = graph_runtime.create(relay_graph, relay_lib, ctx)\n relay_model.set_input(**relay_params)\n relay_model.set_input(input_1, tokens_tensor)\n relay_model.set_input(input_2, segments_tensors)\n relay_model.run()\n compiled_output = relay_model.get_output(0).asnumpy()\n\n ######################################################################\n # Validate the outputs\n # --------------------\n # Compare the torch and tvm outputs\n\n tvm.testing.assert_allclose(torch_preds, compiled_output, rtol=1e-3, atol=1e-3)\n\n ######################################################################\n # Process the output\n # ------------------\n # Process the model output to token.\n\n # Torch output to token\n torch_pred_idx = torch.argmax(torch_preds[0, masked_index]).item()\n torch_pred_token = tokenizer.convert_ids_to_tokens([torch_pred_idx])[0]\n\n # TVM output to token\n tvm_pred_idx = compiled_output[0, masked_index].argmax()\n tvm_pred_token = tokenizer.convert_ids_to_tokens([tvm_pred_idx])[0]\n\n assert torch_pred_idx == tvm_pred_idx\n assert torch_pred_token == 
tvm_pred_token\n\n # Print the outputs\n print(\"Torch top-1 id: {}, token: {}\".format(torch_pred_idx, torch_pred_token))\n print(\"TVM top-1 id: {}, token: {}\".format(tvm_pred_idx, tvm_pred_token))\n\n\ndef test_convert_torch_script_with_input_types():\n def model_fn(x, y):\n x = x.to(dtype=torch.int32)\n y = x + y\n return y\n\n ishape = (4, 5)\n input_x = torch.rand(ishape, dtype=torch.float32)\n input_y = torch.randint(low=0, high=100, size=ishape, dtype=torch.int32)\n inputs = [input_x, input_y]\n script_module = torch.jit.trace(model_fn, inputs)\n\n fname = \"tmp.pt\"\n torch.jit.save(script_module, fname)\n loaded = torch.jit.load(fname)\n os.remove(fname)\n\n verify_model(loaded.eval(), input_data=inputs)\n\n def expected(x_shape, y_shape):\n # use a fixed order of args so alpha equal check can pass\n x = relay.var(\"x\", shape=x_shape, dtype=\"float32\")\n y = relay.var(\"y\", shape=y_shape, dtype=\"int32\")\n args = [x, y]\n x1 = relay.cast(x, \"int32\")\n y1 = relay.add(x1, y)\n mod = tvm.IRModule.from_expr(relay.Function(args, y1))\n return mod[\"main\"]\n\n input_infos = [(\"input0\", (ishape, \"float\")), (\"input1\", (ishape, \"int\"))]\n mod, params = relay.frontend.from_pytorch(loaded, input_infos)\n\n expected_mod = expected(ishape, ishape)\n\n assert tvm.ir.structural_equal(expected_mod, mod[\"main\"], map_free_vars=True)\n\n\ndef test_bincount():\n def test_fn(x, weights=None):\n return torch.bincount(x, weights=weights)\n\n inp = torch.randint(0, 100, (10000,), dtype=torch.int64)\n weights = torch.linspace(0, 100, steps=10000)\n\n targets = [\"llvm\", \"cuda\"]\n verify_trace_model(test_fn, [inp], targets)\n verify_trace_model(test_fn, [inp, weights], targets)\n\n\ndef test_hard_swish():\n examples = [torch.rand(8).float(), torch.rand(8, 10).float(), torch.rand(1, 1, 10).float()]\n for input in examples:\n verify_model(torch.nn.Hardswish().eval(), input_data=input)\n verify_model(torch.nn.Hardswish(inplace=True).eval(), input_data=input)\n\n\nif __name__ == \"__main__\":\n # some structural tests\n test_forward_traced_function()\n test_forward_dtypes()\n test_weight_names()\n test_duplicate_weight_use()\n\n # Single operator tests\n test_forward_pixel_shuffle()\n test_forward_add()\n test_forward_subtract()\n test_forward_multiply()\n test_forward_matmul()\n test_forward_rsub()\n test_forward_onehot()\n test_forward_embedding()\n test_forward_reshape()\n test_forward_reciprocal()\n test_forward_repeat()\n test_forward_repeat_interleave()\n test_forward_squeeze()\n test_forward_unsqueeze()\n test_forward_concatenate()\n test_forward_reduce_sum()\n test_forward_reduce_prod()\n test_forward_argmin()\n test_forward_argmax()\n test_forward_norm()\n test_forward_frobenius_norm()\n test_forward_std()\n test_forward_variance()\n test_forward_relu()\n test_forward_prelu()\n test_forward_leakyrelu()\n test_forward_elu()\n test_forward_celu()\n test_forward_gelu()\n test_forward_selu()\n test_forward_log_sigmoid()\n test_forward_adaptiveavgpool()\n test_forward_maxpool2d()\n test_forward_maxpool1d()\n test_forward_maxpool3d()\n test_forward_hardtanh()\n test_forward_conv()\n test_forward_conv_transpose()\n test_forward_threshold()\n test_forward_contiguous()\n test_forward_batchnorm()\n test_forward_instancenorm()\n test_forward_layernorm()\n test_forward_groupnorm()\n test_forward_transpose()\n test_forward_size()\n test_forward_view()\n test_forward_select()\n test_forward_take()\n test_forward_topk()\n test_forward_where()\n test_forward_addcdiv()\n test_forward_addcmul()\n 
test_forward_true_divide()\n test_forward_is_floating_point()\n test_forward_clone()\n test_forward_softplus()\n test_forward_softsign()\n test_forward_logsoftmax()\n test_forward_sigmoid()\n test_forward_dense()\n test_forward_avgpool()\n test_forward_avgpool3d()\n test_forward_dropout()\n test_forward_slice()\n test_forward_mean()\n test_forward_expand()\n test_forward_pow()\n test_forward_unary()\n test_forward_clamp()\n test_forward_clamp_()\n test_forward_logical_not()\n test_forward_bitwise_not()\n test_forward_bitwise_xor()\n test_forward_logical_xor()\n test_forward_isfinite()\n test_forward_isnan()\n test_forward_isinf()\n test_forward_ones()\n test_forward_ones_like()\n test_forward_zeros()\n test_forward_zeros_like()\n test_forward_full()\n test_forward_full_like()\n test_forward_linspace()\n test_forward_arange()\n test_forward_mesh_grid()\n test_forward_chunk()\n test_forward_split()\n test_forward_gather()\n test_upsample()\n test_forward_upsample3d()\n test_forward_nms()\n test_forward_roi_align()\n test_to()\n test_flatten()\n test_type_as()\n test_forward_functional_pad()\n test_forward_zero_pad2d()\n test_forward_constant_pad1d()\n test_forward_constant_pad2d()\n test_forward_constant_pad3d()\n test_forward_reflection_pad1d()\n test_forward_reflection_pad2d()\n test_forward_replication_pad1d()\n test_forward_replication_pad2d()\n test_forward_replication_pad3d()\n test_adaptive_pool3d()\n test_conv3d()\n test_conv3d_transpose()\n test_forward_index()\n test_min_max()\n test_logsumexp()\n test_stack()\n test_stack_dynamic()\n test_forward_unbind()\n test_forward_nonzero()\n test_forward_scatter()\n test_numel()\n test_bincount()\n\n # Model tests\n test_resnet18()\n test_squeezenet1_0()\n test_squeezenet1_1()\n test_densenet121()\n # disable inception test for now, since loading it takes ~5min on torchvision-0.5 due to scipy bug\n # See https://discuss.pytorch.org/t/torchvisions-inception-v3-takes-much-longer-to-load-than-other-models/68756\n # test_inception_v3()\n test_googlenet()\n test_mnasnet0_5()\n test_mobilenet_v2()\n\n test_custom_conversion_map()\n\n test_segmentaton_models()\n test_3d_models()\n\n # Quantization test\n from qnn_test import test_quantized_imagenet, test_quantized_modules\n\n test_quantized_modules()\n test_quantized_imagenet()\n\n # Test simple conditionals and loop\n test_control_flow()\n test_simple_rnn()\n\n # More complex recurrent models\n from test_lstm import test_custom_lstm\n\n test_custom_lstm()\n\n # Test bert model\n test_forward_pretrained_bert_base_uncased()\n\n # Test convert torch script(jit) with specific inputs' types\n test_convert_torch_script_with_input_types()\n test_hard_swish()\n"
] | [
[
"numpy.allclose",
"numpy.clip",
"numpy.flip",
"numpy.random.randint"
],
[
"torch.nn.ReflectionPad1d",
"torch.addcdiv",
"torch.rand",
"torch.jit.save",
"torch.nn.Conv2d",
"torch.meshgrid",
"torch.cat",
"torch.neg",
"torch.jit.trace",
"torch.nn.ConstantPad2d",
"torch.nn.BatchNorm3d",
"torch.randn",
"torch.isinf",
"torch.take",
"torch.norm",
"torch.nn.functional.max_pool3d",
"torch.sin",
"torch.nn.functional.avg_pool3d",
"numpy.mean",
"torch.ceil",
"torch.nn.functional.max_pool2d",
"torch.bincount",
"torch.logical_not",
"torch.nn.ReplicationPad2d",
"torch.nn.ConstantPad3d",
"torch.split",
"torch.nn.Softsign",
"torch.zeros_like",
"torch.zeros",
"torch.nn.ConvTranspose1d",
"numpy.array",
"torch.nn.functional.interpolate",
"torch.jit.script",
"torch.nonzero",
"torch.asin",
"torch.no_grad",
"torch.cuda.is_available",
"torch.nn.ReflectionPad2d",
"torch.max",
"torch.jit.load",
"torch.nn.Sigmoid",
"torch.logical_xor",
"torch.nn.Dropout",
"torch.log1p",
"torch.atan",
"torch.scatter",
"torch.cos",
"torch.set_grad_enabled",
"torch.erf",
"torch.nn.Softmax",
"torch.true_divide",
"torch.nn.init.normal_",
"torch.gather",
"torch.nn.ConvTranspose3d",
"torch.nn.Threshold",
"torch.nn.LogSigmoid",
"torch.nn.AvgPool3d",
"torch.nn.SELU",
"torch.nn.MaxPool1d",
"scipy.stats.t.ppf",
"torch.argmax",
"torch.flatten",
"torch.nn.LogSoftmax",
"torch.log2",
"torch.nn.AlphaDropout",
"torch.nn.Hardswish",
"torch.nn.Hardtanh",
"torch.topk",
"torch.is_floating_point",
"torch.scatter_add",
"numpy.sqrt",
"torch.nn.Dropout3d",
"torch.nn.Conv3d",
"torch.clamp",
"torch.nn.LeakyReLU",
"torch.stack",
"torch.nn.functional.avg_pool2d",
"torch.nn.MaxPool3d",
"torch.nn.AdaptiveMaxPool3d",
"torch.nn.functional.one_hot",
"torch.log",
"torch.nn.Upsample",
"torch.nn.InstanceNorm2d",
"torch.floor",
"torch.cuda.empty_cache",
"torch.nn.BatchNorm2d",
"torch.nn.functional.pad",
"torch.index_select",
"torch.log10",
"torch.nn.AdaptiveAvgPool3d",
"torch.nn.Softplus",
"numpy.random.uniform",
"torch.nn.MaxPool2d",
"torch.ones_like",
"numpy.zeros",
"torch.nn.AdaptiveAvgPool2d",
"torch.cosh",
"torch.logsumexp",
"torch.nn.Linear",
"torch.nn.PReLU",
"torch.nn.Dropout2d",
"torch.sinh",
"torch.nn.Embedding",
"torch.exp",
"torch.nn.GELU",
"torch.clamp_",
"torch.where",
"torch.numel",
"torch.full_like",
"torch.tan",
"torch.nn.ZeroPad2d",
"torch.matmul",
"torch.min",
"torch.randint",
"torch.addcmul",
"torch.LongTensor",
"torch.trunc",
"torch.nn.CELU",
"torch.sqrt",
"torch.bitwise_not",
"torch.bitwise_xor",
"torch.nn.ConvTranspose2d",
"torch.nn.GroupNorm",
"torch.round",
"torch.linspace",
"torch.nn.InstanceNorm3d",
"torch.sign",
"torch.arange",
"torch.nn.LayerNorm",
"torch.tanh",
"torch.nn.AvgPool2d",
"torch.acos",
"torch.isfinite",
"torch.nn.PixelShuffle",
"torch.ones",
"torch.nn.ReplicationPad3d",
"torch.rsub",
"torch.tensor",
"torch.nn.Conv1d",
"torch.nn.functional.max_pool1d",
"torch.nn.ReplicationPad1d",
"numpy.std",
"torch.isnan",
"torch.unbind",
"torch.full",
"numpy.random.random",
"torch.rsqrt",
"torch.nn.ELU",
"torch.nn.ReLU"
]
] |
jaidevd/scikit-image | [
"62d6a3d7e95a228c729c9ff99b4f45336a210885"
] | [
"skimage/morphology/selem.py"
] | [
"\"\"\"\n:author: Damian Eads, 2009\n:license: modified BSD\n\"\"\"\n\nimport numpy as np\nfrom scipy import ndimage\nfrom skimage import draw\n\ndef square(width, dtype=np.uint8):\n \"\"\"Generates a flat, square-shaped structuring element.\n\n Every pixel along the perimeter has a chessboard distance\n no greater than radius (radius=floor(width/2)) pixels.\n\n Parameters\n ----------\n width : int\n The width and height of the square.\n\n Other Parameters\n ----------------\n dtype : data-type\n The data type of the structuring element.\n\n Returns\n -------\n selem : ndarray\n A structuring element consisting only of ones, i.e. every\n pixel belongs to the neighborhood.\n\n \"\"\"\n return np.ones((width, width), dtype=dtype)\n\n\ndef rectangle(width, height, dtype=np.uint8):\n \"\"\"Generates a flat, rectangular-shaped structuring element.\n\n Every pixel in the rectangle generated for a given width and given height\n belongs to the neighboorhood.\n\n Parameters\n ----------\n width : int\n The width of the rectangle.\n height : int\n The height of the rectangle.\n\n Other Parameters\n ----------------\n dtype : data-type\n The data type of the structuring element.\n\n Returns\n -------\n selem : ndarray\n A structuring element consisting only of ones, i.e. every\n pixel belongs to the neighborhood.\n\n \"\"\"\n return np.ones((width, height), dtype=dtype)\n\n\ndef diamond(radius, dtype=np.uint8):\n \"\"\"Generates a flat, diamond-shaped structuring element.\n\n A pixel is part of the neighborhood (i.e. labeled 1) if\n the city block/manhattan distance between it and the center of\n the neighborhood is no greater than radius.\n\n Parameters\n ----------\n radius : int\n The radius of the diamond-shaped structuring element.\n\n Other Parameters\n ----------------\n dtype : data-type\n The data type of the structuring element.\n\n Returns\n -------\n\n selem : ndarray\n The structuring element where elements of the neighborhood\n are 1 and 0 otherwise.\n \"\"\"\n L = np.arange(0, radius * 2 + 1)\n I, J = np.meshgrid(L, L)\n return np.array(np.abs(I - radius) + np.abs(J - radius) <= radius,\n dtype=dtype)\n\n\ndef disk(radius, dtype=np.uint8):\n \"\"\"Generates a flat, disk-shaped structuring element.\n\n A pixel is within the neighborhood if the euclidean distance between\n it and the origin is no greater than radius.\n\n Parameters\n ----------\n radius : int\n The radius of the disk-shaped structuring element.\n\n Other Parameters\n ----------------\n dtype : data-type\n The data type of the structuring element.\n\n Returns\n -------\n selem : ndarray\n The structuring element where elements of the neighborhood\n are 1 and 0 otherwise.\n \"\"\"\n L = np.arange(-radius, radius + 1)\n X, Y = np.meshgrid(L, L)\n return np.array((X ** 2 + Y ** 2) <= radius ** 2, dtype=dtype)\n\n\ndef ellipse(width, height, dtype=np.uint8):\n \"\"\"Generates a flat, ellipse-shaped structuring element.\n\n Every pixel along the perimeter of ellipse satisfies\n the equation ``(x/width+1)**2 + (y/height+1)**2 = 1``.\n\n Parameters\n ----------\n width : int\n The width of the ellipse-shaped structuring element.\n height : int\n The height of the ellipse-shaped structuring element.\n\n Other Parameters\n ----------------\n dtype : data-type\n The data type of the structuring element.\n\n Returns\n -------\n selem : ndarray\n The structuring element where elements of the neighborhood\n are 1 and 0 otherwise.\n\n Examples\n --------\n >>> from skimage.morphology import selem\n >>> selem.ellipse(5, 3)\n array([[0, 
0, 1, 1, 1, 1, 1, 1, 1, 0, 0],\n [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],\n [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],\n [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],\n [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],\n [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],\n [0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0]], dtype=uint8)\n\n \"\"\"\n selem = np.zeros((2 * height + 1, 2 * width + 1), dtype=dtype)\n rows, cols = draw.ellipse(height, width, height + 1, width + 1)\n selem[rows, cols] = 1\n return selem\n\n\ndef cube(width, dtype=np.uint8):\n \"\"\" Generates a cube-shaped structuring element.\n\n This is the 3D equivalent of a square.\n Every pixel along the perimeter has a chessboard distance\n no greater than radius (radius=floor(width/2)) pixels.\n\n Parameters\n ----------\n width : int\n The width, height and depth of the cube.\n\n Other Parameters\n ----------------\n dtype : data-type\n The data type of the structuring element.\n\n Returns\n -------\n selem : ndarray\n A structuring element consisting only of ones, i.e. every\n pixel belongs to the neighborhood.\n\n \"\"\"\n return np.ones((width, width, width), dtype=dtype)\n\n\ndef octahedron(radius, dtype=np.uint8):\n \"\"\"Generates a octahedron-shaped structuring element.\n\n This is the 3D equivalent of a diamond.\n A pixel is part of the neighborhood (i.e. labeled 1) if\n the city block/manhattan distance between it and the center of\n the neighborhood is no greater than radius.\n\n Parameters\n ----------\n radius : int\n The radius of the octahedron-shaped structuring element.\n\n Other Parameters\n ----------------\n dtype : data-type\n The data type of the structuring element.\n\n Returns\n -------\n\n selem : ndarray\n The structuring element where elements of the neighborhood\n are 1 and 0 otherwise.\n \"\"\"\n # note that in contrast to diamond(), this method allows non-integer radii\n n = 2 * radius + 1\n Z, Y, X = np.mgrid[-radius:radius:n * 1j,\n -radius:radius:n * 1j,\n -radius:radius:n * 1j]\n s = np.abs(X) + np.abs(Y) + np.abs(Z)\n return np.array(s <= radius, dtype=dtype)\n\n\ndef ball(radius, dtype=np.uint8):\n \"\"\"Generates a ball-shaped structuring element.\n\n This is the 3D equivalent of a disk.\n A pixel is within the neighborhood if the euclidean distance between\n it and the origin is no greater than radius.\n\n Parameters\n ----------\n radius : int\n The radius of the ball-shaped structuring element.\n\n Other Parameters\n ----------------\n dtype : data-type\n The data type of the structuring element.\n\n Returns\n -------\n selem : ndarray\n The structuring element where elements of the neighborhood\n are 1 and 0 otherwise.\n \"\"\"\n n = 2 * radius + 1\n Z, Y, X = np.mgrid[-radius:radius:n * 1j,\n -radius:radius:n * 1j,\n -radius:radius:n * 1j]\n s = X ** 2 + Y ** 2 + Z ** 2\n return np.array(s <= radius * radius, dtype=dtype)\n\n\ndef octagon(m, n, dtype=np.uint8):\n \"\"\"Generates an octagon shaped structuring element.\n\n For a given size of (m) horizontal and vertical sides\n and a given (n) height or width of slanted sides octagon is generated.\n The slanted sides are 45 or 135 degrees to the horizontal axis\n and hence the widths and heights are equal.\n\n Parameters\n ----------\n m : int\n The size of the horizontal and vertical sides.\n n : int\n The height or width of the slanted sides.\n\n Other Parameters\n ----------------\n dtype : data-type\n The data type of the structuring element.\n\n Returns\n -------\n selem : ndarray\n The structuring element where elements of the neighborhood\n are 1 and 0 otherwise.\n\n \"\"\"\n from . 
import convex_hull_image\n selem = np.zeros((m + 2 * n, m + 2 * n))\n selem[0, n] = 1\n selem[n, 0] = 1\n selem[0, m + n - 1] = 1\n selem[m + n - 1, 0] = 1\n selem[-1, n] = 1\n selem[n, -1] = 1\n selem[-1, m + n - 1] = 1\n selem[m + n - 1, -1] = 1\n selem = convex_hull_image(selem).astype(dtype)\n return selem\n\n\ndef star(a, dtype=np.uint8):\n \"\"\"Generates a star shaped structuring element.\n\n Start has 8 vertices and is an overlap of square of size `2*a + 1`\n with its 45 degree rotated version.\n The slanted sides are 45 or 135 degrees to the horizontal axis.\n\n Parameters\n ----------\n a : int\n Parameter deciding the size of the star structural element. The side\n of the square array returned is `2*a + 1 + 2*floor(a / 2)`.\n\n Other Parameters\n ----------------\n dtype : data-type\n The data type of the structuring element.\n\n Returns\n -------\n selem : ndarray\n The structuring element where elements of the neighborhood\n are 1 and 0 otherwise.\n\n \"\"\"\n from . import convex_hull_image\n\n if a == 1:\n bfilter = np.zeros((3, 3), dtype)\n bfilter[:] = 1\n return bfilter\n\n m = 2 * a + 1\n n = a // 2\n selem_square = np.zeros((m + 2 * n, m + 2 * n))\n selem_square[n: m + n, n: m + n] = 1\n\n c = (m + 2 * n - 1) // 2\n selem_rotated = np.zeros((m + 2 * n, m + 2 * n))\n selem_rotated[0, c] = selem_rotated[-1, c] = 1\n selem_rotated[c, 0] = selem_rotated[c, -1] = 1\n selem_rotated = convex_hull_image(selem_rotated).astype(int)\n\n selem = selem_square + selem_rotated\n selem[selem > 0] = 1\n\n return selem.astype(dtype)\n\n\ndef _default_selem(ndim):\n \"\"\"Generates a cross-shaped structuring element (connectivity=1).\n\n This is the default structuring element (selem) if no selem was specified.\n\n Parameters\n ----------\n ndim : int\n Number of dimensions of the image.\n\n Returns\n -------\n selem : ndarray\n The structuring element where elements of the neighborhood\n are 1 and 0 otherwise.\n\n \"\"\"\n return ndimage.morphology.generate_binary_structure(ndim, 1)\n"
] | [
[
"numpy.ones",
"numpy.zeros",
"numpy.abs",
"numpy.arange",
"scipy.ndimage.morphology.generate_binary_structure",
"numpy.array",
"numpy.meshgrid"
]
] |
shan18/taxi | [
"286e2c9a97c1e0b52d63bbb3508045001f449714"
] | [
"jnt/freq.py"
] | [
"from pandas import read_csv\nimport _pickle as pickle\nfrom traceback import format_exc\n\nfrom .common import exists, preprocess_pandas_csv\nfrom .common import try_remove\n\n\nDEFAULT_FREQ = 1\n\n\ndef load_freq(freq_fpath, min_freq=1, preprocess=True, sep='\\t', strip_pos=True, use_pickle=True):\n f = FreqDictionary(freq_fpath, min_freq=min_freq, preprocess=preprocess, sep=sep, strip_pos=strip_pos, use_pickle=use_pickle)\n return f.data\n\n\nclass FreqDictionary(object):\n def __init__(self, freq_fpath, min_freq=1, preprocess=True, sep='\\t', strip_pos=True, use_pickle=True):\n \"\"\" Reads a word frequency list in CSV format \"word<TAB>freq\" \"\"\"\n\n if not exists(freq_fpath):\n self._freq = {}\n return\n\n pkl_fpath = freq_fpath + \".pkl\"\n if use_pickle and exists(pkl_fpath):\n voc = pickle.load(open(pkl_fpath, \"rb\"))\n else:\n # load words to datafame\n if preprocess:\n freq_cln_fpath = freq_fpath + \"-cln\"\n preprocess_pandas_csv(freq_fpath, freq_cln_fpath)\n word_df = read_csv(freq_cln_fpath, sep, encoding='utf-8', error_bad_lines=False)\n try_remove(freq_cln_fpath)\n else:\n word_df = read_csv(freq_fpath, sep, encoding='utf-8', error_bad_lines=False)\n\n # load from dataframe to dictionary\n word_df = word_df.drop(word_df[word_df[\"freq\"] < min_freq].index)\n if strip_pos:\n voc = {}\n for i, row in word_df.iterrows():\n try:\n word = str(row[\"word\"]).split(\"#\")[0]\n freq = int(row[\"freq\"])\n if word not in voc or voc[word] < freq: voc[word] = freq\n except:\n print(\"Bad row:\", row)\n print(format_exc())\n else:\n voc = { row[\"word\"]: row[\"freq\"] for i, row in word_df.iterrows() }\n\n print(\"dictionary is loaded:\", len(voc))\n\n if use_pickle:\n pickle.dump(voc, open(pkl_fpath, \"wb\"))\n print(\"Pickled voc:\", pkl_fpath)\n\n print(\"Loaded %d words from: %s\" % (len(voc), pkl_fpath if pkl_fpath else freq_fpath))\n\n self._freq = voc\n\n\n @property\n def data(self):\n return self._freq\n\n def freq(self, word):\n \"\"\" Returns frequency of the word or 1 \"\"\"\n\n if word in self._freq: return self._freq[word]\n else: return DEFAULT_FREQ\n"
] | [
[
"pandas.read_csv"
]
] |
hmthanh/LaTeX_OCR | [
"bf5cf4642aff9cbbd5c4f8f232cd993a38ee6d81"
] | [
"models/layers/norm_act.py"
] | [
"from typing import Union, List\n\nimport torch\nfrom torch import nn as nn\nfrom torch.nn import functional as F\n\nfrom models.layers.create_act import get_act_layer\nfrom .trace_utils import _assert\n\n\nclass BatchNormAct2d(nn.BatchNorm2d):\n \"\"\"BatchNorm + Activation\n This module performs BatchNorm + Activation in a manner that will remain backwards\n compatible with weights trained with separate bn, act. This is why we inherit from BN\n instead of composing it as a .bn member.\n \"\"\"\n\n def __init__(\n self, num_features, eps=1e-5, momentum=0.1, affine=True, track_running_stats=True,\n apply_act=True, act_layer=nn.ReLU, inplace=True, drop_layer=None):\n super(BatchNormAct2d, self).__init__(\n num_features, eps=eps, momentum=momentum, affine=affine, track_running_stats=track_running_stats)\n self.drop = drop_layer() if drop_layer is not None else nn.Identity()\n act_layer = get_act_layer(act_layer) # string -> nn.Module\n if act_layer is not None and apply_act:\n act_args = dict(inplace=True) if inplace else {}\n self.act = act_layer(**act_args)\n else:\n self.act = nn.Identity()\n\n def forward(self, x):\n # cut & paste of torch.nn.BatchNorm2d.forward impl to avoid issues with torchscript and tracing\n _assert(x.ndim == 4, f'expected 4D input (got {x.ndim}D input)')\n\n # exponential_average_factor is set to self.momentum\n # (when it is available) only so that it gets updated\n # in ONNX graph when this node is exported to ONNX.\n if self.momentum is None:\n exponential_average_factor = 0.0\n else:\n exponential_average_factor = self.momentum\n\n if self.training and self.track_running_stats:\n # TODO: if statement only here to tell the jit to skip emitting this when it is None\n if self.num_batches_tracked is not None: # type: ignore[has-type]\n self.num_batches_tracked = self.num_batches_tracked + \\\n 1 # type: ignore[has-type]\n if self.momentum is None: # use cumulative moving average\n exponential_average_factor = 1.0 / \\\n float(self.num_batches_tracked)\n else: # use exponential moving average\n exponential_average_factor = self.momentum\n\n r\"\"\"\n Decide whether the mini-batch stats should be used for normalization rather than the buffers.\n Mini-batch stats are used in training mode, and in eval mode when buffers are None.\n \"\"\"\n if self.training:\n bn_training = True\n else:\n bn_training = (self.running_mean is None) and (\n self.running_var is None)\n\n r\"\"\"\n Buffers are only updated if they are to be tracked and we are in training mode. Thus they only need to be\n passed when the update should occur (i.e. in training mode when they are tracked), or when buffer stats are\n used for normalization (i.e. 
in eval mode when buffers are not None).\n \"\"\"\n x = F.batch_norm(\n x,\n # If buffers are not to be tracked, ensure that they won't be updated\n self.running_mean if not self.training or self.track_running_stats else None,\n self.running_var if not self.training or self.track_running_stats else None,\n self.weight,\n self.bias,\n bn_training,\n exponential_average_factor,\n self.eps,\n )\n x = self.drop(x)\n x = self.act(x)\n return x\n\n\ndef _num_groups(num_channels, num_groups, group_size):\n if group_size:\n assert num_channels % group_size == 0\n return num_channels // group_size\n return num_groups\n\n\nclass GroupNormAct(nn.GroupNorm):\n # NOTE num_channel and num_groups order flipped for easier layer swaps / binding of fixed args\n def __init__(\n self, num_channels, num_groups=32, eps=1e-5, affine=True, group_size=None,\n apply_act=True, act_layer=nn.ReLU, inplace=True, drop_layer=None):\n super(GroupNormAct, self).__init__(\n _num_groups(num_channels, num_groups, group_size), num_channels, eps=eps, affine=affine)\n self.drop = drop_layer() if drop_layer is not None else nn.Identity()\n act_layer = get_act_layer(act_layer) # string -> nn.Module\n if act_layer is not None and apply_act:\n act_args = dict(inplace=True) if inplace else {}\n self.act = act_layer(**act_args)\n else:\n self.act = nn.Identity()\n\n def forward(self, x):\n x = F.group_norm(x, self.num_groups, self.weight, self.bias, self.eps)\n x = self.drop(x)\n x = self.act(x)\n return x\n\n\nclass LayerNormAct(nn.LayerNorm):\n def __init__(\n self, normalization_shape: Union[int, List[int], torch.Size], eps=1e-5, affine=True,\n apply_act=True, act_layer=nn.ReLU, inplace=True, drop_layer=None):\n super(LayerNormAct, self).__init__(\n normalization_shape, eps=eps, elementwise_affine=affine)\n self.drop = drop_layer() if drop_layer is not None else nn.Identity()\n act_layer = get_act_layer(act_layer) # string -> nn.Module\n if act_layer is not None and apply_act:\n act_args = dict(inplace=True) if inplace else {}\n self.act = act_layer(**act_args)\n else:\n self.act = nn.Identity()\n\n def forward(self, x):\n x = F.layer_norm(x, self.normalized_shape,\n self.weight, self.bias, self.eps)\n x = self.drop(x)\n x = self.act(x)\n return x\n\n\nclass LayerNormAct2d(nn.LayerNorm):\n def __init__(\n self, num_channels, eps=1e-5, affine=True,\n apply_act=True, act_layer=nn.ReLU, inplace=True, drop_layer=None):\n super(LayerNormAct2d, self).__init__(\n num_channels, eps=eps, elementwise_affine=affine)\n self.drop = drop_layer() if drop_layer is not None else nn.Identity()\n act_layer = get_act_layer(act_layer) # string -> nn.Module\n if act_layer is not None and apply_act:\n act_args = dict(inplace=True) if inplace else {}\n self.act = act_layer(**act_args)\n else:\n self.act = nn.Identity()\n\n def forward(self, x):\n x = F.layer_norm(\n x.permute(0, 2, 3, 1), self.normalized_shape, self.weight, self.bias, self.eps).permute(0, 3, 1, 2)\n x = self.drop(x)\n x = self.act(x)\n return x\n"
] | [
[
"torch.nn.Identity",
"torch.nn.functional.group_norm",
"torch.nn.functional.batch_norm",
"torch.nn.functional.layer_norm"
]
] |
joycenerd/bird-images-classification | [
"9430f65ba22523809d62b3d84c3e40d8bc47111f"
] | [
"dataset.py"
] | [
"from torch.utils.data import Dataset, DataLoader\nfrom torchvision import transforms\nfrom PIL import Image\nimport torch.nn as nn\nimport numpy as np\nimport torch\n\nfrom pathlib import Path\nimport collections\nimport numbers\nimport random\nimport os\n\n\nclass BirdDataset(Dataset):\n def __init__(self, root_dir, mode, transform=None):\n self.root_dir = root_dir\n self.x = []\n self.y = []\n self.transform = transform\n\n if mode == \"train\":\n labels = open(os.path.join(self.root_dir, 'new_train_label.txt'))\n\n elif mode == 'eval':\n labels = open(os.path.join(self.root_dir, 'new_eval_label.txt'))\n\n for label in labels:\n label_list = label.split(',')\n self.x.append(label_list[0])\n self.y.append(int(label_list[1]))\n\n def __len__(self):\n return len(self.x)\n\n def __getitem__(self, index):\n image_path = self.x[index]\n image = Image.open(image_path).convert('RGB')\n image = image.copy()\n\n if self.transform:\n image = self.transform(image)\n\n return image, self.y[index]\n\n\ndef Dataloader(dataset, batch_size, shuffle, num_workers):\n data_loader = DataLoader(\n dataset=dataset, batch_size=batch_size, shuffle=shuffle, num_workers=num_workers)\n return data_loader\n\n\ndef _random_colour_space(x):\n output = x.convert(\"HSV\")\n return output\n\n\nclass RandomShift(object):\n def __init__(self, shift):\n self.shift = shift\n\n @staticmethod\n def get_params(shift):\n \"\"\"Get parameters for ``rotate`` for a random rotation.\n Returns:\n sequence: params to be passed to ``rotate`` for random rotation.\n \"\"\"\n hshift, vshift = np.random.uniform(-shift, shift, size=2)\n\n return hshift, vshift\n\n def __call__(self, img):\n hshift, vshift = self.get_params(self.shift)\n\n return img.transform(img.size, Image.AFFINE, (1, 0, hshift, 0, 1, vshift), resample=Image.BICUBIC, fill=1)\n\n\ndef make_dataset(mode, data_root, img_size):\n colour_transform = transforms.Lambda(lambda x: _random_colour_space(x))\n\n transform = [\n transforms.RandomAffine(degrees=30, shear=50, fillcolor=0),\n transforms.RandomGrayscale(p=0.5),\n transforms.RandomHorizontalFlip(p=0.5),\n transforms.RandomPerspective(\n distortion_scale=0.5, p=0.5, fill=0),\n transforms.RandomVerticalFlip(p=0.5),\n transforms.ColorJitter(\n brightness=0.5, contrast=0.5, saturation=0.5, hue=0.5),\n RandomShift(3),\n transforms.RandomApply([colour_transform]),\n ]\n\n data_transform_train = transforms.Compose([\n transforms.RandomResizedCrop(img_size),\n transforms.RandomApply(transform, p=0.5),\n transforms.RandomApply([transforms.RandomRotation(\n (-90, 90), expand=False, center=None)], p=0.5),\n transforms.ToTensor(),\n transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[\n 0.229, 0.224, 0.225])\n ])\n\n data_transform_dev = transforms.Compose([\n transforms.Resize((img_size, img_size)),\n transforms.ToTensor(),\n transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[\n 0.229, 0.224, 0.225])\n ])\n\n data_transform_test = transforms.Compose([\n transforms.Resize((img_size, img_size)),\n transforms.ToTensor(),\n transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])\n ])\n\n if (mode == \"train\"):\n data_set = BirdDataset(data_root, mode, data_transform_train)\n elif (mode == \"eval\"):\n data_set = BirdDataset(data_root, mode, data_transform_dev)\n elif (mode == \"test\"):\n data_set = BirdDataset(data_root, mode, data_transform_test)\n\n return data_set\n"
] | [
[
"numpy.random.uniform",
"torch.utils.data.DataLoader"
]
] |
BB88Lee/mmdetection3d | [
"62aeeadf70ac1229c595e3a4fe09d8a49df808f1"
] | [
"tests/test_voxel_encoders.py"
] | [
"import torch\n\nfrom mmdet3d.models.builder import build_voxel_encoder\n\n\ndef test_pillar_feature_net():\n pillar_feature_net_cfg = dict(\n type='PillarFeatureNet',\n in_channels=5,\n feat_channels=[64],\n with_distance=False,\n voxel_size=(0.2, 0.2, 8),\n point_cloud_range=(-51.2, -51.2, -5.0, 51.2, 51.2, 3.0),\n norm_cfg=dict(type='BN1d', eps=1e-3, momentum=0.01))\n\n pillar_feature_net = build_voxel_encoder(pillar_feature_net_cfg)\n\n features = torch.rand([97297, 20, 5])\n num_voxels = torch.randint(1, 100, [97297])\n coors = torch.randint(0, 100, [97297, 4])\n\n features = pillar_feature_net(features, num_voxels, coors)\n assert features.shape == torch.Size([97297, 64])\n\n\ndef test_hard_simple_VFE():\n hard_simple_VFE_cfg = dict(type='HardSimpleVFE', num_features=5)\n hard_simple_VFE = build_voxel_encoder(hard_simple_VFE_cfg)\n features = torch.rand([240000, 10, 5])\n num_voxels = torch.randint(1, 10, [240000])\n\n outputs = hard_simple_VFE(features, num_voxels, None)\n assert outputs.shape == torch.Size([240000, 5])\n"
] | [
[
"torch.rand",
"torch.Size",
"torch.randint"
]
] |
bhklab/ptl-oar-segmentation | [
"354c3ee7f042a025f74e210a7b8462beac9b727d"
] | [
"utils/models/unetplusplus/model.py"
] | [
"import torch\nfrom torch import nn\nfrom .parts import *\n\n__all__ = [\"VGGUNet\", \"NestedUNet\"]\n\n\nclass VGGUNet(nn.Module):\n def __init__(self, num_classes, input_channels=3, leak_p=0.1, factor=1, **kwargs):\n super().__init__()\n\n nb_filter = [\n 32 // factor,\n 64 // factor,\n 128 // factor,\n 256 // factor,\n 512 // factor,\n ]\n\n self.pool = nn.MaxPool3d(2, 2)\n self.up = nn.Upsample(scale_factor=2, mode=\"trilinear\", align_corners=True)\n\n self.conv0_0 = VGGBlock(input_channels, nb_filter[0], nb_filter[0])\n self.conv1_0 = VGGBlock(nb_filter[0], nb_filter[1], nb_filter[1])\n self.conv2_0 = VGGBlock(nb_filter[1], nb_filter[2], nb_filter[2])\n self.conv3_0 = VGGBlock(nb_filter[2], nb_filter[3], nb_filter[3])\n self.conv4_0 = VGGBlock(nb_filter[3], nb_filter[4], nb_filter[4])\n\n self.conv3_1 = VGGBlock(nb_filter[3] + nb_filter[4], nb_filter[3], nb_filter[3])\n self.conv2_2 = VGGBlock(nb_filter[2] + nb_filter[3], nb_filter[2], nb_filter[2])\n self.conv1_3 = VGGBlock(nb_filter[1] + nb_filter[2], nb_filter[1], nb_filter[1])\n self.conv0_4 = VGGBlock(nb_filter[0] + nb_filter[1], nb_filter[0], nb_filter[0])\n\n self.final = nn.Conv3d(nb_filter[0], num_classes, kernel_size=1)\n\n def forward(self, input):\n x0_0 = self.conv0_0(input)\n x1_0 = self.conv1_0(self.pool(x0_0))\n x2_0 = self.conv2_0(self.pool(x1_0))\n x3_0 = self.conv3_0(self.pool(x2_0))\n x4_0 = self.conv4_0(self.pool(x3_0))\n\n x3_1 = self.conv3_1(torch.cat([x3_0, self.up(x4_0)], 1))\n x2_2 = self.conv2_2(torch.cat([x2_0, self.up(x3_1)], 1))\n x1_3 = self.conv1_3(torch.cat([x1_0, self.up(x2_2)], 1))\n x0_4 = self.conv0_4(torch.cat([x0_0, self.up(x1_3)], 1))\n\n output = self.final(x0_4)\n return output\n\n\nclass NestedUNet(nn.Module):\n def __init__(\n self,\n num_classes,\n input_channels=1,\n deep_supervision=False,\n leak_p=0.1,\n factor=1,\n **kwargs,\n ):\n super().__init__()\n\n nb_filter = [\n 40 // factor,\n 80 // factor,\n 160 // factor,\n 320 // factor,\n 640 // factor,\n ]\n\n self.deep_supervision = deep_supervision\n self.pool = nn.MaxPool3d(2, 2)\n # self.up = nn.Upsample(scale_factor=2, mode=\"trilinear\", align_corners=True)\n self.conv0_0 = VGGBlock(input_channels, nb_filter[0], nb_filter[0])\n self.conv1_0 = VGGBlock(nb_filter[0], nb_filter[1], nb_filter[1])\n self.conv2_0 = VGGBlock(nb_filter[1], nb_filter[2], nb_filter[2])\n self.conv3_0 = VGGBlock(nb_filter[2], nb_filter[3], nb_filter[3])\n self.conv4_0 = VGGBlock(nb_filter[3], nb_filter[4], nb_filter[4])\n self.conv0_1 = VGGBlock(nb_filter[0] + nb_filter[1], nb_filter[0], nb_filter[0])\n self.conv1_1 = VGGBlock(nb_filter[1] + nb_filter[2], nb_filter[1], nb_filter[1])\n self.conv2_1 = VGGBlock(nb_filter[2] + nb_filter[3], nb_filter[2], nb_filter[2])\n self.conv3_1 = VGGBlock(nb_filter[3] + nb_filter[4], nb_filter[3], nb_filter[3])\n\n self.conv0_2 = VGGBlock(\n nb_filter[0] * 2 + nb_filter[1], nb_filter[0], nb_filter[0]\n )\n self.conv1_2 = VGGBlock(\n nb_filter[1] * 2 + nb_filter[2], nb_filter[1], nb_filter[1]\n )\n self.conv2_2 = VGGBlock(\n nb_filter[2] * 2 + nb_filter[3], nb_filter[2], nb_filter[2]\n )\n\n self.conv0_3 = VGGBlock(\n nb_filter[0] * 3 + nb_filter[1], nb_filter[0], nb_filter[0]\n )\n self.conv1_3 = VGGBlock(\n nb_filter[1] * 3 + nb_filter[2], nb_filter[1], nb_filter[1]\n )\n\n self.conv0_4 = VGGBlock(\n nb_filter[0] * 4 + nb_filter[1], nb_filter[0], nb_filter[0]\n )\n\n if self.deep_supervision:\n\n self.final1 = nn.Conv3d(nb_filter[0], num_classes, kernel_size=1)\n self.final2 = nn.Conv3d(nb_filter[0], num_classes, 
kernel_size=1)\n self.final3 = nn.Conv3d(nb_filter[0], num_classes, kernel_size=1)\n self.final4 = nn.Conv3d(nb_filter[0], num_classes, kernel_size=1)\n self.final_bn = nn.BatchNorm3d(num_classes * 4)\n self.final_relu = nn.LeakyReLU(leak_p, inplace=True)\n self.final = nn.Conv3d(num_classes * 4, num_classes, kernel_size=1)\n\n else:\n\n self.final = nn.Conv3d(nb_filter[0], num_classes, kernel_size=1)\n\n self.upconv1_0 = VGGBlockUP(nb_filter[1], nb_filter[1])\n self.upconv2_0 = VGGBlockUP(nb_filter[2], nb_filter[2])\n self.upconv2_1 = VGGBlockUP(nb_filter[1], nb_filter[1])\n self.upconv3_0 = VGGBlockUP(nb_filter[3], nb_filter[3])\n self.upconv3_1 = VGGBlockUP(nb_filter[2], nb_filter[2])\n self.upconv3_2 = VGGBlockUP(nb_filter[1], nb_filter[1])\n self.upconv4_0 = VGGBlockUP(nb_filter[4], nb_filter[4])\n self.upconv4_1 = VGGBlockUP(nb_filter[3], nb_filter[3])\n self.upconv4_2 = VGGBlockUP(nb_filter[2], nb_filter[2])\n self.upconv4_3 = VGGBlockUP(nb_filter[1], nb_filter[1])\n\n def forward(self, input):\n x0_0 = self.conv0_0(input)\n x1_0 = self.conv1_0(self.pool(x0_0))\n x0_1 = self.conv0_1(torch.cat([x0_0, self.upconv1_0(x1_0)], 1))\n\n x2_0 = self.conv2_0(self.pool(x1_0))\n x1_1 = self.conv1_1(torch.cat([x1_0, self.upconv2_0(x2_0)], 1))\n x0_2 = self.conv0_2(torch.cat([x0_0, x0_1, self.upconv2_1(x1_1)], 1))\n\n x3_0 = self.conv3_0(self.pool(x2_0))\n x2_1 = self.conv2_1(torch.cat([x2_0, self.upconv3_0(x3_0)], 1))\n x1_2 = self.conv1_2(torch.cat([x1_0, x1_1, self.upconv3_1(x2_1)], 1))\n x0_3 = self.conv0_3(torch.cat([x0_0, x0_1, x0_2, self.upconv3_2(x1_2)], 1))\n\n x4_0 = self.conv4_0(self.pool(x3_0))\n x3_1 = self.conv3_1(torch.cat([x3_0, self.upconv4_0(x4_0)], 1))\n x2_2 = self.conv2_2(torch.cat([x2_0, x2_1, self.upconv4_1(x3_1)], 1))\n x1_3 = self.conv1_3(torch.cat([x1_0, x1_1, x1_2, self.upconv4_2(x2_2)], 1))\n x0_4 = self.conv0_4(torch.cat([x0_0, x0_1, x0_2, x0_3, self.upconv4_3(x1_3)], 1))\n\n if self.deep_supervision:\n\n output1 = self.final1(x0_1)\n output2 = self.final2(x0_2)\n output3 = self.final3(x0_3)\n output4 = self.final4(x0_4)\n\n # added this...\n final = self.final_relu(\n self.final_bn(torch.cat((output1, output2, output3, output4), 1))\n )\n final = self.final(final)\n\n return final # [output1, output2, output3, output4]\n\n else:\n output = self.final(x0_4)\n return output\n"
] | [
[
"torch.nn.MaxPool3d",
"torch.nn.BatchNorm3d",
"torch.nn.Upsample",
"torch.cat",
"torch.nn.Conv3d",
"torch.nn.LeakyReLU"
]
] |
WendyBaiYunwei/FSL | [
"e20470872d52332efdb1449b4593445c5d94e4fb"
] | [
"cifar/trans_trans.py"
] | [
"import torch\nimport torch.nn as nn\nfrom torch.autograd import Variable\nfrom torch.optim.lr_scheduler import StepLR\nfrom torchvision import datasets\nimport torchvision.transforms as transforms\nfrom self_attention_cv import TransformerEncoder\nimport argparse\nimport math\nimport numpy as np\nfrom torchvision import datasets, models\nimport os\nfrom cifar_generator import CIFAR10\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"-test\",\"--isTest\",type = bool, default=False)\nargs = parser.parse_args()\n\ntorch.manual_seed(0)\n\nisTest = args.isTest\nCHECKTEACHER = False\nEPOCH = 1\nBATCH_SIZE = 1\nDIM = 28\nDIM2 = 6\nHIDDEN = False\nstudentPth = './trans_learnt_student.pth'\nteacherPth = './trans_teacher_test.pth'\nlFunc = nn.CrossEntropyLoss()\ntokenSize = 8\ncropIs = [tokenSize * i for i in range(1, DIM // tokenSize + 1)]\n\nclass Classifier(nn.Module):\n def __init__(self):\n super(Classifier, self).__init__()\n self.hidden = nn.Linear(12 * 192, 100)\n self.out = nn.Linear(100, 10)\n\n def forward(self, x):\n x = x.reshape(len(x), -1)\n x = self.hidden(x)\n x = self.out(x)\n return x\n\ndef getCrops(inputs):\n batch = np.zeros((len(inputs), (DIM ** 2) // (tokenSize ** 2), 3, tokenSize, tokenSize))\n for batchI, input in enumerate(inputs):\n tokenI = 0\n for i in cropIs:\n for j in cropIs:\n token = input[:, i - tokenSize:i, j - tokenSize:j]\n batch[batchI, tokenI, :, :, :] = token\n tokenI += 1\n batch = torch.from_numpy(batch)\n batch = torch.flatten(batch, start_dim = -3)\n return batch\n\ndef weights_init(m):\n classname = m.__class__.__name__\n if classname.find('Linear') != -1:\n m.weight.data.normal_(0, 0.01)\n m.bias.data = torch.ones(m.bias.data.size())\n\ndef get_loss(out, target):\n loss = torch.square(out - target)\n return loss\n\ndef train(trainloader, student, teacher, optimizer, scheduler, device):\n print(\"Training...\")\n student.train()\n\n for i in range(EPOCH):\n epoch_loss = 0\n count = 0\n for inputs, _ in trainloader:\n inputs = getCrops(inputs).float()\n sample_features = student(Variable(inputs).to(device))\n\n baseline_features = teacher(Variable(inputs).to(device)) # 16 * 32 * 7 * 7\n\n optimizer.zero_grad()\n\n loss = get_loss(sample_features, baseline_features)\n\n loss.backward(torch.ones_like(sample_features))\n\n optimizer.step()\n\n epoch_loss += torch.sum(torch.sum(loss)).item()\n if count % 1000 == 0:\n print(count, epoch_loss / (count + 1))\n count += 1\n scheduler.step()\n torch.save(student.state_dict(), studentPth)\n\ndef trainClassifier(trainloader, student, classifier, optimizer, device):\n student.train()\n\n count = 0\n for inputs, label in trainloader:\n count += 1\n if count % 100 == 0:\n print(count)\n inputs = getCrops(inputs).float()\n \n sample_features = student(Variable(inputs).to(device))\n\n # print(sample_features.shape)\n y = classifier(sample_features)\n optimizer.zero_grad()\n\n label = Variable(label).to(device)\n loss = lFunc(y, label)\n loss.backward()\n\n optimizer.step()\n\ndef test(testloader, model, classifier, device):\n print(\"Testing...\")\n model.eval()\n accuracy = 0\n count = 0\n for inputs, labels in testloader:\n inputs = getCrops(inputs).float()\n sample_features = model(Variable(inputs).to(device))\n y = classifier(sample_features)\n pred_y = torch.max(y, 1)[1].data.squeeze()\n labels = Variable(labels).to(device)\n accuracy += (pred_y == labels).sum().item()\n count += 1\n if count % 1000 == 0:\n print(count)\n print('Test Accuracy of the model on the 10000 test images:', accuracy / 10000 * 
100)\n return accuracy\n\ndef main():\n device = torch.device(\"cuda\")\n\n assert os.path.exists(teacherPth)\n teacher = TransformerEncoder(dim=tokenSize ** 2 * 3,blocks=2,heads=8)\n for param in teacher.parameters():\n param.requires_grad = False\n\n teacher.to(device)\n\n student = TransformerEncoder(dim=tokenSize ** 2 * 3,blocks=6,heads=8)\n student.to(device)\n\n classifier = Classifier()\n classifier.apply(weights_init)\n classifier.to(device)\n\n optimizer = torch.optim.Adam([\n #{\"params\": student.hidden.parameters(), \"lr\": 0.001}, ##train classifier\n {\"params\": student.parameters(), \"lr\": 0.00001},\n ])\n\n scheduler = StepLR(optimizer,step_size=10000,gamma=1.1)\n\n transform = transforms.Compose(\n [#transforms.Resize((DIM, DIM)),\n transforms.ToTensor(),\n transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),\n ])\n train_data = CIFAR10(\n root = 'data',\n train = True, \n transform = transform,\n download = False, \n )\n\n trainloader = torch.utils.data.DataLoader(train_data, \n batch_size=BATCH_SIZE, \n shuffle=True, \n num_workers=1)\n\n \n student.load_state_dict(torch.load(teacherPth))\n student.to(device)\n\n # train(trainloader, student, teacher, optimizer, scheduler, device)\n\n test_data = datasets.CIFAR10(\n root = 'data',\n train = False, \n transform = transforms.Compose([transforms.Resize((56, 56)), transforms.ToTensor()]),\n download = True, \n )\n\n testloader = torch.utils.data.DataLoader(test_data, \n batch_size=50, \n shuffle=True, \n num_workers=1)\n \n optimizer = torch.optim.Adam([\n #{\"params\": student.hidden.parameters(), \"lr\": 0.001}, ##train classifier\n {\"params\": student.parameters(), \"lr\": 0.001},\n {\"params\": classifier.hidden.parameters(), \"lr\": 0.01},\n {\"params\": classifier.out.parameters(), \"lr\": 0.005},\n ])\n\n trainloader = torch.utils.data.DataLoader(train_data, \n batch_size=100, \n shuffle=True, \n num_workers=1)\n for i in range(3):\n trainClassifier(trainloader, student, classifier, optimizer, device) ##try freezing encoder\n test(testloader, student, classifier, device)\n \n print('Done.')\n\nif __name__ == '__main__':\n main()"
] | [
[
"torch.ones_like",
"torch.utils.data.DataLoader",
"torch.sum",
"torch.nn.Linear",
"torch.load",
"torch.square",
"torch.flatten",
"torch.manual_seed",
"torch.autograd.Variable",
"torch.nn.CrossEntropyLoss",
"torch.from_numpy",
"torch.max",
"torch.optim.lr_scheduler.StepLR",
"torch.device"
]
] |
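A minimal, self-contained sketch (illustrative, not from the repository above) of the non-overlapping token cropping that getCrops in cifar/trans_trans.py performs. It assumes a channels-first square image whose side is a multiple of the token size, whereas the repo version silently drops the remainder (DIM=28 with tokenSize=8 keeps only a 24x24 region):

    import numpy as np

    def image_to_tokens(img, token_size=8):
        # img: (channels, height, width), height == width, divisible by token_size
        c, h, w = img.shape
        tokens = []
        for i in range(0, h, token_size):
            for j in range(0, w, token_size):
                # flatten each patch to a vector of length c * token_size**2
                tokens.append(img[:, i:i + token_size, j:j + token_size].reshape(-1))
        return np.stack(tokens)  # shape: (num_tokens, c * token_size**2)

    print(image_to_tokens(np.zeros((3, 32, 32), dtype=np.float32)).shape)  # (16, 192)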
k4ntz/mushroom-rl | [
"17c8e9b2a9648a59169f3599c4ef8d259afc39f4"
] | [
"mushroom_rl/algorithms/value/td/q_lambda.py"
] | [
"import numpy as np\n\nfrom mushroom_rl.algorithms.value.td import TD\nfrom mushroom_rl.utils.eligibility_trace import EligibilityTrace\nfrom mushroom_rl.utils.table import Table\n\n\nclass QLambda(TD):\n \"\"\"\n Q(Lambda) algorithm.\n \"Learning from Delayed Rewards\". Watkins C.J.C.H.. 1989.\n\n \"\"\"\n def __init__(self, mdp_info, policy, learning_rate, lambda_coeff,\n trace='replacing'):\n \"\"\"\n Constructor.\n\n Args:\n lambda_coeff (float): eligibility trace coefficient;\n trace (str, 'replacing'): type of eligibility trace to use.\n\n \"\"\"\n Q = Table(mdp_info.size)\n self._lambda = lambda_coeff\n\n self.e = EligibilityTrace(Q.shape, trace)\n self._add_save_attr(\n _lambda='primitive',\n e='pickle'\n )\n\n super().__init__(mdp_info, policy, Q, learning_rate)\n\n def _update(self, state, action, reward, next_state, absorbing):\n q_current = self.Q[state, action]\n\n q_next = np.max(self.Q[next_state, :]) if not absorbing else 0.\n\n delta = reward + self.mdp_info.gamma*q_next - q_current\n self.e.update(state, action)\n\n self.Q.table += self.alpha(state, action) * delta * self.e.table\n self.e.table *= self.mdp_info.gamma * self._lambda\n\n def episode_start(self):\n self.e.reset()\n\n super().episode_start()\n"
] | [
[
"numpy.max"
]
] |
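The _update method in the QLambda entry above combines a one-step TD error with an eligibility trace. A tabular numpy sketch of the same accumulate-then-decay pattern (illustrative constants; the library uses a per-state-action learning rate where this uses a scalar alpha):

    import numpy as np

    gamma, lam, alpha = 0.99, 0.9, 0.1
    Q = np.zeros((5, 2))          # 5 states, 2 actions
    e = np.zeros_like(Q)          # eligibility traces

    def q_lambda_step(s, a, r, s_next, absorbing):
        q_next = 0.0 if absorbing else Q[s_next].max()
        delta = r + gamma * q_next - Q[s, a]   # one-step TD error
        e[s, a] = 1.0                          # replacing trace for the visited pair
        Q[...] += alpha * delta * e            # credit every recently visited pair
        e[...] *= gamma * lam                  # decay all traces

    q_lambda_step(0, 1, 1.0, 2, False)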
manda-creator/probability | [
"5238303f39973b7a365914732fe72f179a86cc97"
] | [
"tensorflow_probability/python/experimental/mcmc/sample_sequential_monte_carlo.py"
] | [
"# Copyright 2020 The TensorFlow Probability Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"Experimental MCMC driver, `sample_sequential_monte_carlo`.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport collections\n\nimport tensorflow.compat.v2 as tf\n\nfrom tensorflow_probability.python.internal import prefer_static as ps\nfrom tensorflow_probability.python.math.generic import reduce_logmeanexp\nfrom tensorflow_probability.python.mcmc import hmc\nfrom tensorflow_probability.python.mcmc import random_walk_metropolis\nfrom tensorflow_probability.python.mcmc import transformed_kernel\nfrom tensorflow_probability.python.mcmc.internal import util as mcmc_util\nfrom tensorflow_probability.python.mcmc.sample_annealed_importance import _find_inner_mh_results\nfrom tensorflow_probability.python.util.seed_stream import SeedStream\n\n\n__all__ = [\n 'sample_sequential_monte_carlo',\n]\n\n\nPRINT_DEBUG = False\nTUNE_STEPS = True\n\nParticleInfo = collections.namedtuple(\n 'ParticleInfo',\n [\n 'accept_prob', # acceptance probability per particle\n 'scalings',\n 'tempered_log_prob',\n 'likelihood_log_prob',\n ])\n\nSMCResults = collections.namedtuple(\n 'SMCResults',\n [\n 'num_steps',\n 'inverse_temperature',\n 'log_marginal_likelihood',\n 'particle_info', # A namedtuple of ParticleInfo\n ])\n\n\ndef _make_tempered_target_log_prob_fn(\n prior_log_prob_fn, likelihood_log_prob_fn, temperatures):\n \"\"\"Helper which creates inner kernel target_log_prob_fn.\"\"\"\n def _tempered_target_log_prob(*args):\n priorlogprob = tf.identity(prior_log_prob_fn(*args),\n name='prior_log_prob')\n loglike = tf.identity(likelihood_log_prob_fn(*args),\n name='likelihood_log_prob')\n return tf.identity(priorlogprob + loglike * temperatures,\n name='tempered_logp')\n return _tempered_target_log_prob\n\n\ndef make_rwmh_kernel_fn(target_log_prob_fn, init_state, scalings, seed=None):\n \"\"\"Generate a Random Walk MH kernel.\"\"\"\n with tf.name_scope('make_rwmh_kernel_fn'):\n seed = SeedStream(seed, salt='make_rwmh_kernel_fn')\n state_std = [\n tf.math.reduce_std(x, axis=0, keepdims=True) for x in init_state\n ]\n step_size = [\n s * ps.cast( # pylint: disable=g-complex-comprehension\n mcmc_util.left_justified_expand_dims_like(scalings, s),\n s.dtype) for s in state_std\n ]\n return random_walk_metropolis.RandomWalkMetropolis(\n target_log_prob_fn,\n new_state_fn=random_walk_metropolis.random_walk_normal_fn(\n scale=step_size),\n seed=seed)\n\n\ndef compute_hmc_step_size(scalings, state_std, num_leapfrog_steps):\n return [\n s / ps.cast(num_leapfrog_steps, s.dtype) * ps.cast( # pylint: disable=g-complex-comprehension\n mcmc_util.left_justified_expand_dims_like(scalings, s),\n s.dtype) for s in state_std\n ]\n\n\ndef gen_make_transform_hmc_kernel_fn(unconstraining_bijectors,\n num_leapfrog_steps=10):\n \"\"\"Generate a transformed hmc kernel.\"\"\"\n\n 
def make_transform_hmc_kernel_fn(\n target_log_prob_fn,\n init_state,\n scalings,\n seed=None):\n \"\"\"Generate a transform hmc kernel.\"\"\"\n\n with tf.name_scope('make_transformed_hmc_kernel_fn'):\n seed = SeedStream(seed, salt='make_transformed_hmc_kernel_fn')\n state_std = [\n bij.inverse(\n tf.math.reduce_std(bij.forward(x), axis=0, keepdims=True))\n for x, bij in zip(init_state, unconstraining_bijectors)\n ]\n step_size = compute_hmc_step_size(scalings, state_std, num_leapfrog_steps)\n return transformed_kernel.TransformedTransitionKernel(\n hmc.HamiltonianMonteCarlo(\n target_log_prob_fn=target_log_prob_fn,\n num_leapfrog_steps=num_leapfrog_steps,\n step_size=step_size,\n seed=seed),\n unconstraining_bijectors)\n\n return make_transform_hmc_kernel_fn\n\n\ndef gen_make_hmc_kernel_fn(num_leapfrog_steps=10):\n \"\"\"Generate a transformed hmc kernel.\"\"\"\n def make_hmc_kernel_fn(\n target_log_prob_fn,\n init_state,\n scalings,\n seed=None):\n \"\"\"Generate a hmc without transformation kernel.\"\"\"\n\n with tf.name_scope('make_hmc_kernel_fn'):\n seed = SeedStream(seed, salt='make_hmc_kernel_fn')\n state_std = [\n tf.math.reduce_std(x, axis=0, keepdims=True)\n for x in init_state\n ]\n step_size = compute_hmc_step_size(scalings, state_std, num_leapfrog_steps)\n return hmc.HamiltonianMonteCarlo(\n target_log_prob_fn=target_log_prob_fn,\n num_leapfrog_steps=num_leapfrog_steps,\n step_size=step_size,\n seed=seed)\n\n return make_hmc_kernel_fn\n\n# Generate a default `make_hmc_kernel_fn`\ndefault_make_hmc_kernel_fn = gen_make_hmc_kernel_fn()\n\n\n# TODO(b/152412213) Experitment to improve recommendation on static parmaeters\ndef sample_sequential_monte_carlo(\n prior_log_prob_fn,\n likelihood_log_prob_fn,\n current_state,\n max_num_steps=25,\n max_stage=100,\n make_kernel_fn=make_rwmh_kernel_fn,\n optimal_accept=0.234,\n target_accept_prob=0.99,\n ess_threshold_ratio=0.5,\n parallel_iterations=10,\n seed=None,\n name=None):\n \"\"\"Runs Sequential Monte Carlo to sample from the posterior distribution.\n\n This function uses an MCMC transition operator (e.g., Hamiltonian Monte Carlo)\n to sample from a series of distributions that slowly interpolates between\n an initial 'prior' distribution:\n\n `exp(prior_log_prob_fn(x))`\n\n and the target 'posterior' distribution:\n\n `exp(prior_log_prob_fn(x) + target_log_prob_fn(x))`,\n\n by mutating a collection of MC samples (i.e., particles). The approach is also\n known as Particle Filter in some literature.\n\n Args:\n prior_log_prob_fn: Python callable that returns the log density of the\n prior distribution.\n likelihood_log_prob_fn: Python callable which takes an argument like\n `current_state` (or `*current_state` if it's a list) and returns its\n (possibly unnormalized) log-density under the likelihood distribution.\n current_state: `Tensor` or Python `list` of `Tensor`s representing the\n current state(s) of the Markov chain(s). The first `r` dimensions index\n independent chains, `r = tf.rank(target_log_prob_fn(*current_state))`.\n max_num_steps: The maximum number of kernel transition steps in one mutation\n of the MC samples. Note that the actual number of steps in one mutation is\n tuned during sampling and likely lower than the max_num_steps.\n max_stage: Integer number of the stage for increasing the temperature\n from 0 to 1.\n make_kernel_fn: Python `callable` which returns a `TransitionKernel`-like\n object. Must take one argument representing the `TransitionKernel`'s\n `target_log_prob_fn`. 
The `target_log_prob_fn` argument represents the\n `TransitionKernel`'s target log distribution. Note:\n `sample_annealed_importance_chain` creates a new `target_log_prob_fn`\n which is an interpolation between the supplied `target_log_prob_fn` and\n `proposal_log_prob_fn`; it is this interpolated function which is used as\n an argument to `make_kernel_fn`.\n optimal_accept: Optimal acceptance ratio for a Transitional Kernel. Default\n to 0.234 for Random Walk Metropolis kernel.\n target_accept_prob: Target acceptance probability at the end of one mutation\n step.\n ess_threshold_ratio: Target ratio for effective sample size.\n parallel_iterations: The number of iterations allowed to run in parallel.\n It must be a positive integer. See `tf.while_loop` for more details.\n seed: Python integer or TFP seedstream to seed the random number generator.\n name: Python `str` name prefixed to Ops created by this function.\n Default value: `None` (i.e., 'sample_annealed_importance_chain').\n\n Returns:\n n_stage: Number of the mutation stage SMC ran.\n final_state: `Tensor` or Python `list` of `Tensor`s representing the\n final state(s) of the Markov chain(s). The output are the posterior\n samples.\n final_kernel_results: `collections.namedtuple` of internal calculations used\n to advance the chain.\n\n \"\"\"\n\n with tf.name_scope(name or 'sample_sequential_monte_carlo'):\n seed_stream = SeedStream(seed, salt='smc_seed')\n\n unwrap_state_list = not tf.nest.is_nested(current_state)\n if unwrap_state_list:\n current_state = [current_state]\n current_state = [\n tf.convert_to_tensor(s, dtype_hint=tf.float32) for s in current_state\n ]\n\n num_replica = ps.size0(current_state[0])\n effective_sample_size_threshold = tf.cast(\n num_replica * ess_threshold_ratio, tf.int32)\n\n def preprocess_state(init_state):\n \"\"\"Initial preprocessing at Stage 0.\"\"\"\n dimension = ps.reduce_sum([\n ps.reduce_prod(ps.shape(x)[1:]) for x in init_state])\n likelihood_log_prob = likelihood_log_prob_fn(*init_state)\n\n # Default to the optimal for normal distributed targets.\n # TODO(b/152412213): Revisit this tuning.\n scale_start = (\n tf.constant(2.38 ** 2, dtype=likelihood_log_prob.dtype) /\n tf.constant(dimension, dtype=likelihood_log_prob.dtype))\n # TODO(b/152412213): Enable batch of batches style by using non-scalar\n # inverse_temperature\n inverse_temperature = tf.zeros([], dtype=likelihood_log_prob.dtype)\n scalings = ps.ones_like(likelihood_log_prob) * ps.minimum(scale_start, 1.)\n kernel = make_kernel_fn(\n _make_tempered_target_log_prob_fn(\n prior_log_prob_fn,\n likelihood_log_prob_fn,\n inverse_temperature),\n init_state,\n scalings,\n seed=seed_stream())\n pkr = kernel.bootstrap_results(current_state)\n mh_results = _find_inner_mh_results(pkr)\n\n particle_info = ParticleInfo(\n accept_prob=ps.ones_like(likelihood_log_prob),\n scalings=scalings,\n tempered_log_prob=mh_results.accepted_results.target_log_prob,\n likelihood_log_prob=likelihood_log_prob,\n )\n\n return SMCResults(\n num_steps=tf.convert_to_tensor(\n max_num_steps, dtype=tf.int32, name='num_steps'),\n inverse_temperature=inverse_temperature,\n log_marginal_likelihood=tf.constant(\n 0., dtype=likelihood_log_prob.dtype),\n particle_info=particle_info\n )\n\n def update_weights_temperature(inverse_temperature, likelihood_log_prob):\n \"\"\"Calculate the next inverse temperature and update weights.\"\"\"\n\n likelihood_diff = likelihood_log_prob - tf.reduce_max(likelihood_log_prob)\n\n def _body_fn(new_beta, upper_beta, lower_beta, eff_size, 
log_weights):\n \"\"\"One iteration of the temperature and weight update.\"\"\"\n new_beta = (lower_beta + upper_beta) / 2.0\n log_weights = (new_beta - inverse_temperature) * likelihood_diff\n log_weights_norm = (log_weights -\n tf.math.reduce_logsumexp(log_weights))\n eff_size = tf.cast(\n tf.exp(-tf.math.reduce_logsumexp(2 * log_weights_norm)), tf.int32)\n upper_beta = tf.where(\n eff_size < effective_sample_size_threshold,\n new_beta, upper_beta)\n lower_beta = tf.where(\n eff_size < effective_sample_size_threshold,\n lower_beta, new_beta)\n return new_beta, upper_beta, lower_beta, eff_size, log_weights\n\n (new_beta, upper_beta, lower_beta, eff_size, log_weights) = tf.while_loop( # pylint: disable=unused-variable\n cond=lambda new_beta, upper_beta, lower_beta, eff_size, *_: # pylint: disable=g-long-lambda\n (upper_beta - lower_beta > 1e-6) &\n (eff_size != effective_sample_size_threshold),\n body=_body_fn,\n loop_vars=(\n tf.zeros_like(inverse_temperature),\n tf.cast(2.0, inverse_temperature.dtype),\n inverse_temperature,\n tf.cast(0, tf.int32),\n tf.zeros_like(likelihood_diff)),\n parallel_iterations=parallel_iterations\n )\n\n log_weights = tf.where(new_beta < 1.,\n log_weights,\n (1. - inverse_temperature) * likelihood_diff)\n marginal_loglike_ = reduce_logmeanexp(\n (new_beta - inverse_temperature) * likelihood_log_prob)\n\n return marginal_loglike_, tf.clip_by_value(new_beta, 0., 1.), log_weights\n\n def resample(log_weights, current_state, particle_info, seed=None):\n \"\"\"Resample particles based on importance weights.\"\"\"\n with tf.name_scope('resample_particles'):\n seed = SeedStream(seed, salt='resample_particles')\n resampling_indexes = tf.random.categorical(\n [log_weights], ps.reduce_prod(*ps.shape(log_weights)), seed=seed())\n next_state = tf.nest.map_structure(\n lambda x: tf.reshape(tf.gather(x, resampling_indexes), ps.shape(x)),\n current_state)\n next_particle_info = tf.nest.map_structure(\n lambda x: tf.reshape(tf.gather(x, resampling_indexes), ps.shape(x)),\n particle_info)\n\n return next_state, next_particle_info\n\n def tuning(num_steps, scalings, accept_prob):\n \"\"\"Tune scaling and/or num_steps based on the acceptance rate.\"\"\"\n num_proposed = num_replica * num_steps\n accept_prob = tf.cast(accept_prob, dtype=scalings.dtype)\n avg_scaling = tf.exp(tf.math.log(tf.reduce_mean(scalings))\n + (tf.reduce_mean(accept_prob) - optimal_accept))\n scalings = 0.5 * (\n avg_scaling +\n tf.exp(tf.math.log(scalings) +\n (accept_prob - optimal_accept))\n )\n\n if TUNE_STEPS:\n avg_accept = tf.math.maximum(\n 1.0 / tf.cast(num_proposed, dtype=accept_prob.dtype),\n tf.reduce_mean(accept_prob))\n num_steps = tf.clip_by_value(\n tf.cast(\n tf.math.log1p(\n -tf.cast(target_accept_prob, dtype=avg_accept.dtype)) /\n tf.math.log1p(-avg_accept),\n dtype=num_steps.dtype), 2, max_num_steps)\n\n return num_steps, scalings\n\n def mutate(\n current_state,\n scalings,\n num_steps,\n inverse_temperature):\n \"\"\"Mutate the state using a Transition kernel.\"\"\"\n with tf.name_scope('mutate_states'):\n kernel = make_kernel_fn(\n _make_tempered_target_log_prob_fn(\n prior_log_prob_fn,\n likelihood_log_prob_fn,\n inverse_temperature),\n current_state,\n scalings,\n seed=seed_stream())\n pkr = kernel.bootstrap_results(current_state)\n mh_results = _find_inner_mh_results(pkr)\n\n def mutate_onestep(i, state, pkr, accept_count):\n next_state, next_kernel_results = kernel.one_step(state, pkr)\n mh_results = _find_inner_mh_results(pkr)\n # TODO(b/152412213) Cumulate log_acceptance_ratio 
instead.\n accept_count += tf.cast(mh_results.is_accepted, accept_count.dtype)\n return i+1, next_state, next_kernel_results, accept_count\n\n (\n _,\n next_state,\n next_kernel_results,\n accept_count\n ) = tf.while_loop(\n cond=lambda i, *args: i < num_steps,\n body=mutate_onestep,\n loop_vars=(\n tf.zeros([], dtype=tf.int32),\n current_state,\n pkr,\n tf.zeros_like(mh_results.is_accepted, tf.float32)),\n parallel_iterations=parallel_iterations\n )\n next_mh_results = _find_inner_mh_results(next_kernel_results)\n\n return (next_state,\n accept_count / tf.cast(num_steps + 1, accept_count.dtype),\n next_mh_results.accepted_results.target_log_prob)\n\n pkr = preprocess_state(current_state)\n # Run once\n new_marginal, new_inv_temperature, log_weights = update_weights_temperature(\n pkr.inverse_temperature,\n pkr.particle_info.likelihood_log_prob)\n if PRINT_DEBUG:\n tf.print(\n 'Stage:', 0,\n 'Beta:', new_inv_temperature,\n 'n_steps:', pkr.num_steps,\n 'accept:', tf.reduce_mean(\n pkr.particle_info.accept_prob),\n 'scaling:', tf.reduce_mean(pkr.particle_info.scalings)\n )\n resampled_state, resampled_particle_info = resample(\n log_weights, current_state, pkr.particle_info, seed_stream())\n next_state, acceptance_rate, tempered_log_prob = mutate(\n resampled_state,\n resampled_particle_info.scalings,\n pkr.num_steps,\n new_inv_temperature)\n next_pkr = SMCResults(\n num_steps=pkr.num_steps,\n inverse_temperature=new_inv_temperature,\n log_marginal_likelihood=pkr.log_marginal_likelihood + new_marginal,\n particle_info=ParticleInfo(\n accept_prob=acceptance_rate,\n scalings=resampled_particle_info.scalings,\n tempered_log_prob=tempered_log_prob,\n likelihood_log_prob=likelihood_log_prob_fn(*next_state),\n ))\n\n # Stage > 0\n def smc_body_fn(stage, state, smc_kernel_result):\n \"\"\"Run one stage of SMC with constant temperature.\"\"\"\n (\n new_marginal,\n new_inv_temperature,\n log_weights\n ) = update_weights_temperature(\n smc_kernel_result.inverse_temperature,\n smc_kernel_result.particle_info.likelihood_log_prob)\n # TODO(b/152412213) Use a tf.scan to better collect debug info.\n if PRINT_DEBUG:\n tf.print(\n 'Stage:', stage,\n 'Beta:', new_inv_temperature,\n 'n_steps:', smc_kernel_result.num_steps,\n 'accept:', tf.reduce_mean(\n smc_kernel_result.particle_info.accept_prob),\n 'scaling:', tf.reduce_mean(smc_kernel_result.particle_info.scalings)\n )\n resampled_state, resampled_particle_info = resample(\n log_weights, state, smc_kernel_result.particle_info, seed_stream())\n num_steps, scalings = tuning(\n smc_kernel_result.num_steps,\n resampled_particle_info.scalings,\n resampled_particle_info.accept_prob)\n next_state, acceptance_rate, tempered_log_prob = mutate(\n resampled_state, scalings, num_steps, new_inv_temperature)\n next_pkr = SMCResults(\n num_steps=num_steps,\n inverse_temperature=new_inv_temperature,\n log_marginal_likelihood=(new_marginal +\n smc_kernel_result.log_marginal_likelihood),\n particle_info=ParticleInfo(\n accept_prob=acceptance_rate,\n scalings=scalings,\n tempered_log_prob=tempered_log_prob,\n likelihood_log_prob=likelihood_log_prob_fn(*next_state),\n ))\n return stage + 1, next_state, next_pkr\n\n (\n n_stage,\n final_state,\n final_kernel_results\n ) = tf.while_loop(\n cond=lambda i, state, pkr: (i < max_stage) & ( # pylint: disable=g-long-lambda\n pkr.inverse_temperature < 1.\n ),\n body=smc_body_fn,\n loop_vars=(\n tf.ones([], dtype=tf.int32),\n next_state,\n next_pkr),\n parallel_iterations=parallel_iterations\n )\n if unwrap_state_list:\n final_state = 
final_state[0]\n return n_stage, final_state, final_kernel_results\n"
] | [
[
"tensorflow.compat.v2.math.log",
"tensorflow.compat.v2.convert_to_tensor",
"tensorflow.compat.v2.zeros",
"tensorflow.compat.v2.reduce_max",
"tensorflow.compat.v2.reduce_mean",
"tensorflow.compat.v2.math.reduce_logsumexp",
"tensorflow.compat.v2.math.log1p",
"tensorflow.compat.v2.identity",
"tensorflow.compat.v2.where",
"tensorflow.compat.v2.clip_by_value",
"tensorflow.compat.v2.ones",
"tensorflow.compat.v2.gather",
"tensorflow.compat.v2.name_scope",
"tensorflow.compat.v2.nest.is_nested",
"tensorflow.compat.v2.constant",
"tensorflow.compat.v2.cast",
"tensorflow.compat.v2.math.reduce_std",
"tensorflow.compat.v2.zeros_like"
]
] |
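The _make_tempered_target_log_prob_fn helper in the TFP entry above implements the standard SMC tempering interpolation, log p_beta(x) = log p_prior(x) + beta * log L(x), with beta driven from 0 to 1 over the stages. A tiny plain-Python sketch of that interpolation (toy log-densities, normalizing constants dropped):

    def make_tempered_log_prob(prior_log_prob, likelihood_log_prob, beta):
        # beta = 0.0 recovers the prior; beta = 1.0 the (unnormalized) posterior
        def tempered(x):
            return prior_log_prob(x) + beta * likelihood_log_prob(x)
        return tempered

    prior = lambda x: -0.5 * x * x              # N(0, 1) up to a constant
    like = lambda x: -0.5 * (x - 2.0) ** 2      # unit-variance likelihood centered at 2
    print(make_tempered_log_prob(prior, like, 0.5)(1.0))  # -0.75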
raj713335/AI-102 | [
"15f4b61dbcbf84abf25ce2f967afc0d52795e9f8"
] | [
"15-computer-vision/Python/image-analysis/image-analysis.py"
] | [
"from dotenv import load_dotenv\nimport os\nfrom array import array\nfrom PIL import Image, ImageDraw\nimport sys\nimport time\nfrom matplotlib import pyplot as plt\nimport numpy as np\n\n\n# import namespaces\nfrom azure.cognitiveservices.vision.computervision import ComputerVisionClient\nfrom azure.cognitiveservices.vision.computervision.models import VisualFeatureTypes\nfrom msrest.authentication import CognitiveServicesCredentials\n\n\ndef main():\n global cv_client\n\n try:\n # Get Configuration Settings\n load_dotenv()\n cog_endpoint = os.getenv('COG_SERVICE_ENDPOINT')\n cog_key = os.getenv('COG_SERVICE_KEY')\n\n # Get image\n image_file = 'images/street.jpg'\n if len(sys.argv) > 1:\n image_file = sys.argv[1]\n\n # Authenticate Computer Vision client\n credential = CognitiveServicesCredentials(cog_key) \n cv_client = ComputerVisionClient(cog_endpoint, credential)\n\n\n # Analyze image\n AnalyzeImage(image_file)\n\n # Generate thumbnail\n GetThumbnail(image_file)\n\n except Exception as ex:\n print(ex)\n\ndef AnalyzeImage(image_file):\n print('Analyzing', image_file)\n\n # Specify features to be retrieved\n features = [VisualFeatureTypes.description,\n VisualFeatureTypes.tags,\n VisualFeatureTypes.categories,\n VisualFeatureTypes.brands,\n VisualFeatureTypes.objects,\n VisualFeatureTypes.adult]\n \n \n # Get image analysis\n with open(image_file, mode=\"rb\") as image_data:\n analysis = cv_client.analyze_image_in_stream(image_data , features)\n\n # Get image description\n for caption in analysis.description.captions:\n print(\"Description: '{}' (confidence: {:.2f}%)\".format(caption.text, caption.confidence * 100))\n\n # Get image tags\n if (len(analysis.tags) > 0):\n print(\"Tags: \")\n for tag in analysis.tags:\n print(\" -'{}' (confidence: {:.2f}%)\".format(tag.name, tag.confidence * 100))\n\n # Get image categories (including celebrities and landmarks)\n if (len(analysis.categories) > 0):\n print(\"Categories:\")\n landmarks = []\n celebrities = []\n for category in analysis.categories:\n # Print the category\n print(\" -'{}' (confidence: {:.2f}%)\".format(category.name, category.score * 100))\n if category.detail:\n # Get landmarks in this category\n if category.detail.landmarks:\n for landmark in category.detail.landmarks:\n if landmark not in landmarks:\n landmarks.append(landmark)\n\n # Get celebrities in this category\n if category.detail.celebrities:\n for celebrity in category.detail.celebrities:\n if celebrity not in celebrities:\n celebrities.append(celebrity)\n\n # If there were landmarks, list them\n if len(landmarks) > 0:\n print(\"Landmarks:\")\n for landmark in landmarks:\n print(\" -'{}' (confidence: {:.2f}%)\".format(landmark.name, landmark.confidence * 100))\n\n # If there were celebrities, list them\n if len(celebrities) > 0:\n print(\"Celebrities:\")\n for celebrity in celebrities:\n print(\" -'{}' (confidence: {:.2f}%)\".format(celebrity.name, celebrity.confidence * 100))\n\n # Get brands in the image\n if (len(analysis.brands) > 0):\n print(\"Brands: \")\n for brand in analysis.brands:\n print(\" -'{}' (confidence: {:.2f}%)\".format(brand.name, brand.confidence * 100))\n\n # Get objects in the image\n if len(analysis.objects) > 0:\n print(\"Objects in image:\")\n\n # Prepare image for drawing\n fig = plt.figure(figsize=(8, 8))\n plt.axis('off')\n image = Image.open(image_file)\n draw = ImageDraw.Draw(image)\n color = 'cyan'\n for detected_object in analysis.objects:\n # Print object name\n print(\" -{} (confidence: 
{:.2f}%)\".format(detected_object.object_property, detected_object.confidence * 100))\n \n # Draw object bounding box\n r = detected_object.rectangle\n bounding_box = ((r.x, r.y), (r.x + r.w, r.y + r.h))\n draw.rectangle(bounding_box, outline=color, width=3)\n plt.annotate(detected_object.object_property,(r.x, r.y), backgroundcolor=color)\n # Save annotated image\n plt.imshow(image)\n outputfile = 'objects.jpg'\n fig.savefig(outputfile)\n print(' Results saved in', outputfile)\n\n # Get moderation ratings\n ratings = 'Ratings:\\n -Adult: {}\\n -Racy: {}\\n -Gore: {}'.format(analysis.adult.is_adult_content,\n analysis.adult.is_racy_content,\n analysis.adult.is_gory_content)\n print(ratings)\n\n \n\ndef GetThumbnail(image_file):\n print('Generating thumbnail')\n\n # Generate a thumbnail\n with open(image_file, mode=\"rb\") as image_data:\n # Get thumbnail data\n thumbnail_stream = cv_client.generate_thumbnail_in_stream(100, 100, image_data, True)\n\n # Save thumbnail image\n thumbnail_file_name = 'thumbnail.png'\n with open(thumbnail_file_name, \"wb\") as thumbnail_file:\n for chunk in thumbnail_stream:\n thumbnail_file.write(chunk)\n\n print('Thumbnail saved in.', thumbnail_file_name)\n\n\nif __name__ == \"__main__\":\n main()\n"
] | [
[
"matplotlib.pyplot.figure",
"matplotlib.pyplot.axis",
"matplotlib.pyplot.annotate",
"matplotlib.pyplot.imshow"
]
] |
coltonbh/QCElemental | [
"b75fb72d7f45c8b605ae1a54773d4a8be6655752"
] | [
"qcelemental/datum.py"
] | [
"\"\"\"\nDatum Object Model\n\"\"\"\n\nfrom decimal import Decimal\nfrom typing import Any, Dict, Optional\n\nimport numpy as np\nfrom pydantic import BaseModel, validator\n\n\nclass Datum(BaseModel):\n r\"\"\"Facilitates the storage of quantum chemical results by labeling them with basic metadata.\n\n Attributes\n ----------\n label : str\n Official label for `data`, often qcvar. May contain spaces.\n units : str\n ASCII, LaTeX-like representation of units, without square brackets.\n data : float or decimal.Decimal or numpy.ndarray\n Value for `label`.\n comment : str\n Additional notes.\n doi : str\n Literature citation or definition DOI link.\n glossary : str\n Extended description or definition.\n numeric : bool\n Whether `data` is numeric. Pass `True` to disable validating `data` as float/Decimal/np.ndarray.\n\n \"\"\"\n\n numeric: bool\n label: str\n units: str\n data: Any\n comment: str = \"\"\n doi: Optional[str] = None\n glossary: str = \"\"\n\n class Config:\n extra = \"forbid\"\n allow_mutation = False\n json_encoders = {np.ndarray: lambda v: v.flatten().tolist(), complex: lambda v: (v.real, v.imag)}\n\n def __init__(self, label, units, data, *, comment=None, doi=None, glossary=None, numeric=True):\n kwargs = {\"label\": label, \"units\": units, \"data\": data, \"numeric\": numeric}\n if comment is not None:\n kwargs[\"comment\"] = comment\n if doi is not None:\n kwargs[\"doi\"] = doi\n if glossary is not None:\n kwargs[\"glossary\"] = glossary\n\n super().__init__(**kwargs)\n\n @validator(\"data\")\n def must_be_numerical(cls, v, values, **kwargs):\n try:\n 1.0 * v\n except TypeError:\n try:\n Decimal(\"1.0\") * v\n except TypeError:\n if values[\"numeric\"]:\n raise ValueError(f\"Datum data should be float, Decimal, or np.ndarray, not {type(v)}.\")\n else:\n values[\"numeric\"] = True\n else:\n values[\"numeric\"] = True\n\n return v\n\n def __str__(self, label=\"\"):\n width = 40\n text = [\"-\" * width, \"{:^{width}}\".format(\"Datum \" + self.label, width=width)]\n if label:\n text.append(\"{:^{width}}\".format(label, width=width))\n text.append(\"-\" * width)\n text.append(\"Data: {}\".format(self.data))\n text.append(\"Units: [{}]\".format(self.units))\n text.append(\"doi: {}\".format(self.doi))\n text.append(\"Comment: {}\".format(self.comment))\n text.append(\"Glossary: {}\".format(self.glossary))\n text.append(\"-\" * width)\n return \"\\n\".join(text)\n\n def dict(self, *args, **kwargs):\n return super().dict(*args, **{**kwargs, **{\"exclude_unset\": True}})\n\n def to_units(self, units=None):\n from .physical_constants import constants\n\n to_unit = self.units if units is None else units\n factor = constants.conversion_factor(self.units, to_unit)\n\n if isinstance(self.data, Decimal):\n return factor * float(self.data)\n else:\n return factor * self.data\n\n\ndef print_variables(qcvars: Dict[str, \"Datum\"]) -> str:\n r\"\"\"Form a printable representation of qcvariables.\n\n Parameters\n ----------\n qcvars\n Group of Datum objects to print.\n\n Returns\n -------\n str\n Printable string representation of label, data, and unit in Datum-s.\n\n \"\"\"\n text = [\"\\n Variable Map:\", \" ----------------------------------------------------------------------------\"]\n\n if len(qcvars) == 0:\n text.append(\" (none)\")\n return \"\\n\".join(text)\n\n largest_key = max(len(k) for k in qcvars) + 2 # for quotation marks\n largest_characteristic = 8\n for k, v in qcvars.items():\n try:\n exp = int(str(v.data).split(\"E\")[1])\n except IndexError:\n pass\n else:\n 
largest_characteristic = max(exp, largest_characteristic)\n\n for k, qca in sorted(qcvars.items()):\n # if k != qca.lbl:\n # raise ValidationError('Huh? {} != {}'.format(k, qca.label))\n\n if isinstance(qca.data, np.ndarray):\n data = np.array_str(qca.data, max_line_width=120, precision=8, suppress_small=True)\n data = \"\\n\".join(\" \" + ln for ln in data.splitlines())\n text.append(\n \"\"\" {:{keywidth}} => {:{width}} [{}]\"\"\".format(\n '\"' + k + '\"', \"\", qca.units, keywidth=largest_key, width=largest_characteristic + 14\n )\n )\n text.append(data)\n elif isinstance(qca.data, Decimal):\n text.append(\n \"\"\" {:{keywidth}} => {:{width}} [{}]\"\"\".format(\n '\"' + k + '\"', qca.data, qca.units, keywidth=largest_key, width=largest_characteristic + 14\n )\n )\n elif not qca.numeric:\n text.append(\n \"\"\" {:{keywidth}} => {:>{width}} [{}]\"\"\".format(\n '\"' + k + '\"', str(qca.data), qca.units, keywidth=largest_key, width=largest_characteristic + 14\n )\n )\n else:\n text.append(\n \"\"\" {:{keywidth}} => {:{width}.{prec}f} [{}]\"\"\".format(\n '\"' + k + '\"', qca.data, qca.units, keywidth=largest_key, width=largest_characteristic + 14, prec=12\n )\n )\n\n text.append(\"\")\n return \"\\n\".join(text)\n"
] | [
[
"numpy.array_str"
]
] |
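A short usage sketch for the Datum model above (the label, units, value, and qcvar key are placeholders; print_variables formats a dict of Datum objects for logging, as defined in the code in this row):

    from qcelemental.datum import Datum, print_variables

    d = Datum("total energy", "hartree", -76.026760737285)  # numeric data is validated
    print(d)                                      # block-formatted summary via __str__
    print(print_variables({"SCF TOTAL ENERGY": d}))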
jhardenberg/EnsClus | [
"c7591aa39d649fc4321ac4db219f241aabcaf295"
] | [
"clus/sel_season_area.py"
] | [
"# Standard packages\nfrom netCDF4 import Dataset, num2date\nfrom datetime import datetime\nimport numpy as np\nimport pandas as pd\n\n#____________Selecting a season (DJF,DJFM,NDJFM,JJA)\ndef sel_season(var,dates,season,timestep):\n #----------------------------------------------------------------------------------------\n #print('____________________________________________________________________________________________________________________')\n #print('Selecting only {0} data'.format(season))\n #----------------------------------------------------------------------------------------\n dates_pdh = pd.to_datetime(dates)\n print(dates_pdh)\n\n mesi_short = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']\n\n if season=='DJF': #ONLY DEC-JAN-FEB\n m=[12,1,2]\n mask=(dates_pdh.month==12) | (dates_pdh.month==1) | (dates_pdh.month==2)\n elif season=='DJFM': #ONLY DEC-JAN-FEB-MAR\n m=[12,1,2,3]\n mask=(dates_pdh.month==12) | (dates_pdh.month==1) | (dates_pdh.month==2) | (dates_pdh.month==3)\n elif season=='NDJFM': #ONLY NOV-DEC-JAN-FEB-MAR\n m=[11,12,1,2,3]\n mask=(dates_pdh.month==11) | (dates_pdh.month==12) | (dates_pdh.month==1) | (dates_pdh.month==2) | (dates_pdh.month==3)\n elif season=='JJA': #ONLY JUN-JUL-AUG\n m=[6,7,8]\n mask=(dates_pdh.month==6) | (dates_pdh.month==7) | (dates_pdh.month==8)\n elif season=='MAM': #ONLY MAR-APR-MAY\n m=[3,4,5]\n mask=(dates_pdh.month==6) | (dates_pdh.month==7) | (dates_pdh.month==8)\n elif season=='SON': #ONLY SEP-OCT-NOV\n m=[9,10,11]\n mask=(dates_pdh.month==6) | (dates_pdh.month==7) | (dates_pdh.month==8)\n elif season in mesi_short:\n print(mesi_short.index(season)+1)\n mask = (dates_pdh.month == mesi_short.index(season)+1)\n else:\n print('season is not one of the following: DJF, DJFM, NDJFM, JJA, MAM, SON')\n #print(np.sum(mask))\n var_season = var[mask,:,:]\n dates_season=dates[mask]\n\n if var_season.ndim == 2:\n var_season = var_season[np.newaxis, :]\n\n cut = False\n if timestep == 'month':\n # count number of months\n n_months = var_season.shape[0]\n if n_months%len(season) != 0:\n cut = True\n elif timestep == 'day':\n cut = True\n\n if season in mesi_short:\n cut = False\n\n if cut:\n if (12 in m) or (1 in m):\n #REMOVING THE FIRST MONTHS (for the first year) because there is no previuos december\n print(np.where(dates_season==datetime(dates_pdh.year[0], m[0], dates_pdh.day[0], dates_pdh.hour[0], dates_pdh.minute[0]) ))\n start=int(np.where(dates_season==datetime(dates_pdh.year[0], m[0], dates_pdh.day[0], dates_pdh.hour[0], dates_pdh.minute[0]) )[0])\n #REMOVING THE LAST MONTHS (for the last year) because there is no following january\n last_sea = dates_season==datetime(dates_pdh.year[-1], m[0], dates_pdh.day[0], dates_pdh.hour[0], dates_pdh.minute[0])\n\n if np.sum(last_sea) > 0:\n end = np.argmax(last_sea)\n else:\n end = -1\n\n var_season=var_season[start:end,:,:]\n dates_season=dates_season[start:end]\n\n return var_season,dates_season\n\n#____________Selecting only [latS-latN, lonW-lonE] box region\ndef sel_area(lat,lon,var,area):\n '''\n GOAL\n Selecting the area of interest\n USAGE\n var_area, lat_area, lon_area =sel_area(lat,lon,var,area)\n area can be 'EAT', 'PNA', 'NH'\n '''\n if area=='EAT':\n printarea='Euro-Atlantic'\n latN = 87.5\n latS = 30.0\n lonW =-80.0 #280\n lonE = 40.0 #40\n # lat and lon are extracted from the netcdf file, assumed to be 1D\n #If 0<lon<360, convert to -180<lon<180\n if lon.min() >= 0:\n lon_new=lon-180\n var_roll=np.roll(var,int(len(lon)/2),axis=-1)\n else:\n 
var_roll=var\n lon_new=lon\n\n elif area=='PNA':\n printarea='Pacific North American'\n latN = 87.5\n latS = 30.0\n lonW = 140.0\n lonE = 280.0\n # lat and lon are extracted from the netcdf file, assumed to be 1D\n #If -180<lon<180, convert to 0<lon<360\n if lon.min() < 0:\n lon_new=lon+180\n var_roll=np.roll(var,int(len(lon)/2),axis=-1)\n else:\n var_roll=var\n lon_new=lon\n\n elif area=='NH':\n printarea='Northern Hemisphere'\n latN = 90.0\n latS = 0.0\n lonW = lon.min()\n lonE = lon.max()\n var_roll=var\n lon_new=lon\n\n elif area=='Eu':\n printarea='Europe'\n latN = 72.0\n latS = 27.0\n lonW = -22.0\n lonE = 45.0\n # lat and lon are extracted from the netcdf file, assumed to be 1D\n #If 0<lon<360, convert to -180<lon<180\n if lon.min() >= 0:\n lon_new=lon-180\n var_roll=np.roll(var,int(len(lon)/2),axis=-1)\n else:\n var_roll=var\n lon_new=lon\n elif area=='Med':\n printarea='Mediterranean'\n latN = 50.0\n latS = 25.0\n lonW = -10.0\n lonE = 40.0\n # lat and lon are extracted from the netcdf file, assumed to be 1D\n #If 0<lon<360, convert to -180<lon<180\n if lon.min() >= 0:\n lon_new=lon-180\n print(var.shape)\n var_roll=np.roll(var,int(len(lon)/2),axis=-1)\n else:\n var_roll=var\n lon_new=lon\n\n #----------------------------------------------------------------------------------------\n #print('____________________________________________________________________________________________________________________')\n #print('Selecting the area of interest: {0}'.format(printarea))\n #----------------------------------------------------------------------------------------\n #-------------------------Selecting only an area\n\n latidx = (lat >= latS) & (lat <= latN)\n lonidx = (lon_new >= lonW) & (lon_new <= lonE)\n\n print(var_roll.shape, len(latidx), len(lonidx))\n if var.ndim == 3:\n var_area = var_roll[:, latidx][..., lonidx]\n elif var.ndim == 2:\n var_area = var_roll[latidx, ...][..., lonidx]\n else:\n raise ValueError('Variable has {} dimensions, should have 2 or 3.'.format(var.ndim))\n\n #print('Grid dimension of the selected area ---> {0}'.format(var_area[0].shape))\n\n return var_area,lat[latidx],lon_new[lonidx]\n"
] | [
[
"numpy.sum",
"pandas.to_datetime",
"numpy.argmax"
]
] |
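A hedged usage sketch for sel_season above (synthetic monthly data; the module path is assumed from the file path listed in this row):

    import numpy as np
    import pandas as pd
    from clus.sel_season_area import sel_season

    dates = np.array(pd.date_range("2000-01-01", periods=48, freq="MS").to_pydatetime())
    var = np.random.rand(48, 3, 4)                   # (time, lat, lon)
    var_djf, dates_djf = sel_season(var, dates, "DJF", "month")
    print(var_djf.shape)                             # (12, 3, 4): DJF months only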
pengfei-ma/Google-Play-Store-Subjects-Analysis | [
"65d224eef9c0b6a2714f329edcfd5a4c32f6a2dd"
] | [
"Gradient Descent finding parameters.py"
] | [
"import sys\nfrom operator import add\nfrom pyspark.sql import SparkSession\nfrom pyspark import SparkContext\nimport pyspark\nfrom pyspark.ml.linalg import Vectors\nimport numpy as np\nfrom sklearn.linear_model import LinearRegression\nfrom pyspark.sql.types import *\nfrom pyspark.sql import functions as func\nfrom pyspark.sql.functions import *\nfrom pyspark.sql import SQLContext\nimport matplotlib.pyplot as plt \nimport time\nfrom pandas import Series,DataFrame\nimport pandas as pd\nimport re\nfrom collections import Counter\nfrom sklearn.linear_model import LinearRegression\nfrom pyspark.ml.classification import LogisticRegression\nfrom pyspark.ml.evaluation import MulticlassClassificationEvaluator\nfrom pyspark.ml.classification import LinearSVC\n\n\n# building functions\n\ndef isfloat(value):\n try:\n float(value)\n return True\n except:\n return False\n \ndef correctRows(p):\n if isfloat(p[3]) and isfloat(p[4]) and isfloat(p[6]) and isfloat(p[7]) and isfloat(p[9]):\n return p\n \ndef to_list(a):\n return [a]\n\ndef addToList(x, y):\n x.append(y)\n return x\n\ndef extend(x,y):\n x.extend(y)\n return x\n\nspark = SparkSession.builder.master(\"local[*]\").getOrCreate()\nsc = SparkContext.getOrCreate()\nsqlContext = SQLContext(sc)\n\n# load data set\nlines2 = sc.textFile(\"Google-Playstore.csv\")\n\n\nprint(\"##### Finding Simple Linear Regression Equation #####\")\n\n# data pre-processing\ncorrectLine = lines2.map(lambda x: x.split(','))\ncleaned = correctLine.filter(correctRows)\n\nmax_install = cleaned.map(lambda p: (float(p[7])))\nrating = cleaned.map(lambda p: (float(p[3])))\n\n# apply linear regression\nx = np.array(max_install.collect())\ny = np.array(rating.collect())\n\nX = np.stack([x], axis = 1)\n\nreg = LinearRegression(fit_intercept=True).fit(X, y)\n\nprint(\"The m (coefficient) =\",reg.coef_)\nprint(\"The b (y-intercept) =\",reg.intercept_)\nprint(\"The equation is: y = \"+str(reg.coef_[0])+\"X + \"+str(reg.intercept_))\n\nprint(\"##### Finding the parameters using gradient descent #####\")\n\nstart1 = time.time()\ndf = np.stack([y, x], axis=1)\ndff = map(lambda x: (float(x[0]), Vectors.dense(x[1:])), df)\nmydf = spark.createDataFrame(dff, schema=[\"Money\", \"Distance\"])\nmyRDD=mydf.rdd.map(tuple).map(lambda x: (float(x[0]), np.array(x[1]) ))\n\nlearningRate = 0.00001\nnum_iteration = 100\nsize = float(len(y))\nbeta = np.array([0.1])\ncosts = []\n\nfor i in range(num_iteration):\n gradientCost=myRDD.map(lambda x: (x[1], (x[0] - x[1] * beta) ))\\\n .map(lambda x: (x[0]*x[1], x[1]**2 )).reduce(lambda x, y: (x[0] +y[0], x[1]+y[1] ))\n cost= gradientCost[1]\n gradient=(-1/float(size))* gradientCost[0]\n print(i, \"Beta\", beta, \" Cost\", cost)\n beta = beta - learningRate * gradient\n costs.append(cost[0])\n\nend1 = time.time()\n\nprint(f\"Computation time of BGD is {(end1 - start1)/60} Minutes\")\n\n# making plot\nxValues = [i for i in range(len(costs))]\nplt.plot(xValues, costs, 'o', markersize=2)\nplt.xlabel(\"Number of Iteration\")\nplt.ylabel(\"Cost\")\nplt.title(\"Cost with the number of iteration\")\nplt.show()\n"
] | [
[
"sklearn.linear_model.LinearRegression",
"matplotlib.pyplot.title",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylabel",
"numpy.stack",
"matplotlib.pyplot.plot",
"numpy.array",
"matplotlib.pyplot.xlabel"
]
] |
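The Spark loop above iterates plain batch gradient descent for a one-parameter, no-intercept least-squares fit: beta <- beta - lr * (-1/n) * sum(x_i * (y_i - beta * x_i)). A driver-side numpy sketch of the same update (toy data and an illustrative learning rate, not the job's settings):

    import numpy as np

    rng = np.random.default_rng(0)
    x = rng.random(1000)
    y = 3.0 * x + 0.1 * rng.standard_normal(1000)

    beta, lr = 0.1, 0.5
    for _ in range(200):
        gradient = -(x * (y - beta * x)).mean()   # d/dbeta of 0.5 * mean squared residual
        beta -= lr * gradient
    print(beta)   # close to the no-intercept least-squares slope (~3.0)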
Ahmed-elshorbagy/dog_web_app | [
"058b79328d3ed16a77c312f39b5b150eb6423612"
] | [
"web/dog/dog/views.py"
] | [
"# import the necessary packages \r\nfrom django.shortcuts import render\r\nfrom django.views.decorators.csrf import csrf_exempt\r\nfrom django.http import JsonResponse,HttpResponse\r\nimport numpy as np\r\nimport urllib\r\nimport json\r\nimport cv2\r\nimport os\r\nfrom .face import dog_ear\r\nfrom glob import glob\r\nfrom .forms import ImgForm,UrlForm\r\nimport base64\r\nimport requests\r\nfrom keras.applications.resnet50 import ResNet50, preprocess_input, decode_predictions\r\nfrom keras.applications.inception_v3 import InceptionV3, preprocess_input\r\nfrom keras.preprocessing import image \r\nfrom keras.models import load_model\r\n\r\nimport io\r\nimport tensorflow as tf\r\nfrom PIL import Image\r\ngraph = tf.get_default_graph()\r\n# define ResNet50 model\r\ndog_names = [item[9:-1] for item in sorted(glob(\"test/*/\"))]\r\nResNet50_model = ResNet50(weights='imagenet')\r\nInceptionV3_model=load_model('dog/saved_models/weights.best.InceptionV3.hdf5') \r\n# define the path to the face detector\r\nFACE_DETECTOR_PATH = r\"{base_path}/haarcascades/haarcascade_frontalface_alt.xml\".format(\r\n\tbase_path=os.path.abspath(os.path.dirname(__file__)))\r\n\r\ndef main(request):\r\n \tcon={'form1':ImgForm,'form2':UrlForm}\r\n \treturn render(request,'main.html',con)\r\n@csrf_exempt\r\ndef detect(request):\r\n\t# initialize the data dictionary to be returned by the request\r\n\tglobal graph\r\n\twith graph.as_default():\r\n\t\tdata = {\"success\": False}\r\n\r\n\t\t# check to see if this is a post request\r\n\t\tif request.method == \"POST\":\r\n\t\t\t# check to see if an image was uploaded\r\n\t\t\tif request.FILES.get(\"image\", None) is not None:\r\n\t\t\t\t# grab the uploaded image\r\n\t\t\t\timage,dog = _grab_image(stream=request.FILES[\"image\"])\r\n\t\t\t\tad=request.POST.get(\"overlay\", None)\r\n\t\t\t# otherwise, assume that a URL was passed in\r\n\t\t\telse:\r\n\t\t\t\t# grab the URL from the request\r\n\t\t\t\turl = request.POST.get(\"url\", None)\r\n\t\t\t\tad=request.POST.get(\"overlay\", None)\r\n\t\t\t\t# if the URL is None, then return an error\r\n\t\t\t\tif url is None:\r\n\t\t\t\t\tdata[\"error\"] = \"No URL provided.\"\r\n\t\t\t\t\treturn JsonResponse(data)\r\n\r\n\t\t\t\t# load the image and convert\r\n\t\t\t\timage,dog = _grab_image(url=url)\r\n\r\n\t\t\t# convert the image to grayscale, load the face cascade detector,\r\n\t\t\t# and detect faces in the image\r\n\t\t\t\r\n\t\t\timg = cv2.cvtColor(dog_ear(image,ad), cv2.COLOR_BGR2RGB)\r\n\t\t\timg2 = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\r\n\t\t\timage = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\r\n\t\t\tdetector = cv2.CascadeClassifier(FACE_DETECTOR_PATH)\r\n\t\t\trects = detector.detectMultiScale(image)\r\n\r\n\t\t\t# construct a list of bounding boxes from the detection\r\n\t\t\trects = [(int(x), int(y), int(x + w), int(y + h)) for (x, y, w, h) in rects]\r\n\t\t\t\r\n\t\t\tresponse=imgenc(img,rects)\r\n\t\t\t# if len(rects)<2:\r\n\t\t\t# \tbreed = InceptionV3_predict_breed(img2)\r\n\t\t\t\r\n\t\t\t# update the data dictionary with the faces detected\r\n\t\t\tdata.update({\"num_faces\": len(rects), \"faces\": rects, \"success\": True,\"dog\":str(dog),\"img\":response,'breed':\"breed\"})\r\n\t\t\r\n\t\treturn render(request,'main.html',data)\t\r\n\t\t# return a JSON response\r\n\t\t# return JsonResponse(data)\r\n\t\t\r\n\r\ndef _grab_image(path=None, stream=None, url=None):\r\n\t# if the path is not None, then load the image from disk\r\n\tif path is not None:\r\n\t\timage = cv2.imread(path)\r\n\r\n\t# otherwise, the image does 
not reside on disk\r\n\telse:\t\r\n\t\t# if the URL is not None, then download the image\r\n\t\tif url is not None:\r\n\t\t\tresp = urllib.request.urlopen(url)\r\n\t\t\tdata = resp.read()\r\n\t\t\t\r\n\t\t# if the stream is not None, then the image has been uploaded\r\n\t\telif stream is not None:\r\n\t\t\tdata = stream.read()\r\n\t\t\t\r\n\t\t# convert the image to a NumPy array and then read it into\r\n\t\t# OpenCV format\r\n\t\timage = np.asarray(bytearray(data), dtype=\"uint8\")\r\n\t\timage = cv2.imdecode(image, cv2.IMREAD_COLOR)\r\n\t\t\r\n\t\timg = preprocess_input(path_to_tensor(image))\r\n\t\tprediction = np.argmax(ResNet50_model.predict(img))\r\n\t\t#boolean variable of presence of dog in image or not\r\n\t\tdog=((prediction <= 268) & (prediction >= 151)) \r\n\t\t\r\n\t# return the image,and bool dog\r\n\treturn image,dog\r\n\r\ndef imgenc(image,rects):\r\n\t# for (startX, startY, endX, endY) in rects:\r\n\t# \tcv2.rectangle(image, (startX, startY), (endX, endY), (0, 255, 0), 2)\r\n\r\n\t# r = 300.0 / image.shape[1]\r\n\t# dim = (300, int(image.shape[0] * r))\r\n\t\r\n\t# # perform the actual resizing of the image and show it\r\n\t# resized = cv2.resize(image, dim, interpolation = cv2.INTER_AREA)\r\n\tCDF=Image.fromarray(image)\r\n\tin_mem_file=io.BytesIO()\t\t\t\r\n\tCDF.save(in_mem_file, format = \"PNG\")\r\n\t# reset file pointer to start\r\n\tin_mem_file.seek(0)\r\n\timg_bytes = in_mem_file.read()\r\n\r\n\tbase64_encoded_result_bytes = base64.b64encode(img_bytes)\r\n\tbase64_encoded_result_str = base64_encoded_result_bytes.decode('ascii')\r\n\treturn \"data:image/png;base64,{0} \".format(base64_encoded_result_str)\r\n\r\ndef path_to_tensor(image):\r\n\t# resize the shape of image\r\n\timage2 =cv2.resize(image, (224,224), interpolation = cv2.INTER_AREA)\r\n\t# change the data type to float to be accepted\r\n\timage2 = image2.astype(np.float32)\r\n\t# convert 3D tensor to 4D tensor with shape (1, 224, 224, 3) and return 4D tensor\r\n\treturn np.expand_dims(image2, axis=0)\t\r\ndef extract_InceptionV3(tensor):\r\n return InceptionV3(weights='imagenet', include_top=False).predict(preprocess_input(tensor))\r\ndef InceptionV3_predict_breed(image):\r\n # extract bottleneck features\r\n bottleneck_feature = extract_InceptionV3(path_to_tensor(image))\r\n # obtain predicted vector\r\n predicted_vector = InceptionV3_model.predict(bottleneck_feature)\r\n # return dog breed that is predicted by the model\r\n return dog_names[np.argmax(predicted_vector)]\r\n\r\n"
] | [
[
"numpy.expand_dims",
"numpy.argmax",
"tensorflow.get_default_graph"
]
] |
itouchz/TRepNet | [
"5fa9f273dc57b778ac0a94fffcb926de333ecc37"
] | [
"OnlyWavenet.py"
] | [
"import numpy as np\nimport pandas as pd\nimport tensorflow as tf\nimport os\nimport warnings\nimport time\n\nwarnings.filterwarnings('ignore') \n\nfrom tensorflow import keras\nfrom sklearn.preprocessing import RobustScaler, Normalizer, StandardScaler\nfrom sklearn.model_selection import train_test_split\nfrom datasets import load_data, random_benchmark, list_datasets\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.svm import SVC\nfrom sklearn.neural_network import MLPClassifier\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.metrics import f1_score, accuracy_score\n\nnp.random.seed(7)\ntf.random.set_seed(7)\n\nfrom tensorflow.keras.layers import Conv1D, LSTM, GRU, Bidirectional, MaxPool1D, RepeatVector, Dense, Attention, Flatten, Dot\nfrom tensorflow.keras.layers import BatchNormalization, Input, Activation, Dropout, Lambda, Multiply, Add, Concatenate, Conv2DTranspose\nfrom tensorflow.keras.models import Model\n\ndef get_output_dim(original_dim):\n if original_dim // 1.3 >= 512:\n return 512\n elif original_dim // 1.3 <= 128:\n return 128\n else:\n return int(original_dim // 1.3)\n\ndef TRepNet(n_steps, n_features, activation='elu'):\n codings_size = get_output_dim(n_steps * n_features)\n dilation_rates = [2**i for i in range(10)] * 1\n \n skips = []\n\n encoder_input = Input(shape=[n_steps, n_features])\n # Convolution\n conv = encoder_input\n for dilation_rate in dilation_rates:\n conv = keras.layers.GaussianNoise(0.01)(conv)\n conv = Conv1D(16, 1, activation=activation, padding='same')(conv)\n \n conv_filter = Conv1D(filters=128, kernel_size=3, padding='causal', activation=activation, dilation_rate=dilation_rate)(conv)\n conv_filter = Dropout(0.1)(conv_filter)\n \n conv_gate = Conv1D(filters=128, kernel_size=3, padding='causal', activation=activation, dilation_rate=dilation_rate)(conv)\n conv_gate = Dropout(0.1)(conv_gate)\n \n mul = Multiply()([Activation('tanh')(conv_filter), Activation('sigmoid')(conv_gate)])\n skip = Conv1D(16, 1, padding='same', activation=activation)(mul)\n \n conv = Add()([conv, skip])\n \n skips.append(skip)\n \n conv = Activation(activation)(Add()(skips))\n conv = Conv1D(16, 1, activation=activation, padding='same')(conv)\n conv = MaxPool1D(pool_size=2)(conv)\n conv = Flatten()(conv)\n\n \n z = Dense(codings_size, kernel_initializer='lecun_normal', activation='selu')(conv)\n \n encoder_output = Dense(codings_size, activation='sigmoid')(z)\n encoder = Model(inputs=[encoder_input], outputs=[encoder_output])\n\n # Decoder\n decoder_input = Input(shape=[codings_size])\n noise_input = keras.layers.GaussianNoise(0.01)(decoder_input)\n dconv = keras.layers.Reshape([codings_size, 1, 1])(noise_input)\n dconv = Conv2DTranspose(filters=32, kernel_size=3, activation=activation)(dconv)\n dconv = Conv2DTranspose(filters=16, kernel_size=1, activation=activation)(dconv)\n dconv = Flatten()(dconv)\n x = Dense(n_steps * n_features)(dconv)\n decoder_output = keras.layers.Reshape([n_steps, n_features])(x)\n decoder = Model(inputs=[decoder_input], outputs=[decoder_output])\n\n return encoder, decoder"
] | [
[
"tensorflow.keras.layers.Flatten",
"tensorflow.keras.layers.Dropout",
"tensorflow.keras.layers.Conv2DTranspose",
"tensorflow.keras.layers.Reshape",
"tensorflow.keras.layers.Conv1D",
"numpy.random.seed",
"tensorflow.keras.layers.Activation",
"tensorflow.keras.models.Model",
"tensorflow.keras.layers.Multiply",
"tensorflow.keras.layers.Add",
"tensorflow.keras.layers.Dense",
"tensorflow.keras.layers.GaussianNoise",
"tensorflow.random.set_seed",
"tensorflow.keras.layers.MaxPool1D",
"tensorflow.keras.layers.Input"
]
] |
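TRepNet in OnlyWavenet.py returns an (encoder, decoder) pair rather than a compiled model. A minimal usage sketch, assuming TensorFlow 2.x is installed and the file (plus the local datasets helper it imports at module level) is on the path; batch and sequence sizes here are made up:

import numpy as np
import tensorflow as tf
from OnlyWavenet import TRepNet  # assumes the file above is importable

n_steps, n_features = 128, 3
encoder, decoder = TRepNet(n_steps, n_features)

x = np.random.rand(4, n_steps, n_features).astype("float32")
codes = encoder.predict(x)      # shape (4, codings_size)
recon = decoder.predict(codes)  # shape (4, n_steps, n_features)
assert recon.shape == x.shape

# Chaining the two gives a trainable autoencoder:
autoencoder = tf.keras.Model(encoder.input, decoder(encoder.output))
autoencoder.compile(optimizer="adam", loss="mse")
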
daniilgaltsev/ImageNet-Training | [
"9ca1d26cde07782398c7f366d5bf510c9e988236"
] | [
"imagenet_training/models/simple_cnn.py"
] | [
"\"\"\"A simple cnn model.\"\"\"\n\n\nimport argparse\nfrom collections import OrderedDict\nfrom typing import Any, Dict, Optional\n\nimport torch\nimport torch.nn as nn\n\n\nclass SimpleCNN(nn.Module):\n \"\"\"A simple CNN model.\n\n Args:\n data_config: a dictionary containing information about data.\n args (optional): args from argparser.\n \"\"\"\n\n def __init__(\n self,\n data_config: Dict[str, Any],\n args: Optional[argparse.Namespace] = None,\n ):\n super().__init__()\n\n if args is None:\n self.args = {}\n else:\n self.args = vars(args)\n\n num_classes = len(data_config[\"mapping\"])\n\n self.cnn = nn.Sequential(OrderedDict([\n (\"conv1\", nn.Conv2d(3, 32, kernel_size=3, padding=1, bias=False)),\n (\"relu1\", nn.ReLU(inplace=True)),\n (\"bn1\", nn.BatchNorm2d(32)),\n (\"maxpool1\", nn.MaxPool2d(kernel_size=2, stride=2)),\n (\"conv2\", nn.Conv2d(32, 64, kernel_size=3, bias=False)),\n (\"relu2\", nn.ReLU(inplace=True)),\n (\"bn2\", nn.BatchNorm2d(64)),\n (\"maxpool2\", nn.MaxPool2d(kernel_size=2, stride=2)),\n (\"conv3\", nn.Conv2d(64, 128, kernel_size=3, bias=False)),\n (\"relu3\", nn.ReLU(inplace=True)),\n (\"bn3\", nn.BatchNorm2d(128))\n ]))\n self.head = nn.Sequential(OrderedDict([\n (\"avgpool\", nn.AdaptiveAvgPool2d(1)),\n (\"flatten\", nn.Flatten()),\n (\"fc1\", nn.Linear(128, 128)),\n (\"relu1\", nn.ReLU(inplace=True)),\n (\"fc2\", nn.Linear(128, num_classes))\n ]))\n\n def forward(self, x: torch.Tensor) -> torch.Tensor:\n \"\"\"Performs forward operation on a given tensor.\"\"\"\n x = self.cnn(x)\n x = self.head(x)\n return x\n\n @staticmethod\n def add_to_argparse(\n parser: argparse.ArgumentParser,\n main_parser: argparse.ArgumentParser # pylint: disable=unused-argument\n ) -> argparse.ArgumentParser:\n \"\"\"Adds possible agrs to the given parser.\"\"\"\n return parser\n"
] | [
[
"torch.nn.MaxPool2d",
"torch.nn.BatchNorm2d",
"torch.nn.Linear",
"torch.nn.Flatten",
"torch.nn.AdaptiveAvgPool2d",
"torch.nn.Conv2d",
"torch.nn.ReLU"
]
] |
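SimpleCNN reads only len(data_config["mapping"]) for the class count, and the AdaptiveAvgPool2d head makes it input-size agnostic. A minimal shape-check sketch, assuming the package above is importable (otherwise paste the class definition first):

import torch
from imagenet_training.models.simple_cnn import SimpleCNN

data_config = {"mapping": list(range(10))}  # 10 classes
model = SimpleCNN(data_config)

x = torch.randn(2, 3, 64, 64)  # batch of 2 RGB images
logits = model(x)
print(logits.shape)  # torch.Size([2, 10])
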
sgjholt/SpecMod | [
"453c77c1fa51c220470e2aa4d92ec432360bfc9f"
] | [
"specmod/Models.py"
] | [
"# MODELS contains a set of functions for minimisation to seismic spectra.\n# It can be modified as appropriate.\nimport numpy as np\nfrom . import config as cfg\n\nMODS = [\"BRUNE\", \"BOATWRIGHT\"]\n\n# UTIL FUNCS\ndef which_model(mod):\n if mod in MODS:\n if mod == \"BRUNE\":\n return BRUNE_MODEL\n if mod == \"BOATWRIGHT\":\n return BOATWRIGHT_MODEL\n else:\n raise ValueError(f\"Model {mod} not available. Choose from {MODS}.\")\n\n\ndef scale_to_motion(motion, f):\n if motion.lower() == 'displacement':\n return 0\n\n elif motion.lower() == 'velocity':\n return np.log10(2*np.pi*f)\n\n elif motion.lower() == 'acceleration':\n return np.log10(np.power(2*np.pi*f,2))\n else:\n return None\n\n# DEFAULT PARAMS FOR SOURCE MODE:\nBRUNE_MODEL = (1, 2) # omega squared\nBOATWRIGHT_MODEL = (2, 2) # omega cubed\n#\nMODEL = which_model(cfg.MODELS[\"MODEL\"])\nMOTION = cfg.MODELS[\"MOTION\"]\n\n\n\n# MINIMISATION FUNCTIONS\n## Source model\ndef source(f, llpsp, fc):\n gam, n = MODEL\n loga = llpsp - (1/gam)*np.log10((1+(f/fc)**(gam*n)))\n return loga\n\n# freq independent t-star attenuation model\ndef t_star(f, ts):\n return -(np.pi*f*ts / np.log(10))\n\n# freq dependent t-star attenuation\ndef t_star_freq(f, ts, a):\n return -(np.pi*(f**(1-a))*ts / np.log(10))\n\n# combine models\ndef simple_model(f, llpsp, fc, ts):\n global MOTION\n \"\"\"\n Simple attenuated source model to minimise.\n \"\"\"\n return source(f, llpsp, fc) + t_star(f, ts) + scale_to_motion(MOTION, f)\n\ndef simple_model_fdep(f, llpsp, fc, ts, a):\n \"\"\"\n Simple model but with frequency dependent attenuation.\n \"\"\"\n return source(f, llpsp, fc) + t_star_freq(f, ts, a) + scale_to_motion(MOTION, f)\n"
] | [
[
"numpy.log",
"numpy.log10",
"numpy.power"
]
] |
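Models.py resolves MODEL and MOTION from specmod's config at import time, which makes quick experiments awkward. A standalone restatement of the omega-squared source shape it implements (Brune: gamma=1, n=2), with made-up spectral parameters:

import numpy as np

def source(f, llpsp, fc, gam=1, n=2):
    # log10 amplitude: flat at llpsp below fc, falling off as f**-n above it
    return llpsp - (1 / gam) * np.log10(1 + (f / fc) ** (gam * n))

f = np.logspace(-1, 2, 200)  # 0.1 to 100 Hz
loga = source(f, llpsp=2.0, fc=5.0)
# Well above fc the log-log slope is -n, i.e. -2 decades per decade.
print(loga[0], loga[-1])
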
egonina/svm | [
"397f6fa8d29e8299478586e88864cae095fb08c1"
] | [
"test/svm_test.py"
] | [
"import unittest2 as unittest\nimport copy\nimport numpy as np\nfrom svm_specializer.svm import * \n\nclass BasicTests(unittest.TestCase):\n def test_init(self):\n svm = SVM()\n self.assertIsNotNone(svm)\n\nclass SyntheticDataTests(unittest.TestCase):\n def read_data(self, in_file_name):\n feats = open(in_file_name, \"r\")\n labels = []\n points = {}\n self.D = 0\n first_line = 1\n\n for line in feats:\n vals = line.split(\" \")\n l = vals[0]\n labels.append(l)\n idx = 0\n for v in vals[1:]:\n if first_line:\n self.D += 1\n f = v.split(\":\")[1].strip('\\n')\n if idx not in points.keys():\n points[idx] = []\n points[idx].append(f)\n idx += 1\n if first_line:\n first_line = 0\n\n self.N = len(labels)\n return_labels = np.array(labels, dtype=np.float32)\n points_list = [] \n\n for idx in points.keys():\n points_list.append(points[idx]) \n\n return_points = np.array(points_list, dtype=np.float32)\n return_points = return_points.reshape(self.N, self.D)\n\n return return_labels, return_points\n\n def setUp(self):\n # read in training data\n self.t1_labels, self.t1_data = self.read_data(\"test/sample_data/svm_train_1.svm\")\n self.t2_labels, self.t2_data = self.read_data(\"test/sample_data/svm_train_2.svm\")\n\n # read in training data\n self.c_labels, self.c_data = self.read_data(\"test/sample_data/svm_classify.svm\")\n\n def test_training_and_classify_once(self):\n svm = SVM()\n svm.train(self.t1_data, self.t1_labels, \"linear\")\n svm.classify(self.c_data, self.c_labels)\n\n def test_training_once(self):\n svm = SVM()\n a = svm.train(self.t2_data, self.t2_labels, \"linear\")\n\n def test_training_kernels(self):\n svm = SVM()\n a = svm.train(self.t1_data, self.t1_labels, \"linear\")\n a = svm.train(self.t2_data, self.t2_labels, \"gaussian\")\n\n def test_training_and_classify_twice(self):\n svm = SVM()\n svm.train(self.t1_data, self.t1_labels, \"linear\")\n svm.classify(self.c_data, self.c_labels)\n\n svm1 = SVM()\n svm1.train(self.t2_data, self.t2_labels, \"linear\")\n svm1.classify(self.c_data, self.c_labels)\n\nif __name__ == '__main__':\n unittest.main()\n"
] | [
[
"numpy.array"
]
] |
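read_data above parses a libsvm-style line format ("label idx:value ...") by accumulating per-column lists. A more compact sketch of the same parsing, run on inline sample lines instead of the test files:

import numpy as np

def read_libsvm_dense(lines):
    # Each line: "<label> <idx>:<value> <idx>:<value> ..."
    labels, rows = [], []
    for line in lines:
        vals = line.split()
        labels.append(float(vals[0]))
        rows.append([float(v.split(":")[1]) for v in vals[1:]])
    return np.array(labels, dtype=np.float32), np.array(rows, dtype=np.float32)

sample = ["1 1:0.5 2:1.0", "-1 1:0.1 2:0.9"]
y, X = read_libsvm_dense(sample)
print(y, X.shape)  # [ 1. -1.] (2, 2)
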
runxuanjiang/DeepRL | [
"f5c47c52d4db50577fbada17b09d739da3da67cc"
] | [
"deep_rl/agent/PPO_recurrent_agent_recurrence.py"
] | [
"#######################################################################\n# Copyright (C) 2017 Shangtong Zhang([email protected]) #\n# Permission given to modify the code as long as you keep this #\n# declaration at the top #\n#######################################################################\n\n# TODO:\n# - plot average rewards in matplotlib\n# - look at when entropy loss is recorded\n\n\nfrom ..network import *\nfrom ..component import *\nfrom .BaseAgent import *\n\nfrom torch_geometric.data import Data, Batch\nfrom torch_geometric.transforms import Distance\n\nimport numpy\nimport numpy.random\n\nimport pdb\n\ndevice = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n\n\nclass PPORecurrentAgentRecurrence(BaseAgent):\n def __init__(self, config):\n BaseAgent.__init__(self, config)\n self.config = config #config file, contains hyperparameters and other info\n self.task = config.task_fn() #gym environment wrapper\n self.hidden_size = config.hidden_size\n\n if config.network: #nnet used\n self.network = config.network\n else:\n self.network = config.network_fn()\n self.network.to(device)\n\n self.optimizer = config.optimizer_fn(self.network.parameters()) #optimization function\n self.total_steps = 0\n self.states = self.task.reset()\n self.h0 = torch.zeros(self.config.num_workers, self.hidden_size).to(device) #lstm hidden states\n self.c0 = torch.zeros(self.config.num_workers, self.hidden_size).to(device) #lstm cell states\n self.recurrence = self.config.recurrence\n print(\"running PPO, tag is \" + config.tag)\n\n def step(self):\n config = self.config\n storage = Storage(config.rollout_length) \n\n states = self.states\n\n ##############################################################################################\n #Sampling Loop\n ##############################################################################################\n for _ in range(config.rollout_length):\n\n #add recurrent states (lstm hidden and lstm cell states) to storage\n storage.add({\n 'h0' : self.h0.to(device),\n 'c0' : self.c0.to(device)\n })\n\n #run the neural net once to get prediction\n prediction, (self.h0, self.c0) = self.network(states, (self.h0, self.c0))\n self.h0 = self.h0.to(device)\n self.c0 = self.c0.to(device)\n\n #step the environment with the action determined by the prediction\n next_states, rewards, terminals, info = self.task.step(to_np(prediction['a']))\n self.record_online_return(info)\n rewards = config.reward_normalizer(rewards)\n\n #add everything to storage\n storage.add(prediction)\n storage.add({\n 's' : tensor(states).to(device),\n 'r': tensor(rewards).unsqueeze(-1).to(device),\n 'm': tensor(1 - terminals).unsqueeze(-1).to(device)\n })\n states = next_states\n \n #zero out lstm recurrent state if any of the environments finish\n for i, done in enumerate(terminals):\n if done:\n self.h0[i] = torch.zeros(self.hidden_size)\n self.c0[i] = torch.zeros(self.hidden_size)\n\n self.total_steps += config.num_workers\n\n self.states = states\n\n prediction, _ = self.network(states, (self.h0, self.c0))\n\n storage.add(prediction)\n storage.placeholder()\n\n\n #############################################################################################\n #Calculate advantages and returns and set up for training\n #############################################################################################\n\n advantages = tensor(np.zeros((config.num_workers, 1))).to(device)\n returns = prediction['v'].detach()\n for i in reversed(range(config.rollout_length)):\n returns = 
storage.r[i] + config.discount * storage.m[i] * returns\n if not config.use_gae:\n advantages = returns - storage.v[i].detach()\n else:\n td_error = storage.r[i] + config.discount * storage.m[i] * storage.v[i + 1] - storage.v[i]\n advantages = advantages * config.gae_tau * config.discount * storage.m[i] + td_error\n storage.adv[i] = advantages.detach()\n storage.ret[i] = returns.detach()\n\n storage.a = storage.a[:self.config.rollout_length]\n storage.log_pi_a = storage.log_pi_a[:self.config.rollout_length]\n storage.v = storage.v[:self.config.rollout_length]\n\n\n actions = torch.stack(storage.a, 1).reshape(-1)\n log_probs_old = torch.cat(storage.log_pi_a, 1).reshape(-1)\n values = torch.cat(storage.v, 1).reshape(-1)\n returns = torch.cat(storage.ret, 1).reshape(-1)\n advantages = torch.cat(storage.adv, 1).reshape(-1)\n\n log_probs_old = log_probs_old.detach()\n values = values.detach()\n states = torch.stack(storage.s, 1).view(-1, 4)\n h0 = torch.stack(storage.h0, 1).view(-1, self.hidden_size)\n c0 = torch.stack(storage.c0, 1).view(-1, self.hidden_size)\n\n \n advantages = (advantages - advantages.mean()) / advantages.std()\n\n self.logger.add_scalar('advantages', advantages.mean(), self.total_steps)\n\n\n\n ############################################################################################\n #Training Loop\n ############################################################################################\n for _ in range(config.optimization_epochs):\n indices = numpy.arange(0, self.config.rollout_length * self.config.num_workers, self.recurrence);\n indices = numpy.random.permutation(indices);\n num_indices = config.mini_batch_size // self.recurrence\n starting_batch_indices = [indices[i:i+num_indices] for i in range(0, len(indices), num_indices)]\n for starting_indices in starting_batch_indices:\n batch_entropy = 0\n batch_value_loss = 0\n batch_policy_loss = 0\n batch_loss = 0\n\n sampled_h0 = h0[starting_indices]\n sampled_c0 = c0[starting_indices]\n\n for i in range(self.recurrence):\n sampled_actions = actions[starting_indices + i]\n sampled_log_probs_old = log_probs_old[starting_indices + i]\n sampled_values = values[starting_indices + i]\n sampled_returns = returns[starting_indices + i]\n sampled_advantages = advantages[starting_indices + i]\n sampled_states = states[starting_indices + i]\n\n prediction, (sampled_h0, sampled_c0) = self.network(sampled_states, (sampled_h0, sampled_c0), sampled_actions)\n\n entropy = prediction['ent'].mean()\n \n prediction['log_pi_a'] = prediction['log_pi_a'].reshape(-1)\n prediction['v'] = prediction['v'].reshape(-1)\n\n ratio = (prediction['log_pi_a'] - sampled_log_probs_old).exp()\n obj = ratio * sampled_advantages\n obj_clipped = ratio.clamp(1.0 - self.config.ppo_ratio_clip,\n 1.0 + self.config.ppo_ratio_clip) * sampled_advantages\n policy_loss = -torch.min(obj, obj_clipped).mean() - config.entropy_weight * prediction['ent'].mean()\n\n value_loss = 0.5 * (sampled_returns - prediction['v']).pow(2).mean()\n\n loss = policy_loss + value_loss\n\n batch_entropy += entropy.item()\n batch_policy_loss += policy_loss.item()\n batch_value_loss += value_loss.item()\n batch_loss += loss;\n\n\n batch_entropy /= self.recurrence\n batch_policy_loss /= self.recurrence\n batch_value_loss /= self.recurrence\n batch_loss /= self.recurrence\n\n self.logger.add_scalar('entropy_loss', batch_entropy, self.total_steps)\n self.logger.add_scalar('policy_loss', batch_policy_loss, self.total_steps)\n self.logger.add_scalar('value_loss', batch_value_loss, 
self.total_steps)\n\n self.optimizer.zero_grad()\n batch_loss.backward()\n nn.utils.clip_grad_norm_(self.network.parameters(), config.gradient_clip)\n self.optimizer.step()\n\n\n \n"
] | [
[
"numpy.arange",
"numpy.random.permutation"
]
] |
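The reverse loop in step() above is standard GAE. A plain-numpy restatement with made-up rewards, values and termination masks (the agent computes the same quantities per worker with torch tensors):

import numpy as np

def compute_gae(rewards, values, masks, last_value, discount=0.99, gae_tau=0.95):
    # Bootstrap with the value of the state after the rollout,
    # mirroring storage.v[i + 1] in the agent.
    v = np.append(values, last_value)
    T = len(rewards)
    adv, ret = np.zeros(T), np.zeros(T)
    advantages, returns = 0.0, last_value
    for i in reversed(range(T)):
        returns = rewards[i] + discount * masks[i] * returns
        td_error = rewards[i] + discount * masks[i] * v[i + 1] - v[i]
        advantages = advantages * gae_tau * discount * masks[i] + td_error
        adv[i], ret[i] = advantages, returns
    return adv, ret

r = np.array([1.0, 0.0, 1.0])
v = np.array([0.5, 0.4, 0.6])
m = np.array([1.0, 1.0, 0.0])  # episode terminates at the last step
print(compute_gae(r, v, m, last_value=0.7))
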
icmaple931/facenet-pytorch | [
"555aa4bec20ca3e7c2ead14e7e39d5bbce203e4b"
] | [
"tests/travis_test.py"
] | [
"\"\"\"\nThe following code is intended to be run only by travis for continuius intengration and testing\npurposes. For implementation examples see notebooks in the examples folder.\n\"\"\"\n\nfrom PIL import Image, ImageDraw\nimport torch\nfrom torch.utils.data import DataLoader\nfrom torchvision import transforms, datasets\nimport numpy as np\nimport pandas as pd\nfrom time import time\nimport sys, os\nimport glob\n\nfrom models.mtcnn import MTCNN, fixed_image_standardization\nfrom models.inception_resnet_v1 import InceptionResnetV1, get_torch_home\n\n\n#### CLEAR ALL OUTPUT FILES ####\n\ncheckpoints = glob.glob(os.path.join(get_torch_home(), 'checkpoints/*'))\nfor c in checkpoints:\n print('Removing {}'.format(c))\n os.remove(c)\n\ncrop_files = glob.glob('data/test_images_aligned/**/*.png')\nfor c in crop_files:\n print('Removing {}'.format(c))\n os.remove(c)\n\n\n#### TEST EXAMPLE IPYNB'S ####\n\nos.system('jupyter nbconvert --to script --stdout examples/infer.ipynb examples/finetune.ipynb > examples/tmptest.py')\nos.chdir('examples')\ntry:\n import examples.tmptest\nexcept:\n import tmptest\nos.chdir('..')\n\n\n#### TEST MTCNN ####\n\ndef get_image(path, trans):\n img = Image.open(path)\n img = trans(img)\n return img\n\ntrans = transforms.Compose([\n transforms.Resize(512)\n])\n\ntrans_cropped = transforms.Compose([\n np.float32,\n transforms.ToTensor(),\n fixed_image_standardization\n])\n\ndataset = datasets.ImageFolder('data/test_images', transform=trans)\ndataset.idx_to_class = {k: v for v, k in dataset.class_to_idx.items()}\n\nmtcnn_pt = MTCNN(device=torch.device('cpu'))\n\nnames = []\naligned = []\naligned_fromfile = []\nfor img, idx in dataset:\n name = dataset.idx_to_class[idx]\n start = time()\n img_align = mtcnn_pt(img, save_path='data/test_images_aligned/{}/1.png'.format(name))\n print('MTCNN time: {:6f} seconds'.format(time() - start))\n \n # Comparison between types\n img_box = mtcnn_pt.detect(img)[0]\n assert (img_box - mtcnn_pt.detect(np.array(img))[0]).sum() < 1e-2\n assert (img_box - mtcnn_pt.detect(torch.as_tensor(np.array(img)))[0]).sum() < 1e-2\n\n # Batching test\n assert (img_box - mtcnn_pt.detect([img, img])[0]).sum() < 1e-2\n assert (img_box - mtcnn_pt.detect(np.array([np.array(img), np.array(img)]))[0]).sum() < 1e-2\n assert (img_box - mtcnn_pt.detect(torch.as_tensor([np.array(img), np.array(img)]))[0]).sum() < 1e-2\n\n # Box selection\n mtcnn_pt.selection_method = 'probability'\n print('\\nprobability - ', mtcnn_pt.detect(img))\n mtcnn_pt.selection_method = 'largest'\n print('largest - ', mtcnn_pt.detect(img))\n mtcnn_pt.selection_method = 'largest_over_theshold'\n print('largest_over_theshold - ', mtcnn_pt.detect(img))\n mtcnn_pt.selection_method = 'center_weighted_size'\n print('center_weighted_size - ', mtcnn_pt.detect(img))\n\n if img_align is not None:\n names.append(name)\n aligned.append(img_align)\n aligned_fromfile.append(get_image('data/test_images_aligned/{}/1.png'.format(name), trans_cropped))\n\naligned = torch.stack(aligned)\naligned_fromfile = torch.stack(aligned_fromfile)\n\n\n#### TEST EMBEDDINGS ####\n\nexpected = [\n [\n [0.000000, 1.482895, 0.886342, 1.438450, 1.437583],\n [1.482895, 0.000000, 1.345686, 1.029880, 1.061939],\n [0.886342, 1.345686, 0.000000, 1.363125, 1.338803],\n [1.438450, 1.029880, 1.363125, 0.000000, 1.066040],\n [1.437583, 1.061939, 1.338803, 1.066040, 0.000000]\n ],\n [\n [0.000000, 1.430769, 0.992931, 1.414197, 1.329544],\n [1.430769, 0.000000, 1.253911, 1.144899, 1.079755],\n [0.992931, 1.253911, 0.000000, 1.358875, 
1.337322],\n [1.414197, 1.144899, 1.358875, 0.000000, 1.204118],\n [1.329544, 1.079755, 1.337322, 1.204118, 0.000000]\n ]\n]\n\nfor i, ds in enumerate(['vggface2', 'casia-webface']):\n resnet_pt = InceptionResnetV1(pretrained=ds).eval()\n\n start = time()\n embs = resnet_pt(aligned)\n print('\\nResnet time: {:6f} seconds\\n'.format(time() - start))\n\n embs_fromfile = resnet_pt(aligned_fromfile)\n\n dists = [[(emb - e).norm().item() for e in embs] for emb in embs]\n dists_fromfile = [[(emb - e).norm().item() for e in embs_fromfile] for emb in embs_fromfile]\n\n print('\\nOutput:')\n print(pd.DataFrame(dists, columns=names, index=names))\n print('\\nOutput (from file):')\n print(pd.DataFrame(dists_fromfile, columns=names, index=names))\n print('\\nExpected:')\n print(pd.DataFrame(expected[i], columns=names, index=names))\n\n total_error = (torch.tensor(dists) - torch.tensor(expected[i])).norm()\n total_error_fromfile = (torch.tensor(dists_fromfile) - torch.tensor(expected[i])).norm()\n\n print('\\nTotal error: {}, {}'.format(total_error, total_error_fromfile))\n\n if sys.platform != 'win32':\n assert total_error < 1e-4\n assert total_error_fromfile < 1e-4\n\n\n#### TEST CLASSIFICATION ####\n\nresnet_pt = InceptionResnetV1(pretrained=ds, classify=True).eval()\nprob = resnet_pt(aligned)\n\n\n#### MULTI-FACE TEST ####\n\nmtcnn = MTCNN(keep_all=True)\nimg = Image.open('data/multiface.jpg')\nboxes, probs = mtcnn.detect(img)\n\ndraw = ImageDraw.Draw(img)\nfor i, box in enumerate(boxes):\n draw.rectangle(box.tolist())\n\nmtcnn(img, save_path='data/tmp.png')\n\n\n#### MTCNN TYPES TEST ####\n\nimg = Image.open('data/multiface.jpg')\n\nmtcnn = MTCNN(keep_all=True)\nboxes_ref, _ = mtcnn.detect(img)\n_ = mtcnn(img)\n\nmtcnn = MTCNN(keep_all=True).double()\nboxes_test, _ = mtcnn.detect(img)\n_ = mtcnn(img)\n\nbox_diff = boxes_ref[np.argsort(boxes_ref[:,1])] - boxes_test[np.argsort(boxes_test[:,1])]\ntotal_error = np.sum(np.abs(box_diff))\nprint('\\nfp64 Total box error: {}'.format(total_error))\n\nassert total_error < 1e-2\n\n\n# half is not supported on CPUs, only GPUs\nif torch.cuda.is_available():\n\n mtcnn = MTCNN(keep_all=True, device='cuda').half()\n boxes_test, _ = mtcnn.detect(img)\n _ = mtcnn(img)\n\n box_diff = boxes_ref[np.argsort(boxes_ref[:,1])] - boxes_test[np.argsort(boxes_test[:,1])]\n print('fp16 Total box error: {}'.format(np.sum(np.abs(box_diff))))\n\n # test new automatic multi precision to compare\n if hasattr(torch.cuda, 'amp'):\n with torch.cuda.amp.autocast():\n mtcnn = MTCNN(keep_all=True, device='cuda')\n boxes_test, _ = mtcnn.detect(img)\n _ = mtcnn(img)\n\n box_diff = boxes_ref[np.argsort(boxes_ref[:,1])] - boxes_test[np.argsort(boxes_test[:,1])]\n print('AMP total box error: {}'.format(np.sum(np.abs(box_diff))))\n\n \n#### MULTI-IMAGE TEST ####\n\nmtcnn = MTCNN(keep_all=True)\nimg = [\n Image.open('data/multiface.jpg'),\n Image.open('data/multiface.jpg')\n]\nbatch_boxes, batch_probs = mtcnn.detect(img)\n\nmtcnn(img, save_path=['data/tmp1.png', 'data/tmp1.png'])\ntmp_files = glob.glob('data/tmp*')\nfor f in tmp_files:\n os.remove(f)\n\n\n#### NO-FACE TEST ####\n\nimg = Image.new('RGB', (512, 512))\nmtcnn(img)\nmtcnn(img, return_prob=True)\n"
] | [
[
"torch.stack",
"pandas.DataFrame",
"numpy.abs",
"numpy.argsort",
"torch.tensor",
"torch.cuda.is_available",
"torch.cuda.amp.autocast",
"numpy.array",
"torch.device"
]
] |
SandhyaaGopchandani/PythonNetworkLibsComparion | [
"72db0cabecd0a9764663a044b19ef4dde843c402"
] | [
"net_performance_comparison.py"
] | [
"import itertools\nimport numpy as np\nfrom timeit import default_timer as timer\nfrom graph_tool.all import *\nimport pickle\nimport networkx as nx\nimport matplotlib as mpl\n#mpl.use('TkAgg')\nimport matplotlib.pyplot as plt\nfrom igraph import *\n\n\ndef nodes_edges(num_nodes):\n \"\"\" this function takes number of nodes and returns nodes and edge list\"\"\"\n nodes = list(range(num_nodes))\n edges = list(itertools.combinations(nodes, 2))\n return nodes, edges\n\ndef create_graph_graphtool(node_num, edges):\n \"\"\" this function creates graph object of graphtool library\"\"\"\n g = Graph(directed=False)\n vlist = g.add_vertex(node_num)\n g.add_edge_list(edges)\n return g\n\ndef create_graph_igraph(nodes, edges):\n \"\"\" this function creates graph object of igraph library\"\"\"\n g = Graph(directed=False)\n g.add_vertices(nodes)\n g.add_edges(edges)\n return g\n\ndef create_graph_networkx(nodes, edges):\n \"\"\" this function creates graph object of networkx library\"\"\"\n g = nx.Graph(directed=False)\n g.add_nodes_from(nodes)\n g.add_edges_from(edges)\n return g\n\n\ndef get_edges(complete_edge_list, threshold=0.5):\n \"\"\" this function randomnly picks the edges in graph based on probability. 0.5 means we want to include only 50% of random \n edges of the total edges in the graph\"\"\"\n edge_list = []\n for key in complete_edge_list:\n if np.random.random() < threshold:\n edge_list.append(key)\n\n return edge_list\n\n\ndef multiple_graph(complete_edge_list, nodes, probs, netlib='networkx'):\n \"\"\"this function times the various centrality measures calculated using three different network libararies.\n The function computes various graph based on given probability of edges, computes the degree, closeness and betweenness\n centrality measure and time those. At the end, it returns the list of timestamp for each cenrality. 
\"\"\"\n print(\"total possible edges:\", len(complete_edge_list))\n time_deg_central = []\n time_closeness_central = []\n time_between_central = []\n num_edges = []\n for prob in probs:\n edges = get_edges(complete_edge_list, prob)\n if netlib == 'graph-tool':\n num_nodes = len(nodes)\n graph = create_graph_graphtool(num_nodes, edges)\n print(prob, len(graph.get_vertices()), len(graph.get_edges()))\n num_edges.append(len(graph.get_edges()))\n\n start = timer()\n doc_degree_centralities = graph.get_out_degrees(nodes)\n end = timer()\n time_deg_central.append(end - start)\n\n start = timer()\n vertex_betweenness, edge_betweenness = graph_tool.centrality.betweenness(graph)\n end = timer()\n time_between_central.append(end - start)\n\n start = timer()\n vertex_closeness = graph_tool.centrality.closeness(graph)\n end = timer()\n time_closeness_central.append(end - start)\n\n if netlib == 'networkx':\n graph = create_graph_networkx(nodes, edges)\n print(prob, len(graph.nodes()), len(graph.edges()))\n num_edges.append(len(graph.edges()))\n\n start = timer()\n doc_degree_centralities = nx.algorithms.centrality.degree_centrality(graph)\n end = timer()\n time_deg_central.append(end - start)\n\n start = timer()\n vertex_betweenness = nx.algorithms.centrality.betweenness_centrality(graph)\n end = timer()\n time_between_central.append(end - start)\n\n start = timer()\n vertex_closeness = nx.algorithms.centrality.closeness_centrality(graph)\n end = timer()\n time_closeness_central.append(end - start)\n\n if netlib == 'igraph':\n graph = create_graph_igraph(nodes, edges)\n print(prob, graph.vcount(), graph.ecount())\n num_edges.append(graph.ecount())\n\n start = timer()\n doc_degree_centralities = np.array(graph.degree(nodes), dtype='f') / (graph.vcount() - 1)\n end = timer()\n time_deg_central.append(end - start)\n\n start = timer()\n normalization_factor = 2 / (float(graph.vcount() - 1) * float(graph.vcount() - 2))\n vertex_betweenness = np.array(graph.betweenness(), dtype='f') * normalization_factor\n end = timer()\n time_between_central.append(end - start)\n\n start = timer()\n vertex_closeness = graph.closeness()\n end = timer()\n time_closeness_central.append(end - start)\n\n return num_edges, time_deg_central, time_closeness_central, time_between_central\n\n\ndef plot_result(num_nodes, x, y1, y2, y3):\n \"\"\"This function plots the timestamp for three centralities as a function of number of edges.\"\"\"\n plt.plot(x, y1)\n plt.plot(x, y2)\n plt.plot(x, y3)\n plt.legend(['degree centrality', 'closeness centrality','betweenness centrality'], loc='upper left')\n plt.xticks(x)\n plt.title('with network of nodes '+str(num_nodes))\n plt.xticks(rotation=90)\n plt.xlabel('number of edges')\n plt.ylabel('time (in seconds)')\n plt.show()\n\n\nif __name__ == '__main__':\n \n num_nodes = 500 # number of nodes\n nodes, complete_edge_list = nodes_edges(num_nodes)\n threshold = [0.05, 0.2, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]\n num_edges, time_deg_central, time_closeness_central, time_between_central = multiple_graph(complete_edge_list,\n nodes, threshold,\n netlib='igraph')\n print(num_edges, time_deg_central, time_closeness_central, time_between_central)\n plot_result(num_nodes, num_edges, time_deg_central, time_closeness_central, time_between_central)"
] | [
[
"matplotlib.pyplot.legend",
"matplotlib.pyplot.xticks",
"numpy.random.random",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.xlabel"
]
] |
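The harness above grows a near-complete graph, so betweenness centrality gets expensive quickly. A self-contained cut of just the networkx degree-centrality timing, kept to 200 nodes so it finishes fast:

from timeit import default_timer as timer
import itertools
import networkx as nx

nodes = list(range(200))
edges = list(itertools.combinations(nodes, 2))  # complete graph, as in nodes_edges()

g = nx.Graph()
g.add_nodes_from(nodes)
g.add_edges_from(edges)

start = timer()
nx.algorithms.centrality.degree_centrality(g)
print("degree centrality: {:.4f}s".format(timer() - start))
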
tsjayram/mi-prometheus | [
"cf163d9e246c3ae3c100045e58924148b2f81c39"
] | [
"miprometheus/workers/worker.py"
] | [
"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n#\n# Copyright (C) IBM Corporation 2018\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nworker.py:\n\n - Contains the definition of the ``Worker`` class, representing the base of the basic workers, such as \\\n ``OnlineTrainer`` and ``Tester``.\n\n\n\"\"\"\n__author__ = \"Vincent Marois, Tomasz Kornuta, Ryan L. McAvoy\"\n\nimport os\nimport yaml\n\nimport torch\nimport logging\nimport logging.config\nimport argparse\nimport numpy as np\nfrom random import randrange\nfrom abc import abstractmethod\n\nfrom torch.utils.data import DataLoader\nfrom miprometheus.utils.sampler_factory import SamplerFactory\nfrom miprometheus.problems.problem_factory import ProblemFactory\n\n# Import utils.\nfrom miprometheus.utils.app_state import AppState\nfrom miprometheus.utils.param_interface import ParamInterface\n\n\nclass Worker(object):\n \"\"\"\n Base abstract class for the workers.\n All base workers should subclass it and override the relevant methods.\n \"\"\"\n\n def __init__(self, name, add_default_parser_args = True):\n \"\"\"\n Base constructor for all workers:\n\n - Initializes the AppState singleton:\n\n >>> self.app_state = AppState()\n\n - Initializes the Parameter Registry:\n\n >>> self.params = ParamInterface()\n\n - Defines the logger:\n\n >>> self.logger = logging.getLogger(name=self.name)\n\n - Creates parser and adds default worker command line arguments.\n\n :param name: Name of the worker.\n :type name: str\n\n :param add_default_parser_args: If set, adds default parser arguments (DEFAULT: True).\n :type add_default_parser_args: bool\n\n \"\"\"\n # Call base constructor.\n super(Worker, self).__init__()\n\n # Set worker name.\n self.name = name\n\n # Initialize the application state singleton.\n self.app_state = AppState()\n\n # Initialize parameter interface/registry.\n self.params = ParamInterface()\n\n # Initialize logger using the configuration.\n self.initialize_logger()\n\n # Create parser with a list of runtime arguments.\n self.parser = argparse.ArgumentParser(formatter_class=argparse.RawTextHelpFormatter)\n\n # Add arguments to the specific parser.\n if add_default_parser_args:\n # These arguments will be shared by all basic workers.\n self.parser.add_argument('--config',\n dest='config',\n type=str,\n default='',\n help='Name of the configuration file(s) to be loaded. '\n 'If specifying more than one file, they must be separated with coma \",\".')\n\n self.parser.add_argument('--model',\n type=str,\n default='',\n dest='model',\n help='Path to the file containing the saved parameters'\n ' of the model to load (model checkpoint, should end with a .pt extension.)')\n\n self.parser.add_argument('--gpu',\n dest='use_gpu',\n action='store_true',\n help='The current worker will move the computations on GPU devices, if available '\n 'in the system. 
(Default: False)')\n\n self.parser.add_argument('--expdir',\n dest='expdir',\n type=str,\n default=\"./experiments\",\n help='Path to the directory where the experiment(s) folders are/will be stored.'\n ' (DEFAULT: ./experiments)')\n\n self.parser.add_argument('--savetag',\n dest='savetag',\n type=str,\n default='',\n help='Tag for the save directory.')\n\n self.parser.add_argument('--ll',\n action='store',\n dest='log_level',\n type=str,\n default='INFO',\n choices=['CRITICAL', 'ERROR', 'WARNING', 'INFO', 'DEBUG', 'NOTSET'],\n help=\"Log level. (Default: INFO)\")\n\n self.parser.add_argument('--li',\n dest='logging_interval',\n default=100,\n type=int,\n help='Statistics logging interval. Will impact logging to the logger and '\n 'exporting to TensorBoard. Writing to the csv file is not impacted '\n '(interval of 1).(Default: 100, i.e. logs every 100 episodes).')\n\n self.parser.add_argument('--agree',\n dest='confirm',\n action='store_true',\n help='Request user confirmation just after loading the settings, '\n 'before starting training. (Default: False)')\n\n def initialize_logger(self):\n \"\"\"\n Initializes the logger, with a specific configuration:\n\n >>> logger_config = {'version': 1,\n >>> 'disable_existing_loggers': False,\n >>> 'formatters': {\n >>> 'simple': {\n >>> 'format': '[%(asctime)s] - %(levelname)s - %(name)s >>> %(message)s',\n >>> 'datefmt': '%Y-%m-%d %H:%M:%S'}},\n >>> 'handlers': {\n >>> 'console': {\n >>> 'class': 'logging.StreamHandler',\n >>> 'level': 'INFO',\n >>> 'formatter': 'simple',\n >>> 'stream': 'ext://sys.stdout'}},\n >>> 'root': {'level': 'DEBUG',\n >>> 'handlers': ['console']}}\n\n \"\"\"\n # Load the default logger configuration.\n logger_config = {'version': 1,\n 'disable_existing_loggers': False,\n 'formatters': {\n 'simple': {\n 'format': '[%(asctime)s] - %(levelname)s - %(name)s >>> %(message)s',\n 'datefmt': '%Y-%m-%d %H:%M:%S'}},\n 'handlers': {\n 'console': {\n 'class': 'logging.StreamHandler',\n 'level': 'INFO',\n 'formatter': 'simple',\n 'stream': 'ext://sys.stdout'}},\n 'root': {'level': 'DEBUG',\n 'handlers': ['console']}}\n\n logging.config.dictConfig(logger_config)\n\n # Create the Logger, set its label and logging level.\n self.logger = logging.getLogger(name=self.name)\n\n def display_parsing_results(self):\n \"\"\"\n Displays the properly & improperly parsed arguments (if any).\n\n \"\"\"\n # Log the parsed flags.\n flags_str = 'Properly parsed command line arguments: \\n'\n flags_str += '='*80 + '\\n'\n for arg in vars(self.flags): \n flags_str += \"{}= {} \\n\".format(arg, getattr(self.flags, arg))\n flags_str += '='*80 + '\\n'\n self.logger.info(flags_str)\n\n # Log the unparsed flags if any.\n if self.unparsed:\n flags_str = 'Invalid command line arguments: \\n'\n flags_str += '='*80 + '\\n'\n for arg in self.unparsed: \n flags_str += \"{} \\n\".format(arg)\n flags_str += '='*80 + '\\n'\n self.logger.warning(flags_str)\n\n def setup_experiment(self):\n \"\"\"\n Setups a specific experiment.\n\n Base method:\n\n - Parses command line arguments.\n\n - Sets the 3 default sections (training / validation / test) and sets their dataloaders params.\n\n .. 
note::\n\n Child classes should override this method, but still call its parent to draw the basic functionality \\\n implemented here.\n\n\n \"\"\"\n # Parse arguments.\n self.flags, self.unparsed = self.parser.parse_known_args()\n\n # Set logger depending on the settings.\n self.logger.setLevel(getattr(logging, self.flags.log_level.upper(), None))\n\n # add empty sections\n self.params.add_default_params({\"training\": {'terminal_conditions': {}}})\n self.params.add_default_params({\"validation\": {}})\n self.params.add_default_params({\"testing\": {}})\n\n # set a default configuration section for the DataLoaders\n dataloader_config = {'dataloader': {'shuffle': True, # shuffle set by default.\n 'batch_sampler': None,\n 'num_workers': 0, # Do not use multiprocessing by default - for now.\n 'pin_memory': False,\n 'drop_last': False,\n 'timeout': 0},\n 'sampler': {}, # not using sampler by default\n }\n\n self.params[\"training\"].add_default_params(dataloader_config)\n self.params[\"validation\"].add_default_params(dataloader_config)\n self.params[\"testing\"].add_default_params(dataloader_config)\n\n def build_problem_sampler_loader(self, params, section_name):\n \"\"\"\n Builds and returns the Problem class, alongside its DataLoader.\n\n Also builds the sampler if required.\n\n :param params: 'ParamInterface' object, referring to one of main sections (training/validation/testing).\n :type params: miprometheus.utils.ParamInterface\n\n :param section_name: name of the section that will be used by logger for display.\n\n :return: Problem instance & DataLoader instance.\n \"\"\"\n\n # Build the problem.\n problem = ProblemFactory.build(params['problem'])\n\n # Try to build the sampler.\n sampler = SamplerFactory.build(problem, params['sampler'])\n\n if sampler is not None:\n # Set shuffle to False - REQUIRED as those two are exclusive.\n params['dataloader'].add_config_params({'shuffle': False})\n\n # build the DataLoader on top of the validation problem\n loader = DataLoader(dataset=problem,\n batch_size=params['problem']['batch_size'],\n shuffle=params['dataloader']['shuffle'],\n sampler=sampler,\n batch_sampler=params['dataloader']['batch_sampler'],\n num_workers=params['dataloader']['num_workers'],\n collate_fn=problem.collate_fn,\n pin_memory=params['dataloader']['pin_memory'],\n drop_last=params['dataloader']['drop_last'],\n timeout=params['dataloader']['timeout'],\n worker_init_fn=problem.worker_init_fn)\n\n # Display sizes.\n self.logger.info(\"Problem for '{}' loaded (size: {})\".format(section_name, len(problem)))\n if (sampler is not None):\n self.logger.info(\"Sampler for '{}' created (size: {})\".format(section_name, len(sampler)))\n\n\n # Return sampler - even if it is none :]\n return problem, sampler, loader\n\n\n def get_epoch_size(self, problem, sampler, batch_size, drop_last):\n \"\"\"\n Compute the number of iterations ('episodes') to run given the size of the dataset and the batch size to cover\n the entire dataset once.\n\n Takes into account whether one used sampler or not.\n\n :param problem: Object derived from the ''Problem'' class\n\n :param sampler: Sampler (may be None)\n\n :param batch_size: Batch size.\n :type batch_size: int\n\n :param drop_last: If True then last batch (if incomplete) will not be counted\n :type drop_last: bool\n\n .. note::\n\n If the last batch is incomplete we are counting it in when ``drop_last`` in ``DataLoader()`` is set to Ttrue.\n\n .. 
warning::\n\n Leaving this method 'just in case', in most cases one might simply use ''len(dataloader)''.\n\n :return: Number of iterations to perform to go though the entire dataset once.\n\n \"\"\"\n # \"Estimate\" dataset size.\n if (sampler is not None):\n problem_size = len(sampler)\n else:\n problem_size = len(problem)\n\n # If problem_size is a multiciplity of batch_size OR drop last is set.\n if (problem_size % batch_size) == 0 or drop_last:\n return problem_size // batch_size\n else:\n return (problem_size // batch_size) + 1\n\n\n def export_experiment_configuration(self, log_dir, filename, user_confirm):\n \"\"\"\n Dumps the configuration to ``yaml`` file.\n\n :param log_dir: Directory used to host log files (such as the collected statistics).\n :type log_dir: str\n\n :param filename: Name of the ``yaml`` file to write to.\n :type filename: str\n\n :param user_confirm: Whether to request user confirmation.\n :type user_confirm: bool\n\n\n \"\"\"\n # -> At this point, all configuration for experiment is complete.\n\n # Display results of parsing.\n self.display_parsing_results()\n\n # Log the resulting training configuration.\n conf_str = 'Final parameter registry configuration:\\n'\n conf_str += '='*80 + '\\n'\n conf_str += yaml.safe_dump(self.params.to_dict(), default_flow_style=False)\n conf_str += '='*80 + '\\n'\n self.logger.info(conf_str)\n\n # Save the resulting configuration into a .yaml settings file, under log_dir\n with open(log_dir + filename, 'w') as yaml_backup_file:\n yaml.dump(self.params.to_dict(), yaml_backup_file, default_flow_style=False)\n\n # Ask for confirmation - optional.\n if user_confirm:\n try:\n input('Press <Enter> to confirm and start the experiment\\n')\n except KeyboardInterrupt:\n exit(0) \n\n\n def add_statistics(self, stat_col):\n \"\"\"\n Adds most elementary shared statistics to ``StatisticsCollector``: episode and loss.\n\n :param stat_col: ``StatisticsCollector``.\n\n \"\"\"\n # Add default statistics with formatting.\n stat_col.add_statistic('loss', '{:12.10f}')\n stat_col.add_statistic('episode', '{:06d}')\n\n def add_aggregators(self, stat_agg):\n \"\"\"\n Adds basic statistical aggregators to ``StatisticsAggregator``: episode, \\\n episodes_aggregated and loss derivatives.\n\n :param stat_agg: ``StatisticsAggregator``.\n\n \"\"\"\n # add 'aggregators' for the episode.\n stat_agg.add_aggregator('episode', '{:06d}')\n # Number of aggregated episodes.\n stat_agg.add_aggregator('episodes_aggregated', '{:06d}')\n\n # Add default statistical aggregators for the loss (indicating a formatting).\n # Represents the average loss, but stying with loss for TensorBoard \"variable compatibility\".\n stat_agg.add_aggregator('loss', '{:12.10f}') \n stat_agg.add_aggregator('loss_min', '{:12.10f}')\n stat_agg.add_aggregator('loss_max', '{:12.10f}')\n stat_agg.add_aggregator('loss_std', '{:12.10f}')\n\n def aggregate_statistics(self, stat_col, stat_agg):\n \"\"\"\n Aggregates the default statistics collected by the ``StatisticsCollector``.\n\n\n .. note::\n Only computes the min, max, mean, std of the loss as these are basic statistical aggregator by default.\n\n Given that the ``StatisticsAggregator`` uses the statistics collected by the ``StatisticsCollector``, \\\n It should be ensured that these statistics are correctly collected (i.e. 
use of ``self.add_statistics()`` \\\n and ``collect_statistics()``).\n\n :param stat_col: ``StatisticsCollector``\n\n :param stat_agg: ``StatisticsAggregator``\n\n \"\"\"\n # By default, copy the last value for all variables have matching names.\n # (will work well for e.g. episode or epoch)\n for k, v in stat_col.items():\n if k in stat_agg.aggregators:\n # Copy last collected value.\n stat_agg.aggregators[k] = v[-1]\n\n # Get loss values.\n loss_values = stat_col['loss']\n\n # Calculate default aggregates.\n stat_agg.aggregators['loss'] = torch.mean(torch.tensor(loss_values))\n stat_agg.aggregators['loss_min'] = min(loss_values)\n stat_agg.aggregators['loss_max'] = max(loss_values)\n stat_agg.aggregators['loss_std'] = 0.0 if len(loss_values) <= 1 else torch.std(torch.tensor(loss_values))\n stat_agg.aggregators['episodes_aggregated'] = len(loss_values)\n\n @abstractmethod\n def run_experiment(self):\n \"\"\"\n Main function of the worker which executes a specific experiment.\n\n .. note::\n\n Abstract. Should be implemented in the subclasses.\n\n\n \"\"\"\n\n def add_file_handler_to_logger(self, logfile):\n \"\"\"\n Add a ``logging.FileHandler`` to the logger of the current ``Worker``.\n\n Specifies a ``logging.Formatter``:\n\n >>> logging.Formatter(fmt='[%(asctime)s] - %(levelname)s - %(name)s >>> %(message)s',\n >>> datefmt='%Y-%m-%d %H:%M:%S')\n\n\n :param logfile: File used by the ``FileHandler``.\n\n \"\"\"\n # create file handler which logs even DEBUG messages\n fh = logging.FileHandler(logfile)\n\n # set logging level for this file\n fh.setLevel(logging.DEBUG)\n\n # create formatter and add it to the handlers\n formatter = logging.Formatter(fmt='[%(asctime)s] - %(levelname)s - %(name)s >>> %(message)s',\n datefmt='%Y-%m-%d %H:%M:%S')\n fh.setFormatter(formatter)\n\n # add the handler to the logger\n self.logger.addHandler(fh)\n\n def recurrent_config_parse(self, configs: str, configs_parsed: list):\n \"\"\"\n Parses names of configuration files in a recursive manner, i.e. 
\\\n by looking for ``default_config`` sections and trying to load and parse those \\\n files one by one.\n\n :param configs: String containing names of configuration files (with paths), separated by comas.\n :type configs: str\n\n :param configs_parsed: Configurations that were already parsed (so we won't parse them many times).\n :type configs_parsed: list\n\n\n :return: list of parsed configuration files.\n\n \"\"\"\n # Split and remove spaces.\n configs_to_parse = configs.replace(\" \", \"\").split(',')\n\n # Terminal condition.\n while len(configs_to_parse) > 0:\n\n # Get config.\n config = configs_to_parse.pop(0)\n\n # Skip empty names (after lose comas).\n if config == '':\n continue\n print(\"Info: Parsing the {} configuration file\".format(config))\n\n # Check if it was already loaded.\n if config in configs_parsed:\n print('Warning: Configuration file {} already parsed - skipping'.format(config))\n continue\n\n # Check if file exists.\n if not os.path.isfile(config):\n print('Error: Configuration file {} does not exist'.format(config))\n exit(-1)\n\n try:\n # Open file and get parameter dictionary.\n with open(config, 'r') as stream:\n param_dict = yaml.safe_load(stream)\n except yaml.YAMLError as e:\n print(\"Error: Couldn't properly parse the {} configuration file\".format(config))\n print('yaml.YAMLERROR:', e)\n exit(-1)\n\n # Remember that we loaded that config.\n configs_parsed.append(config)\n\n # Check if there are any default configs to load.\n if 'default_configs' in param_dict:\n # If there are - recursion!\n configs_parsed = self.recurrent_config_parse(\n param_dict['default_configs'], configs_parsed)\n\n # Done, return list of loaded configs.\n return configs_parsed\n\n def recurrent_config_load(self,configs_to_load):\n for config in reversed(configs_to_load):\n # Load params from YAML file.\n self.params.add_config_params_from_yaml(config)\n print('Loaded configuration from file {}'.format(config))\n\n def check_and_set_cuda(self, use_gpu):\n \"\"\"\n Enables computations on CUDA if GPU is available.\n Sets the default data types.\n\n :param use_gpu: Command line flag indicating whether use GPU/CUDA or not. 
\n\n \"\"\"\n # Determine if GPU/CUDA is available.\n if torch.cuda.is_available():\n if use_gpu:\n self.app_state.convert_cuda_types()\n self.logger.info('Running computations on GPU using CUDA enabled')\n elif use_gpu:\n self.logger.warning('GPU flag is enabled but there are no available GPU devices, using CPU instead')\n else:\n self.logger.warning('GPU flag is disabled, using CPU.')\n\n def predict_evaluate_collect(self, model, problem, data_dict, stat_col, episode, epoch=None):\n \"\"\"\n Function that performs the following:\n\n - passes samples through the model,\n - computes loss using the problem\n - collects problem and model statistics,\n\n\n :param model: trainable model.\n :type model: ``models.model.Model`` or a subclass\n\n :param problem: problem generating samples.\n :type problem: ``problems.problem.problem`` or a subclass\n\n :param data_dict: contains the batch of samples to pass to the model.\n :type data_dict: ``DataDict``\n\n :param stat_col: statistics collector used for logging accuracy etc.\n :type stat_col: ``StatisticsCollector``\n\n :param episode: current episode index\n :type episode: int\n\n :param epoch: current epoch index.\n :type epoch: int, optional\n\n\n :return:\n\n - logits,\n - loss\n\n\n \"\"\"\n # Convert to CUDA.\n if self.app_state.use_CUDA:\n data_dict = data_dict.cuda()\n\n # Perform forward calculation.\n logits = model(data_dict)\n\n # Evaluate loss function.\n loss = problem.evaluate_loss(data_dict, logits)\n\n # Collect \"elementary\" statistics - episode and loss.\n if ('epoch' in stat_col) and (epoch is not None):\n stat_col['epoch'] = epoch\n\n stat_col['episode'] = episode\n # Collect loss as float.\n stat_col['loss'] = loss.item()\n\n # Collect other (potential) statistics from problem & model.\n problem.collect_statistics(stat_col, data_dict, logits)\n model.collect_statistics(stat_col, data_dict, logits)\n\n # Return tuple: logits, loss.\n return logits, loss\n\n def export_statistics(self, stat_obj, tag='', export_to_log = True):\n \"\"\"\n Export the statistics/aggregations to logger, csv and TB.\n\n :param stat_obj: ``StatisticsCollector`` or ``StatisticsAggregato`` object.\n\n :param tag: Additional tag that will be added to string exported to logger, optional (DEFAULT = '').\n :type tag: str\n\n :param export_to_log: If True, exports statistics to logger (DEFAULT: True)\n :type export_to_log: bool\n\n \"\"\" \n # Log to logger\n if export_to_log:\n self.logger.info(stat_obj.export_to_string(tag))\n\n # Export to csv\n stat_obj.export_to_csv()\n\n # Export to TensorBoard.\n stat_obj.export_to_tensorboard()\n\n def aggregate_and_export_statistics(self, problem, model, stat_col, stat_agg, episode, tag='', export_to_log = True):\n \"\"\"\n Aggregates the collected statistics. Exports the aggregations to logger, csv and TB. 
\\\n Empties statistics collector for the next episode.\n\n :param model: trainable model.\n :type model: ``models.model.Model`` or a subclass\n\n :param problem: problem generating samples.\n :type problem: ``problems.problem.problem`` or a subclass\n\n :param stat_col: ``StatisticsCollector`` object.\n\n :param stat_agg: ``StatisticsAggregator`` object.\n\n :param tag: Additional tag that will be added to string exported to logger, optional (DEFAULT = '').\n :type tag: str\n\n :param export_to_log: If True, exports statistics to logger (DEFAULT: True)\n :type export_to_log: bool\n\n \"\"\" \n # Aggregate statistics.\n self.aggregate_statistics(stat_col, stat_agg)\n problem.aggregate_statistics(stat_col, stat_agg)\n model.aggregate_statistics(stat_col, stat_agg)\n\n # Set episode, so the datapoint will appear in the right place in TB.\n stat_agg[\"episode\"] = episode\n\n # Export to logger, cvs and TB.\n self.export_statistics(stat_agg, tag, export_to_log)\n\n def cycle(self, iterable):\n \"\"\"\n Cycle an iterator to prevent its exhaustion.\n This function is used in the (online) trainer to reuse the same ``DataLoader`` for a number of episodes\\\n > len(dataset)/batch_size.\n\n :param iterable: iterable.\n :type iterable: iter\n\n \"\"\"\n while True:\n for x in iterable:\n yield x\n\n def set_random_seeds(self, params, section_name):\n \"\"\"\n Set ``torch`` & ``NumPy`` random seeds from the ``ParamRegistry``: \\\n If one was indicated, use it, or set a random one.\n\n :param params: Section in config/param registry that will be changed \\\n (\"training\" or \"testing\" only will be taken into account.)\n\n :param section_name: Name of the section (for logging purposes only).\n :type section_name: str\n\n \"\"\"\n # Set the random seeds: either from the loaded configuration or a default randomly selected one.\n params.add_default_params({\"seed_numpy\": -1})\n if params[\"seed_numpy\"] == -1:\n seed = randrange(0, 2 ** 32)\n # Overwrite the config param!\n params.add_config_params({\"seed_numpy\": seed})\n\n self.logger.info(\"Setting numpy random seed in {} to: {}\".format(section_name, params[\"seed_numpy\"]))\n np.random.seed(params[\"seed_numpy\"])\n\n params.add_default_params({\"seed_torch\": -1})\n if params[\"seed_torch\"] == -1:\n seed = randrange(0, 2 ** 32)\n # Overwrite the config param!\n params.add_config_params({\"seed_torch\": seed})\n\n self.logger.info(\"Setting torch random seed in {} to: {}\".format(section_name, params[\"seed_torch\"]))\n torch.manual_seed(params[\"seed_torch\"])\n torch.cuda.manual_seed_all(params[\"seed_torch\"])\n"
] | [
[
"torch.utils.data.DataLoader",
"torch.cuda.manual_seed_all",
"torch.manual_seed",
"numpy.random.seed",
"torch.tensor",
"torch.cuda.is_available"
]
] |
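get_epoch_size above is ceiling division unless drop_last floors it. A minimal restatement with quick checks (the function name is mine):

def epoch_size(dataset_len, batch_size, drop_last=False):
    # Exact multiple or drop_last: floor; otherwise count the partial batch too.
    if dataset_len % batch_size == 0 or drop_last:
        return dataset_len // batch_size
    return dataset_len // batch_size + 1

assert epoch_size(100, 10) == 10
assert epoch_size(101, 10) == 11
assert epoch_size(101, 10, drop_last=True) == 10
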
laughingwithu/jesse | [
"c21adf59074ad62e4aa775261b4ad86c542ec4d5"
] | [
"jesse/indicators/mom.py"
] | [
"import numpy as np\nimport talib\n\nfrom typing import Union\n\n\ndef mom(candles: np.ndarray, period=10, sequential=False) -> Union[float, np.ndarray]:\n \"\"\"\n MOM - Momentum\n\n :param candles: np.ndarray\n :param period: int - default=10\n :param sequential: bool - default=False\n\n :return: float | np.ndarray\n \"\"\"\n if not sequential and len(candles) > 240:\n candles = candles[-240:]\n\n res = talib.MOM(candles[:, 2], timeperiod=period)\n\n if sequential:\n return res\n else:\n return None if np.isnan(res[-1]) else res[-1]\n"
] | [
[
"numpy.isnan"
]
] |
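MOM is the close-price difference over the last period bars; talib only adds the NaN padding for the warm-up window. A numpy-only sketch that should match talib.MOM on the same close series (column 2, the close price in this candle layout):

import numpy as np

def mom_np(close, period=10):
    res = np.full(close.shape, np.nan)
    res[period:] = close[period:] - close[:-period]
    return res

close = 100 + np.cumsum(np.random.randn(50))  # synthetic close prices
print(mom_np(close)[-1])  # equals close[-1] - close[-11]
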
LJ-LiJiahe/cnn_pytorch | [
"abddc46240a2c7da9818c1cb945d951a8e3b107f"
] | [
"plot_loss_accuracy.py"
] | [
"import os\n\nimport numpy as np\nfrom matplotlib import pyplot as plt\nimport matplotlib\n\nimport config as cfg\nfrom utils import read_from_pickle_file\n\n# Server to Dell box\nmatplotlib.use('TkAgg')\ntrain_loss = []\nvalidation_loss = []\n\ntrain_loss_loc = os.path.join(cfg.loss_dir, 'train_loss')\nvalidation_loss_loc = os.path.join(cfg.loss_dir, 'validation_loss')\n\n\nfor item in read_from_pickle_file(train_loss_loc):\n train_loss.append(item)\n\nfor item in read_from_pickle_file(validation_loss_loc):\n validation_loss.append(item)\n\ntrain_loss = np.array(train_loss)\nvalidation_loss = np.array(validation_loss)\nplt.plot(train_loss[1:-1, 0],train_loss[1:-1, 1],label=\"Train Loss\")\nplt.plot(validation_loss[1:-1, 0],validation_loss[1:-1, 1],label=\"Validation Loss\")\nplt.ylabel(\"Loss\")\nplt.xlabel(\"iterations\")\nplt.legend(loc='upper left')\nplt.show()\n\n\n"
] | [
[
"matplotlib.pyplot.legend",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylabel",
"matplotlib.use",
"matplotlib.pyplot.plot",
"numpy.array",
"matplotlib.pyplot.xlabel"
]
] |
antonkulaga/DeepAb | [
"51a32d06d19815705bdbfb35a8a9518c17ec313a"
] | [
"deepab/resnets/CrissCrossResNet2D.py"
] | [
"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.utils.checkpoint import checkpoint\nfrom einops import rearrange, repeat\n\n\nclass CrissCrossAttention(nn.Module):\n def __init__(self, in_dim):\n super(CrissCrossAttention, self).__init__()\n self.query_conv = nn.Conv2d(in_channels=in_dim,\n out_channels=in_dim // 8,\n kernel_size=1)\n self.key_conv = nn.Conv2d(in_channels=in_dim,\n out_channels=in_dim // 8,\n kernel_size=1)\n self.value_conv = nn.Conv2d(in_channels=in_dim,\n out_channels=in_dim,\n kernel_size=1)\n self.softmax = nn.Softmax(dim=3)\n self.gamma = nn.Parameter(torch.zeros(1))\n\n def forward(self, x):\n device = x.device\n b, _, h, w = x.shape\n\n q = self.query_conv(x)\n q_h = rearrange(q, \"b c h w -> (b w) h c\")\n q_w = rearrange(q, \"b c h w -> (b h) w c\")\n\n k = self.key_conv(x)\n k_h = rearrange(k, \"b c h w -> (b w) c h\")\n k_w = rearrange(k, \"b c h w -> (b h) c w\")\n\n v = self.value_conv(x)\n v_h = rearrange(v, \"b c h w -> (b w) c h\")\n v_w = rearrange(v, \"b c h w -> (b h) c w\")\n\n inf = repeat(torch.diag(\n torch.tensor(float(\"-inf\"), device=device).repeat(h), 0),\n \"h1 h2 -> (b w) h1 h2\",\n b=b,\n w=w)\n e_h = rearrange(torch.bmm(q_h, k_h) + inf,\n \"(b w) h1 h2 -> b h1 w h2\",\n b=b)\n e_w = rearrange(torch.bmm(q_w, k_w), \"(b h) w1 w2 -> b h w1 w2\", b=b)\n\n attn = self.softmax(torch.cat([e_h, e_w], 3))\n attn_h, attn_w = attn.chunk(2, dim=-1)\n attn_h = rearrange(attn_h, \"b h1 w h2 -> (b w) h1 h2\")\n attn_w = rearrange(attn_w, \"b h w1 w2 -> (b h) w1 w2\")\n\n out_h = torch.bmm(v_h, rearrange(attn_h, \"bw h1 h2 -> bw h2 h1\"))\n out_h = rearrange(out_h, \"(b w) c h -> b c h w\", b=b)\n out_w = torch.bmm(v_w, rearrange(attn_w, \"bh w1 w2 -> bh w2 w1\"))\n out_w = rearrange(out_w, \"(b h) c w -> b c h w\", b=b)\n\n return_attn = torch.stack([\n rearrange(attn_h, \"(b w) h1 h2 -> b h2 h1 w\", b=b),\n rearrange(attn_w, \"(b h) w1 w2 -> b w2 h w1\", b=b)\n ],\n dim=1)\n\n return self.gamma * (out_h + out_w) + x, return_attn\n\n\nclass RCCAModule(nn.Module):\n def __init__(self, in_channels, kernel_size=3, return_attn=False):\n super(RCCAModule, self).__init__()\n self.return_attn = return_attn\n inter_channels = in_channels // 4\n self.conv1 = nn.Sequential(\n nn.Conv2d(in_channels,\n inter_channels,\n kernel_size=(kernel_size, kernel_size),\n stride=(1, 1),\n padding=((kernel_size - 1) // 2, (kernel_size - 1) // 2),\n bias=False), nn.BatchNorm2d(inter_channels), nn.ReLU())\n self.cca = CrissCrossAttention(inter_channels)\n self.conv2 = nn.Sequential(\n nn.Conv2d(inter_channels,\n in_channels,\n kernel_size=(kernel_size, kernel_size),\n stride=(1, 1),\n padding=((kernel_size - 1) // 2, (kernel_size - 1) // 2),\n bias=False), nn.BatchNorm2d(in_channels), nn.ReLU())\n\n def forward(self, x):\n output = self.conv1(x)\n attns = []\n for _ in range(2):\n output, attn = checkpoint(self.cca, output)\n attns.append(attn)\n output = self.conv2(output)\n\n if self.return_attn:\n return output, attns\n else:\n return output\n"
] | [
[
"torch.nn.BatchNorm2d",
"torch.utils.checkpoint.checkpoint",
"torch.nn.Softmax",
"torch.nn.ReLU",
"torch.nn.Conv2d",
"torch.zeros",
"torch.bmm",
"torch.cat"
]
] |
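RCCAModule above applies criss-cross attention twice under gradient checkpointing and preserves the input shape. A minimal smoke test, assuming the package and its einops dependency are installed:

import torch
from deepab.resnets.CrissCrossResNet2D import RCCAModule

module = RCCAModule(in_channels=64)
# checkpoint() wants an input that tracks gradients
x = torch.randn(2, 64, 16, 16, requires_grad=True)
out = module(x)
print(out.shape)  # torch.Size([2, 64, 16, 16])
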
eugeniu1994/Update_CV | [
"562b646e02ffb374dae428a7b6f3ae1debecc997"
] | [
"stuff/scripts/stuff/PointCloudViz.py"
] | [
"from mpl_toolkits.mplot3d import Axes3D\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nskip = 15\n#source = '/home/eugen/catkin_ws/src/Camera_Lidar/DATA/pcd/0002.csv'\n#data = np.genfromtxt(source, delimiter=',')[1::skip,:3]\n#print ('data ', np.shape(data))\n\n#x,y,z = data[:,0],data[:,1],data[:,2]\n\n'''fig = plt.figure()\nax = fig.add_subplot(111, projection='3d')\n\n#ax.scatter(x, y, z, c=c, marker=m)\nax.scatter(x, y, z, s=0.01)\n\nax.set_xlabel('X Label')\nax.set_ylabel('Y Label')\nax.set_zlabel('Z Label')\n\nplt.show()'''\n\n#---------------------------------------------------------\ndef fit_plane_scipy(P=None):\n from skspatial.objects import Points, Plane\n from skspatial.plotting import plot_3d\n\n points = Points([[0, 0, 0], [1, 3, 5], [-5, 6, 3], [3, 6, 7], [-2, 6, 7]]) if P is None else Points(P)\n\n plane = Plane.best_fit(points)\n plot_3d(\n points.plotter(c='k', s=0.1, depthshade=False),\n plane.plotter(alpha=0.8, lims_x=(-5, 5), lims_y=(-5, 5)),\n )\n plt.show()\n\n#fit_plane_scipy(data)\n\n#---------------------------------------------------------\ndef fit_plane_1():\n import matplotlib.pyplot as plt\n from mpl_toolkits.mplot3d import Axes3D\n import numpy as np\n\n '''N_POINTS = 10\n TARGET_X_SLOPE = 2\n TARGET_y_SLOPE = 3\n TARGET_OFFSET = 5\n EXTENTS = 5\n NOISE = 5\n \n # create random data\n xs = [np.random.uniform(2*EXTENTS)-EXTENTS for i in range(N_POINTS)]\n ys = [np.random.uniform(2*EXTENTS)-EXTENTS for i in range(N_POINTS)]\n zs = []\n for i in range(N_POINTS):\n zs.append(xs[i]*TARGET_X_SLOPE + ys[i]*TARGET_y_SLOPE + TARGET_OFFSET + np.random.normal(scale=NOISE))'''\n\n xs,ys,zs = x,y,z\n\n # plot raw data\n plt.figure()\n ax = plt.subplot(111, projection='3d')\n ax.scatter(xs, ys, zs,s=0.05)\n\n # do fit\n tmp_A = []\n tmp_b = []\n for i in range(len(xs)):\n tmp_A.append([xs[i], ys[i], 1])\n tmp_b.append(zs[i])\n b = np.matrix(tmp_b).T\n A = np.matrix(tmp_A)\n\n # Manual solution\n fit = (A.T * A).I * A.T * b\n errors = b - A * fit\n residual = np.linalg.norm(errors)\n\n # Or use Scipy\n # from scipy.linalg import lstsq\n # fit, residual, rnk, s = lstsq(A, b)\n\n print(\"solution:\")\n print (\"%f x + %f y + %f = z\" % (fit[0], fit[1], fit[2]))\n print (\"errors:\")\n print (errors)\n print (\"residual:\")\n print (residual)\n\n # plot plane\n xlim = ax.get_xlim()\n ylim = ax.get_ylim()\n X,Y = np.meshgrid(np.arange(xlim[0], xlim[1]),\n np.arange(ylim[0], ylim[1]))\n Z = np.zeros(X.shape)\n for r in range(X.shape[0]):\n for c in range(X.shape[1]):\n Z[r,c] = fit[0] * X[r,c] + fit[1] * Y[r,c] + fit[2]\n ax.plot_wireframe(X,Y,Z, color='k')\n\n ax.set_xlabel('x')\n ax.set_ylabel('y')\n ax.set_zlabel('z')\n plt.show()\n#fit_plane_1()\n\ndef test():\n import matplotlib.pyplot as plt\n import numpy as np\n import pandas as pd\n import mpld3\n from mpld3 import plugins\n\n css = \"\"\"\n table\n {\n border-collapse: collapse;\n }\n th\n {\n color: #ffffff;\n background-color: #000000;\n }\n td\n {\n background-color: #cccccc;\n }\n table, th, td\n {\n font-family:Arial, Helvetica, sans-serif;\n border: 1px solid black;\n text-align: right;\n }\n \"\"\"\n\n fig, ax = plt.subplots()\n ax.grid(True, alpha=0.3)\n\n N = 50\n df = pd.DataFrame(index=range(N))\n df['x'] = np.random.randn(N)\n df['y'] = np.random.randn(N)\n df['z'] = np.random.randn(N)\n\n labels = []\n for i in range(N):\n #label = df.ix[[i], :].T\n label = df.iloc[[i], :].T\n label.columns = ['Row {0}'.format(i)]\n # .to_html() is unicode; so make leading 'u' go away with str()\n 
labels.append(str(label.to_html()))\n\n points = ax.plot(df.x, df.y, 'o', color='b',\n mec='k', ms=15, mew=1, alpha=.6)\n\n ax.set_xlabel('x')\n ax.set_ylabel('y')\n ax.set_title('HTML tooltips', size=20)\n\n tooltip = plugins.PointHTMLTooltip(points[0], labels, voffset=10, hoffset=10, css=css)\n plugins.connect(fig, tooltip)\n\n mpld3.show()\n#test()\n\ndef pick():\n import matplotlib.pyplot as plt, numpy as np\n from mpl_toolkits.mplot3d import proj3d\n\n def visualize3DData(X):\n fig = plt.figure(figsize=(16, 10))\n ax = fig.add_subplot(111, projection='3d')\n ax.scatter(X[:, 0], X[:, 1], X[:, 2], depthshade=False, s=2, picker=True)\n global idx\n def distance(point, event):\n assert point.shape == (3,), \"distance: point.shape is wrong: %s, must be (3,)\" % point.shape\n # Project 3d data space to 2d data space\n x2, y2, _ = proj3d.proj_transform(point[0], point[1], point[2], plt.gca().get_proj())\n # Convert 2d data space to 2d screen space\n x3, y3 = ax.transData.transform((x2, y2))\n\n return np.sqrt((x3 - event.x) ** 2 + (y3 - event.y) ** 2)\n\n def calcClosestDatapoint(X, event):\n distances = [distance(X[i, 0:3], event) for i in range(X.shape[0])]\n return np.argmin(distances)\n\n def annotatePlot(X, index):\n # If we have previously displayed another label, remove it first\n if hasattr(annotatePlot, 'label'):\n annotatePlot.label.remove()\n # Get data point from array of points X, at position index\n x2, y2, _ = proj3d.proj_transform(X[index, 0], X[index, 1], X[index, 2], ax.get_proj())\n annotatePlot.label = plt.annotate(\"Value %d\" % index,\n xy=(x2, y2), xytext=(-20, 20), textcoords='offset points', ha='right',\n va='bottom',\n bbox=dict(boxstyle='round,pad=0.5', fc='yellow', alpha=0.5),\n arrowprops=dict(arrowstyle='->', connectionstyle='arc3,rad=0'))\n fig.canvas.draw()\n\n def onMouseMotion(event):\n \"\"\"Event that is triggered when mouse is moved. 
Shows text annotation over data point closest to mouse.\"\"\"\n closestIndex = calcClosestDatapoint(X, event)\n annotatePlot(X, closestIndex)\n global idx\n idx = closestIndex\n\n # Pick points\n picked, corners = [], []\n def onpick(event):\n #ind = event.ind[0]\n #closestIndex = calcClosestDatapoint(X, event)\n #print('ind ', ind)\n global idx\n #print('closestIndex ',idx)\n ind = idx\n x, y, z = event.artist._offsets3d\n # Ignore if same point selected again\n if picked and (x[ind] == picked[-1][0] and y[ind] == picked[-1][1] and z[ind] == picked[-1][2]):\n return\n\n # Display picked point\n picked.append((x[ind], y[ind], z[ind]))\n corners.append((x[ind], y[ind], z[ind]))\n print('PCL: %s' % str(picked[-1]))\n\n if len(picked) > 1:\n # Draw the line\n temp = np.array(picked)\n ax.plot(temp[:, 0], temp[:, 1], temp[:, 2])\n ax.figure.canvas.draw_idle()\n # Reset list for future pick events\n del picked[0]\n\n # Display GUI\n fig.canvas.mpl_connect('pick_event', onpick)\n fig.canvas.mpl_connect('motion_notify_event', onMouseMotion) # on mouse motion\n plt.show()\n\n velodyne = '/home/eugen/catkin_ws/src/Camera_Lidar/scripts/pcl_frame.csv'\n points = np.genfromtxt(velodyne, delimiter=',')[1::skip, :3]\n points = np.asarray(points.tolist())\n print('points ', np.shape(points))\n\n # Select points within chessboard range\n inrange = np.where((points[:, 0] > 0) &\n (points[:, 0] < 2.5) &\n (np.abs(points[:, 1]) < 2.5) &\n (points[:, 2] < 2))\n points = points[inrange[0]]\n if points.shape[0] > 5:\n print('PCL points available: %d' % points.shape[0])\n else:\n print('Very few PCL points available in range')\n return\n\n X = np.random.random((30, 3))\n X = points\n visualize3DData(X)\n\n#pick()\n\ndef test2():\n import numpy as np\n import scipy.optimize\n\n from mpl_toolkits.mplot3d import Axes3D\n import matplotlib.pyplot as plt\n\n fig = plt.figure()\n ax = fig.add_subplot(111, projection='3d')\n\n def fitPlaneLTSQ(XYZ):\n (rows, cols) = XYZ.shape\n G = np.ones((rows, 3))\n G[:, 0] = XYZ[:, 0] # X\n G[:, 1] = XYZ[:, 1] # Y\n Z = XYZ[:, 2]\n (a, b, c), resid, rank, s = np.linalg.lstsq(G, Z, rcond=-1)\n normal = (a, b, -1)\n nn = np.linalg.norm(normal)\n normal = normal / nn\n return (c, normal)\n\n data = np.random.randn(100, 3) / 3\n data[:, 2] /= 10\n c, normal = fitPlaneLTSQ(data)\n\n # plot fitted plane\n maxx = np.max(data[:, 0])\n maxy = np.max(data[:, 1])\n minx = np.min(data[:, 0])\n miny = np.min(data[:, 1])\n\n point = np.array([0.0, 0.0, c])\n d = -point.dot(normal)\n\n # plot original points\n ax.scatter(data[:, 0], data[:, 1], data[:, 2])\n\n # compute needed points for plane plotting\n xx, yy = np.meshgrid([minx, maxx], [miny, maxy])\n z = (-normal[0] * xx - normal[1] * yy - d) * 1. / normal[2]\n\n # plot plane\n ax.plot_surface(xx, yy, z, alpha=0.2)\n\n ax.set_xlabel('x')\n ax.set_ylabel('y')\n ax.set_zlabel('z')\n plt.show()\n\ntest2()\n"
] | [
[
"numpy.ones",
"numpy.meshgrid",
"matplotlib.pyplot.figure",
"numpy.argmin",
"matplotlib.pyplot.gca",
"numpy.abs",
"numpy.zeros",
"matplotlib.pyplot.subplots",
"numpy.arange",
"numpy.max",
"numpy.min",
"numpy.array",
"numpy.linalg.norm",
"numpy.matrix",
"numpy.random.randn",
"numpy.random.random",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.show",
"numpy.shape",
"numpy.linalg.lstsq",
"numpy.sqrt",
"numpy.genfromtxt"
]
] |
sandbox/pandas | [
"fd5471208244ae1cb9cb426d6aa02ab408cfacba",
"fd5471208244ae1cb9cb426d6aa02ab408cfacba"
] | [
"pandas/tests/test_base.py",
"pandas/tests/plotting/test_boxplot_method.py"
] | [
"# -*- coding: utf-8 -*-\nfrom __future__ import print_function\n\nimport re\nimport sys\nfrom datetime import datetime, timedelta\n\nimport numpy as np\n\nimport pandas as pd\nimport pandas.compat as compat\nfrom pandas.types.common import (is_object_dtype, is_datetimetz,\n needs_i8_conversion)\nimport pandas.util.testing as tm\nfrom pandas import (Series, Index, DatetimeIndex, TimedeltaIndex, PeriodIndex,\n Timedelta)\nfrom pandas.compat import u, StringIO\nfrom pandas.compat.numpy import np_array_datetime64_compat\nfrom pandas.core.base import (FrozenList, FrozenNDArray, PandasDelegate,\n NoNewAttributesMixin)\nfrom pandas.tseries.base import DatetimeIndexOpsMixin\n\n\nclass CheckStringMixin(object):\n\n def test_string_methods_dont_fail(self):\n repr(self.container)\n str(self.container)\n bytes(self.container)\n if not compat.PY3:\n unicode(self.container) # noqa\n\n def test_tricky_container(self):\n if not hasattr(self, 'unicode_container'):\n raise nose.SkipTest('Need unicode_container to test with this')\n repr(self.unicode_container)\n str(self.unicode_container)\n bytes(self.unicode_container)\n if not compat.PY3:\n unicode(self.unicode_container) # noqa\n\n\nclass CheckImmutable(object):\n mutable_regex = re.compile('does not support mutable operations')\n\n def check_mutable_error(self, *args, **kwargs):\n # pass whatever functions you normally would to assertRaises (after the\n # Exception kind)\n tm.assertRaisesRegexp(TypeError, self.mutable_regex, *args, **kwargs)\n\n def test_no_mutable_funcs(self):\n def setitem():\n self.container[0] = 5\n\n self.check_mutable_error(setitem)\n\n def setslice():\n self.container[1:2] = 3\n\n self.check_mutable_error(setslice)\n\n def delitem():\n del self.container[0]\n\n self.check_mutable_error(delitem)\n\n def delslice():\n del self.container[0:3]\n\n self.check_mutable_error(delslice)\n mutable_methods = getattr(self, \"mutable_methods\", [])\n for meth in mutable_methods:\n self.check_mutable_error(getattr(self.container, meth))\n\n def test_slicing_maintains_type(self):\n result = self.container[1:2]\n expected = self.lst[1:2]\n self.check_result(result, expected)\n\n def check_result(self, result, expected, klass=None):\n klass = klass or self.klass\n self.assertIsInstance(result, klass)\n self.assertEqual(result, expected)\n\n\nclass TestFrozenList(CheckImmutable, CheckStringMixin, tm.TestCase):\n mutable_methods = ('extend', 'pop', 'remove', 'insert')\n unicode_container = FrozenList([u(\"\\u05d0\"), u(\"\\u05d1\"), \"c\"])\n\n def setUp(self):\n self.lst = [1, 2, 3, 4, 5]\n self.container = FrozenList(self.lst)\n self.klass = FrozenList\n\n def test_add(self):\n result = self.container + (1, 2, 3)\n expected = FrozenList(self.lst + [1, 2, 3])\n self.check_result(result, expected)\n\n result = (1, 2, 3) + self.container\n expected = FrozenList([1, 2, 3] + self.lst)\n self.check_result(result, expected)\n\n def test_inplace(self):\n q = r = self.container\n q += [5]\n self.check_result(q, self.lst + [5])\n # other shouldn't be mutated\n self.check_result(r, self.lst)\n\n\nclass TestFrozenNDArray(CheckImmutable, CheckStringMixin, tm.TestCase):\n mutable_methods = ('put', 'itemset', 'fill')\n unicode_container = FrozenNDArray([u(\"\\u05d0\"), u(\"\\u05d1\"), \"c\"])\n\n def setUp(self):\n self.lst = [3, 5, 7, -2]\n self.container = FrozenNDArray(self.lst)\n self.klass = FrozenNDArray\n\n def test_shallow_copying(self):\n original = self.container.copy()\n self.assertIsInstance(self.container.view(), FrozenNDArray)\n 
self.assertFalse(isinstance(\n self.container.view(np.ndarray), FrozenNDArray))\n self.assertIsNot(self.container.view(), self.container)\n self.assert_numpy_array_equal(self.container, original)\n # shallow copy should be the same too\n self.assertIsInstance(self.container._shallow_copy(), FrozenNDArray)\n\n # setting should not be allowed\n def testit(container):\n container[0] = 16\n\n self.check_mutable_error(testit, self.container)\n\n def test_values(self):\n original = self.container.view(np.ndarray).copy()\n n = original[0] + 15\n vals = self.container.values()\n self.assert_numpy_array_equal(original, vals)\n self.assertIsNot(original, vals)\n vals[0] = n\n self.assertIsInstance(self.container, pd.core.base.FrozenNDArray)\n self.assert_numpy_array_equal(self.container.values(), original)\n self.assertEqual(vals[0], n)\n\n\nclass TestPandasDelegate(tm.TestCase):\n\n class Delegator(object):\n _properties = ['foo']\n _methods = ['bar']\n\n def _set_foo(self, value):\n self.foo = value\n\n def _get_foo(self):\n return self.foo\n\n foo = property(_get_foo, _set_foo, doc=\"foo property\")\n\n def bar(self, *args, **kwargs):\n \"\"\" a test bar method \"\"\"\n pass\n\n class Delegate(PandasDelegate):\n\n def __init__(self, obj):\n self.obj = obj\n\n def setUp(self):\n pass\n\n def test_invalid_delegation(self):\n # these show that in order for the delegation to work\n # the _delegate_* methods need to be overridden to not raise a TypeError\n\n self.Delegate._add_delegate_accessors(\n delegate=self.Delegator,\n accessors=self.Delegator._properties,\n typ='property'\n )\n self.Delegate._add_delegate_accessors(\n delegate=self.Delegator,\n accessors=self.Delegator._methods,\n typ='method'\n )\n\n delegate = self.Delegate(self.Delegator())\n\n def f():\n delegate.foo\n\n self.assertRaises(TypeError, f)\n\n def f():\n delegate.foo = 5\n\n self.assertRaises(TypeError, f)\n\n def f():\n delegate.foo()\n\n self.assertRaises(TypeError, f)\n\n def test_memory_usage(self):\n # Delegate does not implement memory_usage.\n # Check that we fall back to in-built `__sizeof__`\n # GH 12924\n delegate = self.Delegate(self.Delegator())\n sys.getsizeof(delegate)\n\n\nclass Ops(tm.TestCase):\n\n def _allow_na_ops(self, obj):\n \"\"\"Whether to skip test cases including NaN\"\"\"\n if (isinstance(obj, Index) and\n (obj.is_boolean() or not obj._can_hold_na)):\n # don't test boolean / int64 index\n return False\n return True\n\n def setUp(self):\n self.bool_index = tm.makeBoolIndex(10, name='a')\n self.int_index = tm.makeIntIndex(10, name='a')\n self.float_index = tm.makeFloatIndex(10, name='a')\n self.dt_index = tm.makeDateIndex(10, name='a')\n self.dt_tz_index = tm.makeDateIndex(10, name='a').tz_localize(\n tz='US/Eastern')\n self.period_index = tm.makePeriodIndex(10, name='a')\n self.string_index = tm.makeStringIndex(10, name='a')\n self.unicode_index = tm.makeUnicodeIndex(10, name='a')\n\n arr = np.random.randn(10)\n self.int_series = Series(arr, index=self.int_index, name='a')\n self.float_series = Series(arr, index=self.float_index, name='a')\n self.dt_series = Series(arr, index=self.dt_index, name='a')\n self.dt_tz_series = self.dt_tz_index.to_series(keep_tz=True)\n self.period_series = Series(arr, index=self.period_index, name='a')\n self.string_series = Series(arr, index=self.string_index, name='a')\n\n types = ['bool', 'int', 'float', 'dt', 'dt_tz', 'period', 'string',\n 'unicode']\n fmts = [\"{0}_{1}\".format(t, f)\n for t in types for f in ['index', 'series']]\n self.objs = [getattr(self, f)\n for f 
in fmts if getattr(self, f, None) is not None]\n\n def check_ops_properties(self, props, filter=None, ignore_failures=False):\n for op in props:\n for o in self.is_valid_objs:\n\n # if a filter, skip if it doesn't match\n if filter is not None:\n filt = o.index if isinstance(o, Series) else o\n if not filter(filt):\n continue\n\n try:\n if isinstance(o, Series):\n expected = Series(\n getattr(o.index, op), index=o.index, name='a')\n else:\n expected = getattr(o, op)\n except (AttributeError):\n if ignore_failures:\n continue\n\n result = getattr(o, op)\n\n # these could be series, arrays or scalars\n if isinstance(result, Series) and isinstance(expected, Series):\n tm.assert_series_equal(result, expected)\n elif isinstance(result, Index) and isinstance(expected, Index):\n tm.assert_index_equal(result, expected)\n elif isinstance(result, np.ndarray) and isinstance(expected,\n np.ndarray):\n self.assert_numpy_array_equal(result, expected)\n else:\n self.assertEqual(result, expected)\n\n # freq raises AttributeError on an Int64Index because it's not\n # defined; we mostly care about Series here anyhow\n if not ignore_failures:\n for o in self.not_valid_objs:\n\n # an object that is datetimelike will raise a TypeError,\n # otherwise an AttributeError\n if issubclass(type(o), DatetimeIndexOpsMixin):\n self.assertRaises(TypeError, lambda: getattr(o, op))\n else:\n self.assertRaises(AttributeError,\n lambda: getattr(o, op))\n\n def test_binary_ops_docs(self):\n from pandas import DataFrame, Panel\n op_map = {'add': '+',\n 'sub': '-',\n 'mul': '*',\n 'mod': '%',\n 'pow': '**',\n 'truediv': '/',\n 'floordiv': '//'}\n for op_name in ['add', 'sub', 'mul', 'mod', 'pow', 'truediv',\n 'floordiv']:\n for klass in [Series, DataFrame, Panel]:\n operand1 = klass.__name__.lower()\n operand2 = 'other'\n op = op_map[op_name]\n expected_str = ' '.join([operand1, op, operand2])\n self.assertTrue(expected_str in getattr(klass,\n op_name).__doc__)\n\n # reverse version of the binary ops\n expected_str = ' '.join([operand2, op, operand1])\n self.assertTrue(expected_str in getattr(klass, 'r' +\n op_name).__doc__)\n\n\nclass TestIndexOps(Ops):\n\n def setUp(self):\n super(TestIndexOps, self).setUp()\n self.is_valid_objs = [o for o in self.objs if o._allow_index_ops]\n self.not_valid_objs = [o for o in self.objs if not o._allow_index_ops]\n\n def test_none_comparison(self):\n\n # bug brought up by #1079\n # changed from TypeError in 0.17.0\n for o in self.is_valid_objs:\n if isinstance(o, Series):\n\n o[0] = np.nan\n\n # noinspection PyComparisonWithNone\n result = o == None # noqa\n self.assertFalse(result.iat[0])\n self.assertFalse(result.iat[1])\n\n # noinspection PyComparisonWithNone\n result = o != None # noqa\n self.assertTrue(result.iat[0])\n self.assertTrue(result.iat[1])\n\n result = None == o # noqa\n self.assertFalse(result.iat[0])\n self.assertFalse(result.iat[1])\n\n # this fails for numpy < 1.9\n # and oddly for *some* platforms\n # result = None != o # noqa\n # self.assertTrue(result.iat[0])\n # self.assertTrue(result.iat[1])\n\n result = None > o\n self.assertFalse(result.iat[0])\n self.assertFalse(result.iat[1])\n\n result = o < None\n self.assertFalse(result.iat[0])\n self.assertFalse(result.iat[1])\n\n def test_ndarray_compat_properties(self):\n\n for o in self.objs:\n\n # check that we work\n for p in ['shape', 'dtype', 'flags', 'T', 'strides', 'itemsize',\n 'nbytes']:\n self.assertIsNotNone(getattr(o, p, None))\n self.assertTrue(hasattr(o, 'base'))\n\n # if we have a datetimelike dtype then needs 
a view to work\n # but the user is responsible for that\n try:\n self.assertIsNotNone(o.data)\n except ValueError:\n pass\n\n self.assertRaises(ValueError, o.item) # len > 1\n self.assertEqual(o.ndim, 1)\n self.assertEqual(o.size, len(o))\n\n self.assertEqual(Index([1]).item(), 1)\n self.assertEqual(Series([1]).item(), 1)\n\n def test_ops(self):\n for op in ['max', 'min']:\n for o in self.objs:\n result = getattr(o, op)()\n if not isinstance(o, PeriodIndex):\n expected = getattr(o.values, op)()\n else:\n expected = pd.Period(ordinal=getattr(o._values, op)(),\n freq=o.freq)\n try:\n self.assertEqual(result, expected)\n except TypeError:\n # comparing tz-aware series with np.array results in\n # TypeError\n expected = expected.astype('M8[ns]').astype('int64')\n self.assertEqual(result.value, expected)\n\n def test_nanops(self):\n # GH 7261\n for op in ['max', 'min']:\n for klass in [Index, Series]:\n\n obj = klass([np.nan, 2.0])\n self.assertEqual(getattr(obj, op)(), 2.0)\n\n obj = klass([np.nan])\n self.assertTrue(pd.isnull(getattr(obj, op)()))\n\n obj = klass([])\n self.assertTrue(pd.isnull(getattr(obj, op)()))\n\n obj = klass([pd.NaT, datetime(2011, 11, 1)])\n # check DatetimeIndex monotonic path\n self.assertEqual(getattr(obj, op)(), datetime(2011, 11, 1))\n\n obj = klass([pd.NaT, datetime(2011, 11, 1), pd.NaT])\n # check DatetimeIndex non-monotonic path\n self.assertEqual(getattr(obj, op)(), datetime(2011, 11, 1))\n\n # argmin/max\n obj = Index(np.arange(5, dtype='int64'))\n self.assertEqual(obj.argmin(), 0)\n self.assertEqual(obj.argmax(), 4)\n\n obj = Index([np.nan, 1, np.nan, 2])\n self.assertEqual(obj.argmin(), 1)\n self.assertEqual(obj.argmax(), 3)\n\n obj = Index([np.nan])\n self.assertEqual(obj.argmin(), -1)\n self.assertEqual(obj.argmax(), -1)\n\n obj = Index([pd.NaT, datetime(2011, 11, 1), datetime(2011, 11, 2),\n pd.NaT])\n self.assertEqual(obj.argmin(), 1)\n self.assertEqual(obj.argmax(), 2)\n\n obj = Index([pd.NaT])\n self.assertEqual(obj.argmin(), -1)\n self.assertEqual(obj.argmax(), -1)\n\n def test_value_counts_unique_nunique(self):\n for orig in self.objs:\n o = orig.copy()\n klass = type(o)\n values = o._values\n\n if isinstance(values, Index):\n # reset name not to affect latter process\n values.name = None\n\n # create repeated values, 'n'th element is repeated by n+1 times\n # skip boolean, because it only has 2 values at most\n if isinstance(o, Index) and o.is_boolean():\n continue\n elif isinstance(o, Index):\n expected_index = pd.Index(o[::-1])\n expected_index.name = None\n o = o.repeat(range(1, len(o) + 1))\n o.name = 'a'\n else:\n expected_index = pd.Index(values[::-1])\n idx = o.index.repeat(range(1, len(o) + 1))\n rep = np.repeat(values, range(1, len(o) + 1))\n o = klass(rep, index=idx, name='a')\n\n # check values has the same dtype as the original\n self.assertEqual(o.dtype, orig.dtype)\n\n expected_s = Series(range(10, 0, -1), index=expected_index,\n dtype='int64', name='a')\n\n result = o.value_counts()\n tm.assert_series_equal(result, expected_s)\n self.assertTrue(result.index.name is None)\n self.assertEqual(result.name, 'a')\n\n result = o.unique()\n if isinstance(o, Index):\n self.assertTrue(isinstance(result, o.__class__))\n self.assert_index_equal(result, orig)\n elif is_datetimetz(o):\n # datetimetz Series returns array of Timestamp\n self.assertEqual(result[0], orig[0])\n for r in result:\n self.assertIsInstance(r, pd.Timestamp)\n tm.assert_numpy_array_equal(result,\n orig._values.asobject.values)\n else:\n tm.assert_numpy_array_equal(result, 
orig.values)\n\n self.assertEqual(o.nunique(), len(np.unique(o.values)))\n\n def test_value_counts_unique_nunique_null(self):\n\n for null_obj in [np.nan, None]:\n for orig in self.objs:\n o = orig.copy()\n klass = type(o)\n values = o._values\n\n if not self._allow_na_ops(o):\n continue\n\n # special assign to the numpy array\n if is_datetimetz(o):\n if isinstance(o, DatetimeIndex):\n v = o.asi8\n v[0:2] = pd.tslib.iNaT\n values = o._shallow_copy(v)\n else:\n o = o.copy()\n o[0:2] = pd.tslib.iNaT\n values = o._values\n\n elif needs_i8_conversion(o):\n values[0:2] = pd.tslib.iNaT\n values = o._shallow_copy(values)\n else:\n values[0:2] = null_obj\n # check values has the same dtype as the original\n\n self.assertEqual(values.dtype, o.dtype)\n\n # create repeated values, 'n'th element is repeated by n+1\n # times\n if isinstance(o, (DatetimeIndex, PeriodIndex)):\n expected_index = o.copy()\n expected_index.name = None\n\n # attach name to klass\n o = klass(values.repeat(range(1, len(o) + 1)))\n o.name = 'a'\n else:\n if is_datetimetz(o):\n expected_index = orig._values._shallow_copy(values)\n else:\n expected_index = pd.Index(values)\n expected_index.name = None\n o = o.repeat(range(1, len(o) + 1))\n o.name = 'a'\n\n # check values has the same dtype as the original\n self.assertEqual(o.dtype, orig.dtype)\n # check values correctly have NaN\n nanloc = np.zeros(len(o), dtype=np.bool)\n nanloc[:3] = True\n if isinstance(o, Index):\n self.assert_numpy_array_equal(pd.isnull(o), nanloc)\n else:\n exp = pd.Series(nanloc, o.index, name='a')\n self.assert_series_equal(pd.isnull(o), exp)\n\n expected_s_na = Series(list(range(10, 2, -1)) + [3],\n index=expected_index[9:0:-1],\n dtype='int64', name='a')\n expected_s = Series(list(range(10, 2, -1)),\n index=expected_index[9:1:-1],\n dtype='int64', name='a')\n\n result_s_na = o.value_counts(dropna=False)\n tm.assert_series_equal(result_s_na, expected_s_na)\n self.assertTrue(result_s_na.index.name is None)\n self.assertEqual(result_s_na.name, 'a')\n result_s = o.value_counts()\n tm.assert_series_equal(o.value_counts(), expected_s)\n self.assertTrue(result_s.index.name is None)\n self.assertEqual(result_s.name, 'a')\n\n result = o.unique()\n if isinstance(o, Index):\n tm.assert_index_equal(result,\n Index(values[1:], name='a'))\n elif is_datetimetz(o):\n # unable to compare NaT / nan\n tm.assert_numpy_array_equal(result[1:],\n values[2:].asobject.values)\n self.assertIs(result[0], pd.NaT)\n else:\n tm.assert_numpy_array_equal(result[1:], values[2:])\n\n self.assertTrue(pd.isnull(result[0]))\n self.assertEqual(result.dtype, orig.dtype)\n\n self.assertEqual(o.nunique(), 8)\n self.assertEqual(o.nunique(dropna=False), 9)\n\n def test_value_counts_inferred(self):\n klasses = [Index, Series]\n for klass in klasses:\n s_values = ['a', 'b', 'b', 'b', 'b', 'c', 'd', 'd', 'a', 'a']\n s = klass(s_values)\n expected = Series([4, 3, 2, 1], index=['b', 'a', 'd', 'c'])\n tm.assert_series_equal(s.value_counts(), expected)\n\n if isinstance(s, Index):\n exp = Index(np.unique(np.array(s_values, dtype=np.object_)))\n tm.assert_index_equal(s.unique(), exp)\n else:\n exp = np.unique(np.array(s_values, dtype=np.object_))\n tm.assert_numpy_array_equal(s.unique(), exp)\n\n self.assertEqual(s.nunique(), 4)\n # don't sort, have to sort after the fact as not sorting is\n # platform-dep\n hist = s.value_counts(sort=False).sort_values()\n expected = Series([3, 1, 4, 2], index=list('acbd')).sort_values()\n tm.assert_series_equal(hist, expected)\n\n # sort ascending\n hist = 
s.value_counts(ascending=True)\n expected = Series([1, 2, 3, 4], index=list('cdab'))\n tm.assert_series_equal(hist, expected)\n\n # relative histogram.\n hist = s.value_counts(normalize=True)\n expected = Series([.4, .3, .2, .1], index=['b', 'a', 'd', 'c'])\n tm.assert_series_equal(hist, expected)\n\n def test_value_counts_bins(self):\n klasses = [Index, Series]\n for klass in klasses:\n s_values = ['a', 'b', 'b', 'b', 'b', 'c', 'd', 'd', 'a', 'a']\n s = klass(s_values)\n\n # bins\n self.assertRaises(TypeError,\n lambda bins: s.value_counts(bins=bins), 1)\n\n s1 = Series([1, 1, 2, 3])\n res1 = s1.value_counts(bins=1)\n exp1 = Series({0.998: 4})\n tm.assert_series_equal(res1, exp1)\n res1n = s1.value_counts(bins=1, normalize=True)\n exp1n = Series({0.998: 1.0})\n tm.assert_series_equal(res1n, exp1n)\n\n if isinstance(s1, Index):\n tm.assert_index_equal(s1.unique(), Index([1, 2, 3]))\n else:\n exp = np.array([1, 2, 3], dtype=np.int64)\n tm.assert_numpy_array_equal(s1.unique(), exp)\n\n self.assertEqual(s1.nunique(), 3)\n\n res4 = s1.value_counts(bins=4)\n exp4 = Series({0.998: 2,\n 1.5: 1,\n 2.0: 0,\n 2.5: 1}, index=[0.998, 2.5, 1.5, 2.0])\n tm.assert_series_equal(res4, exp4)\n res4n = s1.value_counts(bins=4, normalize=True)\n exp4n = Series(\n {0.998: 0.5,\n 1.5: 0.25,\n 2.0: 0.0,\n 2.5: 0.25}, index=[0.998, 2.5, 1.5, 2.0])\n tm.assert_series_equal(res4n, exp4n)\n\n # handle NA's properly\n s_values = ['a', 'b', 'b', 'b', np.nan, np.nan,\n 'd', 'd', 'a', 'a', 'b']\n s = klass(s_values)\n expected = Series([4, 3, 2], index=['b', 'a', 'd'])\n tm.assert_series_equal(s.value_counts(), expected)\n\n if isinstance(s, Index):\n exp = Index(['a', 'b', np.nan, 'd'])\n tm.assert_index_equal(s.unique(), exp)\n else:\n exp = np.array(['a', 'b', np.nan, 'd'], dtype=object)\n tm.assert_numpy_array_equal(s.unique(), exp)\n self.assertEqual(s.nunique(), 3)\n\n s = klass({})\n expected = Series([], dtype=np.int64)\n tm.assert_series_equal(s.value_counts(), expected,\n check_index_type=False)\n # returned dtype differs depending on original\n if isinstance(s, Index):\n self.assert_index_equal(s.unique(), Index([]),\n exact=False)\n else:\n self.assert_numpy_array_equal(s.unique(), np.array([]),\n check_dtype=False)\n\n self.assertEqual(s.nunique(), 0)\n\n def test_value_counts_datetime64(self):\n klasses = [Index, Series]\n for klass in klasses:\n # GH 3002, datetime64[ns]\n # don't test names though\n txt = \"\\n\".join(['xxyyzz20100101PIE', 'xxyyzz20100101GUM',\n 'xxyyzz20100101EGG', 'xxyyww20090101EGG',\n 'foofoo20080909PIE', 'foofoo20080909GUM'])\n f = StringIO(txt)\n df = pd.read_fwf(f, widths=[6, 8, 3],\n names=[\"person_id\", \"dt\", \"food\"],\n parse_dates=[\"dt\"])\n\n s = klass(df['dt'].copy())\n s.name = None\n\n idx = pd.to_datetime(['2010-01-01 00:00:00Z',\n '2008-09-09 00:00:00Z',\n '2009-01-01 00:00:00X'])\n expected_s = Series([3, 2, 1], index=idx)\n tm.assert_series_equal(s.value_counts(), expected_s)\n\n expected = np_array_datetime64_compat(['2010-01-01 00:00:00Z',\n '2009-01-01 00:00:00Z',\n '2008-09-09 00:00:00Z'],\n dtype='datetime64[ns]')\n if isinstance(s, Index):\n tm.assert_index_equal(s.unique(), DatetimeIndex(expected))\n else:\n tm.assert_numpy_array_equal(s.unique(), expected)\n\n self.assertEqual(s.nunique(), 3)\n\n # with NaT\n s = df['dt'].copy()\n s = klass([v for v in s.values] + [pd.NaT])\n\n result = s.value_counts()\n self.assertEqual(result.index.dtype, 'datetime64[ns]')\n tm.assert_series_equal(result, expected_s)\n\n result = s.value_counts(dropna=False)\n 
expected_s[pd.NaT] = 1\n tm.assert_series_equal(result, expected_s)\n\n unique = s.unique()\n self.assertEqual(unique.dtype, 'datetime64[ns]')\n\n # numpy_array_equal cannot compare pd.NaT\n if isinstance(s, Index):\n exp_idx = DatetimeIndex(expected.tolist() + [pd.NaT])\n tm.assert_index_equal(unique, exp_idx)\n else:\n tm.assert_numpy_array_equal(unique[:3], expected)\n self.assertTrue(pd.isnull(unique[3]))\n\n self.assertEqual(s.nunique(), 3)\n self.assertEqual(s.nunique(dropna=False), 4)\n\n # timedelta64[ns]\n td = df.dt - df.dt + timedelta(1)\n td = klass(td, name='dt')\n\n result = td.value_counts()\n expected_s = Series([6], index=[Timedelta('1day')], name='dt')\n tm.assert_series_equal(result, expected_s)\n\n expected = TimedeltaIndex(['1 days'], name='dt')\n if isinstance(td, Index):\n tm.assert_index_equal(td.unique(), expected)\n else:\n tm.assert_numpy_array_equal(td.unique(), expected.values)\n\n td2 = timedelta(1) + (df.dt - df.dt)\n td2 = klass(td2, name='dt')\n result2 = td2.value_counts()\n tm.assert_series_equal(result2, expected_s)\n\n def test_factorize(self):\n for orig in self.objs:\n o = orig.copy()\n\n if isinstance(o, Index) and o.is_boolean():\n exp_arr = np.array([0, 1] + [0] * 8, dtype=np.intp)\n exp_uniques = o\n exp_uniques = Index([False, True])\n else:\n exp_arr = np.array(range(len(o)), dtype=np.intp)\n exp_uniques = o\n labels, uniques = o.factorize()\n\n self.assert_numpy_array_equal(labels, exp_arr)\n if isinstance(o, Series):\n self.assert_index_equal(uniques, Index(orig),\n check_names=False)\n else:\n # factorize explicitly resets name\n self.assert_index_equal(uniques, exp_uniques,\n check_names=False)\n\n def test_factorize_repeated(self):\n for orig in self.objs:\n o = orig.copy()\n\n # don't test boolean\n if isinstance(o, Index) and o.is_boolean():\n continue\n\n # sort by value, and create duplicates\n if isinstance(o, Series):\n o = o.sort_values()\n n = o.iloc[5:].append(o)\n else:\n indexer = o.argsort()\n o = o.take(indexer)\n n = o[5:].append(o)\n\n exp_arr = np.array([5, 6, 7, 8, 9, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9],\n dtype=np.intp)\n labels, uniques = n.factorize(sort=True)\n\n self.assert_numpy_array_equal(labels, exp_arr)\n if isinstance(o, Series):\n self.assert_index_equal(uniques, Index(orig).sort_values(),\n check_names=False)\n else:\n self.assert_index_equal(uniques, o, check_names=False)\n\n exp_arr = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 1, 2, 3, 4],\n np.intp)\n labels, uniques = n.factorize(sort=False)\n self.assert_numpy_array_equal(labels, exp_arr)\n\n if isinstance(o, Series):\n expected = Index(o.iloc[5:10].append(o.iloc[:5]))\n self.assert_index_equal(uniques, expected, check_names=False)\n else:\n expected = o[5:10].append(o[:5])\n self.assert_index_equal(uniques, expected, check_names=False)\n\n def test_duplicated_drop_duplicates_index(self):\n # GH 4060\n for original in self.objs:\n if isinstance(original, Index):\n\n # special case\n if original.is_boolean():\n result = original.drop_duplicates()\n expected = Index([False, True], name='a')\n tm.assert_index_equal(result, expected)\n continue\n\n # original doesn't have duplicates\n expected = np.array([False] * len(original), dtype=bool)\n duplicated = original.duplicated()\n tm.assert_numpy_array_equal(duplicated, expected)\n self.assertTrue(duplicated.dtype == bool)\n result = original.drop_duplicates()\n tm.assert_index_equal(result, original)\n self.assertFalse(result is original)\n\n # has_duplicates\n self.assertFalse(original.has_duplicates)\n\n # create 
repeated values, 3rd and 5th values are duplicated\n idx = original[list(range(len(original))) + [5, 3]]\n expected = np.array([False] * len(original) + [True, True],\n dtype=bool)\n duplicated = idx.duplicated()\n tm.assert_numpy_array_equal(duplicated, expected)\n self.assertTrue(duplicated.dtype == bool)\n tm.assert_index_equal(idx.drop_duplicates(), original)\n\n base = [False] * len(idx)\n base[3] = True\n base[5] = True\n expected = np.array(base)\n\n duplicated = idx.duplicated(keep='last')\n tm.assert_numpy_array_equal(duplicated, expected)\n self.assertTrue(duplicated.dtype == bool)\n result = idx.drop_duplicates(keep='last')\n tm.assert_index_equal(result, idx[~expected])\n\n # deprecate take_last\n with tm.assert_produces_warning(FutureWarning):\n duplicated = idx.duplicated(take_last=True)\n tm.assert_numpy_array_equal(duplicated, expected)\n self.assertTrue(duplicated.dtype == bool)\n with tm.assert_produces_warning(FutureWarning):\n result = idx.drop_duplicates(take_last=True)\n tm.assert_index_equal(result, idx[~expected])\n\n base = [False] * len(original) + [True, True]\n base[3] = True\n base[5] = True\n expected = np.array(base)\n\n duplicated = idx.duplicated(keep=False)\n tm.assert_numpy_array_equal(duplicated, expected)\n self.assertTrue(duplicated.dtype == bool)\n result = idx.drop_duplicates(keep=False)\n tm.assert_index_equal(result, idx[~expected])\n\n with tm.assertRaisesRegexp(\n TypeError, r\"drop_duplicates\\(\\) got an unexpected \"\n \"keyword argument\"):\n idx.drop_duplicates(inplace=True)\n\n else:\n expected = Series([False] * len(original),\n index=original.index, name='a')\n tm.assert_series_equal(original.duplicated(), expected)\n result = original.drop_duplicates()\n tm.assert_series_equal(result, original)\n self.assertFalse(result is original)\n\n idx = original.index[list(range(len(original))) + [5, 3]]\n values = original._values[list(range(len(original))) + [5, 3]]\n s = Series(values, index=idx, name='a')\n\n expected = Series([False] * len(original) + [True, True],\n index=idx, name='a')\n tm.assert_series_equal(s.duplicated(), expected)\n tm.assert_series_equal(s.drop_duplicates(), original)\n\n base = [False] * len(idx)\n base[3] = True\n base[5] = True\n expected = Series(base, index=idx, name='a')\n\n tm.assert_series_equal(s.duplicated(keep='last'), expected)\n tm.assert_series_equal(s.drop_duplicates(keep='last'),\n s[~np.array(base)])\n\n # deprecate take_last\n with tm.assert_produces_warning(FutureWarning):\n tm.assert_series_equal(\n s.duplicated(take_last=True), expected)\n with tm.assert_produces_warning(FutureWarning):\n tm.assert_series_equal(s.drop_duplicates(take_last=True),\n s[~np.array(base)])\n base = [False] * len(original) + [True, True]\n base[3] = True\n base[5] = True\n expected = Series(base, index=idx, name='a')\n\n tm.assert_series_equal(s.duplicated(keep=False), expected)\n tm.assert_series_equal(s.drop_duplicates(keep=False),\n s[~np.array(base)])\n\n s.drop_duplicates(inplace=True)\n tm.assert_series_equal(s, original)\n\n def test_drop_duplicates_series_vs_dataframe(self):\n # GH 14192\n df = pd.DataFrame({'a': [1, 1, 1, 'one', 'one'],\n 'b': [2, 2, np.nan, np.nan, np.nan],\n 'c': [3, 3, np.nan, np.nan, 'three'],\n 'd': [1, 2, 3, 4, 4],\n 'e': [datetime(2015, 1, 1), datetime(2015, 1, 1),\n datetime(2015, 2, 1), pd.NaT, pd.NaT]\n })\n for column in df.columns:\n for keep in ['first', 'last', False]:\n dropped_frame = df[[column]].drop_duplicates(keep=keep)\n dropped_series = 
df[column].drop_duplicates(keep=keep)\n tm.assert_frame_equal(dropped_frame, dropped_series.to_frame())\n\n def test_fillna(self):\n # # GH 11343\n # though Index.fillna and Series.fillna has separate impl,\n # test here to confirm these works as the same\n\n for orig in self.objs:\n\n o = orig.copy()\n values = o.values\n\n # values will not be changed\n result = o.fillna(o.astype(object).values[0])\n if isinstance(o, Index):\n self.assert_index_equal(o, result)\n else:\n self.assert_series_equal(o, result)\n # check shallow_copied\n self.assertFalse(o is result)\n\n for null_obj in [np.nan, None]:\n for orig in self.objs:\n o = orig.copy()\n klass = type(o)\n\n if not self._allow_na_ops(o):\n continue\n\n if needs_i8_conversion(o):\n\n values = o.astype(object).values\n fill_value = values[0]\n values[0:2] = pd.NaT\n else:\n values = o.values.copy()\n fill_value = o.values[0]\n values[0:2] = null_obj\n\n expected = [fill_value] * 2 + list(values[2:])\n\n expected = klass(expected)\n o = klass(values)\n\n # check values has the same dtype as the original\n self.assertEqual(o.dtype, orig.dtype)\n\n result = o.fillna(fill_value)\n if isinstance(o, Index):\n self.assert_index_equal(result, expected)\n else:\n self.assert_series_equal(result, expected)\n # check shallow_copied\n self.assertFalse(o is result)\n\n def test_memory_usage(self):\n for o in self.objs:\n res = o.memory_usage()\n res_deep = o.memory_usage(deep=True)\n\n if (is_object_dtype(o) or (isinstance(o, Series) and\n is_object_dtype(o.index))):\n # if there are objects, only deep will pick them up\n self.assertTrue(res_deep > res)\n else:\n self.assertEqual(res, res_deep)\n\n if isinstance(o, Series):\n self.assertEqual(\n (o.memory_usage(index=False) +\n o.index.memory_usage()),\n o.memory_usage(index=True)\n )\n\n # sys.getsizeof will call the .memory_usage with\n # deep=True, and add on some GC overhead\n diff = res_deep - sys.getsizeof(o)\n self.assertTrue(abs(diff) < 100)\n\n def test_searchsorted(self):\n # See gh-12238\n for o in self.objs:\n index = np.searchsorted(o, max(o))\n self.assertTrue(0 <= index <= len(o))\n\n index = np.searchsorted(o, max(o), sorter=range(len(o)))\n self.assertTrue(0 <= index <= len(o))\n\n def test_validate_bool_args(self):\n invalid_values = [1, \"True\", [1, 2, 3], 5.0]\n\n for value in invalid_values:\n with self.assertRaises(ValueError):\n self.int_series.drop_duplicates(inplace=value)\n\n\nclass TestTranspose(Ops):\n errmsg = \"the 'axes' parameter is not supported\"\n\n def test_transpose(self):\n for obj in self.objs:\n if isinstance(obj, Index):\n tm.assert_index_equal(obj.transpose(), obj)\n else:\n tm.assert_series_equal(obj.transpose(), obj)\n\n def test_transpose_non_default_axes(self):\n for obj in self.objs:\n tm.assertRaisesRegexp(ValueError, self.errmsg,\n obj.transpose, 1)\n tm.assertRaisesRegexp(ValueError, self.errmsg,\n obj.transpose, axes=1)\n\n def test_numpy_transpose(self):\n for obj in self.objs:\n if isinstance(obj, Index):\n tm.assert_index_equal(np.transpose(obj), obj)\n else:\n tm.assert_series_equal(np.transpose(obj), obj)\n\n tm.assertRaisesRegexp(ValueError, self.errmsg,\n np.transpose, obj, axes=1)\n\n\nclass TestNoNewAttributesMixin(tm.TestCase):\n\n def test_mixin(self):\n class T(NoNewAttributesMixin):\n pass\n\n t = T()\n self.assertFalse(hasattr(t, \"__frozen\"))\n t.a = \"test\"\n self.assertEqual(t.a, \"test\")\n t._freeze()\n # self.assertTrue(\"__frozen\" not in dir(t))\n self.assertIs(getattr(t, \"__frozen\"), True)\n\n def f():\n t.b = 
\"test\"\n\n self.assertRaises(AttributeError, f)\n self.assertFalse(hasattr(t, \"b\"))\n\n\nif __name__ == '__main__':\n import nose\n\n nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],\n # '--with-coverage', '--cover-package=pandas.core'],\n exit=False)\n",
"# coding: utf-8\n\nimport nose\nimport itertools\nimport string\nfrom distutils.version import LooseVersion\n\nfrom pandas import Series, DataFrame, MultiIndex\nfrom pandas.compat import range, lzip\nimport pandas.util.testing as tm\nfrom pandas.util.testing import slow\n\nimport numpy as np\nfrom numpy import random\nfrom numpy.random import randn\n\nimport pandas.tools.plotting as plotting\n\nfrom pandas.tests.plotting.common import (TestPlotBase, _check_plot_works)\n\n\n\"\"\" Test cases for .boxplot method \"\"\"\n\n\ndef _skip_if_mpl_14_or_dev_boxplot():\n # GH 8382\n # Boxplot failures on 1.4 and 1.4.1\n # Don't need try / except since that's done at class level\n import matplotlib\n if str(matplotlib.__version__) >= LooseVersion('1.4'):\n raise nose.SkipTest(\"Matplotlib Regression in 1.4 and current dev.\")\n\n\[email protected]\nclass TestDataFramePlots(TestPlotBase):\n\n @slow\n def test_boxplot_legacy(self):\n df = DataFrame(randn(6, 4),\n index=list(string.ascii_letters[:6]),\n columns=['one', 'two', 'three', 'four'])\n df['indic'] = ['foo', 'bar'] * 3\n df['indic2'] = ['foo', 'bar', 'foo'] * 2\n\n _check_plot_works(df.boxplot, return_type='dict')\n _check_plot_works(df.boxplot, column=[\n 'one', 'two'], return_type='dict')\n # _check_plot_works adds an ax so catch warning. see GH #13188\n with tm.assert_produces_warning(UserWarning):\n _check_plot_works(df.boxplot, column=['one', 'two'],\n by='indic')\n _check_plot_works(df.boxplot, column='one', by=['indic', 'indic2'])\n with tm.assert_produces_warning(UserWarning):\n _check_plot_works(df.boxplot, by='indic')\n with tm.assert_produces_warning(UserWarning):\n _check_plot_works(df.boxplot, by=['indic', 'indic2'])\n _check_plot_works(plotting.boxplot, data=df['one'], return_type='dict')\n _check_plot_works(df.boxplot, notch=1, return_type='dict')\n with tm.assert_produces_warning(UserWarning):\n _check_plot_works(df.boxplot, by='indic', notch=1)\n\n df = DataFrame(np.random.rand(10, 2), columns=['Col1', 'Col2'])\n df['X'] = Series(['A', 'A', 'A', 'A', 'A', 'B', 'B', 'B', 'B', 'B'])\n df['Y'] = Series(['A'] * 10)\n with tm.assert_produces_warning(UserWarning):\n _check_plot_works(df.boxplot, by='X')\n\n # When ax is supplied and required number of axes is 1,\n # passed ax should be used:\n fig, ax = self.plt.subplots()\n axes = df.boxplot('Col1', by='X', ax=ax)\n ax_axes = ax.axes if self.mpl_ge_1_5_0 else ax.get_axes()\n self.assertIs(ax_axes, axes)\n\n fig, ax = self.plt.subplots()\n axes = df.groupby('Y').boxplot(ax=ax, return_type='axes')\n ax_axes = ax.axes if self.mpl_ge_1_5_0 else ax.get_axes()\n self.assertIs(ax_axes, axes['A'])\n\n # Multiple columns with an ax argument should use same figure\n fig, ax = self.plt.subplots()\n with tm.assert_produces_warning(UserWarning):\n axes = df.boxplot(column=['Col1', 'Col2'],\n by='X', ax=ax, return_type='axes')\n self.assertIs(axes['Col1'].get_figure(), fig)\n\n # When by is None, check that all relevant lines are present in the\n # dict\n fig, ax = self.plt.subplots()\n d = df.boxplot(ax=ax, return_type='dict')\n lines = list(itertools.chain.from_iterable(d.values()))\n self.assertEqual(len(ax.get_lines()), len(lines))\n\n @slow\n def test_boxplot_return_type_none(self):\n # GH 12216; return_type=None & by=None -> axes\n result = self.hist_df.boxplot()\n self.assertTrue(isinstance(result, self.plt.Axes))\n\n @slow\n def test_boxplot_return_type_legacy(self):\n # API change in https://github.com/pandas-dev/pandas/pull/7096\n import matplotlib as mpl # noqa\n\n df = 
DataFrame(randn(6, 4),\n index=list(string.ascii_letters[:6]),\n columns=['one', 'two', 'three', 'four'])\n with tm.assertRaises(ValueError):\n df.boxplot(return_type='NOTATYPE')\n\n result = df.boxplot()\n self._check_box_return_type(result, 'axes')\n\n with tm.assert_produces_warning(False):\n result = df.boxplot(return_type='dict')\n self._check_box_return_type(result, 'dict')\n\n with tm.assert_produces_warning(False):\n result = df.boxplot(return_type='axes')\n self._check_box_return_type(result, 'axes')\n\n with tm.assert_produces_warning(False):\n result = df.boxplot(return_type='both')\n self._check_box_return_type(result, 'both')\n\n @slow\n def test_boxplot_axis_limits(self):\n\n def _check_ax_limits(col, ax):\n y_min, y_max = ax.get_ylim()\n self.assertTrue(y_min <= col.min())\n self.assertTrue(y_max >= col.max())\n\n df = self.hist_df.copy()\n df['age'] = np.random.randint(1, 20, df.shape[0])\n # One full row\n height_ax, weight_ax = df.boxplot(['height', 'weight'], by='category')\n _check_ax_limits(df['height'], height_ax)\n _check_ax_limits(df['weight'], weight_ax)\n self.assertEqual(weight_ax._sharey, height_ax)\n\n # Two rows, one partial\n p = df.boxplot(['height', 'weight', 'age'], by='category')\n height_ax, weight_ax, age_ax = p[0, 0], p[0, 1], p[1, 0]\n dummy_ax = p[1, 1]\n\n _check_ax_limits(df['height'], height_ax)\n _check_ax_limits(df['weight'], weight_ax)\n _check_ax_limits(df['age'], age_ax)\n self.assertEqual(weight_ax._sharey, height_ax)\n self.assertEqual(age_ax._sharey, height_ax)\n self.assertIsNone(dummy_ax._sharey)\n\n @slow\n def test_boxplot_empty_column(self):\n _skip_if_mpl_14_or_dev_boxplot()\n df = DataFrame(np.random.randn(20, 4))\n df.loc[:, 0] = np.nan\n _check_plot_works(df.boxplot, return_type='axes')\n\n\[email protected]\nclass TestDataFrameGroupByPlots(TestPlotBase):\n\n @slow\n def test_boxplot_legacy(self):\n grouped = self.hist_df.groupby(by='gender')\n with tm.assert_produces_warning(UserWarning):\n axes = _check_plot_works(grouped.boxplot, return_type='axes')\n self._check_axes_shape(list(axes.values), axes_num=2, layout=(1, 2))\n axes = _check_plot_works(grouped.boxplot, subplots=False,\n return_type='axes')\n self._check_axes_shape(axes, axes_num=1, layout=(1, 1))\n tuples = lzip(string.ascii_letters[:10], range(10))\n df = DataFrame(np.random.rand(10, 3),\n index=MultiIndex.from_tuples(tuples))\n\n grouped = df.groupby(level=1)\n with tm.assert_produces_warning(UserWarning):\n axes = _check_plot_works(grouped.boxplot, return_type='axes')\n self._check_axes_shape(list(axes.values), axes_num=10, layout=(4, 3))\n\n axes = _check_plot_works(grouped.boxplot, subplots=False,\n return_type='axes')\n self._check_axes_shape(axes, axes_num=1, layout=(1, 1))\n\n grouped = df.unstack(level=1).groupby(level=0, axis=1)\n with tm.assert_produces_warning(UserWarning):\n axes = _check_plot_works(grouped.boxplot, return_type='axes')\n self._check_axes_shape(list(axes.values), axes_num=3, layout=(2, 2))\n axes = _check_plot_works(grouped.boxplot, subplots=False,\n return_type='axes')\n self._check_axes_shape(axes, axes_num=1, layout=(1, 1))\n\n @slow\n def test_grouped_plot_fignums(self):\n n = 10\n weight = Series(np.random.normal(166, 20, size=n))\n height = Series(np.random.normal(60, 10, size=n))\n with tm.RNGContext(42):\n gender = np.random.choice(['male', 'female'], size=n)\n df = DataFrame({'height': height, 'weight': weight, 'gender': gender})\n gb = df.groupby('gender')\n\n res = gb.plot()\n self.assertEqual(len(self.plt.get_fignums()), 2)\n 
self.assertEqual(len(res), 2)\n tm.close()\n\n res = gb.boxplot(return_type='axes')\n self.assertEqual(len(self.plt.get_fignums()), 1)\n self.assertEqual(len(res), 2)\n tm.close()\n\n # now works with GH 5610 as gender is excluded\n res = df.groupby('gender').hist()\n tm.close()\n\n @slow\n def test_grouped_box_return_type(self):\n df = self.hist_df\n\n # old style: return_type=None\n result = df.boxplot(by='gender')\n self.assertIsInstance(result, np.ndarray)\n self._check_box_return_type(\n result, None,\n expected_keys=['height', 'weight', 'category'])\n\n # now for groupby\n result = df.groupby('gender').boxplot(return_type='dict')\n self._check_box_return_type(\n result, 'dict', expected_keys=['Male', 'Female'])\n\n columns2 = 'X B C D A G Y N Q O'.split()\n df2 = DataFrame(random.randn(50, 10), columns=columns2)\n categories2 = 'A B C D E F G H I J'.split()\n df2['category'] = categories2 * 5\n\n for t in ['dict', 'axes', 'both']:\n returned = df.groupby('classroom').boxplot(return_type=t)\n self._check_box_return_type(\n returned, t, expected_keys=['A', 'B', 'C'])\n\n returned = df.boxplot(by='classroom', return_type=t)\n self._check_box_return_type(\n returned, t,\n expected_keys=['height', 'weight', 'category'])\n\n returned = df2.groupby('category').boxplot(return_type=t)\n self._check_box_return_type(returned, t, expected_keys=categories2)\n\n returned = df2.boxplot(by='category', return_type=t)\n self._check_box_return_type(returned, t, expected_keys=columns2)\n\n @slow\n def test_grouped_box_layout(self):\n df = self.hist_df\n\n self.assertRaises(ValueError, df.boxplot, column=['weight', 'height'],\n by=df.gender, layout=(1, 1))\n self.assertRaises(ValueError, df.boxplot,\n column=['height', 'weight', 'category'],\n layout=(2, 1), return_type='dict')\n self.assertRaises(ValueError, df.boxplot, column=['weight', 'height'],\n by=df.gender, layout=(-1, -1))\n\n # _check_plot_works adds an ax so catch warning. 
see GH #13188\n with tm.assert_produces_warning(UserWarning):\n box = _check_plot_works(df.groupby('gender').boxplot,\n column='height', return_type='dict')\n self._check_axes_shape(self.plt.gcf().axes, axes_num=2, layout=(1, 2))\n\n with tm.assert_produces_warning(UserWarning):\n box = _check_plot_works(df.groupby('category').boxplot,\n column='height',\n return_type='dict')\n self._check_axes_shape(self.plt.gcf().axes, axes_num=4, layout=(2, 2))\n\n # GH 6769\n with tm.assert_produces_warning(UserWarning):\n box = _check_plot_works(df.groupby('classroom').boxplot,\n column='height', return_type='dict')\n self._check_axes_shape(self.plt.gcf().axes, axes_num=3, layout=(2, 2))\n\n # GH 5897\n axes = df.boxplot(column=['height', 'weight', 'category'], by='gender',\n return_type='axes')\n self._check_axes_shape(self.plt.gcf().axes, axes_num=3, layout=(2, 2))\n for ax in [axes['height']]:\n self._check_visible(ax.get_xticklabels(), visible=False)\n self._check_visible([ax.xaxis.get_label()], visible=False)\n for ax in [axes['weight'], axes['category']]:\n self._check_visible(ax.get_xticklabels())\n self._check_visible([ax.xaxis.get_label()])\n\n box = df.groupby('classroom').boxplot(\n column=['height', 'weight', 'category'], return_type='dict')\n self._check_axes_shape(self.plt.gcf().axes, axes_num=3, layout=(2, 2))\n\n with tm.assert_produces_warning(UserWarning):\n box = _check_plot_works(df.groupby('category').boxplot,\n column='height',\n layout=(3, 2), return_type='dict')\n self._check_axes_shape(self.plt.gcf().axes, axes_num=4, layout=(3, 2))\n with tm.assert_produces_warning(UserWarning):\n box = _check_plot_works(df.groupby('category').boxplot,\n column='height',\n layout=(3, -1), return_type='dict')\n self._check_axes_shape(self.plt.gcf().axes, axes_num=4, layout=(3, 2))\n\n box = df.boxplot(column=['height', 'weight', 'category'], by='gender',\n layout=(4, 1))\n self._check_axes_shape(self.plt.gcf().axes, axes_num=3, layout=(4, 1))\n\n box = df.boxplot(column=['height', 'weight', 'category'], by='gender',\n layout=(-1, 1))\n self._check_axes_shape(self.plt.gcf().axes, axes_num=3, layout=(3, 1))\n\n box = df.groupby('classroom').boxplot(\n column=['height', 'weight', 'category'], layout=(1, 4),\n return_type='dict')\n self._check_axes_shape(self.plt.gcf().axes, axes_num=3, layout=(1, 4))\n\n box = df.groupby('classroom').boxplot( # noqa\n column=['height', 'weight', 'category'], layout=(1, -1),\n return_type='dict')\n self._check_axes_shape(self.plt.gcf().axes, axes_num=3, layout=(1, 3))\n\n @slow\n def test_grouped_box_multiple_axes(self):\n # GH 6970, GH 7069\n df = self.hist_df\n\n # check warning to ignore sharex / sharey\n # this check should be done in the first function which\n # passes multiple axes to plot, hist or boxplot\n # location should be changed if other test is added\n # which has earlier alphabetical order\n with tm.assert_produces_warning(UserWarning):\n fig, axes = self.plt.subplots(2, 2)\n df.groupby('category').boxplot(\n column='height', return_type='axes', ax=axes)\n self._check_axes_shape(self.plt.gcf().axes,\n axes_num=4, layout=(2, 2))\n\n fig, axes = self.plt.subplots(2, 3)\n with tm.assert_produces_warning(UserWarning):\n returned = df.boxplot(column=['height', 'weight', 'category'],\n by='gender', return_type='axes', ax=axes[0])\n returned = np.array(list(returned.values))\n self._check_axes_shape(returned, axes_num=3, layout=(1, 3))\n self.assert_numpy_array_equal(returned, axes[0])\n self.assertIs(returned[0].figure, fig)\n\n # draw on second row\n 
with tm.assert_produces_warning(UserWarning):\n returned = df.groupby('classroom').boxplot(\n column=['height', 'weight', 'category'],\n return_type='axes', ax=axes[1])\n returned = np.array(list(returned.values))\n self._check_axes_shape(returned, axes_num=3, layout=(1, 3))\n self.assert_numpy_array_equal(returned, axes[1])\n self.assertIs(returned[0].figure, fig)\n\n with tm.assertRaises(ValueError):\n fig, axes = self.plt.subplots(2, 3)\n # pass different number of axes from required\n with tm.assert_produces_warning(UserWarning):\n axes = df.groupby('classroom').boxplot(ax=axes)\n\n\nif __name__ == '__main__':\n nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],\n exit=False)\n"
] | [
[
"pandas.Series",
"pandas.util.testing.makeFloatIndex",
"pandas.core.base.FrozenNDArray",
"pandas.types.common.is_datetimetz",
"pandas.util.testing.assert_produces_warning",
"pandas.util.testing.assert_series_equal",
"pandas.util.testing.makeStringIndex",
"pandas.util.testing.makeIntIndex",
"numpy.transpose",
"pandas.compat.StringIO",
"pandas.to_datetime",
"pandas.isnull",
"numpy.unique",
"pandas.util.testing.makeDateIndex",
"pandas.util.testing.makeUnicodeIndex",
"pandas.util.testing.assert_numpy_array_equal",
"pandas.types.common.needs_i8_conversion",
"pandas.read_fwf",
"pandas.TimedeltaIndex",
"numpy.arange",
"pandas.Timedelta",
"pandas.util.testing.assertRaisesRegexp",
"pandas.util.testing.assert_index_equal",
"pandas.Index",
"numpy.array",
"pandas.compat.u",
"pandas.DatetimeIndex",
"numpy.random.randn",
"pandas.compat.numpy.np_array_datetime64_compat",
"pandas.types.common.is_object_dtype",
"pandas.util.testing.makePeriodIndex",
"pandas.core.base.FrozenList",
"pandas.util.testing.makeBoolIndex"
],
[
"pandas.util.testing.RNGContext",
"pandas.Series",
"pandas.DataFrame",
"numpy.random.randn",
"numpy.random.normal",
"numpy.random.choice",
"pandas.util.testing.assert_produces_warning",
"pandas.util.testing.close",
"numpy.random.rand",
"pandas.compat.range",
"pandas.MultiIndex.from_tuples",
"pandas.tests.plotting.common._check_plot_works",
"numpy.random.randint",
"pandas.util.testing.assertRaises"
]
] |
dgoodwin208/6.883ProteinDocking | [
"07f33688bd5ec8c5ae6d4d4113eb64b0f2352e9e"
] | [
"config.py"
] | [
"import torch\n\ndevice = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')\nFloatTensor = torch.cuda.FloatTensor if torch.cuda.is_available() else torch.FloatTensor\nLongTensor = torch.cuda.LongTensor if torch.cuda.is_available() else torch.LongTensor\n"
] | [
[
"torch.cuda.is_available",
"torch.device"
]
] |
ilya-fedin/tg_owt | [
"d5c3d43b959c7e9e7d8004b9b7fdadd12ce7d589"
] | [
"src/modules/audio_processing/test/py_quality_assessment/quality_assessment/test_data_generation_unittest.py"
] | [
"# Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.\n#\n# Use of this source code is governed by a BSD-style license\n# that can be found in the LICENSE file in the root of the source\n# tree. An additional intellectual property rights grant can be found\n# in the file PATENTS. All contributing project authors may\n# be found in the AUTHORS file in the root of the source tree.\n\"\"\"Unit tests for the test_data_generation module.\n\"\"\"\n\nimport os\nimport shutil\nimport tempfile\nimport unittest\n\nimport numpy as np\nimport scipy.io\n\nfrom . import test_data_generation\nfrom . import test_data_generation_factory\nfrom . import signal_processing\n\n\nclass TestTestDataGenerators(unittest.TestCase):\n \"\"\"Unit tests for the test_data_generation module.\n \"\"\"\n\n def setUp(self):\n \"\"\"Create temporary folders.\"\"\"\n self._base_output_path = tempfile.mkdtemp()\n self._test_data_cache_path = tempfile.mkdtemp()\n self._fake_air_db_path = tempfile.mkdtemp()\n\n # Fake AIR DB impulse responses.\n # TODO(alessiob): ReverberationTestDataGenerator will change to allow custom\n # impulse responses. When changed, the coupling below between\n # impulse_response_mat_file_names and\n # ReverberationTestDataGenerator._IMPULSE_RESPONSES can be removed.\n impulse_response_mat_file_names = [\n 'air_binaural_lecture_0_0_1.mat',\n 'air_binaural_booth_0_0_1.mat',\n ]\n for impulse_response_mat_file_name in impulse_response_mat_file_names:\n data = {'h_air': np.random.rand(1, 1000).astype('<f8')}\n scipy.io.savemat(\n os.path.join(self._fake_air_db_path,\n impulse_response_mat_file_name), data)\n\n def tearDown(self):\n \"\"\"Recursively delete temporary folders.\"\"\"\n shutil.rmtree(self._base_output_path)\n shutil.rmtree(self._test_data_cache_path)\n shutil.rmtree(self._fake_air_db_path)\n\n def testTestDataGenerators(self):\n # Preliminary check.\n self.assertTrue(os.path.exists(self._base_output_path))\n self.assertTrue(os.path.exists(self._test_data_cache_path))\n\n # Check that there is at least one registered test data generator.\n registered_classes = (\n test_data_generation.TestDataGenerator.REGISTERED_CLASSES)\n self.assertIsInstance(registered_classes, dict)\n self.assertGreater(len(registered_classes), 0)\n\n # Instance generators factory.\n generators_factory = test_data_generation_factory.TestDataGeneratorFactory(\n aechen_ir_database_path=self._fake_air_db_path,\n noise_tracks_path=test_data_generation. \\\n AdditiveNoiseTestDataGenerator. 
\\\n DEFAULT_NOISE_TRACKS_PATH,\n copy_with_identity=False)\n generators_factory.SetOutputDirectoryPrefix('datagen-')\n\n # Use a simple input file as clean input signal.\n input_signal_filepath = os.path.join(os.getcwd(), 'probing_signals',\n 'tone-880.wav')\n self.assertTrue(os.path.exists(input_signal_filepath))\n\n # Load input signal.\n input_signal = signal_processing.SignalProcessingUtils.LoadWav(\n input_signal_filepath)\n\n # Try each registered test data generator.\n for generator_name in registered_classes:\n # Instance test data generator.\n generator = generators_factory.GetInstance(\n registered_classes[generator_name])\n\n # Generate the noisy input - reference pairs.\n generator.Generate(input_signal_filepath=input_signal_filepath,\n test_data_cache_path=self._test_data_cache_path,\n base_output_path=self._base_output_path)\n\n # Perform checks.\n self._CheckGeneratedPairsListSizes(generator)\n self._CheckGeneratedPairsSignalDurations(generator, input_signal)\n self._CheckGeneratedPairsOutputPaths(generator)\n\n def testTestIdentityDataGenerator(self):\n # Preliminary check.\n self.assertTrue(os.path.exists(self._base_output_path))\n self.assertTrue(os.path.exists(self._test_data_cache_path))\n\n # Use a simple input file as clean input signal.\n input_signal_filepath = os.path.join(os.getcwd(), 'probing_signals',\n 'tone-880.wav')\n self.assertTrue(os.path.exists(input_signal_filepath))\n\n def GetNoiseReferenceFilePaths(identity_generator):\n noisy_signal_filepaths = identity_generator.noisy_signal_filepaths\n reference_signal_filepaths = identity_generator.reference_signal_filepaths\n assert noisy_signal_filepaths.keys(\n ) == reference_signal_filepaths.keys()\n assert len(noisy_signal_filepaths.keys()) == 1\n key = list(noisy_signal_filepaths.keys())[0]\n return noisy_signal_filepaths[key], reference_signal_filepaths[key]\n\n # Test the `copy_with_identity` flag.\n for copy_with_identity in [False, True]:\n # Instance the generator through the factory.\n factory = test_data_generation_factory.TestDataGeneratorFactory(\n aechen_ir_database_path='',\n noise_tracks_path='',\n copy_with_identity=copy_with_identity)\n factory.SetOutputDirectoryPrefix('datagen-')\n generator = factory.GetInstance(\n test_data_generation.IdentityTestDataGenerator)\n # Check `copy_with_identity` is set correctly.\n self.assertEqual(copy_with_identity, generator.copy_with_identity)\n\n # Generate test data and extract the paths to the noise and the reference\n # files.\n generator.Generate(input_signal_filepath=input_signal_filepath,\n test_data_cache_path=self._test_data_cache_path,\n base_output_path=self._base_output_path)\n noisy_signal_filepath, reference_signal_filepath = (\n GetNoiseReferenceFilePaths(generator))\n\n # Check that a copy is made if and only if `copy_with_identity` is True.\n if copy_with_identity:\n self.assertNotEqual(noisy_signal_filepath,\n input_signal_filepath)\n self.assertNotEqual(reference_signal_filepath,\n input_signal_filepath)\n else:\n self.assertEqual(noisy_signal_filepath, input_signal_filepath)\n self.assertEqual(reference_signal_filepath,\n input_signal_filepath)\n\n def _CheckGeneratedPairsListSizes(self, generator):\n config_names = generator.config_names\n number_of_pairs = len(config_names)\n self.assertEqual(number_of_pairs,\n len(generator.noisy_signal_filepaths))\n self.assertEqual(number_of_pairs, len(generator.apm_output_paths))\n self.assertEqual(number_of_pairs,\n len(generator.reference_signal_filepaths))\n\n def 
_CheckGeneratedPairsSignalDurations(self, generator, input_signal):\n \"\"\"Checks duration of the generated signals.\n\n Checks that the noisy input and the reference tracks are audio files\n with duration equal to or greater than that of the input signal.\n\n Args:\n generator: TestDataGenerator instance.\n input_signal: AudioSegment instance.\n \"\"\"\n input_signal_length = (\n signal_processing.SignalProcessingUtils.CountSamples(input_signal))\n\n # Iterate over the noisy signal - reference pairs.\n for config_name in generator.config_names:\n # Load the noisy input file.\n noisy_signal_filepath = generator.noisy_signal_filepaths[\n config_name]\n noisy_signal = signal_processing.SignalProcessingUtils.LoadWav(\n noisy_signal_filepath)\n\n # Check noisy input signal length.\n noisy_signal_length = (signal_processing.SignalProcessingUtils.\n CountSamples(noisy_signal))\n self.assertGreaterEqual(noisy_signal_length, input_signal_length)\n\n # Load the reference file.\n reference_signal_filepath = generator.reference_signal_filepaths[\n config_name]\n reference_signal = signal_processing.SignalProcessingUtils.LoadWav(\n reference_signal_filepath)\n\n # Check reference signal length.\n reference_signal_length = (signal_processing.SignalProcessingUtils.\n CountSamples(reference_signal))\n self.assertGreaterEqual(reference_signal_length,\n input_signal_length)\n\n def _CheckGeneratedPairsOutputPaths(self, generator):\n \"\"\"Checks that the output path created by the generator exists.\n\n Args:\n generator: TestDataGenerator instance.\n \"\"\"\n # Iterate over the noisy signal - reference pairs.\n for config_name in generator.config_names:\n output_path = generator.apm_output_paths[config_name]\n self.assertTrue(os.path.exists(output_path))\n"
] | [
[
"numpy.random.rand"
]
] |
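The test above fakes the Aachen impulse-response database by writing random arrays with numpy.random.rand, the one API indexed for this record. A minimal standalone sketch of the same pattern, assuming only numpy and scipy; the 'h_air' key and the .mat file name are copied from the test's setUp, while the temporary directory and the round-trip check are illustrative additions:

import os
import tempfile

import numpy as np
import scipy.io

# Fake a 1 x 1000 impulse response as little-endian float64, like the test's setUp().
fake_air_db_path = tempfile.mkdtemp()
mat_path = os.path.join(fake_air_db_path, 'air_binaural_lecture_0_0_1.mat')
scipy.io.savemat(mat_path, {'h_air': np.random.rand(1, 1000).astype('<f8')})

# Round trip: loadmat returns a dict containing the saved array under the same key.
assert scipy.io.loadmat(mat_path)['h_air'].shape == (1, 1000)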
alexandgu/hyperopt | [
"cfb7a89d689ea8102b90b20daefd390d526eb131"
] | [
"hyperopt/tpe.py"
] | [
"\"\"\"\nGraphical model (GM)-based optimization algorithm using Theano\n\"\"\"\nfrom past.utils import old_div\nimport logging\nimport time\n\nimport numpy as np\nfrom scipy.special import erf\nfrom . import pyll\nfrom .pyll import scope\nfrom .pyll.stochastic import implicit_stochastic\n\nfrom .base import miscs_to_idxs_vals\nfrom .base import miscs_update_idxs_vals\n\n# from .base import Trials\nfrom . import rand\n\n__authors__ = \"James Bergstra\"\n__license__ = \"3-clause BSD License\"\n__contact__ = \"github.com/jaberg/hyperopt\"\nlogger = logging.getLogger(__name__)\n\nEPS = 1e-12\n\n# -- default linear forgetting. don't try to change by writing this variable\n# because it's captured in function default args when this file is read\nDEFAULT_LF = 25\n\n\nadaptive_parzen_samplers = {}\n\n\n# a decorator to register functions to the dict `adaptive_parzen_samplers`\ndef adaptive_parzen_sampler(name):\n def wrapper(f):\n assert name not in adaptive_parzen_samplers\n adaptive_parzen_samplers[name] = f\n return f\n\n return wrapper\n\n\n#\n# These are some custom distributions\n# that are used to represent posterior distributions.\n#\n\n# -- Categorical\n\n\[email protected]\ndef categorical_lpdf(sample, p):\n if sample.size:\n return np.log(np.asarray(p)[sample])\n return np.asarray([])\n\n\[email protected]\ndef randint_via_categorical_lpdf(sample, p):\n if sample.size:\n return np.log(np.asarray(p)[sample])\n return np.asarray([])\n\n\n# -- Bounded Gaussian Mixture Model (BGMM)\n\n\n@implicit_stochastic\[email protected]\ndef GMM1(weights, mus, sigmas, low=None, high=None, q=None, rng=None, size=()):\n \"\"\"Sample from truncated 1-D Gaussian Mixture Model\"\"\"\n weights, mus, sigmas = list(map(np.asarray, (weights, mus, sigmas)))\n assert len(weights) == len(mus) == len(sigmas)\n n_samples = int(np.prod(size))\n # n_components = len(weights)\n if low is None and high is None:\n # -- draw from a standard GMM\n active = np.argmax(rng.multinomial(1, weights, (n_samples,)), axis=1)\n samples = rng.normal(loc=mus[active], scale=sigmas[active])\n else:\n # -- draw from truncated components, handling one-sided truncation\n low = float(low) if low is not None else -float(\"Inf\")\n high = float(high) if high is not None else float(\"Inf\")\n if low >= high:\n raise ValueError(\"low >= high\", (low, high))\n samples = []\n while len(samples) < n_samples:\n active = np.argmax(rng.multinomial(1, weights))\n draw = rng.normal(loc=mus[active], scale=sigmas[active])\n if low <= draw < high:\n samples.append(draw)\n samples = np.reshape(np.asarray(samples), size)\n if q is None:\n return samples\n return np.round(old_div(samples, q)) * q\n\n\[email protected]\ndef normal_cdf(x, mu, sigma):\n top = x - mu\n bottom = np.maximum(np.sqrt(2) * sigma, EPS)\n z = old_div(top, bottom)\n return 0.5 * (1 + erf(z))\n\n\[email protected]\ndef GMM1_lpdf(samples, weights, mus, sigmas, low=None, high=None, q=None):\n def print_verbose(s, x):\n return print(f\"GMM1_lpdf:{s}\", x)\n\n verbose = 0\n samples, weights, mus, sigmas = list(\n map(np.asarray, (samples, weights, mus, sigmas))\n )\n if samples.size == 0:\n return np.asarray([])\n if weights.ndim != 1:\n raise TypeError(\"need vector of weights\", weights.shape)\n if mus.ndim != 1:\n raise TypeError(\"need vector of mus\", mus.shape)\n if sigmas.ndim != 1:\n raise TypeError(\"need vector of sigmas\", sigmas.shape)\n assert len(weights) == len(mus) == len(sigmas)\n _samples = samples\n samples = _samples.flatten()\n\n if verbose:\n print_verbose(\"samples\", 
set(samples))\n print_verbose(\"weights\", weights)\n print_verbose(\"mus\", mus)\n print_verbose(\"sigmas\", sigmas)\n print_verbose(\"low\", low)\n print_verbose(\"high\", high)\n print_verbose(\"q\", q)\n\n if low is None and high is None:\n p_accept = 1\n else:\n p_accept = np.sum(\n weights * (normal_cdf(high, mus, sigmas) - normal_cdf(low, mus, sigmas))\n )\n\n if q is None:\n dist = samples[:, None] - mus\n mahal = (old_div(dist, np.maximum(sigmas, EPS))) ** 2\n # mahal shape is (n_samples, n_components)\n Z = np.sqrt(2 * np.pi * sigmas ** 2)\n coef = weights / Z / p_accept\n rval = logsum_rows(-0.5 * mahal + np.log(coef))\n else:\n prob = np.zeros(samples.shape, dtype=\"float64\")\n for w, mu, sigma in zip(weights, mus, sigmas):\n if high is None:\n ubound = samples + old_div(q, 2.0)\n else:\n ubound = np.minimum(samples + old_div(q, 2.0), high)\n if low is None:\n lbound = samples - old_div(q, 2.0)\n else:\n lbound = np.maximum(samples - old_div(q, 2.0), low)\n # -- two-stage addition is slightly more numerically accurate\n inc_amt = w * normal_cdf(ubound, mu, sigma)\n inc_amt -= w * normal_cdf(lbound, mu, sigma)\n prob += inc_amt\n rval = np.log(prob) - np.log(p_accept)\n\n if verbose:\n print_verbose(\"rval:\", dict(list(zip(samples, rval))))\n\n rval.shape = _samples.shape\n return rval\n\n\n# -- Mixture of Log-Normals\n\n\[email protected]\ndef lognormal_cdf(x, mu, sigma):\n # wikipedia claims cdf is\n # .5 + .5 erf( log(x) - mu / sqrt(2 sigma^2))\n #\n # the maximum is used to move negative values and 0 up to a point\n # where they do not cause nan or inf, but also don't contribute much\n # to the cdf.\n if len(x) == 0:\n return np.asarray([])\n if x.min() < 0:\n raise ValueError(\"negative arg to lognormal_cdf\", x)\n olderr = np.seterr(divide=\"ignore\")\n try:\n top = np.log(np.maximum(x, EPS)) - mu\n bottom = np.maximum(np.sqrt(2) * sigma, EPS)\n z = old_div(top, bottom)\n return 0.5 + 0.5 * erf(z)\n finally:\n np.seterr(**olderr)\n\n\[email protected]\ndef lognormal_lpdf(x, mu, sigma):\n # formula copied from wikipedia\n # http://en.wikipedia.org/wiki/Log-normal_distribution\n assert np.all(sigma >= 0)\n sigma = np.maximum(sigma, EPS)\n Z = sigma * x * np.sqrt(2 * np.pi)\n E = 0.5 * (old_div((np.log(x) - mu), sigma)) ** 2\n rval = -E - np.log(Z)\n return rval\n\n\[email protected]\ndef qlognormal_lpdf(x, mu, sigma, q):\n # casting rounds up to nearest step multiple.\n # so lpdf is log of integral from x-step to x+1 of P(x)\n\n # XXX: subtracting two numbers potentially very close together.\n return np.log(lognormal_cdf(x, mu, sigma) - lognormal_cdf(x - q, mu, sigma))\n\n\n@implicit_stochastic\[email protected]\ndef LGMM1(weights, mus, sigmas, low=None, high=None, q=None, rng=None, size=()):\n weights, mus, sigmas = list(map(np.asarray, (weights, mus, sigmas)))\n n_samples = np.prod(size)\n # n_components = len(weights)\n if low is None and high is None:\n active = np.argmax(rng.multinomial(1, weights, (n_samples,)), axis=1)\n assert len(active) == n_samples\n samples = np.exp(rng.normal(loc=mus[active], scale=sigmas[active]))\n else:\n # -- draw from truncated components\n # TODO: one-sided-truncation\n low = float(low)\n high = float(high)\n if low >= high:\n raise ValueError(\"low >= high\", (low, high))\n samples = []\n while len(samples) < n_samples:\n active = np.argmax(rng.multinomial(1, weights))\n draw = rng.normal(loc=mus[active], scale=sigmas[active])\n if low <= draw < high:\n samples.append(np.exp(draw))\n samples = np.asarray(samples)\n\n samples = 
np.reshape(np.asarray(samples), size)\n if q is not None:\n samples = np.round(old_div(samples, q)) * q\n return samples\n\n\ndef logsum_rows(x):\n m = x.max(axis=1)\n return np.log(np.exp(x - m[:, None]).sum(axis=1)) + m\n\n\[email protected]\ndef LGMM1_lpdf(samples, weights, mus, sigmas, low=None, high=None, q=None):\n samples, weights, mus, sigmas = list(\n map(np.asarray, (samples, weights, mus, sigmas))\n )\n assert weights.ndim == 1\n assert mus.ndim == 1\n assert sigmas.ndim == 1\n _samples = samples\n if samples.ndim != 1:\n samples = samples.flatten()\n\n if low is None and high is None:\n p_accept = 1\n else:\n p_accept = np.sum(\n weights * (normal_cdf(high, mus, sigmas) - normal_cdf(low, mus, sigmas))\n )\n\n if q is None:\n # compute the lpdf of each sample under each component\n lpdfs = lognormal_lpdf(samples[:, None], mus, sigmas)\n rval = logsum_rows(lpdfs + np.log(weights))\n else:\n # compute the lpdf of each sample under each component\n prob = np.zeros(samples.shape, dtype=\"float64\")\n for w, mu, sigma in zip(weights, mus, sigmas):\n if high is None:\n ubound = samples + old_div(q, 2.0)\n else:\n ubound = np.minimum(samples + old_div(q, 2.0), np.exp(high))\n if low is None:\n lbound = samples - old_div(q, 2.0)\n else:\n lbound = np.maximum(samples - old_div(q, 2.0), np.exp(low))\n lbound = np.maximum(0, lbound)\n # -- two-stage addition is slightly more numerically accurate\n inc_amt = w * lognormal_cdf(ubound, mu, sigma)\n inc_amt -= w * lognormal_cdf(lbound, mu, sigma)\n prob += inc_amt\n rval = np.log(prob) - np.log(p_accept)\n rval.shape = _samples.shape\n return rval\n\n\n#\n# This is the weird heuristic ParzenWindow estimator used for continuous\n# distributions in various ways.\n#\n\n\[email protected]_info(o_len=3)\ndef adaptive_parzen_normal_orig(mus, prior_weight, prior_mu, prior_sigma):\n \"\"\"\n A heuristic estimator for the mu and sigma values of a GMM\n TODO: try to find this heuristic in the literature, and cite it - Yoshua\n mentioned the term 'elastic' I think?\n\n mus - matrix (N, M) of M, N-dimensional component centers\n \"\"\"\n mus_orig = np.array(mus)\n mus = np.array(mus)\n assert str(mus.dtype) != \"object\"\n\n if mus.ndim != 1:\n raise TypeError(\"mus must be vector\", mus)\n if len(mus) == 0:\n mus = np.asarray([prior_mu])\n sigma = np.asarray([prior_sigma])\n elif len(mus) == 1:\n mus = np.asarray([prior_mu] + [mus[0]])\n sigma = np.asarray([prior_sigma, prior_sigma * 0.5])\n elif len(mus) >= 2:\n order = np.argsort(mus)\n mus = mus[order]\n sigma = np.zeros_like(mus)\n sigma[1:-1] = np.maximum(mus[1:-1] - mus[0:-2], mus[2:] - mus[1:-1])\n if len(mus) > 2:\n lsigma = mus[2] - mus[0]\n usigma = mus[-1] - mus[-3]\n else:\n lsigma = mus[1] - mus[0]\n usigma = mus[-1] - mus[-2]\n\n sigma[0] = lsigma\n sigma[-1] = usigma\n\n # XXX: is sorting them necessary anymore?\n # un-sort the mus and sigma\n mus[order] = mus.copy()\n sigma[order] = sigma.copy()\n\n if not np.all(mus_orig == mus):\n print(\"orig\", mus_orig)\n print(\"mus\", mus)\n assert np.all(mus_orig == mus)\n\n # put the prior back in\n mus = np.asarray([prior_mu] + list(mus))\n sigma = np.asarray([prior_sigma] + list(sigma))\n\n maxsigma = prior_sigma\n # -- magic formula:\n minsigma = old_div(prior_sigma, np.sqrt(1 + len(mus)))\n\n sigma = np.clip(sigma, minsigma, maxsigma)\n\n weights = np.ones(len(mus), dtype=mus.dtype)\n weights[0] = prior_weight\n\n weights = old_div(weights, weights.sum())\n\n return weights, mus, sigma\n\n\[email protected]\ndef linear_forgetting_weights(N, 
LF):\n assert N >= 0\n assert LF > 0\n if N == 0:\n return np.asarray([])\n if N < LF:\n return np.ones(N)\n ramp = np.linspace(old_div(1.0, N), 1.0, num=N - LF)\n flat = np.ones(LF)\n weights = np.concatenate([ramp, flat], axis=0)\n assert weights.shape == (N,), (weights.shape, N)\n return weights\n\n\n# XXX: make TPE do a post-inference pass over the pyll graph and insert\n# non-default LF argument\n\n\[email protected]_info(o_len=3)\ndef adaptive_parzen_normal(mus, prior_weight, prior_mu, prior_sigma, LF=DEFAULT_LF):\n \"\"\"\n mus - matrix (N, M) of M, N-dimensional component centers\n \"\"\"\n mus = np.array(mus)\n assert str(mus.dtype) != \"object\"\n\n if mus.ndim != 1:\n raise TypeError(\"mus must be vector\", mus)\n if len(mus) == 0:\n srtd_mus = np.asarray([prior_mu])\n sigma = np.asarray([prior_sigma])\n prior_pos = 0\n elif len(mus) == 1:\n if prior_mu < mus[0]:\n prior_pos = 0\n srtd_mus = np.asarray([prior_mu, mus[0]])\n sigma = np.asarray([prior_sigma, prior_sigma * 0.5])\n else:\n prior_pos = 1\n srtd_mus = np.asarray([mus[0], prior_mu])\n sigma = np.asarray([prior_sigma * 0.5, prior_sigma])\n elif len(mus) >= 2:\n\n # create new_mus, which is sorted, and in which\n # the prior has been inserted\n order = np.argsort(mus)\n prior_pos = np.searchsorted(mus[order], prior_mu)\n srtd_mus = np.zeros(len(mus) + 1)\n srtd_mus[:prior_pos] = mus[order[:prior_pos]]\n srtd_mus[prior_pos] = prior_mu\n srtd_mus[prior_pos + 1 :] = mus[order[prior_pos:]]\n sigma = np.zeros_like(srtd_mus)\n sigma[1:-1] = np.maximum(\n srtd_mus[1:-1] - srtd_mus[0:-2], srtd_mus[2:] - srtd_mus[1:-1]\n )\n lsigma = srtd_mus[1] - srtd_mus[0]\n usigma = srtd_mus[-1] - srtd_mus[-2]\n sigma[0] = lsigma\n sigma[-1] = usigma\n\n if LF and LF < len(mus):\n unsrtd_weights = linear_forgetting_weights(len(mus), LF)\n srtd_weights = np.zeros_like(srtd_mus)\n assert len(unsrtd_weights) + 1 == len(srtd_mus)\n srtd_weights[:prior_pos] = unsrtd_weights[order[:prior_pos]]\n srtd_weights[prior_pos] = prior_weight\n srtd_weights[prior_pos + 1 :] = unsrtd_weights[order[prior_pos:]]\n\n else:\n srtd_weights = np.ones(len(srtd_mus))\n srtd_weights[prior_pos] = prior_weight\n\n # -- magic formula:\n maxsigma = old_div(prior_sigma, 1.0)\n minsigma = old_div(prior_sigma, min(100.0, (1.0 + len(srtd_mus))))\n\n sigma = np.clip(sigma, minsigma, maxsigma)\n\n sigma[prior_pos] = prior_sigma\n assert prior_sigma > 0\n assert maxsigma > 0\n assert minsigma > 0\n assert np.all(sigma > 0), (sigma.min(), minsigma, maxsigma)\n\n srtd_weights /= srtd_weights.sum()\n\n return srtd_weights, srtd_mus, sigma\n\n\n#\n# Adaptive Parzen Samplers\n# These produce conditional estimators for various prior distributions\n#\n# NOTE: These are actually used in a fairly complicated way.\n# They are actually returning pyll.Apply AST (Abstract Syntax Tree) objects.\n# This AST is then manipulated and the corresponding _lpdf function is called\n# (e.g GMM1_lpdf)\n#\n# Please see the build_posterior function for details\n\n# -- Uniform\n\n\n@adaptive_parzen_sampler(\"uniform\")\ndef ap_uniform_sampler(obs, prior_weight, low, high, size=(), rng=None):\n prior_mu = 0.5 * (high + low)\n prior_sigma = 1.0 * (high - low)\n weights, mus, sigmas = scope.adaptive_parzen_normal(\n obs, prior_weight, prior_mu, prior_sigma\n )\n return scope.GMM1(\n weights, mus, sigmas, low=low, high=high, q=None, size=size, rng=rng\n )\n\n\n@adaptive_parzen_sampler(\"quniform\")\ndef ap_quniform_sampler(obs, prior_weight, low, high, q, size=(), rng=None):\n prior_mu = 0.5 * (high + low)\n 
prior_sigma = 1.0 * (high - low)\n weights, mus, sigmas = scope.adaptive_parzen_normal(\n obs, prior_weight, prior_mu, prior_sigma\n )\n return scope.GMM1(weights, mus, sigmas, low=low, high=high, q=q, size=size, rng=rng)\n\n\n@adaptive_parzen_sampler(\"loguniform\")\ndef ap_loguniform_sampler(obs, prior_weight, low, high, size=(), rng=None):\n prior_mu = 0.5 * (high + low)\n prior_sigma = 1.0 * (high - low)\n weights, mus, sigmas = scope.adaptive_parzen_normal(\n scope.log(obs), prior_weight, prior_mu, prior_sigma\n )\n rval = scope.LGMM1(weights, mus, sigmas, low=low, high=high, size=size, rng=rng)\n return rval\n\n\n@adaptive_parzen_sampler(\"qloguniform\")\ndef ap_qloguniform_sampler(obs, prior_weight, low, high, q, size=(), rng=None):\n prior_mu = 0.5 * (high + low)\n prior_sigma = 1.0 * (high - low)\n weights, mus, sigmas = scope.adaptive_parzen_normal(\n scope.log(\n # -- map observations that were quantized to be below exp(low)\n # (particularly 0) back up to exp(low) where they will\n # interact in a reasonable way with the AdaptiveParzen\n # thing.\n scope.maximum(\n obs,\n scope.maximum( # -- protect against exp(low) underflow\n EPS, scope.exp(low)\n ),\n )\n ),\n prior_weight,\n prior_mu,\n prior_sigma,\n )\n return scope.LGMM1(weights, mus, sigmas, low, high, q=q, size=size, rng=rng)\n\n\n# -- Normal\n\n\n@adaptive_parzen_sampler(\"normal\")\ndef ap_normal_sampler(obs, prior_weight, mu, sigma, size=(), rng=None):\n weights, mus, sigmas = scope.adaptive_parzen_normal(obs, prior_weight, mu, sigma)\n return scope.GMM1(weights, mus, sigmas, size=size, rng=rng)\n\n\n@adaptive_parzen_sampler(\"qnormal\")\ndef ap_qnormal_sampler(obs, prior_weight, mu, sigma, q, size=(), rng=None):\n weights, mus, sigmas = scope.adaptive_parzen_normal(obs, prior_weight, mu, sigma)\n return scope.GMM1(weights, mus, sigmas, q=q, size=size, rng=rng)\n\n\n@adaptive_parzen_sampler(\"lognormal\")\ndef ap_loglognormal_sampler(obs, prior_weight, mu, sigma, size=(), rng=None):\n weights, mus, sigmas = scope.adaptive_parzen_normal(\n scope.log(obs), prior_weight, mu, sigma\n )\n rval = scope.LGMM1(weights, mus, sigmas, size=size, rng=rng)\n return rval\n\n\n@adaptive_parzen_sampler(\"qlognormal\")\ndef ap_qlognormal_sampler(obs, prior_weight, mu, sigma, q, size=(), rng=None):\n log_obs = scope.log(scope.maximum(obs, EPS))\n weights, mus, sigmas = scope.adaptive_parzen_normal(\n log_obs, prior_weight, mu, sigma\n )\n rval = scope.LGMM1(weights, mus, sigmas, q=q, size=size, rng=rng)\n return rval\n\n\n# -- Categorical\n\n\n@adaptive_parzen_sampler(\"randint\")\ndef ap_randint_sampler(\n obs, prior_weight, low, high=None, size=(), rng=None, LF=DEFAULT_LF\n):\n # randint can be seen as a categorical with high - low categories\n weights = scope.linear_forgetting_weights(scope.len(obs), LF=LF)\n # if high is None, then low represents high and there is no offset\n domain_size = low if high is None else high - low\n offset = pyll.Literal(0) if high is None else low\n counts = scope.bincount(obs, offset=offset, minlength=domain_size, weights=weights)\n # -- add in some prior pseudocounts\n pseudocounts = counts + prior_weight\n random_variable = scope.randint_via_categorical(\n old_div(pseudocounts, scope.sum(pseudocounts)), size=size, rng=rng\n )\n return random_variable\n\n\[email protected]\ndef tpe_cat_pseudocounts(counts, prior_weight, p, size):\n if np.prod(size) == 0:\n return []\n if p.ndim == 2:\n assert np.all(p == p[0])\n p = p[0]\n pseudocounts = counts + p.size * (prior_weight * p)\n return 
old_div(pseudocounts, np.sum(pseudocounts))\n\n\n@adaptive_parzen_sampler(\"categorical\")\ndef ap_categorical_sampler(obs, prior_weight, p, size=(), rng=None, LF=DEFAULT_LF):\n weights = scope.linear_forgetting_weights(scope.len(obs), LF=LF)\n # in order to support pchoice here, we need to find the size of p,\n # but p can have p.ndim == 2, so we pass p to bincount and unpack it\n # (if required) there\n counts = scope.bincount(obs, p=p, weights=weights)\n pseudocounts = scope.tpe_cat_pseudocounts(counts, prior_weight, p, size)\n return scope.categorical(pseudocounts, size=size, rng=rng)\n\n\n#\n# Posterior clone performs symbolic inference on the pyll graph of priors.\n#\n\n\[email protected]_info(o_len=2)\ndef ap_split_trials(o_idxs, o_vals, l_idxs, l_vals, gamma, gamma_cap=DEFAULT_LF):\n \"\"\"Split the elements of `o_vals` (observations values) into two groups: those for\n trials whose losses (`l_vals`) were above gamma, and those below gamma. Note that\n only unique elements are returned, so the total number of returned elements might\n be lower than `len(o_vals)`\n \"\"\"\n o_idxs, o_vals, l_idxs, l_vals = list(\n map(np.asarray, [o_idxs, o_vals, l_idxs, l_vals])\n )\n\n # XXX if this is working, refactor this sort for efficiency\n\n # Splitting is done this way to cope with duplicate loss values.\n n_below = min(int(np.ceil(gamma * np.sqrt(len(l_vals)))), gamma_cap)\n l_order = np.argsort(l_vals)\n\n keep_idxs = set(l_idxs[l_order[:n_below]])\n below = [v for i, v in zip(o_idxs, o_vals) if i in keep_idxs]\n\n keep_idxs = set(l_idxs[l_order[n_below:]])\n above = [v for i, v in zip(o_idxs, o_vals) if i in keep_idxs]\n\n return np.asarray(below), np.asarray(above)\n\n\[email protected]\ndef broadcast_best(samples, below_llik, above_llik):\n if len(samples):\n score = below_llik - above_llik\n if len(samples) != len(score):\n raise ValueError()\n best = np.argmax(score)\n return [samples[best]] * len(samples)\n else:\n return []\n\n\ndef build_posterior(\n specs,\n prior_idxs,\n prior_vals,\n obs_idxs,\n obs_vals,\n obs_loss_idxs,\n obs_loss_vals,\n oloss_gamma,\n prior_weight,\n):\n \"\"\"\n This method clones a posterior inference graph by iterating forward in\n topological order, and replacing prior random-variables (prior_idxs, prior_vals)\n with new posterior distributions (post_specs, post_idxs, post_vals) that make use\n of observations (obs_idxs, obs_vals).\n\n \"\"\"\n assert all(\n isinstance(arg, pyll.Apply)\n for arg in [obs_loss_idxs, obs_loss_vals, oloss_gamma]\n )\n assert set(prior_idxs.keys()) == set(prior_vals.keys())\n\n expr = pyll.as_apply([specs, prior_idxs, prior_vals])\n nodes = pyll.dfs(expr)\n\n # build the joint posterior distribution as the values in this memo\n memo = {}\n # map prior RVs to observations\n obs_memo = {}\n\n for nid in prior_vals:\n # construct the leading args for each call to adaptive_parzen_sampler\n # which will permit the \"adaptive parzen samplers\" to adapt to the\n # correct samples.\n obs_below, obs_above = scope.ap_split_trials(\n obs_idxs[nid], obs_vals[nid], obs_loss_idxs, obs_loss_vals, oloss_gamma\n )\n obs_memo[prior_vals[nid]] = [obs_below, obs_above]\n for node in nodes:\n if node not in memo:\n new_inputs = [memo[arg] for arg in node.inputs()]\n if node in obs_memo:\n # -- this case corresponds to an observed Random Var\n # node.name is a distribution like \"normal\", \"randint\", etc.\n obs_below, obs_above = obs_memo[node]\n aa = [memo[a] for a in node.pos_args]\n fn = adaptive_parzen_samplers[node.name]\n b_args = 
[obs_below, prior_weight] + aa\n named_args = {kw: memo[arg] for (kw, arg) in node.named_args}\n b_post = fn(*b_args, **named_args)\n a_args = [obs_above, prior_weight] + aa\n a_post = fn(*a_args, **named_args)\n\n # fn is a function e.g ap_uniform_sampler, ap_normal_sampler, etc\n # b_post and a_post are pyll.Apply objects that are\n # AST (Abstract Syntax Trees). They create the distribution,\n # (e.g. using adaptive_parzen_normal), and then\n # call a function to sample randomly from that distribution\n # (e.g. using scope.GMM1) which return those samples.\n #\n # However we are only interested in using the samples from b_post.\n # This code looks at the AST and grabs the function name that we used\n # for sampling (e.g. scope.GMM1) and modifies it, e.g. to\n # \"scope.GMM1_lpdf\". It then calls this function, passing in the\n # samples as the first parameter.a_args\n #\n # The result is that we are effectively calling, for example:\n # below_llik = GMM1_lpdf( b_post, *adaptive_parzen_normal(obs_below, ...))\n # above_llik = GMM1_lpdf( b_post, *adaptive_parzen_normal(obs_above, ...))\n\n assert a_post.name == b_post.name\n fn_lpdf = getattr(scope, a_post.name + \"_lpdf\")\n a_kwargs = {\n n: a for n, a in a_post.named_args if n not in (\"rng\", \"size\")\n }\n b_kwargs = {\n n: a for n, a in b_post.named_args if n not in (\"rng\", \"size\")\n }\n\n # calculate the log likelihood of b_post under both distributions\n below_llik = fn_lpdf(*([b_post] + b_post.pos_args), **b_kwargs)\n above_llik = fn_lpdf(*([b_post] + a_post.pos_args), **a_kwargs)\n # compute new_node based on below & above log likelihood\n new_node = scope.broadcast_best(b_post, below_llik, above_llik)\n elif hasattr(node, \"obj\"):\n # -- keep same literals in the graph\n new_node = node\n else:\n # -- this case is for all the other stuff in the graph\n new_node = node.clone_from_inputs(new_inputs)\n memo[node] = new_node\n\n post_idxs = {nid: memo[idxs] for nid, idxs in prior_idxs.items()}\n post_vals = {nid: memo[vals] for nid, vals in prior_vals.items()}\n return post_idxs, post_vals\n\n\n# TODO: is this used?\n# @scope.define\n# def idxs_prod(full_idxs, idxs_by_label, llik_by_label):\n# \"\"\"Add all of the log-likelihoods together by id.\n#\n# Example arguments:\n# full_idxs = [0, 1, ... N-1]\n# idxs_by_label = {'node_a': [1, 3], 'node_b': [3]}\n# llik_by_label = {'node_a': [0.1, -3.3], node_b: [1.0]}\n#\n# This would return N elements: [0, 0.1, 0, -2.3, 0, 0, ... 
]\n# \"\"\"\n# assert len(set(full_idxs)) == len(full_idxs)\n# full_idxs = list(full_idxs)\n# rval = np.zeros(len(full_idxs))\n# pos_of_tid = dict(list(zip(full_idxs, list(range(len(full_idxs))))))\n# assert set(idxs_by_label.keys()) == set(llik_by_label.keys())\n# for nid in idxs_by_label:\n# idxs = idxs_by_label[nid]\n# llik = llik_by_label[nid]\n# assert np.all(np.asarray(idxs) > 1)\n# assert len(set(idxs)) == len(idxs)\n# assert len(idxs) == len(llik)\n# for ii, ll in zip(idxs, llik):\n# rval[pos_of_tid[ii]] += ll\n# return rval\n\n\n_default_prior_weight = 1.0\n\n# -- suggest best of this many draws on every iteration\n_default_n_EI_candidates = 120\n\n# -- gamma * sqrt(n_trials) is the fraction of trials to use as good\n_default_gamma = 0.25\n\n_default_n_startup_jobs = 100\n\n_default_linear_forgetting = DEFAULT_LF\n\n\ndef build_posterior_wrapper(domain, prior_weight, gamma):\n \"\"\"\n Calls build_posterior\n Args:\n domain (hyperopt.base.Domain): contains info about the obj function and the hp\n space passed to fmin\n prior_weight (float): smoothing factor for counts, to avoid having 0 prob\n # TODO: consider renaming or improving documentation for suggest\n gamma (float): the threshold to split between l(x) and g(x), see eq. 2 in\n https://papers.nips.cc/paper/4443-algorithms-for-hyper-parameter-optimization.pdf\n\n Returns:\n\n \"\"\"\n\n # -- these dummy values will be replaced in build_posterior() and never used\n observed = {\"idxs\": pyll.Literal(), \"vals\": pyll.Literal()}\n observed_loss = {\"idxs\": pyll.Literal(), \"vals\": pyll.Literal()}\n\n posterior = build_posterior(\n # -- vectorized clone of bandit template\n domain.vh.v_expr,\n # -- this dict and next represent prior dists\n domain.vh.idxs_by_label(),\n domain.vh.vals_by_label(),\n observed[\"idxs\"],\n observed[\"vals\"],\n observed_loss[\"idxs\"],\n observed_loss[\"vals\"],\n pyll.Literal(gamma),\n pyll.Literal(float(prior_weight)),\n )\n\n return observed, observed_loss, posterior\n\n\ndef suggest(\n new_ids,\n domain,\n trials,\n seed,\n prior_weight=_default_prior_weight,\n n_startup_jobs=_default_n_startup_jobs,\n n_EI_candidates=_default_n_EI_candidates,\n gamma=_default_gamma,\n verbose=True,\n):\n \"\"\"\n Given previous trials and the domain, suggest the best expected hp point\n according to the TPE-EI algo\n\n\n Args:\n prior_weight:\n n_startup_jobs:\n n_EI_candidates:\n gamma:\n verbose:\n\n Returns:\n\n \"\"\"\n\n t0 = time.time()\n # use build_posterior_wrapper to create the pyll nodes\n observed, observed_loss, posterior = build_posterior_wrapper(\n domain, prior_weight, gamma\n )\n tt = time.time() - t0\n if verbose:\n logger.info(\"build_posterior_wrapper took %f seconds\" % tt)\n\n # Loop over previous trials to collect best_docs and best_docs_loss\n best_docs = dict()\n best_docs_loss = dict()\n for doc in trials.trials:\n\n # get either this doc's own tid or the one that it's from\n tid = doc[\"misc\"].get(\"from_tid\", doc[\"tid\"])\n\n # associate infinite loss to new/running/failed jobs\n loss = doc[\"result\"].get(\"loss\")\n loss = float(\"inf\") if loss is None else float(loss)\n\n # if set, update loss for this tid if it's lower than current loss\n # otherwise, set it\n best_docs_loss.setdefault(tid, loss)\n if loss <= best_docs_loss[tid]:\n best_docs_loss[tid] = loss\n best_docs[tid] = doc\n\n # -- sort docs by order of suggestion\n # so that linear_forgetting removes the oldest ones\n tid_docs = sorted(best_docs.items())\n losses = [best_docs_loss[tid] for tid, doc in tid_docs]\n tids, docs 
= list(zip(*tid_docs)) if tid_docs else ([], [])\n\n if verbose:\n if docs:\n s = \"%i/%i trials with best loss %f\" % (\n len(docs),\n len(trials),\n np.nanmin(losses),\n )\n else:\n s = \"0 trials\"\n logger.info(\"TPE using %s\" % s)\n\n if len(docs) < n_startup_jobs:\n # N.B. THIS SEEDS THE RNG BASED ON THE new_id\n return rand.suggest(new_ids, domain, trials, seed)\n\n # Sample and compute log-probability.\n first_new_id = new_ids[0]\n if tids:\n # -- the +2 coordinates with an assertion above\n # to ensure that fake ids are used during sampling\n # TODO: not sure what assertion this refers to...\n fake_id_0 = max(max(tids), first_new_id) + 2\n else:\n # -- weird - we're running the TPE algo from scratch\n assert n_startup_jobs <= 0\n fake_id_0 = first_new_id + 2\n\n fake_ids = list(range(fake_id_0, fake_id_0 + n_EI_candidates))\n\n # -- this dictionary will map pyll nodes to the values\n # they should take during the evaluation of the pyll program\n memo = {domain.s_new_ids: fake_ids, domain.s_rng: np.random.default_rng(seed)}\n\n memo[observed_loss[\"idxs\"]] = tids\n memo[observed_loss[\"vals\"]] = losses\n\n observed_idxs_dict, observed_vals_dict = miscs_to_idxs_vals(\n [doc[\"misc\"] for doc in docs], keys=list(domain.params.keys())\n )\n memo[observed[\"idxs\"]] = observed_idxs_dict\n memo[observed[\"vals\"]] = observed_vals_dict\n\n # evaluate `n_EI_candidates` pyll nodes in `posterior` using `memo`\n # TODO: it seems to return idxs, vals, all the same. Is this correct?\n idxs, vals = pyll.rec_eval(posterior, memo=memo, print_node_on_error=False)\n\n # hack to add offset again for randint params\n for label, param in domain.params.items():\n if param.name == \"randint\" and len(param.pos_args) == 2:\n offset = param.pos_args[0].obj\n vals[label] = [val + offset for val in vals[label]]\n\n # -- retrieve the best of the samples and form the return tuple\n\n # specs are deprecated since build_posterior makes all the same\n rval_specs = [None]\n rval_results = [domain.new_result()]\n rval_miscs = [{\"tid\": first_new_id, \"cmd\": domain.cmd, \"workdir\": domain.workdir}]\n\n miscs_update_idxs_vals(\n rval_miscs,\n idxs,\n vals,\n idxs_map={fake_ids[0]: first_new_id},\n assert_all_vals_used=False,\n )\n # return the doc for the best new trial\n return trials.new_trial_docs([first_new_id], rval_specs, rval_results, rval_miscs)\n"
] | [
[
"numpy.ones",
"numpy.sum",
"numpy.argsort",
"numpy.asarray",
"numpy.log",
"numpy.seterr",
"numpy.zeros",
"numpy.searchsorted",
"numpy.argmax",
"numpy.all",
"numpy.prod",
"numpy.maximum",
"numpy.array",
"numpy.zeros_like",
"numpy.random.default_rng",
"numpy.exp",
"numpy.nanmin",
"numpy.clip",
"numpy.sqrt",
"numpy.concatenate",
"scipy.special.erf"
]
] |
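Among the numpy calls indexed for this tpe.py record, linear_forgetting_weights is the most self-contained piece of the algorithm: it down-weights old trials before the Parzen estimators are fit. A standalone numpy re-statement of just that helper, runnable without hyperopt or pyll (the body mirrors the source above with its assertions trimmed; the printed values are illustrative only):

import numpy as np

def linear_forgetting_weights(N, LF):
    # The LF most recent observations keep full weight 1.0; anything older
    # ramps down linearly toward 1/N, so old trials are gradually forgotten.
    if N == 0:
        return np.asarray([])
    if N < LF:
        return np.ones(N)
    ramp = np.linspace(1.0 / N, 1.0, num=N - LF)
    return np.concatenate([ramp, np.ones(LF)])

print(linear_forgetting_weights(3, 25))   # fewer trials than LF: all ones
print(linear_forgetting_weights(30, 25))  # 5 ramp values, then 25 ones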
selflein/nn_uncertainty_eval | [
"94a7f2292b8db2197cd55fab57324d438618ae06"
] | [
"uncertainty_eval/datasets/other.py"
] | [
"import json\nfrom pathlib import Path\n\nimport torch\nimport numpy as np\nfrom PIL import Image\nfrom torch.utils.data import Dataset, TensorDataset\nfrom tfrecord.torch.dataset import MultiTFRecordDataset\n\nfrom uncertainty_eval.datasets.tabular import TabularDataset\nfrom uncertainty_eval.datasets.abstract_datasplit import DatasetSplit\n\n\nclass GaussianNoise(DatasetSplit):\n def __init__(self, data_root, mean, std, length=10_000):\n self.data_root = data_root\n self.mean = mean\n self.std = std\n self.length = length\n\n def train(self, transform):\n return self.test(transform)\n\n def val(self, transform):\n return self.test(transform)\n\n def test(self, transform):\n return GaussianNoiseDataset(self.length, self.mean, self.std, transform)\n\n\nclass GaussianNoiseDataset(Dataset):\n \"\"\"\n Use CIFAR-10 mean and standard deviation as default values.\n mean=(125.3, 123.0, 113.9), std=(63.0, 62.1, 66.7)\n \"\"\"\n\n def __init__(self, length, mean, std, transform=None):\n self.transform = transform\n self.mean = mean\n self.std = std\n self.length = length\n self.dist = torch.distributions.Normal(mean, std)\n\n def __len__(self):\n return self.length\n\n def __getitem__(self, idx):\n img = self.dist.sample()\n if len(self.mean.shape) == 3:\n img = Image.fromarray(img.numpy().squeeze().astype(np.uint8))\n if self.transform is not None:\n img = self.transform(img)\n return img, -1\n\n\nclass Constant(DatasetSplit):\n def __init__(self, data_root, low, high, shape, length=10_000):\n self.low = low\n self.high = high\n self.length = length\n self.shape = shape\n\n def train(self, transform):\n return self.test(transform)\n\n def val(self, transform):\n return self.test(transform)\n\n def test(self, transform):\n return ConstantDataset(self.length, self.low, self.high, self.shape, transform)\n\n\nclass ConstantDataset(Dataset):\n def __init__(self, length, low, high, shape, transform=None):\n assert isinstance(low, float) and isinstance(high, float)\n\n self.low = low\n self.high = high\n self.transform = transform\n self.length = length\n self.shape = shape\n self.dist = torch.distributions.Uniform(low, high)\n\n def __len__(self):\n return self.length\n\n def __getitem__(self, idx):\n sample = self.dist.sample().item()\n sample = torch.empty(self.shape).fill_(sample)\n\n if len(self.shape) == 3:\n sample = Image.fromarray(sample.numpy().squeeze().astype(np.uint8))\n\n if self.transform is not None:\n sample = self.transform(sample)\n return sample, -1\n\n\nclass UniformNoise(DatasetSplit):\n def __init__(self, data_root, low, high, length=10_000):\n self.low = low\n self.high = high\n self.length = length\n\n def train(self, transform):\n return self.test(transform)\n\n def val(self, transform):\n return self.test(transform)\n\n def test(self, transform):\n return UniformNoiseDataset(self.length, self.low, self.high, transform)\n\n\nclass UniformNoiseDataset(Dataset):\n def __init__(self, length, low, high, transform=None):\n self.low = low\n self.high = high\n self.transform = transform\n self.length = length\n self.dist = torch.distributions.Uniform(low, high)\n\n def __len__(self):\n return self.length\n\n def __getitem__(self, idx):\n img = self.dist.sample()\n if len(self.low.shape) == 3:\n img = Image.fromarray(img.numpy().squeeze().astype(np.uint8))\n if self.transform is not None:\n img = self.transform(img)\n return img, -1\n\n\nclass OODGenomics(torch.utils.data.IterableDataset):\n \"\"\"PyTorch Dataset implementation for the Bacteria Genomics OOD dataset 
(https://github.com/google-research/google-research/tree/master/genomics_ood) proposed in\n\n J. Ren et al., “Likelihood Ratios for Out-of-Distribution Detection,” arXiv:1906.02845 [cs, stat], Available: http://arxiv.org/abs/1906.02845.\n \"\"\"\n\n splits = {\n \"train\": \"before_2011_in_tr\",\n \"val\": \"between_2011-2016_in_val\",\n \"test\": \"after_2016_in_test\",\n \"val_ood\": \"between_2011-2016_ood_val\",\n \"test_ood\": \"after_2016_ood_test\",\n }\n\n def __init__(self, data_root, split=\"train\", transform=None, target_transform=None):\n if isinstance(data_root, str):\n data_root = Path(data_root)\n self.data_root = data_root / \"llr_ood_genomics\"\n\n assert split in self.splits, f\"Split '{split}' does not exist.\"\n split_dir = self.data_root / self.splits[split]\n\n tf_record_ids = [f.stem for f in split_dir.iterdir() if f.suffix == \".tfrecord\"]\n\n self.ds = MultiTFRecordDataset(\n data_pattern=str(split_dir / \"{}.tfrecord\"),\n index_pattern=str(split_dir / \"{}.index\"),\n splits={id_: 1 / len(tf_record_ids) for id_ in tf_record_ids},\n description={\"x\": \"byte\", \"y\": \"int\", \"z\": \"byte\"},\n )\n\n with open(self.data_root / \"label_dict.json\") as f:\n label_dict = json.load(f)\n self.label_dict = {v: k for k, v in label_dict.items()}\n\n transform = transform if transform is not None else lambda x: x\n target_transform = (\n target_transform if target_transform is not None else lambda x: x\n )\n self.data_transform = lambda x: self.full_transform(\n x, transform, target_transform\n )\n\n @staticmethod\n def full_transform(item, transform, target_transform):\n dec = np.array([int(i) for i in item[\"x\"].tobytes().decode(\"utf-8\").split(\" \")])\n x = torch.from_numpy(transform(dec.copy()))\n x = torch.nn.functional.one_hot(x.long(), 4).float()\n y = torch.from_numpy(target_transform(item[\"y\"].copy())).long().squeeze()\n return x, y\n\n def __iter__(self):\n return map(self.data_transform, self.ds.__iter__())\n\n\nclass GenomicsDataset(DatasetSplit):\n data_shape = (250,)\n\n def __init__(self, data_root):\n self.data_root = data_root\n\n def train(self, transform):\n return OODGenomics(self.data_root, split=\"train\", transform=transform)\n\n def val(self, transform):\n return OODGenomics(self.data_root, split=\"val\", transform=transform)\n\n def test(self, transform):\n return OODGenomics(self.data_root, split=\"test\", transform=transform)\n\n\nclass OODGenomicsDataset(DatasetSplit):\n data_shape = (250,)\n\n def __init__(self, data_root):\n self.data_root = data_root\n\n def train(self, transform):\n raise NotImplementedError\n\n def val(self, transform):\n return OODGenomics(self.data_root, split=\"val_ood\", transform=transform)\n\n def test(self, transform):\n return OODGenomics(self.data_root, split=\"test_ood\", transform=transform)\n\n\nclass ImageEmbeddingDataset(DatasetSplit):\n data_shape = (640,)\n\n def __init__(self, data_root, dataset_name):\n self.data_root = data_root\n self.dataset_name = dataset_name\n\n def load_split(self, split):\n data = np.load(\n self.data_root / \"embeddings\" / f\"{self.dataset_name}_{split}.npz\"\n )\n return torch.from_numpy(data[\"x\"]), torch.from_numpy(data[\"y\"])\n\n def train(self, transform):\n return TabularDataset(*self.load_split(\"train\"), transforms=transform)\n\n def val(self, transform):\n return TabularDataset(*self.load_split(\"val\"), transforms=transform)\n\n def test(self, transform):\n return TabularDataset(*self.load_split(\"test\"), transforms=transform)\n\n\nclass 
GenomicsNoise(DatasetSplit):\n data_shape = (250,)\n\n def __init__(self, data_root):\n self.data_root = data_root\n data = np.load(self.data_root / \"genomics_noise.npz\")\n self.x = torch.from_numpy(data[\"x\"])\n self.y = torch.from_numpy(data[\"y\"])\n\n def train(self, transform):\n raise NotImplementedError\n\n def val(self, transform):\n raise NotImplementedError\n\n def test(self, transform):\n return TensorDataset(self.x, self.y)\n\n\nclass GenomicsEmbeddingsDataset(ImageEmbeddingDataset):\n data_shape = (128,)\n"
] | [
[
"torch.distributions.Uniform",
"numpy.load",
"torch.empty",
"torch.distributions.Normal",
"torch.from_numpy",
"torch.utils.data.TensorDataset"
]
] |
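The torch.distributions usage indexed for this record follows a single pattern: a Dataset whose __getitem__ draws a fresh noise sample and pairs it with -1, the out-of-distribution label. A minimal sketch of that pattern (the class name GaussianNoiseSketch is mine, and the PIL/transform branch of the original GaussianNoiseDataset is omitted):

import torch
from torch.utils.data import Dataset

class GaussianNoiseSketch(Dataset):
    # Each __getitem__ draws a new sample from a fixed Normal distribution,
    # so the data never repeats and the label is always the OOD marker -1.
    def __init__(self, length, mean, std):
        self.length = length
        self.dist = torch.distributions.Normal(mean, std)

    def __len__(self):
        return self.length

    def __getitem__(self, idx):
        return self.dist.sample(), -1

ds = GaussianNoiseSketch(length=4, mean=torch.zeros(3, 8, 8), std=torch.ones(3, 8, 8))
x, y = ds[0]  # x has shape (3, 8, 8); y == -1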
atakanokan/flair | [
"d33aa6a007384da76d1ae8dac6f4fc61bc652ce7"
] | [
"flair/embeddings.py"
] | [
"import os\nimport re\nimport logging\nfrom abc import abstractmethod\nfrom collections import Counter\nfrom pathlib import Path\nfrom typing import List, Union, Dict\n\nimport gensim\nimport numpy as np\nimport torch\nfrom bpemb import BPEmb\nfrom deprecated import deprecated\n\nfrom pytorch_pretrained_bert import (\n BertTokenizer,\n BertModel,\n TransfoXLTokenizer,\n TransfoXLModel,\n OpenAIGPTModel,\n OpenAIGPTTokenizer,\n)\n\nfrom pytorch_pretrained_bert.modeling_openai import (\n PRETRAINED_MODEL_ARCHIVE_MAP as OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_MAP,\n)\n\nfrom pytorch_pretrained_bert.modeling_transfo_xl import (\n PRETRAINED_MODEL_ARCHIVE_MAP as TRANSFORMER_XL_PRETRAINED_MODEL_ARCHIVE_MAP,\n)\n\nimport flair\nfrom flair.data import Corpus\nfrom .nn import LockedDropout, WordDropout\nfrom .data import Dictionary, Token, Sentence\nfrom .file_utils import cached_path, open_inside_zip\n\nlog = logging.getLogger(\"flair\")\n\n\nclass Embeddings(torch.nn.Module):\n \"\"\"Abstract base class for all embeddings. Every new type of embedding must implement these methods.\"\"\"\n\n @property\n @abstractmethod\n def embedding_length(self) -> int:\n \"\"\"Returns the length of the embedding vector.\"\"\"\n pass\n\n @property\n @abstractmethod\n def embedding_type(self) -> str:\n pass\n\n def embed(self, sentences: Union[Sentence, List[Sentence]]) -> List[Sentence]:\n \"\"\"Add embeddings to all words in a list of sentences. If embeddings are already added, updates only if embeddings\n are non-static.\"\"\"\n\n # if only one sentence is passed, convert to list of sentence\n if type(sentences) is Sentence:\n sentences = [sentences]\n\n everything_embedded: bool = True\n\n if self.embedding_type == \"word-level\":\n for sentence in sentences:\n for token in sentence.tokens:\n if self.name not in token._embeddings.keys():\n everything_embedded = False\n else:\n for sentence in sentences:\n if self.name not in sentence._embeddings.keys():\n everything_embedded = False\n\n if not everything_embedded or not self.static_embeddings:\n self._add_embeddings_internal(sentences)\n\n return sentences\n\n @abstractmethod\n def _add_embeddings_internal(self, sentences: List[Sentence]) -> List[Sentence]:\n \"\"\"Private method for adding embeddings to all words in a list of sentences.\"\"\"\n pass\n\n\nclass TokenEmbeddings(Embeddings):\n \"\"\"Abstract base class for all token-level embeddings. Ever new type of word embedding must implement these methods.\"\"\"\n\n @property\n @abstractmethod\n def embedding_length(self) -> int:\n \"\"\"Returns the length of the embedding vector.\"\"\"\n pass\n\n @property\n def embedding_type(self) -> str:\n return \"word-level\"\n\n\nclass DocumentEmbeddings(Embeddings):\n \"\"\"Abstract base class for all document-level embeddings. 
Every new type of document embedding must implement these methods.\"\"\"\n\n @property\n @abstractmethod\n def embedding_length(self) -> int:\n \"\"\"Returns the length of the embedding vector.\"\"\"\n pass\n\n @property\n def embedding_type(self) -> str:\n return \"sentence-level\"\n\n\nclass StackedEmbeddings(TokenEmbeddings):\n \"\"\"A stack of embeddings, used if you need to combine several different embedding types.\"\"\"\n\n def __init__(self, embeddings: List[TokenEmbeddings], detach: bool = True):\n \"\"\"The constructor takes a list of embeddings to be combined.\"\"\"\n super().__init__()\n\n self.embeddings = embeddings\n\n # IMPORTANT: add embeddings as torch modules\n for i, embedding in enumerate(embeddings):\n self.add_module(\"list_embedding_{}\".format(i), embedding)\n\n self.detach: bool = detach\n self.name: str = \"Stack\"\n self.static_embeddings: bool = True\n\n self.__embedding_type: str = embeddings[0].embedding_type\n\n self.__embedding_length: int = 0\n for embedding in embeddings:\n self.__embedding_length += embedding.embedding_length\n\n def embed(\n self, sentences: Union[Sentence, List[Sentence]], static_embeddings: bool = True\n ):\n # if only one sentence is passed, convert to list of sentence\n if type(sentences) is Sentence:\n sentences = [sentences]\n\n for embedding in self.embeddings:\n embedding.embed(sentences)\n\n @property\n def embedding_type(self) -> str:\n return self.__embedding_type\n\n @property\n def embedding_length(self) -> int:\n return self.__embedding_length\n\n def _add_embeddings_internal(self, sentences: List[Sentence]) -> List[Sentence]:\n\n for embedding in self.embeddings:\n embedding._add_embeddings_internal(sentences)\n\n return sentences\n\n def __str__(self):\n return f'StackedEmbeddings [{\",\".join([str(e) for e in self.embeddings])}]'\n\n\nclass WordEmbeddings(TokenEmbeddings):\n \"\"\"Standard static word embeddings, such as GloVe or FastText.\"\"\"\n\n def __init__(self, embeddings: str, field: str = None):\n \"\"\"\n Initializes classic word embeddings. 
Constructor downloads required files if not there.\n :param embeddings: one of: 'glove', 'extvec', 'crawl' or two-letter language code or custom\n If you want to use a custom embedding file, just pass the path to the embeddings as embeddings variable.\n \"\"\"\n self.embeddings = embeddings\n\n old_base_path = (\n \"https://s3.eu-central-1.amazonaws.com/alan-nlp/resources/embeddings/\"\n )\n base_path = (\n \"https://s3.eu-central-1.amazonaws.com/alan-nlp/resources/embeddings-v0.3/\"\n )\n embeddings_path_v4 = (\n \"https://s3.eu-central-1.amazonaws.com/alan-nlp/resources/embeddings-v0.4/\"\n )\n embeddings_path_v4_1 = \"https://s3.eu-central-1.amazonaws.com/alan-nlp/resources/embeddings-v0.4.1/\"\n\n cache_dir = Path(\"embeddings\")\n\n # GLOVE embeddings\n if embeddings.lower() == \"glove\" or embeddings.lower() == \"en-glove\":\n cached_path(f\"{old_base_path}glove.gensim.vectors.npy\", cache_dir=cache_dir)\n embeddings = cached_path(\n f\"{old_base_path}glove.gensim\", cache_dir=cache_dir\n )\n\n # TURIAN embeddings\n elif embeddings.lower() == \"turian\" or embeddings.lower() == \"en-turian\":\n cached_path(\n f\"{embeddings_path_v4_1}turian.vectors.npy\", cache_dir=cache_dir\n )\n embeddings = cached_path(\n f\"{embeddings_path_v4_1}turian\", cache_dir=cache_dir\n )\n\n # KOMNINOS embeddings\n elif embeddings.lower() == \"extvec\" or embeddings.lower() == \"en-extvec\":\n cached_path(\n f\"{old_base_path}extvec.gensim.vectors.npy\", cache_dir=cache_dir\n )\n embeddings = cached_path(\n f\"{old_base_path}extvec.gensim\", cache_dir=cache_dir\n )\n\n # FT-CRAWL embeddings\n elif embeddings.lower() == \"crawl\" or embeddings.lower() == \"en-crawl\":\n cached_path(\n f\"{base_path}en-fasttext-crawl-300d-1M.vectors.npy\", cache_dir=cache_dir\n )\n embeddings = cached_path(\n f\"{base_path}en-fasttext-crawl-300d-1M\", cache_dir=cache_dir\n )\n\n # FT-CRAWL embeddings\n elif (\n embeddings.lower() == \"news\"\n or embeddings.lower() == \"en-news\"\n or embeddings.lower() == \"en\"\n ):\n cached_path(\n f\"{base_path}en-fasttext-news-300d-1M.vectors.npy\", cache_dir=cache_dir\n )\n embeddings = cached_path(\n f\"{base_path}en-fasttext-news-300d-1M\", cache_dir=cache_dir\n )\n\n # twitter embeddings\n elif embeddings.lower() == \"twitter\" or embeddings.lower() == \"en-twitter\":\n cached_path(\n f\"{old_base_path}twitter.gensim.vectors.npy\", cache_dir=cache_dir\n )\n embeddings = cached_path(\n f\"{old_base_path}twitter.gensim\", cache_dir=cache_dir\n )\n\n # two-letter language code wiki embeddings\n elif len(embeddings.lower()) == 2:\n cached_path(\n f\"{embeddings_path_v4}{embeddings}-wiki-fasttext-300d-1M.vectors.npy\",\n cache_dir=cache_dir,\n )\n embeddings = cached_path(\n f\"{embeddings_path_v4}{embeddings}-wiki-fasttext-300d-1M\",\n cache_dir=cache_dir,\n )\n\n # two-letter language code wiki embeddings\n elif len(embeddings.lower()) == 7 and embeddings.endswith(\"-wiki\"):\n cached_path(\n f\"{embeddings_path_v4}{embeddings[:2]}-wiki-fasttext-300d-1M.vectors.npy\",\n cache_dir=cache_dir,\n )\n embeddings = cached_path(\n f\"{embeddings_path_v4}{embeddings[:2]}-wiki-fasttext-300d-1M\",\n cache_dir=cache_dir,\n )\n\n # two-letter language code crawl embeddings\n elif len(embeddings.lower()) == 8 and embeddings.endswith(\"-crawl\"):\n cached_path(\n f\"{embeddings_path_v4}{embeddings[:2]}-crawl-fasttext-300d-1M.vectors.npy\",\n cache_dir=cache_dir,\n )\n embeddings = cached_path(\n f\"{embeddings_path_v4}{embeddings[:2]}-crawl-fasttext-300d-1M\",\n cache_dir=cache_dir,\n )\n\n elif 
not Path(embeddings).exists():\n raise ValueError(\n f'The given embeddings \"{embeddings}\" is not available or is not a valid path.'\n )\n\n self.name: str = str(embeddings)\n self.static_embeddings = True\n\n if str(embeddings).endswith(\".bin\"):\n self.precomputed_word_embeddings = gensim.models.KeyedVectors.load_word2vec_format(\n str(embeddings), binary=True\n )\n else:\n self.precomputed_word_embeddings = gensim.models.KeyedVectors.load(\n str(embeddings)\n )\n\n self.field = field\n\n self.__embedding_length: int = self.precomputed_word_embeddings.vector_size\n super().__init__()\n\n @property\n def embedding_length(self) -> int:\n return self.__embedding_length\n\n def _add_embeddings_internal(self, sentences: List[Sentence]) -> List[Sentence]:\n\n for i, sentence in enumerate(sentences):\n\n for token, token_idx in zip(sentence.tokens, range(len(sentence.tokens))):\n\n if \"field\" not in self.__dict__ or self.field is None:\n word = token.text\n else:\n word = token.get_tag(self.field).value\n\n if word in self.precomputed_word_embeddings:\n word_embedding = self.precomputed_word_embeddings[word]\n elif word.lower() in self.precomputed_word_embeddings:\n word_embedding = self.precomputed_word_embeddings[word.lower()]\n elif (\n re.sub(r\"\\d\", \"#\", word.lower()) in self.precomputed_word_embeddings\n ):\n word_embedding = self.precomputed_word_embeddings[\n re.sub(r\"\\d\", \"#\", word.lower())\n ]\n elif (\n re.sub(r\"\\d\", \"0\", word.lower()) in self.precomputed_word_embeddings\n ):\n word_embedding = self.precomputed_word_embeddings[\n re.sub(r\"\\d\", \"0\", word.lower())\n ]\n else:\n word_embedding = np.zeros(self.embedding_length, dtype=\"float\")\n\n word_embedding = torch.FloatTensor(word_embedding)\n\n token.set_embedding(self.name, word_embedding)\n\n return sentences\n\n def __str__(self):\n return self.name\n\n def extra_repr(self):\n return f\"'{self.embeddings}'\"\n\n\nclass OneHotEmbeddings(TokenEmbeddings):\n \"\"\"One-hot encoded embeddings.\"\"\"\n\n def __init__(\n self,\n corpus=Union[Corpus, List[Sentence]],\n field: str = \"text\",\n embedding_length: int = 300,\n min_freq: int = 3,\n ):\n\n super().__init__()\n self.name = \"one-hot\"\n self.static_embeddings = False\n self.min_freq = min_freq\n\n tokens = list(map((lambda s: s.tokens), corpus.train))\n tokens = [token for sublist in tokens for token in sublist]\n\n if field == \"text\":\n most_common = Counter(list(map((lambda t: t.text), tokens))).most_common()\n else:\n most_common = Counter(\n list(map((lambda t: t.get_tag(field)), tokens))\n ).most_common()\n\n tokens = []\n for token, freq in most_common:\n if freq < min_freq:\n break\n tokens.append(token)\n\n self.vocab_dictionary: Dictionary = Dictionary()\n for token in tokens:\n self.vocab_dictionary.add_item(token)\n\n # max_tokens = 500\n self.__embedding_length = embedding_length\n\n print(self.vocab_dictionary.idx2item)\n print(f\"vocabulary size of {len(self.vocab_dictionary)}\")\n\n # model architecture\n self.embedding_layer = torch.nn.Embedding(\n len(self.vocab_dictionary), self.__embedding_length\n )\n torch.nn.init.xavier_uniform_(self.embedding_layer.weight)\n\n @property\n def embedding_length(self) -> int:\n return self.__embedding_length\n\n def _add_embeddings_internal(self, sentences: List[Sentence]) -> List[Sentence]:\n\n one_hot_sentences = []\n for i, sentence in enumerate(sentences):\n context_idxs = [\n self.vocab_dictionary.get_idx_for_item(t.text) for t in sentence.tokens\n ]\n\n 
one_hot_sentences.extend(context_idxs)\n\n one_hot_sentences = torch.tensor(one_hot_sentences, dtype=torch.long).to(\n flair.device\n )\n\n embedded = self.embedding_layer.forward(one_hot_sentences)\n\n index = 0\n for sentence in sentences:\n for token in sentence:\n embedding = embedded[index]\n token.set_embedding(self.name, embedding)\n index += 1\n\n return sentences\n\n def __str__(self):\n return self.name\n\n @property\n def embedding_length(self) -> int:\n return self.__embedding_length\n\n def extra_repr(self):\n return \"min_freq={}\".format(self.min_freq)\n\n\nclass BPEmbSerializable(BPEmb):\n def __getstate__(self):\n state = self.__dict__.copy()\n # save the sentence piece model as binary file (not as path which may change)\n state[\"spm_model_binary\"] = open(self.model_file, mode=\"rb\").read()\n state[\"spm\"] = None\n return state\n\n def __setstate__(self, state):\n from bpemb.util import sentencepiece_load\n\n model_file = self.model_tpl.format(lang=state[\"lang\"], vs=state[\"vs\"])\n self.__dict__ = state\n\n # write out the binary sentence piece model into the expected directory\n self.cache_dir: Path = Path(flair.cache_root) / \"embeddings\"\n if \"spm_model_binary\" in self.__dict__:\n # if the model was saved as binary and it is not found on disk, write to appropriate path\n if not os.path.exists(self.cache_dir / state[\"lang\"]):\n os.makedirs(self.cache_dir / state[\"lang\"])\n self.model_file = self.cache_dir / model_file\n with open(self.model_file, \"wb\") as out:\n out.write(self.__dict__[\"spm_model_binary\"])\n else:\n # otherwise, use normal process and potentially trigger another download\n self.model_file = self._load_file(model_file)\n\n # once the model is there, load it with sentence piece\n state[\"spm\"] = sentencepiece_load(self.model_file)\n\n\nclass BytePairEmbeddings(TokenEmbeddings):\n def __init__(\n self,\n language: str,\n dim: int = 50,\n syllables: int = 100000,\n cache_dir=Path(flair.cache_root) / \"embeddings\",\n ):\n \"\"\"\n Initializes BP embeddings. 
Constructor downloads required files if not there.\n \"\"\"\n\n self.name: str = f\"bpe-{language}-{syllables}-{dim}\"\n self.static_embeddings = True\n self.embedder = BPEmbSerializable(\n lang=language, vs=syllables, dim=dim, cache_dir=cache_dir\n )\n\n self.__embedding_length: int = self.embedder.emb.vector_size * 2\n super().__init__()\n\n @property\n def embedding_length(self) -> int:\n return self.__embedding_length\n\n def _add_embeddings_internal(self, sentences: List[Sentence]) -> List[Sentence]:\n\n for i, sentence in enumerate(sentences):\n\n for token, token_idx in zip(sentence.tokens, range(len(sentence.tokens))):\n\n if \"field\" not in self.__dict__ or self.field is None:\n word = token.text\n else:\n word = token.get_tag(self.field).value\n\n if word.strip() == \"\":\n # empty words get no embedding\n token.set_embedding(\n self.name, torch.zeros(self.embedding_length, dtype=torch.float)\n )\n else:\n # all other words get embedded\n embeddings = self.embedder.embed(word.lower())\n embedding = np.concatenate(\n (embeddings[0], embeddings[len(embeddings) - 1])\n )\n token.set_embedding(\n self.name, torch.tensor(embedding, dtype=torch.float)\n )\n\n return sentences\n\n def __str__(self):\n return self.name\n\n def extra_repr(self):\n return \"model={}\".format(self.name)\n\n\nclass ELMoEmbeddings(TokenEmbeddings):\n \"\"\"Contextual word embeddings using word-level LM, as proposed in Peters et al., 2018.\"\"\"\n\n def __init__(\n self, model: str = \"original\", options_file: str = None, weight_file: str = None\n ):\n super().__init__()\n\n try:\n import allennlp.commands.elmo\n except:\n log.warning(\"-\" * 100)\n log.warning('ATTENTION! The library \"allennlp\" is not installed!')\n log.warning(\n 'To use ELMoEmbeddings, please first install with \"pip install allennlp\"'\n )\n log.warning(\"-\" * 100)\n pass\n\n self.name = \"elmo-\" + model\n self.static_embeddings = True\n\n if not options_file or not weight_file:\n # the default model for ELMo is the 'original' model, which is very large\n options_file = allennlp.commands.elmo.DEFAULT_OPTIONS_FILE\n weight_file = allennlp.commands.elmo.DEFAULT_WEIGHT_FILE\n # alternatively, a small, medium or portuguese model can be selected by passing the appropriate mode name\n if model == \"small\":\n options_file = \"https://s3-us-west-2.amazonaws.com/allennlp/models/elmo/2x1024_128_2048cnn_1xhighway/elmo_2x1024_128_2048cnn_1xhighway_options.json\"\n weight_file = \"https://s3-us-west-2.amazonaws.com/allennlp/models/elmo/2x1024_128_2048cnn_1xhighway/elmo_2x1024_128_2048cnn_1xhighway_weights.hdf5\"\n if model == \"medium\":\n options_file = \"https://s3-us-west-2.amazonaws.com/allennlp/models/elmo/2x2048_256_2048cnn_1xhighway/elmo_2x2048_256_2048cnn_1xhighway_options.json\"\n weight_file = \"https://s3-us-west-2.amazonaws.com/allennlp/models/elmo/2x2048_256_2048cnn_1xhighway/elmo_2x2048_256_2048cnn_1xhighway_weights.hdf5\"\n if model == \"pt\" or model == \"portuguese\":\n options_file = \"https://s3-us-west-2.amazonaws.com/allennlp/models/elmo/contributed/pt/elmo_pt_options.json\"\n weight_file = \"https://s3-us-west-2.amazonaws.com/allennlp/models/elmo/contributed/pt/elmo_pt_weights.hdf5\"\n if model == \"pubmed\":\n options_file = \"https://s3-us-west-2.amazonaws.com/allennlp/models/elmo/contributed/pubmed/elmo_2x4096_512_2048cnn_2xhighway_options.json\"\n weight_file = \"https://s3-us-west-2.amazonaws.com/allennlp/models/elmo/contributed/pubmed/elmo_2x4096_512_2048cnn_2xhighway_weights_PubMed_only.hdf5\"\n\n # put on Cuda if 
available\n from flair import device\n\n if re.fullmatch(r'cuda:[0-9]+', str(device)):\n cuda_device = int(str(device).split(':')[-1])\n elif str(device) == \"cpu\":\n cuda_device = -1\n else:\n cuda_device = 0\n\n self.ee = allennlp.commands.elmo.ElmoEmbedder(\n options_file=options_file, weight_file=weight_file, cuda_device=cuda_device\n )\n\n # embed a dummy sentence to determine embedding_length\n dummy_sentence: Sentence = Sentence()\n dummy_sentence.add_token(Token(\"hello\"))\n embedded_dummy = self.embed(dummy_sentence)\n self.__embedding_length: int = len(\n embedded_dummy[0].get_token(1).get_embedding()\n )\n\n @property\n def embedding_length(self) -> int:\n return self.__embedding_length\n\n def _add_embeddings_internal(self, sentences: List[Sentence]) -> List[Sentence]:\n\n sentence_words: List[List[str]] = []\n for sentence in sentences:\n sentence_words.append([token.text for token in sentence])\n\n embeddings = self.ee.embed_batch(sentence_words)\n\n for i, sentence in enumerate(sentences):\n\n sentence_embeddings = embeddings[i]\n\n for token, token_idx in zip(sentence.tokens, range(len(sentence.tokens))):\n word_embedding = torch.cat(\n [\n torch.FloatTensor(sentence_embeddings[0, token_idx, :]),\n torch.FloatTensor(sentence_embeddings[1, token_idx, :]),\n torch.FloatTensor(sentence_embeddings[2, token_idx, :]),\n ],\n 0,\n )\n\n token.set_embedding(self.name, word_embedding)\n\n return sentences\n\n def extra_repr(self):\n return \"model={}\".format(self.name)\n\n def __str__(self):\n return self.name\n\n\nclass ELMoTransformerEmbeddings(TokenEmbeddings):\n \"\"\"Contextual word embeddings using word-level Transformer-based LM, as proposed in Peters et al., 2018.\"\"\"\n\n def __init__(self, model_file: str):\n super().__init__()\n\n try:\n from allennlp.modules.token_embedders.bidirectional_language_model_token_embedder import (\n BidirectionalLanguageModelTokenEmbedder,\n )\n from allennlp.data.token_indexers.elmo_indexer import (\n ELMoTokenCharactersIndexer,\n )\n except:\n log.warning(\"-\" * 100)\n log.warning('ATTENTION! 
The library \"allennlp\" is not installed!')\n log.warning(\n \"To use ELMoTransformerEmbeddings, please first install a recent version from https://github.com/allenai/allennlp\"\n )\n log.warning(\"-\" * 100)\n pass\n\n self.name = \"elmo-transformer\"\n self.static_embeddings = True\n self.lm_embedder = BidirectionalLanguageModelTokenEmbedder(\n archive_file=model_file,\n dropout=0.2,\n bos_eos_tokens=(\"<S>\", \"</S>\"),\n remove_bos_eos=True,\n requires_grad=False,\n )\n self.lm_embedder = self.lm_embedder.to(device=flair.device)\n self.vocab = self.lm_embedder._lm.vocab\n self.indexer = ELMoTokenCharactersIndexer()\n\n # embed a dummy sentence to determine embedding_length\n dummy_sentence: Sentence = Sentence()\n dummy_sentence.add_token(Token(\"hello\"))\n embedded_dummy = self.embed(dummy_sentence)\n self.__embedding_length: int = len(\n embedded_dummy[0].get_token(1).get_embedding()\n )\n\n @property\n def embedding_length(self) -> int:\n return self.__embedding_length\n\n def _add_embeddings_internal(self, sentences: List[Sentence]) -> List[Sentence]:\n # Avoid conflicts with flair's Token class\n import allennlp.data.tokenizers.token as allen_nlp_token\n\n indexer = self.indexer\n vocab = self.vocab\n\n for sentence in sentences:\n character_indices = indexer.tokens_to_indices(\n [allen_nlp_token.Token(token.text) for token in sentence], vocab, \"elmo\"\n )[\"elmo\"]\n\n indices_tensor = torch.LongTensor([character_indices])\n indices_tensor = indices_tensor.to(device=flair.device)\n embeddings = self.lm_embedder(indices_tensor)[0].detach().cpu().numpy()\n\n for token, token_idx in zip(sentence.tokens, range(len(sentence.tokens))):\n embedding = embeddings[token_idx]\n word_embedding = torch.FloatTensor(embedding)\n token.set_embedding(self.name, word_embedding)\n\n return sentences\n\n def extra_repr(self):\n return \"model={}\".format(self.name)\n\n def __str__(self):\n return self.name\n\n\nclass TransformerXLEmbeddings(TokenEmbeddings):\n def __init__(self, model: str = \"transfo-xl-wt103\"):\n \"\"\"Transformer-XL embeddings, as proposed in Dai et al., 2019.\n :param model: name of Transformer-XL model\n \"\"\"\n super().__init__()\n\n if model not in TRANSFORMER_XL_PRETRAINED_MODEL_ARCHIVE_MAP.keys():\n raise ValueError(\"Provided Transformer-XL model is not available.\")\n\n self.tokenizer = TransfoXLTokenizer.from_pretrained(model)\n self.model = TransfoXLModel.from_pretrained(model)\n self.name = model\n self.static_embeddings = True\n\n dummy_sentence: Sentence = Sentence()\n dummy_sentence.add_token(Token(\"hello\"))\n embedded_dummy = self.embed(dummy_sentence)\n self.__embedding_length: int = len(\n embedded_dummy[0].get_token(1).get_embedding()\n )\n\n @property\n def embedding_length(self) -> int:\n return self.__embedding_length\n\n def _add_embeddings_internal(self, sentences: List[Sentence]) -> List[Sentence]:\n self.model.to(flair.device)\n self.model.eval()\n\n with torch.no_grad():\n for sentence in sentences:\n token_strings = [token.text for token in sentence.tokens]\n indexed_tokens = self.tokenizer.convert_tokens_to_ids(token_strings)\n\n tokens_tensor = torch.tensor([indexed_tokens])\n tokens_tensor = tokens_tensor.to(flair.device)\n\n hidden_states, _ = self.model(tokens_tensor)\n\n for token, token_idx in zip(\n sentence.tokens, range(len(sentence.tokens))\n ):\n token.set_embedding(self.name, hidden_states[0][token_idx])\n\n return sentences\n\n def extra_repr(self):\n return \"model={}\".format(self.name)\n\n def __str__(self):\n return 
self.name\n\n\nclass OpenAIGPTEmbeddings(TokenEmbeddings):\n def __init__(\n self, model: str = \"openai-gpt\", pooling_operation: str = \"first_last\"\n ):\n \"\"\"OpenAI GPT embeddings, as proposed in Radford et al. 2018.\n :param model: name of OpenAI GPT model\n :param pooling_operation: defines pooling operation for subwords\n \"\"\"\n super().__init__()\n\n if model not in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_MAP.keys():\n raise ValueError(\"Provided OpenAI GPT model is not available.\")\n\n self.tokenizer = OpenAIGPTTokenizer.from_pretrained(model)\n self.model = OpenAIGPTModel.from_pretrained(model)\n self.name = model\n self.static_embeddings = True\n self.pooling_operation = pooling_operation\n\n dummy_sentence: Sentence = Sentence()\n dummy_sentence.add_token(Token(\"hello\"))\n embedded_dummy = self.embed(dummy_sentence)\n self.__embedding_length: int = len(\n embedded_dummy[0].get_token(1).get_embedding()\n )\n\n @property\n def embedding_length(self) -> int:\n return self.__embedding_length\n\n def _add_embeddings_internal(self, sentences: List[Sentence]) -> List[Sentence]:\n self.model.to(flair.device)\n self.model.eval()\n\n with torch.no_grad():\n for sentence in sentences:\n for token in sentence.tokens:\n token_text = token.text\n\n subwords = self.tokenizer.tokenize(token_text)\n indexed_tokens = self.tokenizer.convert_tokens_to_ids(subwords)\n tokens_tensor = torch.tensor([indexed_tokens])\n tokens_tensor = tokens_tensor.to(flair.device)\n\n hidden_states = self.model(tokens_tensor)\n\n if self.pooling_operation == \"first\":\n # Use embedding of first subword\n token.set_embedding(self.name, hidden_states[0][0])\n elif self.pooling_operation == \"last\":\n last_embedding = hidden_states[0][len(hidden_states[0]) - 1]\n token.set_embedding(self.name, last_embedding)\n elif self.pooling_operation == \"first_last\":\n # Use embedding of first and last subword\n first_embedding = hidden_states[0][0]\n last_embedding = hidden_states[0][len(hidden_states[0]) - 1]\n final_embedding = torch.cat([first_embedding, last_embedding])\n token.set_embedding(self.name, final_embedding)\n else:\n # Otherwise, use mean over all subwords in token\n all_embeddings = [\n embedding.unsqueeze(0) for embedding in hidden_states[0]\n ]\n mean = torch.mean(torch.cat(all_embeddings, dim=0), dim=0)\n token.set_embedding(self.name, mean)\n\n return sentences\n\n def extra_repr(self):\n return \"model={}\".format(self.name)\n\n def __str__(self):\n return self.name\n\n\nclass CharacterEmbeddings(TokenEmbeddings):\n \"\"\"Character embeddings of words, as proposed in Lample et al., 2016.\"\"\"\n\n def __init__(self, path_to_char_dict: str = None, char_embedding_dim: int = 25, hidden_size_char: int = 25):\n \"\"\"Uses the default character dictionary if none provided.\"\"\"\n\n super().__init__()\n self.name = \"Char\"\n self.static_embeddings = False\n\n # use list of common characters if none provided\n if path_to_char_dict is None:\n self.char_dictionary: Dictionary = Dictionary.load(\"common-chars\")\n else:\n self.char_dictionary: Dictionary = Dictionary.load_from_file(\n path_to_char_dict\n )\n\n self.char_embedding_dim: int = char_embedding_dim\n self.hidden_size_char: int = hidden_size_char\n self.char_embedding = torch.nn.Embedding(\n len(self.char_dictionary.item2idx), self.char_embedding_dim\n )\n self.char_rnn = torch.nn.LSTM(\n self.char_embedding_dim,\n self.hidden_size_char,\n num_layers=1,\n bidirectional=True,\n )\n\n self.__embedding_length = self.char_embedding_dim * 2\n\n 
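        # note: the char-RNN is bidirectional, so each token is represented by the
        # concatenation of the final forward and backward hidden states; with the
        # defaults above char_embedding_dim == hidden_size_char, which is why the
        # length is computed as char_embedding_dim * 2 here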
self.to(flair.device)\n\n @property\n def embedding_length(self) -> int:\n return self.__embedding_length\n\n def _add_embeddings_internal(self, sentences: List[Sentence]):\n\n for sentence in sentences:\n\n tokens_char_indices = []\n\n # translate words in sentence into ints using dictionary\n for token in sentence.tokens:\n char_indices = [\n self.char_dictionary.get_idx_for_item(char) for char in token.text\n ]\n tokens_char_indices.append(char_indices)\n\n # sort words by length, for batching and masking\n tokens_sorted_by_length = sorted(\n tokens_char_indices, key=lambda p: len(p), reverse=True\n )\n d = {}\n for i, ci in enumerate(tokens_char_indices):\n for j, cj in enumerate(tokens_sorted_by_length):\n if ci == cj:\n d[j] = i\n continue\n chars2_length = [len(c) for c in tokens_sorted_by_length]\n longest_token_in_sentence = max(chars2_length)\n tokens_mask = torch.zeros(\n (len(tokens_sorted_by_length), longest_token_in_sentence),\n dtype=torch.long,\n device=flair.device,\n )\n\n for i, c in enumerate(tokens_sorted_by_length):\n tokens_mask[i, : chars2_length[i]] = torch.tensor(\n c, dtype=torch.long, device=flair.device\n )\n\n # chars for rnn processing\n chars = tokens_mask\n\n character_embeddings = self.char_embedding(chars).transpose(0, 1)\n\n packed = torch.nn.utils.rnn.pack_padded_sequence(\n character_embeddings, chars2_length\n )\n\n lstm_out, self.hidden = self.char_rnn(packed)\n\n outputs, output_lengths = torch.nn.utils.rnn.pad_packed_sequence(lstm_out)\n outputs = outputs.transpose(0, 1)\n chars_embeds_temp = torch.zeros(\n (outputs.size(0), outputs.size(2)),\n dtype=torch.float,\n device=flair.device,\n )\n for i, index in enumerate(output_lengths):\n chars_embeds_temp[i] = outputs[i, index - 1]\n character_embeddings = chars_embeds_temp.clone()\n for i in range(character_embeddings.size(0)):\n character_embeddings[d[i]] = chars_embeds_temp[i]\n\n for token_number, token in enumerate(sentence.tokens):\n token.set_embedding(self.name, character_embeddings[token_number])\n\n def __str__(self):\n return self.name\n\n\nclass FlairEmbeddings(TokenEmbeddings):\n \"\"\"Contextual string embeddings of words, as proposed in Akbik et al., 2018.\"\"\"\n\n def __init__(\n self,\n model: str,\n use_cache: bool = False,\n cache_directory: Path = None,\n chars_per_chunk: int = 512,\n ):\n \"\"\"\n initializes contextual string embeddings using a character-level language model.\n :param model: model string, one of 'news-forward', 'news-backward', 'news-forward-fast', 'news-backward-fast',\n 'mix-forward', 'mix-backward', 'german-forward', 'german-backward', 'polish-backward', 'polish-forward'\n depending on which character language model is desired.\n :param use_cache: if set to False, will not write embeddings to file for later retrieval. this saves disk space but will\n not allow re-use of once computed embeddings that do not fit into memory\n :param cache_directory: if cache_directory is not set, the cache will be written to ~/.flair/embeddings. otherwise the cache\n is written to the provided directory.\n :param chars_per_chunk: max number of chars per rnn pass to control speed/memory tradeoff. Higher means faster but requires\n more memory. 
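        For example, with the default of 512, a batch padded to 2048 characters is
        (roughly) pushed through the character language model in four passes.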
Lower means slower but less memory.\n \"\"\"\n super().__init__()\n\n cache_dir = Path(\"embeddings\")\n\n aws_path: str = \"https://s3.eu-central-1.amazonaws.com/alan-nlp/resources\"\n\n self.PRETRAINED_MODEL_ARCHIVE_MAP = {\n # multilingual models\n \"multi-forward\": f\"{aws_path}/embeddings-v0.4/lm-multi-forward-v0.1.pt\",\n \"multi-backward\": f\"{aws_path}/embeddings-v0.4/lm-multi-backward-v0.1.pt\",\n \"multi-forward-fast\": f\"{aws_path}/embeddings-v0.4/lm-multi-forward-fast-v0.1.pt\",\n \"multi-backward-fast\": f\"{aws_path}/embeddings-v0.4/lm-multi-backward-fast-v0.1.pt\",\n # English models\n \"news-forward\": f\"{aws_path}/embeddings-v0.4.1/big-news-forward--h2048-l1-d0.05-lr30-0.25-20/news-forward-0.4.1.pt\",\n \"news-backward\": f\"{aws_path}/embeddings-v0.4.1/big-news-backward--h2048-l1-d0.05-lr30-0.25-20/news-backward-0.4.1.pt\",\n \"news-forward-fast\": f\"{aws_path}/embeddings/lm-news-english-forward-1024-v0.2rc.pt\",\n \"news-backward-fast\": f\"{aws_path}/embeddings/lm-news-english-backward-1024-v0.2rc.pt\",\n \"mix-forward\": f\"{aws_path}/embeddings/lm-mix-english-forward-v0.2rc.pt\",\n \"mix-backward\": f\"{aws_path}/embeddings/lm-mix-english-backward-v0.2rc.pt\",\n # Arabic\n \"ar-forward\": f\"{aws_path}/embeddings-stefan-it/lm-ar-opus-large-forward-v0.1.pt\",\n \"ar-backward\": f\"{aws_path}/embeddings-stefan-it/lm-ar-opus-large-backward-v0.1.pt\",\n # Bulgarian\n \"bg-forward-fast\": f\"{aws_path}/embeddings-v0.3/lm-bg-small-forward-v0.1.pt\",\n \"bg-backward-fast\": f\"{aws_path}/embeddings-v0.3/lm-bg-small-backward-v0.1.pt\",\n \"bg-forward\": f\"{aws_path}/embeddings-stefan-it/lm-bg-opus-large-forward-v0.1.pt\",\n \"bg-backward\": f\"{aws_path}/embeddings-stefan-it/lm-bg-opus-large-backward-v0.1.pt\",\n # Czech\n \"cs-forward\": f\"{aws_path}/embeddings-stefan-it/lm-cs-opus-large-forward-v0.1.pt\",\n \"cs-backward\": f\"{aws_path}/embeddings-stefan-it/lm-cs-opus-large-backward-v0.1.pt\",\n \"cs-v0-forward\": f\"{aws_path}/embeddings-v0.4/lm-cs-large-forward-v0.1.pt\",\n \"cs-v0-backward\": f\"{aws_path}/embeddings-v0.4/lm-cs-large-backward-v0.1.pt\",\n # Danish\n \"da-forward\": f\"{aws_path}/embeddings-stefan-it/lm-da-opus-large-forward-v0.1.pt\",\n \"da-backward\": f\"{aws_path}/embeddings-stefan-it/lm-da-opus-large-backward-v0.1.pt\",\n # German\n \"de-forward\": f\"{aws_path}/embeddings/lm-mix-german-forward-v0.2rc.pt\",\n \"de-backward\": f\"{aws_path}/embeddings/lm-mix-german-backward-v0.2rc.pt\",\n \"de-historic-ha-forward\": f\"{aws_path}/embeddings-stefan-it/lm-historic-hamburger-anzeiger-forward-v0.1.pt\",\n \"de-historic-ha-backward\": f\"{aws_path}/embeddings-stefan-it/lm-historic-hamburger-anzeiger-backward-v0.1.pt\",\n \"de-historic-wz-forward\": f\"{aws_path}/embeddings-stefan-it/lm-historic-wiener-zeitung-forward-v0.1.pt\",\n \"de-historic-wz-backward\": f\"{aws_path}/embeddings-stefan-it/lm-historic-wiener-zeitung-backward-v0.1.pt\",\n # Spanish\n \"es-forward\": f\"{aws_path}/embeddings-v0.4/language_model_es_forward_long/lm-es-forward.pt\",\n \"es-backward\": f\"{aws_path}/embeddings-v0.4/language_model_es_backward_long/lm-es-backward.pt\",\n \"es-forward-fast\": f\"{aws_path}/embeddings-v0.4/language_model_es_forward/lm-es-forward-fast.pt\",\n \"es-backward-fast\": f\"{aws_path}/embeddings-v0.4/language_model_es_backward/lm-es-backward-fast.pt\",\n # Basque\n \"eu-forward\": f\"{aws_path}/embeddings-stefan-it/lm-eu-opus-large-forward-v0.1.pt\",\n \"eu-backward\": f\"{aws_path}/embeddings-stefan-it/lm-eu-opus-large-backward-v0.1.pt\",\n 
\"eu-v0-forward\": f\"{aws_path}/embeddings-v0.4/lm-eu-large-forward-v0.1.pt\",\n \"eu-v0-backward\": f\"{aws_path}/embeddings-v0.4/lm-eu-large-backward-v0.1.pt\",\n # Persian\n \"fa-forward\": f\"{aws_path}/embeddings-stefan-it/lm-fa-opus-large-forward-v0.1.pt\",\n \"fa-backward\": f\"{aws_path}/embeddings-stefan-it/lm-fa-opus-large-backward-v0.1.pt\",\n # Finnish\n \"fi-forward\": f\"{aws_path}/embeddings-stefan-it/lm-fi-opus-large-forward-v0.1.pt\",\n \"fi-backward\": f\"{aws_path}/embeddings-stefan-it/lm-fi-opus-large-backward-v0.1.pt\",\n # French\n \"fr-forward\": f\"{aws_path}/embeddings/lm-fr-charlm-forward.pt\",\n \"fr-backward\": f\"{aws_path}/embeddings/lm-fr-charlm-backward.pt\",\n # Hebrew\n \"he-forward\": f\"{aws_path}/embeddings-stefan-it/lm-he-opus-large-forward-v0.1.pt\",\n \"he-backward\": f\"{aws_path}/embeddings-stefan-it/lm-he-opus-large-backward-v0.1.pt\",\n # Hindi\n \"hi-forward\": f\"{aws_path}/embeddings-stefan-it/lm-hi-opus-large-forward-v0.1.pt\",\n \"hi-backward\": f\"{aws_path}/embeddings-stefan-it/lm-hi-opus-large-backward-v0.1.pt\",\n # Croatian\n \"hr-forward\": f\"{aws_path}/embeddings-stefan-it/lm-hr-opus-large-forward-v0.1.pt\",\n \"hr-backward\": f\"{aws_path}/embeddings-stefan-it/lm-hr-opus-large-backward-v0.1.pt\",\n # Indonesian\n \"id-forward\": f\"{aws_path}/embeddings-stefan-it/lm-id-opus-large-forward-v0.1.pt\",\n \"id-backward\": f\"{aws_path}/embeddings-stefan-it/lm-id-opus-large-backward-v0.1.pt\",\n # Italian\n \"it-forward\": f\"{aws_path}/embeddings-stefan-it/lm-it-opus-large-forward-v0.1.pt\",\n \"it-backward\": f\"{aws_path}/embeddings-stefan-it/lm-it-opus-large-backward-v0.1.pt\",\n # Japanese\n \"ja-forward\": f\"{aws_path}/embeddings-v0.4.1/lm__char-forward__ja-wikipedia-3GB/japanese-forward.pt\",\n \"ja-backward\": f\"{aws_path}/embeddings-v0.4.1/lm__char-backward__ja-wikipedia-3GB/japanese-backward.pt\",\n # Dutch\n \"nl-forward\": f\"{aws_path}/embeddings-stefan-it/lm-nl-opus-large-forward-v0.1.pt\",\n \"nl-backward\": f\"{aws_path}/embeddings-stefan-it/lm-nl-opus-large-backward-v0.1.pt\",\n \"nl-v0-forward\": f\"{aws_path}/embeddings-v0.4/lm-nl-large-forward-v0.1.pt\",\n \"nl-v0-backward\": f\"{aws_path}/embeddings-v0.4/lm-nl-large-backward-v0.1.pt\",\n # Norwegian\n \"no-forward\": f\"{aws_path}/embeddings-stefan-it/lm-no-opus-large-forward-v0.1.pt\",\n \"no-backward\": f\"{aws_path}/embeddings-stefan-it/lm-no-opus-large-backward-v0.1.pt\",\n # Polish\n \"pl-forward\": f\"{aws_path}/embeddings/lm-polish-forward-v0.2.pt\",\n \"pl-backward\": f\"{aws_path}/embeddings/lm-polish-backward-v0.2.pt\",\n \"pl-opus-forward\": f\"{aws_path}/embeddings-stefan-it/lm-pl-opus-large-forward-v0.1.pt\",\n \"pl-opus-backward\": f\"{aws_path}/embeddings-stefan-it/lm-pl-opus-large-backward-v0.1.pt\",\n # Portuguese\n \"pt-forward\": f\"{aws_path}/embeddings-v0.4/lm-pt-forward.pt\",\n \"pt-backward\": f\"{aws_path}/embeddings-v0.4/lm-pt-backward.pt\",\n # Pubmed\n \"pubmed-forward\": f\"{aws_path}/embeddings-v0.4.1/pubmed-2015-fw-lm.pt\",\n \"pubmed-backward\": f\"{aws_path}/embeddings-v0.4.1/pubmed-2015-bw-lm.pt\",\n # Slovenian\n \"sl-forward\": f\"{aws_path}/embeddings-stefan-it/lm-sl-opus-large-forward-v0.1.pt\",\n \"sl-backward\": f\"{aws_path}/embeddings-stefan-it/lm-sl-opus-large-backward-v0.1.pt\",\n \"sl-v0-forward\": f\"{aws_path}/embeddings-v0.3/lm-sl-large-forward-v0.1.pt\",\n \"sl-v0-backward\": f\"{aws_path}/embeddings-v0.3/lm-sl-large-backward-v0.1.pt\",\n # Swedish\n \"sv-forward\": 
f\"{aws_path}/embeddings-stefan-it/lm-sv-opus-large-forward-v0.1.pt\",\n \"sv-backward\": f\"{aws_path}/embeddings-stefan-it/lm-sv-opus-large-backward-v0.1.pt\",\n \"sv-v0-forward\": f\"{aws_path}/embeddings-v0.4/lm-sv-large-forward-v0.1.pt\",\n \"sv-v0-backward\": f\"{aws_path}/embeddings-v0.4/lm-sv-large-backward-v0.1.pt\",\n }\n\n # load model if in pretrained model map\n if model.lower() in self.PRETRAINED_MODEL_ARCHIVE_MAP:\n base_path = self.PRETRAINED_MODEL_ARCHIVE_MAP[model.lower()]\n model = cached_path(base_path, cache_dir=cache_dir)\n\n elif replace_with_language_code(model) in self.PRETRAINED_MODEL_ARCHIVE_MAP:\n base_path = self.PRETRAINED_MODEL_ARCHIVE_MAP[\n replace_with_language_code(model)\n ]\n model = cached_path(base_path, cache_dir=cache_dir)\n\n elif not Path(model).exists():\n raise ValueError(\n f'The given model \"{model}\" is not available or is not a valid path.'\n )\n\n self.name = str(model)\n self.static_embeddings = True\n\n from flair.models import LanguageModel\n\n self.lm = LanguageModel.load_language_model(model)\n\n self.is_forward_lm: bool = self.lm.is_forward_lm\n self.chars_per_chunk: int = chars_per_chunk\n\n # initialize cache if use_cache set\n self.cache = None\n if use_cache:\n cache_path = (\n Path(f\"{self.name}-tmp-cache.sqllite\")\n if not cache_directory\n else cache_directory / f\"{self.name}-tmp-cache.sqllite\"\n )\n from sqlitedict import SqliteDict\n\n self.cache = SqliteDict(str(cache_path), autocommit=True)\n\n # embed a dummy sentence to determine embedding_length\n dummy_sentence: Sentence = Sentence()\n dummy_sentence.add_token(Token(\"hello\"))\n embedded_dummy = self.embed(dummy_sentence)\n self.__embedding_length: int = len(\n embedded_dummy[0].get_token(1).get_embedding()\n )\n\n # set to eval mode\n self.eval()\n\n def train(self, mode=True):\n pass\n\n def __getstate__(self):\n # Copy the object's state from self.__dict__ which contains\n # all our instance attributes. Always use the dict.copy()\n # method to avoid modifying the original state.\n state = self.__dict__.copy()\n # Remove the unpicklable entries.\n state[\"cache\"] = None\n return state\n\n @property\n def embedding_length(self) -> int:\n return self.__embedding_length\n\n def _add_embeddings_internal(self, sentences: List[Sentence]) -> List[Sentence]:\n\n # make compatible with serialized models\n if \"chars_per_chunk\" not in self.__dict__:\n self.chars_per_chunk = 512\n\n # if cache is used, try setting embeddings from cache first\n if \"cache\" in self.__dict__ and self.cache is not None:\n\n # try populating embeddings from cache\n all_embeddings_retrieved_from_cache: bool = True\n for sentence in sentences:\n key = sentence.to_tokenized_string()\n embeddings = self.cache.get(key)\n\n if not embeddings:\n all_embeddings_retrieved_from_cache = False\n break\n else:\n for token, embedding in zip(sentence, embeddings):\n token.set_embedding(self.name, torch.FloatTensor(embedding))\n\n if all_embeddings_retrieved_from_cache:\n return sentences\n\n with torch.no_grad():\n\n # if this is not possible, use LM to generate embedding. 
First, get text sentences\n text_sentences = [sentence.to_tokenized_string() for sentence in sentences]\n\n longest_character_sequence_in_batch: int = len(max(text_sentences, key=len))\n\n # pad strings with whitespaces to longest sentence\n sentences_padded: List[str] = []\n append_padded_sentence = sentences_padded.append\n\n start_marker = \"\\n\"\n\n end_marker = \" \"\n extra_offset = len(start_marker)\n for sentence_text in text_sentences:\n pad_by = longest_character_sequence_in_batch - len(sentence_text)\n if self.is_forward_lm:\n padded = \"{}{}{}{}\".format(\n start_marker, sentence_text, end_marker, pad_by * \" \"\n )\n append_padded_sentence(padded)\n else:\n padded = \"{}{}{}{}\".format(\n start_marker, sentence_text[::-1], end_marker, pad_by * \" \"\n )\n append_padded_sentence(padded)\n\n # get hidden states from language model\n all_hidden_states_in_lm = self.lm.get_representation(\n sentences_padded, self.chars_per_chunk\n )\n\n # take first or last hidden states from language model as word representation\n for i, sentence in enumerate(sentences):\n sentence_text = sentence.to_tokenized_string()\n\n offset_forward: int = extra_offset\n offset_backward: int = len(sentence_text) + extra_offset\n\n for token in sentence.tokens:\n\n offset_forward += len(token.text)\n\n if self.is_forward_lm:\n offset = offset_forward\n else:\n offset = offset_backward\n\n embedding = all_hidden_states_in_lm[offset, i, :]\n\n # if self.tokenized_lm or token.whitespace_after:\n offset_forward += 1\n offset_backward -= 1\n\n offset_backward -= len(token.text)\n\n token.set_embedding(self.name, embedding.clone().detach())\n\n all_hidden_states_in_lm = None\n\n if \"cache\" in self.__dict__ and self.cache is not None:\n for sentence in sentences:\n self.cache[sentence.to_tokenized_string()] = [\n token._embeddings[self.name].tolist() for token in sentence\n ]\n\n return sentences\n\n def __str__(self):\n return self.name\n\n\nclass PooledFlairEmbeddings(TokenEmbeddings):\n def __init__(\n self,\n contextual_embeddings: Union[str, FlairEmbeddings],\n pooling: str = \"min\",\n only_capitalized: bool = False,\n **kwargs,\n ):\n\n super().__init__()\n\n # use the character language model embeddings as basis\n if type(contextual_embeddings) is str:\n self.context_embeddings: FlairEmbeddings = FlairEmbeddings(\n contextual_embeddings, **kwargs\n )\n else:\n self.context_embeddings: FlairEmbeddings = contextual_embeddings\n\n # length is twice the original character LM embedding length\n self.embedding_length = self.context_embeddings.embedding_length * 2\n self.name = self.context_embeddings.name + \"-context\"\n\n # these fields are for the embedding memory\n self.word_embeddings = {}\n self.word_count = {}\n\n # whether to add only capitalized words to memory (faster runtime and lower memory consumption)\n self.only_capitalized = only_capitalized\n\n # we re-compute embeddings dynamically at each epoch\n self.static_embeddings = False\n\n # set the memory method\n self.pooling = pooling\n if pooling == \"mean\":\n self.aggregate_op = torch.add\n elif pooling == \"fade\":\n self.aggregate_op = torch.add\n elif pooling == \"max\":\n self.aggregate_op = torch.max\n elif pooling == \"min\":\n self.aggregate_op = torch.min\n\n def train(self, mode=True):\n super().train(mode=mode)\n if mode:\n # memory is wiped each time we do a training run\n print(\"train mode resetting embeddings\")\n self.word_embeddings = {}\n self.word_count = {}\n\n def _add_embeddings_internal(self, sentences: List[Sentence]) -> 
List[Sentence]:\n\n self.context_embeddings.embed(sentences)\n\n # if we keep a pooling, it needs to be updated continuously\n for sentence in sentences:\n for token in sentence.tokens:\n\n # update embedding\n local_embedding = token._embeddings[self.context_embeddings.name]\n local_embedding = local_embedding.to(flair.device)\n\n if token.text[0].isupper() or not self.only_capitalized:\n\n if token.text not in self.word_embeddings:\n self.word_embeddings[token.text] = local_embedding\n self.word_count[token.text] = 1\n else:\n aggregated_embedding = self.aggregate_op(\n self.word_embeddings[token.text], local_embedding\n )\n if self.pooling == \"fade\":\n aggregated_embedding /= 2\n self.word_embeddings[token.text] = aggregated_embedding\n self.word_count[token.text] += 1\n\n # add embeddings after updating\n for sentence in sentences:\n for token in sentence.tokens:\n if token.text in self.word_embeddings:\n base = (\n self.word_embeddings[token.text] / self.word_count[token.text]\n if self.pooling == \"mean\"\n else self.word_embeddings[token.text]\n )\n else:\n base = token._embeddings[self.context_embeddings.name]\n\n token.set_embedding(self.name, base)\n\n return sentences\n\n def embedding_length(self) -> int:\n return self.embedding_length\n\n\nclass BertEmbeddings(TokenEmbeddings):\n def __init__(\n self,\n bert_model_or_path: str = \"bert-base-uncased\",\n layers: str = \"-1,-2,-3,-4\",\n pooling_operation: str = \"first\",\n ):\n \"\"\"\n Bidirectional transformer embeddings of words, as proposed in Devlin et al., 2018.\n :param bert_model_or_path: name of BERT model ('') or directory path containing custom model, configuration file\n and vocab file (names of three files should be - bert_config.json, pytorch_model.bin/model.chkpt, vocab.txt)\n :param layers: string indicating which layers to take for embedding\n :param pooling_operation: how to get from token piece embeddings to token embedding. 
Either pool them and take\n the average ('mean') or use first word piece embedding as token embedding ('first)\n \"\"\"\n super().__init__()\n\n self.tokenizer = BertTokenizer.from_pretrained(bert_model_or_path)\n self.model = BertModel.from_pretrained(bert_model_or_path)\n self.layer_indexes = [int(x) for x in layers.split(\",\")]\n self.pooling_operation = pooling_operation\n self.name = str(bert_model_or_path)\n self.static_embeddings = True\n\n class BertInputFeatures(object):\n \"\"\"Private helper class for holding BERT-formatted features\"\"\"\n\n def __init__(\n self,\n unique_id,\n tokens,\n input_ids,\n input_mask,\n input_type_ids,\n token_subtoken_count,\n ):\n self.unique_id = unique_id\n self.tokens = tokens\n self.input_ids = input_ids\n self.input_mask = input_mask\n self.input_type_ids = input_type_ids\n self.token_subtoken_count = token_subtoken_count\n\n def _convert_sentences_to_features(\n self, sentences, max_sequence_length: int\n ) -> [BertInputFeatures]:\n\n max_sequence_length = max_sequence_length + 2\n\n features: List[BertEmbeddings.BertInputFeatures] = []\n for (sentence_index, sentence) in enumerate(sentences):\n\n bert_tokenization: List[str] = []\n token_subtoken_count: Dict[int, int] = {}\n\n for token in sentence:\n subtokens = self.tokenizer.tokenize(token.text)\n bert_tokenization.extend(subtokens)\n token_subtoken_count[token.idx] = len(subtokens)\n\n if len(bert_tokenization) > max_sequence_length - 2:\n bert_tokenization = bert_tokenization[0 : (max_sequence_length - 2)]\n\n tokens = []\n input_type_ids = []\n tokens.append(\"[CLS]\")\n input_type_ids.append(0)\n for token in bert_tokenization:\n tokens.append(token)\n input_type_ids.append(0)\n tokens.append(\"[SEP]\")\n input_type_ids.append(0)\n\n input_ids = self.tokenizer.convert_tokens_to_ids(tokens)\n # The mask has 1 for real tokens and 0 for padding tokens. Only real\n # tokens are attended to.\n input_mask = [1] * len(input_ids)\n\n # Zero-pad up to the sequence length.\n while len(input_ids) < max_sequence_length:\n input_ids.append(0)\n input_mask.append(0)\n input_type_ids.append(0)\n\n features.append(\n BertEmbeddings.BertInputFeatures(\n unique_id=sentence_index,\n tokens=tokens,\n input_ids=input_ids,\n input_mask=input_mask,\n input_type_ids=input_type_ids,\n token_subtoken_count=token_subtoken_count,\n )\n )\n\n return features\n\n def _add_embeddings_internal(self, sentences: List[Sentence]) -> List[Sentence]:\n \"\"\"Add embeddings to all words in a list of sentences. 
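        Sentences are tokenized into BERT word pieces with [CLS]/[SEP] markers added,
        padded to the longest sequence in the batch, and the encoder layers selected in
        `layers` are concatenated per word piece.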
If embeddings are already added,\n updates only if embeddings are non-static.\"\"\"\n\n # first, find longest sentence in batch\n longest_sentence_in_batch: int = len(\n max(\n [\n self.tokenizer.tokenize(sentence.to_tokenized_string())\n for sentence in sentences\n ],\n key=len,\n )\n )\n\n # prepare id maps for BERT model\n features = self._convert_sentences_to_features(\n sentences, longest_sentence_in_batch\n )\n all_input_ids = torch.LongTensor([f.input_ids for f in features]).to(\n flair.device\n )\n all_input_masks = torch.LongTensor([f.input_mask for f in features]).to(\n flair.device\n )\n\n # put encoded batch through BERT model to get all hidden states of all encoder layers\n self.model.to(flair.device)\n self.model.eval()\n all_encoder_layers, _ = self.model(\n all_input_ids, token_type_ids=None, attention_mask=all_input_masks\n )\n\n with torch.no_grad():\n\n for sentence_index, sentence in enumerate(sentences):\n\n feature = features[sentence_index]\n\n # get aggregated embeddings for each BERT-subtoken in sentence\n subtoken_embeddings = []\n for token_index, _ in enumerate(feature.tokens):\n all_layers = []\n for layer_index in self.layer_indexes:\n layer_output = (\n all_encoder_layers[int(layer_index)]\n .detach()\n .cpu()[sentence_index]\n )\n all_layers.append(layer_output[token_index])\n\n subtoken_embeddings.append(torch.cat(all_layers))\n\n # get the current sentence object\n token_idx = 0\n for token in sentence:\n # add concatenated embedding to sentence\n token_idx += 1\n\n if self.pooling_operation == \"first\":\n # use first subword embedding if pooling operation is 'first'\n token.set_embedding(self.name, subtoken_embeddings[token_idx])\n else:\n # otherwise, do a mean over all subwords in token\n embeddings = subtoken_embeddings[\n token_idx : token_idx\n + feature.token_subtoken_count[token.idx]\n ]\n embeddings = [\n embedding.unsqueeze(0) for embedding in embeddings\n ]\n mean = torch.mean(torch.cat(embeddings, dim=0), dim=0)\n token.set_embedding(self.name, mean)\n\n token_idx += feature.token_subtoken_count[token.idx] - 1\n\n return sentences\n\n @property\n @abstractmethod\n def embedding_length(self) -> int:\n \"\"\"Returns the length of the embedding vector.\"\"\"\n return len(self.layer_indexes) * self.model.config.hidden_size\n\n\nclass CharLMEmbeddings(TokenEmbeddings):\n \"\"\"Contextual string embeddings of words, as proposed in Akbik et al., 2018. \"\"\"\n\n @deprecated(version=\"0.4\", reason=\"Use 'FlairEmbeddings' instead.\")\n def __init__(\n self,\n model: str,\n detach: bool = True,\n use_cache: bool = False,\n cache_directory: Path = None,\n ):\n \"\"\"\n initializes contextual string embeddings using a character-level language model.\n :param model: model string, one of 'news-forward', 'news-backward', 'news-forward-fast', 'news-backward-fast',\n 'mix-forward', 'mix-backward', 'german-forward', 'german-backward', 'polish-backward', 'polish-forward'\n depending on which character language model is desired.\n :param detach: if set to False, the gradient will propagate into the language model. this dramatically slows down\n training and often leads to worse results, so not recommended.\n :param use_cache: if set to False, will not write embeddings to file for later retrieval. this saves disk space but will\n not allow re-use of once computed embeddings that do not fit into memory\n :param cache_directory: if cache_directory is not set, the cache will be written to ~/.flair/embeddings. 
otherwise the cache\n is written to the provided directory.\n \"\"\"\n super().__init__()\n\n cache_dir = Path(\"embeddings\")\n\n # multilingual forward (English, German, French, Italian, Dutch, Polish)\n if model.lower() == \"multi-forward\":\n base_path = \"https://s3.eu-central-1.amazonaws.com/alan-nlp/resources/embeddings-v0.4/lm-multi-forward-v0.1.pt\"\n model = cached_path(base_path, cache_dir=cache_dir)\n # multilingual backward (English, German, French, Italian, Dutch, Polish)\n elif model.lower() == \"multi-backward\":\n base_path = \"https://s3.eu-central-1.amazonaws.com/alan-nlp/resources/embeddings-v0.4/lm-multi-backward-v0.1.pt\"\n model = cached_path(base_path, cache_dir=cache_dir)\n\n # news-english-forward\n elif model.lower() == \"news-forward\":\n base_path = \"https://s3.eu-central-1.amazonaws.com/alan-nlp/resources/embeddings/lm-news-english-forward-v0.2rc.pt\"\n model = cached_path(base_path, cache_dir=cache_dir)\n\n # news-english-backward\n elif model.lower() == \"news-backward\":\n base_path = \"https://s3.eu-central-1.amazonaws.com/alan-nlp/resources/embeddings/lm-news-english-backward-v0.2rc.pt\"\n model = cached_path(base_path, cache_dir=cache_dir)\n\n # news-english-forward\n elif model.lower() == \"news-forward-fast\":\n base_path = \"https://s3.eu-central-1.amazonaws.com/alan-nlp/resources/embeddings/lm-news-english-forward-1024-v0.2rc.pt\"\n model = cached_path(base_path, cache_dir=cache_dir)\n\n # news-english-backward\n elif model.lower() == \"news-backward-fast\":\n base_path = \"https://s3.eu-central-1.amazonaws.com/alan-nlp/resources/embeddings/lm-news-english-backward-1024-v0.2rc.pt\"\n model = cached_path(base_path, cache_dir=cache_dir)\n\n # mix-english-forward\n elif model.lower() == \"mix-forward\":\n base_path = \"https://s3.eu-central-1.amazonaws.com/alan-nlp/resources/embeddings/lm-mix-english-forward-v0.2rc.pt\"\n model = cached_path(base_path, cache_dir=cache_dir)\n\n # mix-english-backward\n elif model.lower() == \"mix-backward\":\n base_path = \"https://s3.eu-central-1.amazonaws.com/alan-nlp/resources/embeddings/lm-mix-english-backward-v0.2rc.pt\"\n model = cached_path(base_path, cache_dir=cache_dir)\n\n # mix-german-forward\n elif model.lower() == \"german-forward\" or model.lower() == \"de-forward\":\n base_path = \"https://s3.eu-central-1.amazonaws.com/alan-nlp/resources/embeddings/lm-mix-german-forward-v0.2rc.pt\"\n model = cached_path(base_path, cache_dir=cache_dir)\n\n # mix-german-backward\n elif model.lower() == \"german-backward\" or model.lower() == \"de-backward\":\n base_path = \"https://s3.eu-central-1.amazonaws.com/alan-nlp/resources/embeddings/lm-mix-german-backward-v0.2rc.pt\"\n model = cached_path(base_path, cache_dir=cache_dir)\n\n # common crawl Polish forward\n elif model.lower() == \"polish-forward\" or model.lower() == \"pl-forward\":\n base_path = \"https://s3.eu-central-1.amazonaws.com/alan-nlp/resources/embeddings/lm-polish-forward-v0.2.pt\"\n model = cached_path(base_path, cache_dir=cache_dir)\n\n # common crawl Polish backward\n elif model.lower() == \"polish-backward\" or model.lower() == \"pl-backward\":\n base_path = \"https://s3.eu-central-1.amazonaws.com/alan-nlp/resources/embeddings/lm-polish-backward-v0.2.pt\"\n model = cached_path(base_path, cache_dir=cache_dir)\n\n # Slovenian forward\n elif model.lower() == \"slovenian-forward\" or model.lower() == \"sl-forward\":\n base_path = \"https://s3.eu-central-1.amazonaws.com/alan-nlp/resources/embeddings-v0.3/lm-sl-large-forward-v0.1.pt\"\n model = 
cached_path(base_path, cache_dir=cache_dir)\n # Slovenian backward\n elif model.lower() == \"slovenian-backward\" or model.lower() == \"sl-backward\":\n base_path = \"https://s3.eu-central-1.amazonaws.com/alan-nlp/resources/embeddings-v0.3/lm-sl-large-backward-v0.1.pt\"\n model = cached_path(base_path, cache_dir=cache_dir)\n\n # Bulgarian forward\n elif model.lower() == \"bulgarian-forward\" or model.lower() == \"bg-forward\":\n base_path = \"https://s3.eu-central-1.amazonaws.com/alan-nlp/resources/embeddings-v0.3/lm-bg-small-forward-v0.1.pt\"\n model = cached_path(base_path, cache_dir=cache_dir)\n # Bulgarian backward\n elif model.lower() == \"bulgarian-backward\" or model.lower() == \"bg-backward\":\n base_path = \"https://s3.eu-central-1.amazonaws.com/alan-nlp/resources/embeddings-v0.3/lm-bg-small-backward-v0.1.pt\"\n model = cached_path(base_path, cache_dir=cache_dir)\n\n # Dutch forward\n elif model.lower() == \"dutch-forward\" or model.lower() == \"nl-forward\":\n base_path = \"https://s3.eu-central-1.amazonaws.com/alan-nlp/resources/embeddings-v0.4/lm-nl-large-forward-v0.1.pt\"\n model = cached_path(base_path, cache_dir=cache_dir)\n # Dutch backward\n elif model.lower() == \"dutch-backward\" or model.lower() == \"nl-backward\":\n base_path = \"https://s3.eu-central-1.amazonaws.com/alan-nlp/resources/embeddings-v0.4/lm-nl-large-backward-v0.1.pt\"\n model = cached_path(base_path, cache_dir=cache_dir)\n\n # Swedish forward\n elif model.lower() == \"swedish-forward\" or model.lower() == \"sv-forward\":\n base_path = \"https://s3.eu-central-1.amazonaws.com/alan-nlp/resources/embeddings-v0.4/lm-sv-large-forward-v0.1.pt\"\n model = cached_path(base_path, cache_dir=cache_dir)\n # Swedish backward\n elif model.lower() == \"swedish-backward\" or model.lower() == \"sv-backward\":\n base_path = \"https://s3.eu-central-1.amazonaws.com/alan-nlp/resources/embeddings-v0.4/lm-sv-large-backward-v0.1.pt\"\n model = cached_path(base_path, cache_dir=cache_dir)\n\n # French forward\n elif model.lower() == \"french-forward\" or model.lower() == \"fr-forward\":\n base_path = \"https://s3.eu-central-1.amazonaws.com/alan-nlp/resources/embeddings/lm-fr-charlm-forward.pt\"\n model = cached_path(base_path, cache_dir=cache_dir)\n # French backward\n elif model.lower() == \"french-backward\" or model.lower() == \"fr-backward\":\n base_path = \"https://s3.eu-central-1.amazonaws.com/alan-nlp/resources/embeddings/lm-fr-charlm-backward.pt\"\n model = cached_path(base_path, cache_dir=cache_dir)\n\n # Czech forward\n elif model.lower() == \"czech-forward\" or model.lower() == \"cs-forward\":\n base_path = \"https://s3.eu-central-1.amazonaws.com/alan-nlp/resources/embeddings-v0.4/lm-cs-large-forward-v0.1.pt\"\n model = cached_path(base_path, cache_dir=cache_dir)\n # Czech backward\n elif model.lower() == \"czech-backward\" or model.lower() == \"cs-backward\":\n base_path = \"https://s3.eu-central-1.amazonaws.com/alan-nlp/resources/embeddings-v0.4/lm-cs-large-backward-v0.1.pt\"\n model = cached_path(base_path, cache_dir=cache_dir)\n\n # Portuguese forward\n elif model.lower() == \"portuguese-forward\" or model.lower() == \"pt-forward\":\n base_path = \"https://s3.eu-central-1.amazonaws.com/alan-nlp/resources/embeddings-v0.4/lm-pt-forward.pt\"\n model = cached_path(base_path, cache_dir=cache_dir)\n # Portuguese backward\n elif model.lower() == \"portuguese-backward\" or model.lower() == \"pt-backward\":\n base_path = \"https://s3.eu-central-1.amazonaws.com/alan-nlp/resources/embeddings-v0.4/lm-pt-backward.pt\"\n model = 
cached_path(base_path, cache_dir=cache_dir)\n\n elif not Path(model).exists():\n raise ValueError(\n f'The given model \"{model}\" is not available or is not a valid path.'\n )\n\n self.name = str(model)\n self.static_embeddings = detach\n\n from flair.models import LanguageModel\n\n self.lm = LanguageModel.load_language_model(model)\n self.detach = detach\n\n self.is_forward_lm: bool = self.lm.is_forward_lm\n\n # initialize cache if use_cache set\n self.cache = None\n if use_cache:\n cache_path = (\n Path(f\"{self.name}-tmp-cache.sqllite\")\n if not cache_directory\n else cache_directory / f\"{self.name}-tmp-cache.sqllite\"\n )\n from sqlitedict import SqliteDict\n\n self.cache = SqliteDict(str(cache_path), autocommit=True)\n\n # embed a dummy sentence to determine embedding_length\n dummy_sentence: Sentence = Sentence()\n dummy_sentence.add_token(Token(\"hello\"))\n embedded_dummy = self.embed(dummy_sentence)\n self.__embedding_length: int = len(\n embedded_dummy[0].get_token(1).get_embedding()\n )\n\n # set to eval mode\n self.eval()\n\n def train(self, mode=True):\n pass\n\n def __getstate__(self):\n # Copy the object's state from self.__dict__ which contains\n # all our instance attributes. Always use the dict.copy()\n # method to avoid modifying the original state.\n state = self.__dict__.copy()\n # Remove the unpicklable entries.\n state[\"cache\"] = None\n return state\n\n @property\n def embedding_length(self) -> int:\n return self.__embedding_length\n\n def _add_embeddings_internal(self, sentences: List[Sentence]) -> List[Sentence]:\n\n # if cache is used, try setting embeddings from cache first\n if \"cache\" in self.__dict__ and self.cache is not None:\n\n # try populating embeddings from cache\n all_embeddings_retrieved_from_cache: bool = True\n for sentence in sentences:\n key = sentence.to_tokenized_string()\n embeddings = self.cache.get(key)\n\n if not embeddings:\n all_embeddings_retrieved_from_cache = False\n break\n else:\n for token, embedding in zip(sentence, embeddings):\n token.set_embedding(self.name, torch.FloatTensor(embedding))\n\n if all_embeddings_retrieved_from_cache:\n return sentences\n\n # if this is not possible, use LM to generate embedding. 
First, get text sentences\n text_sentences = [sentence.to_tokenized_string() for sentence in sentences]\n\n longest_character_sequence_in_batch: int = len(max(text_sentences, key=len))\n\n # pad strings with whitespaces to longest sentence\n sentences_padded: List[str] = []\n append_padded_sentence = sentences_padded.append\n\n end_marker = \" \"\n extra_offset = 1\n for sentence_text in text_sentences:\n pad_by = longest_character_sequence_in_batch - len(sentence_text)\n if self.is_forward_lm:\n padded = \"\\n{}{}{}\".format(sentence_text, end_marker, pad_by * \" \")\n append_padded_sentence(padded)\n else:\n padded = \"\\n{}{}{}\".format(\n sentence_text[::-1], end_marker, pad_by * \" \"\n )\n append_padded_sentence(padded)\n\n # get hidden states from language model\n all_hidden_states_in_lm = self.lm.get_representation(sentences_padded)\n\n # take first or last hidden states from language model as word representation\n for i, sentence in enumerate(sentences):\n sentence_text = sentence.to_tokenized_string()\n\n offset_forward: int = extra_offset\n offset_backward: int = len(sentence_text) + extra_offset\n\n for token in sentence.tokens:\n\n offset_forward += len(token.text)\n\n if self.is_forward_lm:\n offset = offset_forward\n else:\n offset = offset_backward\n\n embedding = all_hidden_states_in_lm[offset, i, :]\n\n # if self.tokenized_lm or token.whitespace_after:\n offset_forward += 1\n offset_backward -= 1\n\n offset_backward -= len(token.text)\n\n token.set_embedding(self.name, embedding)\n\n if \"cache\" in self.__dict__ and self.cache is not None:\n for sentence in sentences:\n self.cache[sentence.to_tokenized_string()] = [\n token._embeddings[self.name].tolist() for token in sentence\n ]\n\n return sentences\n\n def __str__(self):\n return self.name\n\n\nclass DocumentMeanEmbeddings(DocumentEmbeddings):\n @deprecated(\n version=\"0.3.1\",\n reason=\"The functionality of this class is moved to 'DocumentPoolEmbeddings'\",\n )\n def __init__(self, token_embeddings: List[TokenEmbeddings]):\n \"\"\"The constructor takes a list of embeddings to be combined.\"\"\"\n super().__init__()\n\n self.embeddings: StackedEmbeddings = StackedEmbeddings(\n embeddings=token_embeddings\n )\n self.name: str = \"document_mean\"\n\n self.__embedding_length: int = self.embeddings.embedding_length\n\n self.to(flair.device)\n\n @property\n def embedding_length(self) -> int:\n return self.__embedding_length\n\n def embed(self, sentences: Union[List[Sentence], Sentence]):\n \"\"\"Add embeddings to every sentence in the given list of sentences. 
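        The document embedding is the mean over all token embeddings of the sentence.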
If embeddings are already added, updates
        only if embeddings are non-static."""

        everything_embedded: bool = True

        # if only one sentence is passed, convert to list of sentence
        if type(sentences) is Sentence:
            sentences = [sentences]

        for sentence in sentences:
            if self.name not in sentence._embeddings.keys():
                everything_embedded = False

        if not everything_embedded:

            self.embeddings.embed(sentences)

            for sentence in sentences:
                word_embeddings = []
                for token in sentence.tokens:
                    word_embeddings.append(token.get_embedding().unsqueeze(0))

                word_embeddings = torch.cat(word_embeddings, dim=0).to(flair.device)

                mean_embedding = torch.mean(word_embeddings, 0)

                sentence.set_embedding(self.name, mean_embedding)

    def _add_embeddings_internal(self, sentences: List[Sentence]):
        pass


class DocumentPoolEmbeddings(DocumentEmbeddings):
    def __init__(
        self,
        embeddings: List[TokenEmbeddings],
        fine_tune_mode="linear",
        pooling: str = "mean",
    ):
        """The constructor takes a list of embeddings to be combined.
        :param embeddings: a list of token embeddings
        :param fine_tune_mode: if 'linear' or 'nonlinear', a trainable re-projection
            layer is applied on top of the stacked token embeddings
        :param pooling: a string which can be any value from ['mean', 'max', 'min']
        """
        super().__init__()

        self.embeddings: StackedEmbeddings = StackedEmbeddings(embeddings=embeddings)
        self.__embedding_length: int = self.embeddings.embedding_length

        # optional fine-tuning on top of embedding layer
        self.fine_tune_mode = fine_tune_mode
        if self.fine_tune_mode in ["nonlinear", "linear"]:
            self.embedding_flex = torch.nn.Linear(
                self.embedding_length, self.embedding_length, bias=False
            )
            self.embedding_flex.weight.data.copy_(torch.eye(self.embedding_length))

            if self.fine_tune_mode in ["nonlinear"]:
                self.embedding_flex_nonlinear = torch.nn.ReLU()
                self.embedding_flex_nonlinear_map = torch.nn.Linear(
                    self.embedding_length, self.embedding_length
                )

        self.to(flair.device)

        self.pooling = pooling
        if self.pooling == "mean":
            self.pool_op = torch.mean
        elif pooling == "max":
            self.pool_op = torch.max
        elif pooling == "min":
            self.pool_op = torch.min
        else:
            raise ValueError(f"Pooling operation for {pooling!r} is not defined")
        self.name: str = f"document_{self.pooling}"

    @property
    def embedding_length(self) -> int:
        return self.__embedding_length

    def embed(self, sentences: Union[List[Sentence], Sentence]):
        """Add embeddings to every sentence in the given list of sentences.
If embeddings are already added, updates\n only if embeddings are non-static.\"\"\"\n\n # if only one sentence is passed, convert to list of sentence\n if isinstance(sentences, Sentence):\n sentences = [sentences]\n\n self.embeddings.embed(sentences)\n\n for sentence in sentences:\n word_embeddings = []\n for token in sentence.tokens:\n word_embeddings.append(token.get_embedding().unsqueeze(0))\n\n word_embeddings = torch.cat(word_embeddings, dim=0).to(flair.device)\n\n if self.fine_tune_mode in [\"nonlinear\", \"linear\"]:\n word_embeddings = self.embedding_flex(word_embeddings)\n\n if self.fine_tune_mode in [\"nonlinear\"]:\n word_embeddings = self.embedding_flex_nonlinear(word_embeddings)\n word_embeddings = self.embedding_flex_nonlinear_map(word_embeddings)\n\n if self.pooling == \"mean\":\n pooled_embedding = self.pool_op(word_embeddings, 0)\n else:\n pooled_embedding, _ = self.pool_op(word_embeddings, 0)\n\n sentence.set_embedding(self.name, pooled_embedding)\n\n def _add_embeddings_internal(self, sentences: List[Sentence]):\n pass\n\n def extra_repr(self):\n return f\"fine_tune_mode={self.fine_tune_mode}, pooling={self.pooling}\"\n\n\nclass DocumentRNNEmbeddings(DocumentEmbeddings):\n def __init__(\n self,\n embeddings: List[TokenEmbeddings],\n hidden_size=128,\n rnn_layers=1,\n reproject_words: bool = True,\n reproject_words_dimension: int = None,\n bidirectional: bool = False,\n dropout: float = 0.5,\n word_dropout: float = 0.0,\n locked_dropout: float = 0.0,\n rnn_type=\"GRU\",\n ):\n \"\"\"The constructor takes a list of embeddings to be combined.\n :param embeddings: a list of token embeddings\n :param hidden_size: the number of hidden states in the rnn\n :param rnn_layers: the number of layers for the rnn\n :param reproject_words: boolean value, indicating whether to reproject the token embeddings in a separate linear\n layer before putting them into the rnn or not\n :param reproject_words_dimension: output dimension of reprojecting token embeddings. 
If None the same output\n dimension as before will be taken.\n :param bidirectional: boolean value, indicating whether to use a bidirectional rnn or not\n :param dropout: the dropout value to be used\n :param word_dropout: the word dropout value to be used, if 0.0 word dropout is not used\n :param locked_dropout: the locked dropout value to be used, if 0.0 locked dropout is not used\n :param rnn_type: 'GRU' or 'LSTM'\n \"\"\"\n super().__init__()\n\n self.embeddings: StackedEmbeddings = StackedEmbeddings(embeddings=embeddings)\n\n self.rnn_type = rnn_type\n\n self.reproject_words = reproject_words\n self.bidirectional = bidirectional\n\n self.length_of_all_token_embeddings: int = self.embeddings.embedding_length\n\n self.static_embeddings = False\n\n self.__embedding_length: int = hidden_size\n if self.bidirectional:\n self.__embedding_length *= 4\n\n self.embeddings_dimension: int = self.length_of_all_token_embeddings\n if self.reproject_words and reproject_words_dimension is not None:\n self.embeddings_dimension = reproject_words_dimension\n\n self.word_reprojection_map = torch.nn.Linear(\n self.length_of_all_token_embeddings, self.embeddings_dimension\n )\n\n # bidirectional RNN on top of embedding layer\n if rnn_type == \"LSTM\":\n self.rnn = torch.nn.LSTM(\n self.embeddings_dimension,\n hidden_size,\n num_layers=rnn_layers,\n bidirectional=self.bidirectional,\n )\n else:\n self.rnn = torch.nn.GRU(\n self.embeddings_dimension,\n hidden_size,\n num_layers=rnn_layers,\n bidirectional=self.bidirectional,\n )\n\n self.name = \"document_\" + self.rnn._get_name()\n\n # dropouts\n if locked_dropout > 0.0:\n self.dropout: torch.nn.Module = LockedDropout(locked_dropout)\n else:\n self.dropout = torch.nn.Dropout(dropout)\n\n self.use_word_dropout: bool = word_dropout > 0.0\n if self.use_word_dropout:\n self.word_dropout = WordDropout(word_dropout)\n\n torch.nn.init.xavier_uniform_(self.word_reprojection_map.weight)\n\n self.to(flair.device)\n\n @property\n def embedding_length(self) -> int:\n return self.__embedding_length\n\n def embed(self, sentences: Union[List[Sentence], Sentence]):\n \"\"\"Add embeddings to all sentences in the given list of sentences. 
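        Sentences are sorted by length, padded, optionally reprojected, and run through
        the RNN; the final hidden state (plus the first one, if bidirectional) forms the
        document embedding.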
If embeddings are already added, update\n only if embeddings are non-static.\"\"\"\n\n if type(sentences) is Sentence:\n sentences = [sentences]\n\n self.rnn.zero_grad()\n\n sentences.sort(key=lambda x: len(x), reverse=True)\n\n self.embeddings.embed(sentences)\n\n # first, sort sentences by number of tokens\n longest_token_sequence_in_batch: int = len(sentences[0])\n\n all_sentence_tensors = []\n lengths: List[int] = []\n\n # go through each sentence in batch\n for i, sentence in enumerate(sentences):\n\n lengths.append(len(sentence.tokens))\n\n word_embeddings = []\n\n for token, token_idx in zip(sentence.tokens, range(len(sentence.tokens))):\n word_embeddings.append(token.get_embedding().unsqueeze(0))\n\n # PADDING: pad shorter sentences out\n for add in range(longest_token_sequence_in_batch - len(sentence.tokens)):\n word_embeddings.append(\n torch.zeros(\n self.length_of_all_token_embeddings, dtype=torch.float\n ).unsqueeze(0)\n )\n\n word_embeddings_tensor = torch.cat(word_embeddings, 0).to(flair.device)\n\n sentence_states = word_embeddings_tensor\n\n # ADD TO SENTENCE LIST: add the representation\n all_sentence_tensors.append(sentence_states.unsqueeze(1))\n\n # --------------------------------------------------------------------\n # GET REPRESENTATION FOR ENTIRE BATCH\n # --------------------------------------------------------------------\n sentence_tensor = torch.cat(all_sentence_tensors, 1)\n\n # --------------------------------------------------------------------\n # FF PART\n # --------------------------------------------------------------------\n # use word dropout if set\n if self.use_word_dropout:\n sentence_tensor = self.word_dropout(sentence_tensor)\n\n if self.reproject_words:\n sentence_tensor = self.word_reprojection_map(sentence_tensor)\n\n sentence_tensor = self.dropout(sentence_tensor)\n\n packed = torch.nn.utils.rnn.pack_padded_sequence(sentence_tensor, lengths)\n\n self.rnn.flatten_parameters()\n\n rnn_out, hidden = self.rnn(packed)\n\n outputs, output_lengths = torch.nn.utils.rnn.pad_packed_sequence(rnn_out)\n\n outputs = self.dropout(outputs)\n\n # --------------------------------------------------------------------\n # EXTRACT EMBEDDINGS FROM RNN\n # --------------------------------------------------------------------\n for sentence_no, length in enumerate(lengths):\n last_rep = outputs[length - 1, sentence_no]\n\n embedding = last_rep\n if self.bidirectional:\n first_rep = outputs[0, sentence_no]\n embedding = torch.cat([first_rep, last_rep], 0)\n\n sentence = sentences[sentence_no]\n sentence.set_embedding(self.name, embedding)\n\n def _add_embeddings_internal(self, sentences: List[Sentence]):\n pass\n\n\n@deprecated(\n version=\"0.4\",\n reason=\"The functionality of this class is moved to 'DocumentRNNEmbeddings'\",\n)\nclass DocumentLSTMEmbeddings(DocumentEmbeddings):\n def __init__(\n self,\n embeddings: List[TokenEmbeddings],\n hidden_size=128,\n rnn_layers=1,\n reproject_words: bool = True,\n reproject_words_dimension: int = None,\n bidirectional: bool = False,\n dropout: float = 0.5,\n word_dropout: float = 0.0,\n locked_dropout: float = 0.0,\n ):\n \"\"\"The constructor takes a list of embeddings to be combined.\n :param embeddings: a list of token embeddings\n :param hidden_size: the number of hidden states in the lstm\n :param rnn_layers: the number of layers for the lstm\n :param reproject_words: boolean value, indicating whether to reproject the token embeddings in a separate linear\n layer before putting them into the lstm or not\n :param 
reproject_words_dimension: output dimension of reprojecting token embeddings. If None the same output\n dimension as before will be taken.\n :param bidirectional: boolean value, indicating whether to use a bidirectional lstm or not\n :param dropout: the dropout value to be used\n :param word_dropout: the word dropout value to be used, if 0.0 word dropout is not used\n :param locked_dropout: the locked dropout value to be used, if 0.0 locked dropout is not used\n \"\"\"\n super().__init__()\n\n self.embeddings: StackedEmbeddings = StackedEmbeddings(embeddings=embeddings)\n\n self.reproject_words = reproject_words\n self.bidirectional = bidirectional\n\n self.length_of_all_token_embeddings: int = self.embeddings.embedding_length\n\n self.name = \"document_lstm\"\n self.static_embeddings = False\n\n self.__embedding_length: int = hidden_size\n if self.bidirectional:\n self.__embedding_length *= 4\n\n self.embeddings_dimension: int = self.length_of_all_token_embeddings\n if self.reproject_words and reproject_words_dimension is not None:\n self.embeddings_dimension = reproject_words_dimension\n\n # bidirectional LSTM on top of embedding layer\n self.word_reprojection_map = torch.nn.Linear(\n self.length_of_all_token_embeddings, self.embeddings_dimension\n )\n self.rnn = torch.nn.GRU(\n self.embeddings_dimension,\n hidden_size,\n num_layers=rnn_layers,\n bidirectional=self.bidirectional,\n )\n\n # dropouts\n if locked_dropout > 0.0:\n self.dropout: torch.nn.Module = LockedDropout(locked_dropout)\n else:\n self.dropout = torch.nn.Dropout(dropout)\n\n self.use_word_dropout: bool = word_dropout > 0.0\n if self.use_word_dropout:\n self.word_dropout = WordDropout(word_dropout)\n\n torch.nn.init.xavier_uniform_(self.word_reprojection_map.weight)\n\n self.to(flair.device)\n\n @property\n def embedding_length(self) -> int:\n return self.__embedding_length\n\n def embed(self, sentences: Union[List[Sentence], Sentence]):\n \"\"\"Add embeddings to all sentences in the given list of sentences. 
If embeddings are already added, update\n only if embeddings are non-static.\"\"\"\n\n if type(sentences) is Sentence:\n sentences = [sentences]\n\n self.rnn.zero_grad()\n\n sentences.sort(key=lambda x: len(x), reverse=True)\n\n self.embeddings.embed(sentences)\n\n # first, sort sentences by number of tokens\n longest_token_sequence_in_batch: int = len(sentences[0])\n\n all_sentence_tensors = []\n lengths: List[int] = []\n\n # go through each sentence in batch\n for i, sentence in enumerate(sentences):\n\n lengths.append(len(sentence.tokens))\n\n word_embeddings = []\n\n for token, token_idx in zip(sentence.tokens, range(len(sentence.tokens))):\n word_embeddings.append(token.get_embedding().unsqueeze(0))\n\n # PADDING: pad shorter sentences out\n for add in range(longest_token_sequence_in_batch - len(sentence.tokens)):\n word_embeddings.append(\n torch.zeros(\n self.length_of_all_token_embeddings, dtype=torch.float\n ).unsqueeze(0)\n )\n\n word_embeddings_tensor = torch.cat(word_embeddings, 0).to(flair.device)\n\n sentence_states = word_embeddings_tensor\n\n # ADD TO SENTENCE LIST: add the representation\n all_sentence_tensors.append(sentence_states.unsqueeze(1))\n\n # --------------------------------------------------------------------\n # GET REPRESENTATION FOR ENTIRE BATCH\n # --------------------------------------------------------------------\n sentence_tensor = torch.cat(all_sentence_tensors, 1)\n\n # --------------------------------------------------------------------\n # FF PART\n # --------------------------------------------------------------------\n # use word dropout if set\n if self.use_word_dropout:\n sentence_tensor = self.word_dropout(sentence_tensor)\n\n if self.reproject_words:\n sentence_tensor = self.word_reprojection_map(sentence_tensor)\n\n sentence_tensor = self.dropout(sentence_tensor)\n\n packed = torch.nn.utils.rnn.pack_padded_sequence(sentence_tensor, lengths)\n\n self.rnn.flatten_parameters()\n\n lstm_out, hidden = self.rnn(packed)\n\n outputs, output_lengths = torch.nn.utils.rnn.pad_packed_sequence(lstm_out)\n\n outputs = self.dropout(outputs)\n\n # --------------------------------------------------------------------\n # EXTRACT EMBEDDINGS FROM LSTM\n # --------------------------------------------------------------------\n for sentence_no, length in enumerate(lengths):\n last_rep = outputs[length - 1, sentence_no]\n\n embedding = last_rep\n if self.bidirectional:\n first_rep = outputs[0, sentence_no]\n embedding = torch.cat([first_rep, last_rep], 0)\n\n sentence = sentences[sentence_no]\n sentence.set_embedding(self.name, embedding)\n\n def _add_embeddings_internal(self, sentences: List[Sentence]):\n pass\n\n\nclass DocumentLMEmbeddings(DocumentEmbeddings):\n def __init__(self, flair_embeddings: List[FlairEmbeddings], detach: bool = True):\n super().__init__()\n\n self.embeddings = flair_embeddings\n self.name = \"document_lm\"\n\n self.static_embeddings = detach\n self.detach = detach\n\n self._embedding_length: int = sum(\n embedding.embedding_length for embedding in flair_embeddings\n )\n\n @property\n def embedding_length(self) -> int:\n return self._embedding_length\n\n def _add_embeddings_internal(self, sentences: List[Sentence]):\n if type(sentences) is Sentence:\n sentences = [sentences]\n\n for embedding in self.embeddings:\n embedding.embed(sentences)\n\n # iterate over sentences\n for sentence in sentences:\n\n # if its a forward LM, take last state\n if embedding.is_forward_lm:\n sentence.set_embedding(\n embedding.name,\n sentence[len(sentence) - 
1]._embeddings[embedding.name],\n )\n else:\n sentence.set_embedding(\n embedding.name, sentence[0]._embeddings[embedding.name]\n )\n\n return sentences\n\n\nclass NILCEmbeddings(WordEmbeddings):\n def __init__(self, embeddings: str, model: str = \"skip\", size: int = 100):\n \"\"\"\n Initializes portuguese classic word embeddings trained by NILC Lab (http://www.nilc.icmc.usp.br/embeddings).\n Constructor downloads required files if not there.\n :param embeddings: one of: 'fasttext', 'glove', 'wang2vec' or 'word2vec'\n :param model: one of: 'skip' or 'cbow'. This is not applicable to glove.\n :param size: one of: 50, 100, 300, 600 or 1000.\n \"\"\"\n\n base_path = \"http://143.107.183.175:22980/download.php?file=embeddings/\"\n\n cache_dir = Path(\"embeddings\") / embeddings.lower()\n\n # GLOVE embeddings\n if embeddings.lower() == \"glove\":\n cached_path(\n f\"{base_path}{embeddings}/{embeddings}_s{size}.zip\", cache_dir=cache_dir\n )\n embeddings = cached_path(\n f\"{base_path}{embeddings}/{embeddings}_s{size}.zip\", cache_dir=cache_dir\n )\n\n elif embeddings.lower() in [\"fasttext\", \"wang2vec\", \"word2vec\"]:\n cached_path(\n f\"{base_path}{embeddings}/{model}_s{size}.zip\", cache_dir=cache_dir\n )\n embeddings = cached_path(\n f\"{base_path}{embeddings}/{model}_s{size}.zip\", cache_dir=cache_dir\n )\n\n elif not Path(embeddings).exists():\n raise ValueError(\n f'The given embeddings \"{embeddings}\" is not available or is not a valid path.'\n )\n\n self.name: str = str(embeddings)\n self.static_embeddings = True\n\n log.info(\"Reading embeddings from %s\" % embeddings)\n self.precomputed_word_embeddings = gensim.models.KeyedVectors.load_word2vec_format(\n open_inside_zip(str(embeddings), cache_dir=cache_dir)\n )\n\n self.__embedding_length: int = self.precomputed_word_embeddings.vector_size\n super(TokenEmbeddings, self).__init__()\n\n @property\n def embedding_length(self) -> int:\n return self.__embedding_length\n\n def __str__(self):\n return self.name\n\n\ndef replace_with_language_code(string: str):\n string = string.replace(\"arabic-\", \"ar-\")\n string = string.replace(\"basque-\", \"eu-\")\n string = string.replace(\"bulgarian-\", \"bg-\")\n string = string.replace(\"croatian-\", \"hr-\")\n string = string.replace(\"czech-\", \"cs-\")\n string = string.replace(\"danish-\", \"da-\")\n string = string.replace(\"dutch-\", \"nl-\")\n string = string.replace(\"farsi-\", \"fa-\")\n string = string.replace(\"persian-\", \"fa-\")\n string = string.replace(\"finnish-\", \"fi-\")\n string = string.replace(\"french-\", \"fr-\")\n string = string.replace(\"german-\", \"de-\")\n string = string.replace(\"hebrew-\", \"he-\")\n string = string.replace(\"hindi-\", \"hi-\")\n string = string.replace(\"indonesian-\", \"id-\")\n string = string.replace(\"italian-\", \"it-\")\n string = string.replace(\"japanese-\", \"ja-\")\n string = string.replace(\"norwegian-\", \"no\")\n string = string.replace(\"polish-\", \"pl-\")\n string = string.replace(\"portuguese-\", \"pt-\")\n string = string.replace(\"slovenian-\", \"sl-\")\n string = string.replace(\"spanish-\", \"es-\")\n string = string.replace(\"swedish-\", \"sv-\")\n return string\n"
] | [
[
"torch.nn.init.xavier_uniform_",
"torch.nn.LSTM",
"torch.nn.Linear",
"torch.nn.utils.rnn.pack_padded_sequence",
"torch.nn.Dropout",
"torch.FloatTensor",
"numpy.zeros",
"torch.no_grad",
"torch.tensor",
"torch.nn.GRU",
"torch.nn.ReLU",
"torch.eye",
"torch.zeros",
"torch.LongTensor",
"torch.cat",
"torch.nn.utils.rnn.pad_packed_sequence",
"torch.mean"
]
] |
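Note: the embed() methods in the flair snippet above all follow the same pad/pack/unpack recipe: sort sentences longest-first, zero-pad to the longest length, pack with pack_padded_sequence, run the RNN, then read one embedding per sentence out of the unpacked output. Below is a minimal, self-contained sketch of that recipe; the sizes and the lengths list are hypothetical, chosen only for illustration, and this is not flair's actual API.

import torch

# Hypothetical sizes, for illustration only.
embed_dim, hidden_size = 4, 8
rnn = torch.nn.GRU(embed_dim, hidden_size, bidirectional=True)

lengths = [5, 3, 2]                                           # sorted longest-first
padded = torch.zeros(max(lengths), len(lengths), embed_dim)   # (seq, batch, feat)
for i, n in enumerate(lengths):
    padded[:n, i] = torch.randn(n, embed_dim)

packed = torch.nn.utils.rnn.pack_padded_sequence(padded, lengths)
rnn_out, _ = rnn(packed)
outputs, _ = torch.nn.utils.rnn.pad_packed_sequence(rnn_out)

# As in the snippet: take the last valid state per sentence, and additionally
# the first state when bidirectional, which is why the snippet sets
# embedding_length to hidden_size * 4 in the bidirectional case.
embeddings = [torch.cat([outputs[0, i], outputs[n - 1, i]], 0)
              for i, n in enumerate(lengths)]
print([tuple(e.shape) for e in embeddings])                   # [(32,), (32,), (32,)]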
mavismonica/pandas | [
"dbdc55c9d59f25589d58cc60247af193f06c3c66"
] | [
"pandas/tests/indexing/test_indexing.py"
] | [
"\"\"\" test fancy indexing & misc \"\"\"\n\nfrom datetime import datetime\nimport re\nimport weakref\n\nimport numpy as np\nimport pytest\n\nfrom pandas.core.dtypes.common import is_float_dtype, is_integer_dtype\n\nimport pandas as pd\nfrom pandas import DataFrame, Index, NaT, Series\nimport pandas._testing as tm\nfrom pandas.core.indexing import maybe_numeric_slice, non_reducing_slice\nfrom pandas.tests.indexing.common import _mklbl\n\nfrom .test_floats import gen_obj\n\n\ndef getitem(x):\n return x\n\n\ndef setitem(x):\n return x\n\n\ndef loc(x):\n return x.loc\n\n\ndef iloc(x):\n return x.iloc\n\n\n# ------------------------------------------------------------------------\n# Indexing test cases\n\n\nclass TestFancy:\n \"\"\" pure get/set item & fancy indexing \"\"\"\n\n def test_setitem_ndarray_1d(self):\n # GH5508\n\n # len of indexer vs length of the 1d ndarray\n df = DataFrame(index=Index(np.arange(1, 11)))\n df[\"foo\"] = np.zeros(10, dtype=np.float64)\n df[\"bar\"] = np.zeros(10, dtype=complex)\n\n # invalid\n msg = \"Must have equal len keys and value when setting with an iterable\"\n with pytest.raises(ValueError, match=msg):\n df.loc[df.index[2:5], \"bar\"] = np.array([2.33j, 1.23 + 0.1j, 2.2, 1.0])\n\n # valid\n df.loc[df.index[2:6], \"bar\"] = np.array([2.33j, 1.23 + 0.1j, 2.2, 1.0])\n\n result = df.loc[df.index[2:6], \"bar\"]\n expected = Series(\n [2.33j, 1.23 + 0.1j, 2.2, 1.0], index=[3, 4, 5, 6], name=\"bar\"\n )\n tm.assert_series_equal(result, expected)\n\n # dtype getting changed?\n df = DataFrame(index=Index(np.arange(1, 11)))\n df[\"foo\"] = np.zeros(10, dtype=np.float64)\n df[\"bar\"] = np.zeros(10, dtype=complex)\n\n msg = \"Must have equal len keys and value when setting with an iterable\"\n with pytest.raises(ValueError, match=msg):\n df[2:5] = np.arange(1, 4) * 1j\n\n @pytest.mark.parametrize(\"idxr\", [getitem, loc, iloc])\n def test_getitem_ndarray_3d(self, index, frame_or_series, idxr):\n # GH 25567\n obj = gen_obj(frame_or_series, index)\n idxr = idxr(obj)\n nd3 = np.random.randint(5, size=(2, 2, 2))\n\n msg = \"|\".join(\n [\n r\"Buffer has wrong number of dimensions \\(expected 1, got 3\\)\",\n \"Cannot index with multidimensional key\",\n r\"Wrong number of dimensions. 
values.ndim != ndim \\[3 != 1\\]\",\n \"Index data must be 1-dimensional\",\n \"positional indexers are out-of-bounds\",\n \"Indexing a MultiIndex with a multidimensional key is not implemented\",\n ]\n )\n\n potential_errors = (IndexError, ValueError, NotImplementedError)\n with pytest.raises(potential_errors, match=msg):\n with tm.assert_produces_warning(DeprecationWarning, check_stacklevel=False):\n idxr[nd3]\n\n @pytest.mark.parametrize(\"indexer\", [setitem, loc, iloc])\n def test_setitem_ndarray_3d(self, index, frame_or_series, indexer):\n # GH 25567\n obj = gen_obj(frame_or_series, index)\n idxr = indexer(obj)\n nd3 = np.random.randint(5, size=(2, 2, 2))\n\n if indexer.__name__ == \"iloc\":\n err = ValueError\n msg = f\"Cannot set values with ndim > {obj.ndim}\"\n elif (\n isinstance(index, pd.IntervalIndex)\n and indexer.__name__ == \"setitem\"\n and obj.ndim == 1\n ):\n err = AttributeError\n msg = (\n \"'pandas._libs.interval.IntervalTree' object has no attribute 'get_loc'\"\n )\n else:\n err = ValueError\n msg = r\"Buffer has wrong number of dimensions \\(expected 1, got 3\\)|\"\n\n with pytest.raises(err, match=msg):\n idxr[nd3] = 0\n\n def test_inf_upcast(self):\n # GH 16957\n # We should be able to use np.inf as a key\n # np.inf should cause an index to convert to float\n\n # Test with np.inf in rows\n df = DataFrame(columns=[0])\n df.loc[1] = 1\n df.loc[2] = 2\n df.loc[np.inf] = 3\n\n # make sure we can look up the value\n assert df.loc[np.inf, 0] == 3\n\n result = df.index\n expected = pd.Float64Index([1, 2, np.inf])\n tm.assert_index_equal(result, expected)\n\n # Test with np.inf in columns\n df = DataFrame()\n df.loc[0, 0] = 1\n df.loc[1, 1] = 2\n df.loc[0, np.inf] = 3\n\n result = df.columns\n expected = pd.Float64Index([0, 1, np.inf])\n tm.assert_index_equal(result, expected)\n\n def test_setitem_dtype_upcast(self):\n\n # GH3216\n df = DataFrame([{\"a\": 1}, {\"a\": 3, \"b\": 2}])\n df[\"c\"] = np.nan\n assert df[\"c\"].dtype == np.float64\n\n df.loc[0, \"c\"] = \"foo\"\n expected = DataFrame(\n [{\"a\": 1, \"b\": np.nan, \"c\": \"foo\"}, {\"a\": 3, \"b\": 2, \"c\": np.nan}]\n )\n tm.assert_frame_equal(df, expected)\n\n # GH10280\n df = DataFrame(\n np.arange(6, dtype=\"int64\").reshape(2, 3),\n index=list(\"ab\"),\n columns=[\"foo\", \"bar\", \"baz\"],\n )\n\n for val in [3.14, \"wxyz\"]:\n left = df.copy()\n left.loc[\"a\", \"bar\"] = val\n right = DataFrame(\n [[0, val, 2], [3, 4, 5]],\n index=list(\"ab\"),\n columns=[\"foo\", \"bar\", \"baz\"],\n )\n\n tm.assert_frame_equal(left, right)\n assert is_integer_dtype(left[\"foo\"])\n assert is_integer_dtype(left[\"baz\"])\n\n left = DataFrame(\n np.arange(6, dtype=\"int64\").reshape(2, 3) / 10.0,\n index=list(\"ab\"),\n columns=[\"foo\", \"bar\", \"baz\"],\n )\n left.loc[\"a\", \"bar\"] = \"wxyz\"\n\n right = DataFrame(\n [[0, \"wxyz\", 0.2], [0.3, 0.4, 0.5]],\n index=list(\"ab\"),\n columns=[\"foo\", \"bar\", \"baz\"],\n )\n\n tm.assert_frame_equal(left, right)\n assert is_float_dtype(left[\"foo\"])\n assert is_float_dtype(left[\"baz\"])\n\n def test_dups_fancy_indexing(self):\n\n # GH 3455\n\n df = tm.makeCustomDataframe(10, 3)\n df.columns = [\"a\", \"a\", \"b\"]\n result = df[[\"b\", \"a\"]].columns\n expected = Index([\"b\", \"a\", \"a\"])\n tm.assert_index_equal(result, expected)\n\n # across dtypes\n df = DataFrame([[1, 2, 1.0, 2.0, 3.0, \"foo\", \"bar\"]], columns=list(\"aaaaaaa\"))\n df.head()\n str(df)\n result = DataFrame([[1, 2, 1.0, 2.0, 3.0, \"foo\", \"bar\"]])\n result.columns = list(\"aaaaaaa\")\n\n # 
TODO(wesm): unused?\n df_v = df.iloc[:, 4] # noqa\n res_v = result.iloc[:, 4] # noqa\n\n tm.assert_frame_equal(df, result)\n\n # GH 3561, dups not in selected order\n df = DataFrame(\n {\"test\": [5, 7, 9, 11], \"test1\": [4.0, 5, 6, 7], \"other\": list(\"abcd\")},\n index=[\"A\", \"A\", \"B\", \"C\"],\n )\n rows = [\"C\", \"B\"]\n expected = DataFrame(\n {\"test\": [11, 9], \"test1\": [7.0, 6], \"other\": [\"d\", \"c\"]}, index=rows\n )\n result = df.loc[rows]\n tm.assert_frame_equal(result, expected)\n\n result = df.loc[Index(rows)]\n tm.assert_frame_equal(result, expected)\n\n rows = [\"C\", \"B\", \"E\"]\n with pytest.raises(KeyError, match=\"with any missing labels\"):\n df.loc[rows]\n\n # see GH5553, make sure we use the right indexer\n rows = [\"F\", \"G\", \"H\", \"C\", \"B\", \"E\"]\n with pytest.raises(KeyError, match=\"with any missing labels\"):\n df.loc[rows]\n\n # List containing only missing label\n dfnu = DataFrame(np.random.randn(5, 3), index=list(\"AABCD\"))\n with pytest.raises(\n KeyError,\n match=re.escape(\n \"\\\"None of [Index(['E'], dtype='object')] are in the [index]\\\"\"\n ),\n ):\n dfnu.loc[[\"E\"]]\n\n # ToDo: check_index_type can be True after GH 11497\n\n # GH 4619; duplicate indexer with missing label\n df = DataFrame({\"A\": [0, 1, 2]})\n with pytest.raises(KeyError, match=\"with any missing labels\"):\n df.loc[[0, 8, 0]]\n\n df = DataFrame({\"A\": list(\"abc\")})\n with pytest.raises(KeyError, match=\"with any missing labels\"):\n df.loc[[0, 8, 0]]\n\n # non unique with non unique selector\n df = DataFrame({\"test\": [5, 7, 9, 11]}, index=[\"A\", \"A\", \"B\", \"C\"])\n with pytest.raises(KeyError, match=\"with any missing labels\"):\n df.loc[[\"A\", \"A\", \"E\"]]\n\n def test_dups_fancy_indexing2(self):\n # GH 5835\n # dups on index and missing values\n df = DataFrame(np.random.randn(5, 5), columns=[\"A\", \"B\", \"B\", \"B\", \"A\"])\n\n with pytest.raises(KeyError, match=\"with any missing labels\"):\n df.loc[:, [\"A\", \"B\", \"C\"]]\n\n # GH 6504, multi-axis indexing\n df = DataFrame(\n np.random.randn(9, 2), index=[1, 1, 1, 2, 2, 2, 3, 3, 3], columns=[\"a\", \"b\"]\n )\n\n expected = df.iloc[0:6]\n result = df.loc[[1, 2]]\n tm.assert_frame_equal(result, expected)\n\n expected = df\n result = df.loc[:, [\"a\", \"b\"]]\n tm.assert_frame_equal(result, expected)\n\n expected = df.iloc[0:6, :]\n result = df.loc[[1, 2], [\"a\", \"b\"]]\n tm.assert_frame_equal(result, expected)\n\n @pytest.mark.parametrize(\"case\", [getitem, loc])\n def test_duplicate_int_indexing(self, case):\n # GH 17347\n s = Series(range(3), index=[1, 1, 3])\n expected = s[1]\n result = case(s)[[1]]\n tm.assert_series_equal(result, expected)\n\n def test_indexing_mixed_frame_bug(self):\n\n # GH3492\n df = DataFrame(\n {\"a\": {1: \"aaa\", 2: \"bbb\", 3: \"ccc\"}, \"b\": {1: 111, 2: 222, 3: 333}}\n )\n\n # this works, new column is created correctly\n df[\"test\"] = df[\"a\"].apply(lambda x: \"_\" if x == \"aaa\" else x)\n\n # this does not work, ie column test is not changed\n idx = df[\"test\"] == \"_\"\n temp = df.loc[idx, \"a\"].apply(lambda x: \"-----\" if x == \"aaa\" else x)\n df.loc[idx, \"test\"] = temp\n assert df.iloc[0, 2] == \"-----\"\n\n def test_multitype_list_index_access(self):\n # GH 10610\n df = DataFrame(np.random.random((10, 5)), columns=[\"a\"] + [20, 21, 22, 23])\n\n with pytest.raises(KeyError, match=re.escape(\"'[-8, 26] not in index'\")):\n df[[22, 26, -8]]\n assert df[21].shape[0] == df.shape[0]\n\n def test_set_index_nan(self):\n\n # GH 3586\n df = 
DataFrame(\n {\n \"PRuid\": {\n 17: \"nonQC\",\n 18: \"nonQC\",\n 19: \"nonQC\",\n 20: \"10\",\n 21: \"11\",\n 22: \"12\",\n 23: \"13\",\n 24: \"24\",\n 25: \"35\",\n 26: \"46\",\n 27: \"47\",\n 28: \"48\",\n 29: \"59\",\n 30: \"10\",\n },\n \"QC\": {\n 17: 0.0,\n 18: 0.0,\n 19: 0.0,\n 20: np.nan,\n 21: np.nan,\n 22: np.nan,\n 23: np.nan,\n 24: 1.0,\n 25: np.nan,\n 26: np.nan,\n 27: np.nan,\n 28: np.nan,\n 29: np.nan,\n 30: np.nan,\n },\n \"data\": {\n 17: 7.9544899999999998,\n 18: 8.0142609999999994,\n 19: 7.8591520000000008,\n 20: 0.86140349999999999,\n 21: 0.87853110000000001,\n 22: 0.8427041999999999,\n 23: 0.78587700000000005,\n 24: 0.73062459999999996,\n 25: 0.81668560000000001,\n 26: 0.81927080000000008,\n 27: 0.80705009999999999,\n 28: 0.81440240000000008,\n 29: 0.80140849999999997,\n 30: 0.81307740000000006,\n },\n \"year\": {\n 17: 2006,\n 18: 2007,\n 19: 2008,\n 20: 1985,\n 21: 1985,\n 22: 1985,\n 23: 1985,\n 24: 1985,\n 25: 1985,\n 26: 1985,\n 27: 1985,\n 28: 1985,\n 29: 1985,\n 30: 1986,\n },\n }\n ).reset_index()\n\n result = (\n df.set_index([\"year\", \"PRuid\", \"QC\"])\n .reset_index()\n .reindex(columns=df.columns)\n )\n tm.assert_frame_equal(result, df)\n\n def test_multi_assign(self):\n\n # GH 3626, an assignment of a sub-df to a df\n df = DataFrame(\n {\n \"FC\": [\"a\", \"b\", \"a\", \"b\", \"a\", \"b\"],\n \"PF\": [0, 0, 0, 0, 1, 1],\n \"col1\": list(range(6)),\n \"col2\": list(range(6, 12)),\n }\n )\n df.iloc[1, 0] = np.nan\n df2 = df.copy()\n\n mask = ~df2.FC.isna()\n cols = [\"col1\", \"col2\"]\n\n dft = df2 * 2\n dft.iloc[3, 3] = np.nan\n\n expected = DataFrame(\n {\n \"FC\": [\"a\", np.nan, \"a\", \"b\", \"a\", \"b\"],\n \"PF\": [0, 0, 0, 0, 1, 1],\n \"col1\": Series([0, 1, 4, 6, 8, 10]),\n \"col2\": [12, 7, 16, np.nan, 20, 22],\n }\n )\n\n # frame on rhs\n df2.loc[mask, cols] = dft.loc[mask, cols]\n tm.assert_frame_equal(df2, expected)\n\n df2.loc[mask, cols] = dft.loc[mask, cols]\n tm.assert_frame_equal(df2, expected)\n\n # with an ndarray on rhs\n # coerces to float64 because values has float64 dtype\n # GH 14001\n expected = DataFrame(\n {\n \"FC\": [\"a\", np.nan, \"a\", \"b\", \"a\", \"b\"],\n \"PF\": [0, 0, 0, 0, 1, 1],\n \"col1\": [0.0, 1.0, 4.0, 6.0, 8.0, 10.0],\n \"col2\": [12, 7, 16, np.nan, 20, 22],\n }\n )\n df2 = df.copy()\n df2.loc[mask, cols] = dft.loc[mask, cols].values\n tm.assert_frame_equal(df2, expected)\n df2.loc[mask, cols] = dft.loc[mask, cols].values\n tm.assert_frame_equal(df2, expected)\n\n # broadcasting on the rhs is required\n df = DataFrame(\n {\n \"A\": [1, 2, 0, 0, 0],\n \"B\": [0, 0, 0, 10, 11],\n \"C\": [0, 0, 0, 10, 11],\n \"D\": [3, 4, 5, 6, 7],\n }\n )\n\n expected = df.copy()\n mask = expected[\"A\"] == 0\n for col in [\"A\", \"B\"]:\n expected.loc[mask, col] = df[\"D\"]\n\n df.loc[df[\"A\"] == 0, [\"A\", \"B\"]] = df[\"D\"]\n tm.assert_frame_equal(df, expected)\n\n def test_setitem_list(self):\n\n # GH 6043\n # iloc with a list\n df = DataFrame(index=[0, 1], columns=[0])\n df.iloc[1, 0] = [1, 2, 3]\n df.iloc[1, 0] = [1, 2]\n\n result = DataFrame(index=[0, 1], columns=[0])\n result.iloc[1, 0] = [1, 2]\n\n tm.assert_frame_equal(result, df)\n\n # iloc with an object\n class TO:\n def __init__(self, value):\n self.value = value\n\n def __str__(self) -> str:\n return f\"[{self.value}]\"\n\n __repr__ = __str__\n\n def __eq__(self, other) -> bool:\n return self.value == other.value\n\n def view(self):\n return self\n\n df = DataFrame(index=[0, 1], columns=[0])\n df.iloc[1, 0] = TO(1)\n df.iloc[1, 0] = TO(2)\n\n result = 
DataFrame(index=[0, 1], columns=[0])\n result.iloc[1, 0] = TO(2)\n\n tm.assert_frame_equal(result, df)\n\n # remains object dtype even after setting it back\n df = DataFrame(index=[0, 1], columns=[0])\n df.iloc[1, 0] = TO(1)\n df.iloc[1, 0] = np.nan\n result = DataFrame(index=[0, 1], columns=[0])\n\n tm.assert_frame_equal(result, df)\n\n def test_string_slice(self):\n # GH 14424\n # string indexing against datetimelike with object\n # dtype should properly raises KeyError\n df = DataFrame([1], Index([pd.Timestamp(\"2011-01-01\")], dtype=object))\n assert df.index._is_all_dates\n with pytest.raises(KeyError, match=\"'2011'\"):\n df[\"2011\"]\n\n with pytest.raises(KeyError, match=\"'2011'\"):\n with tm.assert_produces_warning(FutureWarning):\n # This does an is_all_dates check\n df.loc[\"2011\", 0]\n\n df = DataFrame()\n assert not df.index._is_all_dates\n with pytest.raises(KeyError, match=\"'2011'\"):\n df[\"2011\"]\n\n with pytest.raises(KeyError, match=\"'2011'\"):\n df.loc[\"2011\", 0]\n\n def test_astype_assignment(self):\n\n # GH4312 (iloc)\n df_orig = DataFrame(\n [[\"1\", \"2\", \"3\", \".4\", 5, 6.0, \"foo\"]], columns=list(\"ABCDEFG\")\n )\n\n df = df_orig.copy()\n df.iloc[:, 0:2] = df.iloc[:, 0:2].astype(np.int64)\n expected = DataFrame(\n [[1, 2, \"3\", \".4\", 5, 6.0, \"foo\"]], columns=list(\"ABCDEFG\")\n )\n tm.assert_frame_equal(df, expected)\n\n df = df_orig.copy()\n df.iloc[:, 0:2] = df.iloc[:, 0:2]._convert(datetime=True, numeric=True)\n expected = DataFrame(\n [[1, 2, \"3\", \".4\", 5, 6.0, \"foo\"]], columns=list(\"ABCDEFG\")\n )\n tm.assert_frame_equal(df, expected)\n\n # GH5702 (loc)\n df = df_orig.copy()\n df.loc[:, \"A\"] = df.loc[:, \"A\"].astype(np.int64)\n expected = DataFrame(\n [[1, \"2\", \"3\", \".4\", 5, 6.0, \"foo\"]], columns=list(\"ABCDEFG\")\n )\n tm.assert_frame_equal(df, expected)\n\n df = df_orig.copy()\n df.loc[:, [\"B\", \"C\"]] = df.loc[:, [\"B\", \"C\"]].astype(np.int64)\n expected = DataFrame(\n [[\"1\", 2, 3, \".4\", 5, 6.0, \"foo\"]], columns=list(\"ABCDEFG\")\n )\n tm.assert_frame_equal(df, expected)\n\n # full replacements / no nans\n df = DataFrame({\"A\": [1.0, 2.0, 3.0, 4.0]})\n df.iloc[:, 0] = df[\"A\"].astype(np.int64)\n expected = DataFrame({\"A\": [1, 2, 3, 4]})\n tm.assert_frame_equal(df, expected)\n\n df = DataFrame({\"A\": [1.0, 2.0, 3.0, 4.0]})\n df.loc[:, \"A\"] = df[\"A\"].astype(np.int64)\n expected = DataFrame({\"A\": [1, 2, 3, 4]})\n tm.assert_frame_equal(df, expected)\n\n @pytest.mark.parametrize(\"indexer\", [getitem, loc])\n def test_index_type_coercion(self, indexer):\n\n # GH 11836\n # if we have an index type and set it with something that looks\n # to numpy like the same, but is actually, not\n # (e.g. 
setting with a float or string '0')\n # then we need to coerce to object\n\n # integer indexes\n for s in [Series(range(5)), Series(range(5), index=range(1, 6))]:\n\n assert s.index.is_integer()\n\n s2 = s.copy()\n indexer(s2)[0.1] = 0\n assert s2.index.is_floating()\n assert indexer(s2)[0.1] == 0\n\n s2 = s.copy()\n indexer(s2)[0.0] = 0\n exp = s.index\n if 0 not in s:\n exp = Index(s.index.tolist() + [0])\n tm.assert_index_equal(s2.index, exp)\n\n s2 = s.copy()\n indexer(s2)[\"0\"] = 0\n assert s2.index.is_object()\n\n for s in [Series(range(5), index=np.arange(5.0))]:\n\n assert s.index.is_floating()\n\n s2 = s.copy()\n indexer(s2)[0.1] = 0\n assert s2.index.is_floating()\n assert indexer(s2)[0.1] == 0\n\n s2 = s.copy()\n indexer(s2)[0.0] = 0\n tm.assert_index_equal(s2.index, s.index)\n\n s2 = s.copy()\n indexer(s2)[\"0\"] = 0\n assert s2.index.is_object()\n\n\nclass TestMisc:\n def test_float_index_to_mixed(self):\n df = DataFrame({0.0: np.random.rand(10), 1.0: np.random.rand(10)})\n df[\"a\"] = 10\n tm.assert_frame_equal(\n DataFrame({0.0: df[0.0], 1.0: df[1.0], \"a\": [10] * 10}), df\n )\n\n def test_float_index_non_scalar_assignment(self):\n df = DataFrame({\"a\": [1, 2, 3], \"b\": [3, 4, 5]}, index=[1.0, 2.0, 3.0])\n df.loc[df.index[:2]] = 1\n expected = DataFrame({\"a\": [1, 1, 3], \"b\": [1, 1, 5]}, index=df.index)\n tm.assert_frame_equal(expected, df)\n\n df = DataFrame({\"a\": [1, 2, 3], \"b\": [3, 4, 5]}, index=[1.0, 2.0, 3.0])\n df2 = df.copy()\n df.loc[df.index] = df.loc[df.index]\n tm.assert_frame_equal(df, df2)\n\n def test_float_index_at_iat(self):\n s = Series([1, 2, 3], index=[0.1, 0.2, 0.3])\n for el, item in s.items():\n assert s.at[el] == item\n for i in range(len(s)):\n assert s.iat[i] == i + 1\n\n def test_rhs_alignment(self):\n # GH8258, tests that both rows & columns are aligned to what is\n # assigned to. 
covers both uniform data-type & multi-type cases\n def run_tests(df, rhs, right_loc, right_iloc):\n # label, index, slice\n lbl_one, idx_one, slice_one = list(\"bcd\"), [1, 2, 3], slice(1, 4)\n lbl_two, idx_two, slice_two = [\"joe\", \"jolie\"], [1, 2], slice(1, 3)\n\n left = df.copy()\n left.loc[lbl_one, lbl_two] = rhs\n tm.assert_frame_equal(left, right_loc)\n\n left = df.copy()\n left.iloc[idx_one, idx_two] = rhs\n tm.assert_frame_equal(left, right_iloc)\n\n left = df.copy()\n left.iloc[slice_one, slice_two] = rhs\n tm.assert_frame_equal(left, right_iloc)\n\n xs = np.arange(20).reshape(5, 4)\n cols = [\"jim\", \"joe\", \"jolie\", \"joline\"]\n df = DataFrame(xs, columns=cols, index=list(\"abcde\"), dtype=\"int64\")\n\n # right hand side; permute the indices and multiplpy by -2\n rhs = -2 * df.iloc[3:0:-1, 2:0:-1]\n\n # expected `right` result; just multiply by -2\n right_iloc = df.copy()\n right_iloc[\"joe\"] = [1, 14, 10, 6, 17]\n right_iloc[\"jolie\"] = [2, 13, 9, 5, 18]\n right_iloc.iloc[1:4, 1:3] *= -2\n right_loc = df.copy()\n right_loc.iloc[1:4, 1:3] *= -2\n\n # run tests with uniform dtypes\n run_tests(df, rhs, right_loc, right_iloc)\n\n # make frames multi-type & re-run tests\n for frame in [df, rhs, right_loc, right_iloc]:\n frame[\"joe\"] = frame[\"joe\"].astype(\"float64\")\n frame[\"jolie\"] = frame[\"jolie\"].map(\"@{}\".format)\n right_iloc[\"joe\"] = [1.0, \"@-28\", \"@-20\", \"@-12\", 17.0]\n right_iloc[\"jolie\"] = [\"@2\", -26.0, -18.0, -10.0, \"@18\"]\n run_tests(df, rhs, right_loc, right_iloc)\n\n def test_str_label_slicing_with_negative_step(self):\n SLC = pd.IndexSlice\n\n def assert_slices_equivalent(l_slc, i_slc):\n tm.assert_series_equal(s.loc[l_slc], s.iloc[i_slc])\n\n if not idx.is_integer:\n # For integer indices, .loc and plain getitem are position-based.\n tm.assert_series_equal(s[l_slc], s.iloc[i_slc])\n tm.assert_series_equal(s.loc[l_slc], s.iloc[i_slc])\n\n for idx in [_mklbl(\"A\", 20), np.arange(20) + 100, np.linspace(100, 150, 20)]:\n idx = Index(idx)\n s = Series(np.arange(20), index=idx)\n assert_slices_equivalent(SLC[idx[9] :: -1], SLC[9::-1])\n assert_slices_equivalent(SLC[: idx[9] : -1], SLC[:8:-1])\n assert_slices_equivalent(SLC[idx[13] : idx[9] : -1], SLC[13:8:-1])\n assert_slices_equivalent(SLC[idx[9] : idx[13] : -1], SLC[:0])\n\n def test_slice_with_zero_step_raises(self):\n s = Series(np.arange(20), index=_mklbl(\"A\", 20))\n with pytest.raises(ValueError, match=\"slice step cannot be zero\"):\n s[::0]\n with pytest.raises(ValueError, match=\"slice step cannot be zero\"):\n s.loc[::0]\n\n def test_indexing_assignment_dict_already_exists(self):\n df = DataFrame({\"x\": [1, 2, 6], \"y\": [2, 2, 8], \"z\": [-5, 0, 5]}).set_index(\"z\")\n expected = df.copy()\n rhs = {\"x\": 9, \"y\": 99}\n df.loc[5] = rhs\n expected.loc[5] = [9, 99]\n tm.assert_frame_equal(df, expected)\n\n def test_indexing_dtypes_on_empty(self):\n # Check that .iloc returns correct dtypes GH9983\n df = DataFrame({\"a\": [1, 2, 3], \"b\": [\"b\", \"b2\", \"b3\"]})\n df2 = df.iloc[[], :]\n\n assert df2.loc[:, \"a\"].dtype == np.int64\n tm.assert_series_equal(df2.loc[:, \"a\"], df2.iloc[:, 0])\n\n @pytest.mark.parametrize(\"size\", [5, 999999, 1000000])\n def test_range_in_series_indexing(self, size):\n # range can cause an indexing error\n # GH 11652\n s = Series(index=range(size), dtype=np.float64)\n s.loc[range(1)] = 42\n tm.assert_series_equal(s.loc[range(1)], Series(42.0, index=[0]))\n\n s.loc[range(2)] = 43\n tm.assert_series_equal(s.loc[range(2)], Series(43.0, index=[0, 
1]))\n\n @pytest.mark.parametrize(\n \"slc\",\n [\n pd.IndexSlice[:, :],\n pd.IndexSlice[:, 1],\n pd.IndexSlice[1, :],\n pd.IndexSlice[[1], [1]],\n pd.IndexSlice[1, [1]],\n pd.IndexSlice[[1], 1],\n pd.IndexSlice[1],\n pd.IndexSlice[1, 1],\n slice(None, None, None),\n [0, 1],\n np.array([0, 1]),\n Series([0, 1]),\n ],\n )\n def test_non_reducing_slice(self, slc):\n df = DataFrame([[0, 1], [2, 3]])\n\n tslice_ = non_reducing_slice(slc)\n assert isinstance(df.loc[tslice_], DataFrame)\n\n def test_list_slice(self):\n # like dataframe getitem\n slices = [[\"A\"], Series([\"A\"]), np.array([\"A\"])]\n df = DataFrame({\"A\": [1, 2], \"B\": [3, 4]}, index=[\"A\", \"B\"])\n expected = pd.IndexSlice[:, [\"A\"]]\n for subset in slices:\n result = non_reducing_slice(subset)\n tm.assert_frame_equal(df.loc[result], df.loc[expected])\n\n def test_maybe_numeric_slice(self):\n df = DataFrame({\"A\": [1, 2], \"B\": [\"c\", \"d\"], \"C\": [True, False]})\n result = maybe_numeric_slice(df, slice_=None)\n expected = pd.IndexSlice[:, [\"A\"]]\n assert result == expected\n\n result = maybe_numeric_slice(df, None, include_bool=True)\n expected = pd.IndexSlice[:, [\"A\", \"C\"]]\n assert all(result[1] == expected[1])\n result = maybe_numeric_slice(df, [1])\n expected = [1]\n assert result == expected\n\n def test_partial_boolean_frame_indexing(self):\n # GH 17170\n df = DataFrame(\n np.arange(9.0).reshape(3, 3), index=list(\"abc\"), columns=list(\"ABC\")\n )\n index_df = DataFrame(1, index=list(\"ab\"), columns=list(\"AB\"))\n result = df[index_df.notnull()]\n expected = DataFrame(\n np.array([[0.0, 1.0, np.nan], [3.0, 4.0, np.nan], [np.nan] * 3]),\n index=list(\"abc\"),\n columns=list(\"ABC\"),\n )\n tm.assert_frame_equal(result, expected)\n\n def test_no_reference_cycle(self):\n df = DataFrame({\"a\": [0, 1], \"b\": [2, 3]})\n for name in (\"loc\", \"iloc\", \"at\", \"iat\"):\n getattr(df, name)\n wr = weakref.ref(df)\n del df\n assert wr() is None\n\n def test_label_indexing_on_nan(self):\n # GH 32431\n df = Series([1, \"{1,2}\", 1, None])\n vc = df.value_counts(dropna=False)\n result1 = vc.loc[np.nan]\n result2 = vc[np.nan]\n\n expected = 1\n assert result1 == expected\n assert result2 == expected\n\n\nclass TestSeriesNoneCoercion:\n EXPECTED_RESULTS = [\n # For numeric series, we should coerce to NaN.\n ([1, 2, 3], [np.nan, 2, 3]),\n ([1.0, 2.0, 3.0], [np.nan, 2.0, 3.0]),\n # For datetime series, we should coerce to NaT.\n (\n [datetime(2000, 1, 1), datetime(2000, 1, 2), datetime(2000, 1, 3)],\n [NaT, datetime(2000, 1, 2), datetime(2000, 1, 3)],\n ),\n # For objects, we should preserve the None value.\n ([\"foo\", \"bar\", \"baz\"], [None, \"bar\", \"baz\"]),\n ]\n\n @pytest.mark.parametrize(\"start_data,expected_result\", EXPECTED_RESULTS)\n def test_coercion_with_setitem(self, start_data, expected_result):\n start_series = Series(start_data)\n start_series[0] = None\n\n expected_series = Series(expected_result)\n tm.assert_series_equal(start_series, expected_series)\n\n @pytest.mark.parametrize(\"start_data,expected_result\", EXPECTED_RESULTS)\n def test_coercion_with_loc_setitem(self, start_data, expected_result):\n start_series = Series(start_data)\n start_series.loc[0] = None\n\n expected_series = Series(expected_result)\n tm.assert_series_equal(start_series, expected_series)\n\n @pytest.mark.parametrize(\"start_data,expected_result\", EXPECTED_RESULTS)\n def test_coercion_with_setitem_and_series(self, start_data, expected_result):\n start_series = Series(start_data)\n start_series[start_series == 
start_series[0]] = None\n\n expected_series = Series(expected_result)\n tm.assert_series_equal(start_series, expected_series)\n\n @pytest.mark.parametrize(\"start_data,expected_result\", EXPECTED_RESULTS)\n def test_coercion_with_loc_and_series(self, start_data, expected_result):\n start_series = Series(start_data)\n start_series.loc[start_series == start_series[0]] = None\n\n expected_series = Series(expected_result)\n tm.assert_series_equal(start_series, expected_series)\n\n\nclass TestDataframeNoneCoercion:\n EXPECTED_SINGLE_ROW_RESULTS = [\n # For numeric series, we should coerce to NaN.\n ([1, 2, 3], [np.nan, 2, 3]),\n ([1.0, 2.0, 3.0], [np.nan, 2.0, 3.0]),\n # For datetime series, we should coerce to NaT.\n (\n [datetime(2000, 1, 1), datetime(2000, 1, 2), datetime(2000, 1, 3)],\n [NaT, datetime(2000, 1, 2), datetime(2000, 1, 3)],\n ),\n # For objects, we should preserve the None value.\n ([\"foo\", \"bar\", \"baz\"], [None, \"bar\", \"baz\"]),\n ]\n\n @pytest.mark.parametrize(\"expected\", EXPECTED_SINGLE_ROW_RESULTS)\n def test_coercion_with_loc(self, expected):\n start_data, expected_result = expected\n\n start_dataframe = DataFrame({\"foo\": start_data})\n start_dataframe.loc[0, [\"foo\"]] = None\n\n expected_dataframe = DataFrame({\"foo\": expected_result})\n tm.assert_frame_equal(start_dataframe, expected_dataframe)\n\n @pytest.mark.parametrize(\"expected\", EXPECTED_SINGLE_ROW_RESULTS)\n def test_coercion_with_setitem_and_dataframe(self, expected):\n start_data, expected_result = expected\n\n start_dataframe = DataFrame({\"foo\": start_data})\n start_dataframe[start_dataframe[\"foo\"] == start_dataframe[\"foo\"][0]] = None\n\n expected_dataframe = DataFrame({\"foo\": expected_result})\n tm.assert_frame_equal(start_dataframe, expected_dataframe)\n\n @pytest.mark.parametrize(\"expected\", EXPECTED_SINGLE_ROW_RESULTS)\n def test_none_coercion_loc_and_dataframe(self, expected):\n start_data, expected_result = expected\n\n start_dataframe = DataFrame({\"foo\": start_data})\n start_dataframe.loc[start_dataframe[\"foo\"] == start_dataframe[\"foo\"][0]] = None\n\n expected_dataframe = DataFrame({\"foo\": expected_result})\n tm.assert_frame_equal(start_dataframe, expected_dataframe)\n\n def test_none_coercion_mixed_dtypes(self):\n start_dataframe = DataFrame(\n {\n \"a\": [1, 2, 3],\n \"b\": [1.0, 2.0, 3.0],\n \"c\": [datetime(2000, 1, 1), datetime(2000, 1, 2), datetime(2000, 1, 3)],\n \"d\": [\"a\", \"b\", \"c\"],\n }\n )\n start_dataframe.iloc[0] = None\n\n exp = DataFrame(\n {\n \"a\": [np.nan, 2, 3],\n \"b\": [np.nan, 2.0, 3.0],\n \"c\": [NaT, datetime(2000, 1, 2), datetime(2000, 1, 3)],\n \"d\": [None, \"b\", \"c\"],\n }\n )\n tm.assert_frame_equal(start_dataframe, exp)\n\n\ndef test_extension_array_cross_section():\n # A cross-section of a homogeneous EA should be an EA\n df = DataFrame(\n {\n \"A\": pd.core.arrays.integer_array([1, 2]),\n \"B\": pd.core.arrays.integer_array([3, 4]),\n },\n index=[\"a\", \"b\"],\n )\n expected = Series(pd.core.arrays.integer_array([1, 3]), index=[\"A\", \"B\"], name=\"a\")\n result = df.loc[\"a\"]\n tm.assert_series_equal(result, expected)\n\n result = df.iloc[0]\n tm.assert_series_equal(result, expected)\n\n\ndef test_extension_array_cross_section_converts():\n # all numeric columns -> numeric series\n df = DataFrame(\n {\"A\": pd.array([1, 2], dtype=\"Int64\"), \"B\": np.array([1, 2])}, index=[\"a\", \"b\"]\n )\n result = df.loc[\"a\"]\n expected = Series([1, 1], dtype=\"Int64\", index=[\"A\", \"B\"], name=\"a\")\n tm.assert_series_equal(result, 
expected)\n\n result = df.iloc[0]\n tm.assert_series_equal(result, expected)\n\n # mixed columns -> object series\n df = DataFrame(\n {\"A\": pd.array([1, 2], dtype=\"Int64\"), \"B\": np.array([\"a\", \"b\"])},\n index=[\"a\", \"b\"],\n )\n result = df.loc[\"a\"]\n expected = Series([1, \"a\"], dtype=object, index=[\"A\", \"B\"], name=\"a\")\n tm.assert_series_equal(result, expected)\n\n result = df.iloc[0]\n tm.assert_series_equal(result, expected)\n\n\ndef test_setitem_with_bool_mask_and_values_matching_n_trues_in_length():\n # GH 30567\n ser = Series([None] * 10)\n mask = [False] * 3 + [True] * 5 + [False] * 2\n ser[mask] = range(5)\n result = ser\n expected = Series([None] * 3 + list(range(5)) + [None] * 2).astype(\"object\")\n tm.assert_series_equal(result, expected)\n\n\ndef test_missing_labels_inside_loc_matched_in_error_message():\n # GH34272\n s = Series({\"a\": 1, \"b\": 2, \"c\": 3})\n error_message_regex = \"missing_0.*missing_1.*missing_2\"\n with pytest.raises(KeyError, match=error_message_regex):\n s.loc[[\"a\", \"b\", \"missing_0\", \"c\", \"missing_1\", \"missing_2\"]]\n\n\ndef test_many_missing_labels_inside_loc_error_message_limited():\n # GH34272\n n = 10000\n missing_labels = [f\"missing_{label}\" for label in range(n)]\n s = Series({\"a\": 1, \"b\": 2, \"c\": 3})\n # regex checks labels between 4 and 9995 are replaced with ellipses\n error_message_regex = \"missing_4.*\\\\.\\\\.\\\\..*missing_9995\"\n with pytest.raises(KeyError, match=error_message_regex):\n s.loc[[\"a\", \"c\"] + missing_labels]\n\n\ndef test_long_text_missing_labels_inside_loc_error_message_limited():\n # GH34272\n s = Series({\"a\": 1, \"b\": 2, \"c\": 3})\n missing_labels = [f\"long_missing_label_text_{i}\" * 5 for i in range(3)]\n # regex checks for very long labels there are new lines between each\n error_message_regex = \"long_missing_label_text_0.*\\\\\\\\n.*long_missing_label_text_1\"\n with pytest.raises(KeyError, match=error_message_regex):\n s.loc[[\"a\", \"c\"] + missing_labels]\n\n\ndef test_setitem_categorical():\n # https://github.com/pandas-dev/pandas/issues/35369\n df = DataFrame({\"h\": Series(list(\"mn\")).astype(\"category\")})\n df.h = df.h.cat.reorder_categories([\"n\", \"m\"])\n expected = DataFrame(\n {\"h\": pd.Categorical([\"m\", \"n\"]).reorder_categories([\"n\", \"m\"])}\n )\n tm.assert_frame_equal(df, expected)\n"
] | [
[
"pandas.Series",
"pandas.array",
"pandas._testing.assert_frame_equal",
"pandas.Float64Index",
"pandas.core.dtypes.common.is_float_dtype",
"pandas.Categorical",
"pandas._testing.assert_series_equal",
"pandas.core.indexing.maybe_numeric_slice",
"pandas._testing.assert_produces_warning",
"numpy.random.rand",
"numpy.linspace",
"pandas.Timestamp",
"numpy.zeros",
"pandas.core.arrays.integer_array",
"numpy.arange",
"pandas.tests.indexing.common._mklbl",
"pandas.core.indexing.non_reducing_slice",
"pandas.Index",
"pandas._testing.makeCustomDataframe",
"pandas.DataFrame",
"numpy.random.randn",
"numpy.random.random",
"pandas._testing.assert_index_equal",
"numpy.array",
"numpy.random.randint",
"pandas.core.dtypes.common.is_integer_dtype"
]
] |
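One behavior worth calling out from test_setitem_dtype_upcast in the pandas test file above: assigning a value that does not fit a column's dtype through .loc upcasts just that column, leaving the other columns' dtypes untouched. A condensed sketch of the same scenario the test asserts, assuming a pandas version of roughly the vintage these tests target:

import numpy as np
import pandas as pd

df = pd.DataFrame(
    np.arange(6, dtype="int64").reshape(2, 3),
    index=list("ab"),
    columns=["foo", "bar", "baz"],
)
df.loc["a", "bar"] = "wxyz"   # string assigned into an int64 column
print(df.dtypes)              # 'bar' becomes object; 'foo' and 'baz' stay int64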
jinhan814/PyTorch-GAN-Study | [
"c63ed1bbcbc663d3267671d8ded4ed13c766b738"
] | [
"PGGAN/hyun_experiments.py"
] | [
"import torch\n## 실험용 입니다\n\n# x = torch.randint(10,size=(1,4,2,2))\n# print(x)\n# print(x.size())\n\n# factor =2\n# s = x.size()\n# x = x.view(-1, s[1], s[2], 1, s[3], 1) # (-1, 4, 2, 1, 2, 1)\n# print(x.size())\n# # print(x)\n# x = x.expand(-1, s[1], s[2], factor, s[3], factor) # (-1, 4,2,2,2,2)\n# print(x.size())\n# # print(x)\n# x = x.contiguous().view(-1, s[1], s[2] * factor, s[3] * factor)\n# # x = x.view(-1, s[1], s[2] * factor, s[3] * factor)\n# print(x.size())\n# # print(x)\n\n# x = torch.rand(,4,2,2)\n# subGroupSize = 4\n\n# size = x.size()\n# subGroupSize = min(size[0], subGroupSize)\n# if size[0] % subGroupSize != 0:\n# subGroupSize = size[0]\n# G = int(size[0] / subGroupSize)\n\n# print(subGroupSize,G)\n# print(x)\n# if subGroupSize > 1:\n# y = x.view(-1, subGroupSize, size[1], size[2], size[3])\n# print(y)\n# y = torch.var(y, 1)\n# print(y)\n# y = torch.sqrt(y + 1e-8)\n# print(y)\n# y = y.view(G, -1)\n# print(y)\n# y = torch.mean(y, 1).view(G, 1)\n# print(y)\n# y = y.expand(G, size[2]*size[3]).view((G, 1, 1, size[2], size[3]))\n# print(y)\n# y = y.expand(G, subGroupSize, -1, -1, -1)\n# print(y)\n# y = y.contiguous().view((-1, 1, size[2], size[3]))\n# else:\n# y = torch.zeros(x.size(0), 1, x.size(2), x.size(3), device=x.device)\n#\n\nimport torch\nimport torchvision\nimport cv2\nx = torch.randint(10,size=(8,8,3))\nx= torch.transpose(x,(2,0,1))\nprint(x.size())\nx = torchvision.transforms.Resize((4,4))(x)\nx = torch.transpose(x,(1,2,0))\nprint(x.size())"
] | [
[
"torch.randint",
"torch.transpose"
]
] |
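The first commented-out block in hyun_experiments.py above is probing PGGAN's view/expand nearest-neighbour upsampling trick: insert singleton dims next to H and W, expand them by the scale factor, and collapse back. A working sketch of that trick as a function, assuming NCHW input and factor 2 (the function name is mine, not the repo's):

import torch

def upscale2d(x: torch.Tensor, factor: int = 2) -> torch.Tensor:
    # (N, C, H, W) -> (N, C, H*factor, W*factor) by repeating each pixel.
    s = x.size()
    x = x.view(-1, s[1], s[2], 1, s[3], 1)
    x = x.expand(-1, s[1], s[2], factor, s[3], factor)
    return x.contiguous().view(-1, s[1], s[2] * factor, s[3] * factor)

x = torch.randint(10, size=(1, 4, 2, 2))
print(upscale2d(x).size())    # torch.Size([1, 4, 4, 4])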
nxdao2000/probability | [
"33d2bc1cb0e7b6284579ea7f3692b9d056e0d700",
"33d2bc1cb0e7b6284579ea7f3692b9d056e0d700"
] | [
"tensorflow_probability/python/positive_semidefinite_kernels/positive_semidefinite_kernel.py",
"tensorflow_probability/python/internal/backend/numpy/nn.py"
] | [
"# Copyright 2018 The TensorFlow Probability Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"PositiveSemidefiniteKernel base.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport abc\nimport contextlib\nimport functools\nimport operator\nimport six\nimport tensorflow.compat.v2 as tf\n\nfrom tensorflow_probability.python.positive_semidefinite_kernels.internal import util\n\n\n__all__ = [\n 'PositiveSemidefiniteKernel',\n]\n\n\[email protected]_metaclass(abc.ABCMeta)\nclass PositiveSemidefiniteKernel(tf.Module):\n \"\"\"Abstract base class for positive semi-definite kernel functions.\n\n #### Background\n\n For any set `S`, a real- (or complex-valued) function `k` on the Cartesian\n product `S x S` is called positive semi-definite if we have\n\n ```none\n sum_i sum_j (c[i]*) c[j] k(x[i], x[j]) >= 0\n ```\n\n for any finite collections `{x[1], ..., x[N]}` in S and `{c[1], ..., c[N]}` in\n the reals (or the complex plane). '*' denotes the complex conjugate, in the\n complex case.\n\n Some examples:\n - `S` is R, and `k(s, t) = (s - a) (t - b)`, where a, b are in R. This\n corresponds to a linear kernel.\n - `S` is R^+ U {0}, and `k(s, t) = min(s, t)`. This corresponds to a kernel\n for a Wiener process.\n - `S` is the set of strings over an alphabet `A = {c1, ... cC}`, and\n `k(s, t)` is defined via some similarity metric over strings.\n\n We model positive semi-definite functions (*kernels*, in common machine\n learning parlance) as classes with 3 primary public methods: `apply`,\n `matrix`, and `tensor`.\n\n `apply` computes the value of the kernel function at a pair of (batches of)\n input locations. It is the more \"low-level\" operation: `matrix` and `tensor`\n are implemented in terms of `apply`.\n\n `matrix` computes the value of the kernel *pairwise* on two (batches of)\n lists of input examples. When the two collections are the same the result is\n called the Gram (or Gramian) matrix\n (https://en.wikipedia.org/wiki/Gramian_matrix).\n\n `tensor` generalizes `matrix`, taking rank `k1` and `k2` collections of\n input examples to a rank `k1 + k2` collection of kernel values.\n\n #### Kernel Parameter Shape Semantics\n\n PositiveSemidefiniteKernel implementations support batching of kernel\n parameters and broadcasting of these parameters across batches of inputs. This\n allows, for example, creating a single kernel object which acts like a\n collection of kernels with different parameters. This might be useful for,\n e.g., for exploring multiple random initializations in parallel during a\n kernel parameter optimization procedure.\n\n The interaction between kernel parameter shapes and input shapes (see below)\n is somewhat subtle. The semantics are designed to make the most common use\n cases easy, while not ruling out more intricate control. 
The overarching\n principle is that kernel parameter batch shapes must be broadcastable with\n input batch shapes (see below). Examples are provided in the method-level\n documentation.\n\n #### Input Shape Semantics\n\n PositiveSemidefiniteKernel methods each support a notion of batching inputs;\n see the method-level documentation for full details; here we describe the\n overall semantics of input shapes. Inputs to PositiveSemidefiniteKernel\n methods partition into 3 pieces:\n\n ```none\n [b1, ..., bB, e1, ..., eE, f1, ..., fF]\n '----------' '---------' '---------'\n | | '-- Feature dimensions\n | '-- Example dimensions\n '-- Batch dimensions\n ```\n\n - Feature dimensions correspond to the space over which the kernel is defined;\n in typical applications inputs are vectors and this part of the shape is\n rank-1. For example, if our kernel is defined over R^2 x R^2, each input is\n a 2-D vector (a rank-1 tensor of shape `[2,]`) so that\n `F = 1, [f1, ..., fF] = [2]`. If we defined a kernel over DxD matrices, its\n domain would be R^(DxD) x R^(DxD), we would have `F = 2` and\n `[f1, ..., fF] = [D, D]`. Feature shapes of inputs should be the same, but\n no exception will be raised unless they are broadcast-incompatible.\n - Batch dimensions describe collections of inputs which in some sense have\n nothing to do with each other, but may be coupled to batches of kernel\n parameters. It's required that batch dimensions of inputs broadcast with\n each other, and with the kernel's overall batch shape.\n - Example dimensions are shape elements which represent a collection of inputs\n that in some sense \"go together\" (whereas batches are \"independent\"). The\n exact semantics are different for the `apply`, `matrix` and `tensor` methods\n (see method-level doc strings for more details). `apply` combines examples\n together pairwise, much like the python built-in `zip`. `matrix` combines\n examples pairwise for *all* pairs of elements from two rank-1 input\n collections (lists), ie, it applies the kernel to all elements in the\n cross-product of two lists of examples. `tensor` further generalizes\n `matrix` to higher rank collections of inputs. Only `matrix` strictly\n requires example dimensions to be present (and to be exactly rank 1),\n although the typical usage of `apply` (eg, building a matrix diagonal) will\n also have `example_ndims` 1.\n\n ##### Examples\n\n ```python\n import tensorflow_probability as tfp\n\n # Suppose `SomeKernel` acts on vectors (rank-1 tensors), ie number of\n # feature dimensions is 1.\n scalar_kernel = tfp.positive_semidefinite_kernels.SomeKernel(param=.5)\n scalar_kernel.batch_shape\n # ==> []\n\n # `x` and `y` are batches of five 3-D vectors:\n x = np.ones([5, 3], np.float32)\n y = np.ones([5, 3], np.float32)\n scalar_kernel.apply(x, y).shape\n # ==> [5]\n\n scalar_kernel.matrix(x, y).shape\n # ==> [5, 5]\n ```\n\n Now we can consider a kernel with batched parameters:\n\n ```python\n batch_kernel = tfp.positive_semidefinite_kernels.SomeKernel(param=[.2, .5])\n batch_kernel.batch_shape\n # ==> [2]\n\n # `x` and `y` are batches of five 3-D vectors:\n x = np.ones([5, 3], np.float32)\n y = np.ones([5, 3], np.float32)\n\n batch_kernel.apply(x, y).shape\n # ==> Error! 
[2] and [5] can't broadcast.\n # We could solve this by telling `apply` to treat the 5 as an example dim:\n\n batch_kernel.apply(x, y, example_ndims=1).shape\n # ==> [2, 5]\n\n # Note that example_ndims is implicitly 1 for a call to `matrix`, so the\n # following just works:\n batch_kernel.matrix(x, y).shape\n # ==> [2, 5, 5]\n ```\n\n \"\"\"\n\n def __init__(self, feature_ndims, dtype=None, name=None):\n \"\"\"Construct a PositiveSemidefiniteKernel (subclass) instance.\n\n Args:\n feature_ndims: Python `integer` indicating the number of dims (the rank)\n of the feature space this kernel acts on.\n dtype: `DType` on which this kernel operates.\n name: Python `str` name prefixed to Ops created by this class. Default:\n subclass name.\n\n Raises:\n ValueError: if `feature_ndims` is not an integer greater than 0\n Inputs to PositiveSemidefiniteKernel methods partition into 3 pieces:\n\n ```none\n [b1, ..., bB, e1, ..., eE, f1, ..., fF]\n '----------' '---------' '---------'\n | | '-- Feature dimensions\n | '-- Example dimensions\n '-- Batch dimensions\n ```\n\n The `feature_ndims` argument declares how many of the right-most shape\n dimensions belong to the feature dimensions. This enables us to predict\n which shape dimensions will be 'reduced' away during kernel computation.\n \"\"\"\n if not (isinstance(feature_ndims, int) and feature_ndims > 0):\n raise ValueError(\n '`feature_ndims` must be a Python `integer` greater than zero. ' +\n 'Got: {}'.format(feature_ndims))\n self._feature_ndims = feature_ndims\n self._dtype = dtype\n if not name or name[-1] != '/': # `name` is not a name scope\n name = tf.name_scope(name or type(self).__name__).name\n self._name = name\n\n @property\n def feature_ndims(self):\n \"\"\"The number of feature dimensions.\n\n Kernel functions generally act on pairs of inputs from some space like\n\n ```none\n R^(d1 x ... x dD)\n ```\n\n or, in words: rank-`D` real-valued tensors of shape `[d1, ..., dD]`. Inputs\n can be vectors in some `R^N`, but are not restricted to be. Indeed, one\n might consider kernels over matrices, tensors, or even more general spaces,\n like strings or graphs.\n\n Returns:\n The number of feature dimensions (feature rank) of this kernel.\n \"\"\"\n return self._feature_ndims\n\n @property\n def dtype(self):\n \"\"\"DType over which the kernel operates.\"\"\"\n return self._dtype\n\n @property\n def name(self):\n \"\"\"Name prepended to all ops created by this class.\"\"\"\n return self._name\n\n @property\n def batch_shape(self):\n \"\"\"The batch_shape property of a PositiveSemidefiniteKernel.\n\n This property describes the fully broadcast shape of all kernel parameters.\n For example, consider an ExponentiatedQuadratic kernel, which is\n parameterized by an amplitude and length_scale:\n\n ```none\n exp_quad(x, x') := amplitude * exp(||x - x'||**2 / length_scale**2)\n ```\n\n The batch_shape of such a kernel is derived from broadcasting the shapes of\n `amplitude` and `length_scale`. 
E.g., if their shapes were\n\n ```python\n amplitude.shape = [2, 1, 1]\n length_scale.shape = [1, 4, 3]\n ```\n\n then `exp_quad`'s batch_shape would be `[2, 4, 3]`.\n\n Note that this property defers to the private _batch_shape method, which\n concrete implementation sub-classes are obliged to provide.\n\n Returns:\n `TensorShape` instance describing the fully broadcast shape of all\n kernel parameters.\n \"\"\"\n return self._batch_shape()\n\n def batch_shape_tensor(self):\n \"\"\"The batch_shape property of a PositiveSemidefiniteKernel as a `Tensor`.\n\n Returns:\n `Tensor` which evaluates to a vector of integers which are the\n fully-broadcast shapes of the kernel parameters.\n \"\"\"\n with tf.name_scope(self._name):\n if self.batch_shape.is_fully_defined():\n return tf.convert_to_tensor(\n self.batch_shape.as_list(), dtype=tf.int32, name='batch_shape')\n with tf.name_scope('batch_shape_tensor'):\n return self._batch_shape_tensor()\n\n @contextlib.contextmanager\n def _name_scope(self, name=None, values=None):\n \"\"\"Helper function to standardize op scope.\"\"\"\n with tf.name_scope(self.name):\n values = [] if values is None else values\n with tf.name_scope(name) as scope:\n yield scope\n\n def apply(self, x1, x2, example_ndims=0):\n \"\"\"Apply the kernel function pairs of inputs.\n\n Args:\n x1: `Tensor` input to the kernel, of shape `B1 + E1 + F`, where `B1` and\n `E1` may be empty (ie, no batch/example dims, resp.) and `F` (the\n feature shape) must have rank equal to the kernel's `feature_ndims`\n property. Batch shape must broadcast with the batch shape of `x2` and\n with the kernel's batch shape. Example shape must broadcast with example\n shape of `x2`. `x1` and `x2` must have the same *number* of example dims\n (ie, same rank).\n x2: `Tensor` input to the kernel, of shape `B2 + E2 + F`, where `B2` and\n `E2` may be empty (ie, no batch/example dims, resp.) and `F` (the\n feature shape) must have rank equal to the kernel's `feature_ndims`\n property. Batch shape must broadcast with the batch shape of `x2` and\n with the kernel's batch shape. Example shape must broadcast with example\n shape of `x2`. `x1` and `x2` must have the same *number* of example\n example_ndims: A python integer, the number of example dims in the inputs.\n In essence, this parameter controls how broadcasting of the kernel's\n batch shape with input batch shapes works. The kernel batch shape will\n be broadcast against everything to the left of the combined example and\n feature dimensions in the input shapes.\n\n Returns:\n `Tensor` containing the results of applying the kernel function to inputs\n `x1` and `x2`. If the kernel parameters' batch shape is `Bk` then the\n shape of the `Tensor` resulting from this method call is\n `broadcast(Bk, B1, B2) + broadcast(E1, E2)`.\n\n Given an index set `S`, a kernel function is mathematically defined as a\n real- or complex-valued function on `S` satisfying the\n positive semi-definiteness constraint:\n\n ```none\n sum_i sum_j (c[i]*) c[j] k(x[i], x[j]) >= 0\n ```\n\n for any finite collections `{x[1], ..., x[N]}` in `S` and\n `{c[1], ..., c[N]}` in the reals (or the complex plane). '*' is the complex\n conjugate, in the complex case.\n\n This method most closely resembles the function described in the\n mathematical definition of a kernel. 
Given a PositiveSemidefiniteKernel `k`\n with scalar parameters and inputs `x` and `y` in `S`, `apply(x, y)` yields a\n single scalar value.\n\n #### Examples\n\n ```python\n import tensorflow_probability as tfp\n\n # Suppose `SomeKernel` acts on vectors (rank-1 tensors)\n scalar_kernel = tfp.positive_semidefinite_kernels.SomeKernel(param=.5)\n scalar_kernel.batch_shape\n # ==> []\n\n # `x` and `y` are batches of five 3-D vectors:\n x = np.ones([5, 3], np.float32)\n y = np.ones([5, 3], np.float32)\n scalar_kernel.apply(x, y).shape\n # ==> [5]\n ```\n\n The above output is the result of vectorized computation of the five values\n\n ```none\n [k(x[0], y[0]), k(x[1], y[1]), ..., k(x[4], y[4])]\n ```\n\n Now we can consider a kernel with batched parameters:\n\n ```python\n batch_kernel = tfp.positive_semidefinite_kernels.SomeKernel(param=[.2, .5])\n batch_kernel.batch_shape\n # ==> [2]\n batch_kernel.apply(x, y).shape\n # ==> Error! [2] and [5] can't broadcast.\n ```\n\n The parameter batch shape of `[2]` and the input batch shape of `[5]` can't\n be broadcast together. We can fix this in either of two ways:\n\n 1. Give the parameter a shape of `[2, 1]` which will correctly\n broadcast with `[5]` to yield `[2, 5]`:\n\n ```python\n batch_kernel = tfp.positive_semidefinite_kernels.SomeKernel(\n param=[[.2], [.5]])\n batch_kernel.batch_shape\n # ==> [2, 1]\n batch_kernel.apply(x, y).shape\n # ==> [2, 5]\n ```\n\n 2. By specifying `example_ndims`, which tells the kernel to treat the `5`\n in the input shape as part of the \"example shape\", and \"pushing\" the\n kernel batch shape to the left:\n\n ```python\n batch_kernel = tfp.positive_semidefinite_kernels.SomeKernel(param=[.2, .5])\n batch_kernel.batch_shape\n # ==> [2]\n batch_kernel.apply(x, y, example_ndims=1).shape\n # ==> [2, 5]\n\n \"\"\"\n with self._name_scope(self._name, values=[x1, x2]):\n x1 = tf.convert_to_tensor(x1, name='x1')\n x2 = tf.convert_to_tensor(x2, name='x2')\n\n should_expand_dims = (example_ndims == 0)\n\n if should_expand_dims:\n example_ndims += 1\n x1 = tf.expand_dims(x1, -(self.feature_ndims + 1))\n x2 = tf.expand_dims(x2, -(self.feature_ndims + 1))\n\n result = self._apply(x1, x2, example_ndims=example_ndims)\n\n if should_expand_dims:\n result = tf.squeeze(result, axis=-1)\n\n return result\n\n def _apply(self, x1, x2, example_ndims=1):\n \"\"\"Apply the kernel function to a pair of (batches of) inputs.\n\n Subclasses must implement this method. It will always be called with\n example_ndims >= 1. Implementations should take care to respect\n example_ndims, by padding parameters on the right with 1's example_ndims\n times. See tests and existing subclasses for examples.\n\n Args:\n x1: `Tensor` input to the first positional parameter of the kernel, of\n shape `B1 + E1 + F`, where `B1` may be empty (ie, no batch dims, resp.),\n `E1` is a shape of rank at least 1, and `F` (the feature shape) must\n have rank equal to the kernel's `feature_ndims` property. Batch shape\n must broadcast with the batch shape of `x2` and with the kernel's batch\n shape. Example shape must broadcast with example shape of `x2` (They\n don't strictly need to be equal, e.g., when `apply` is called from\n `matrix`, `x1` and `x2` each have 1's in opposing positions in their\n example shapes). 
`x1` and `x2` must have the same *number* of example\n dims (ie, same rank).\n x2: `Tensor` input to the second positional parameter of the kernel,\n shape `B2 + E2 + F`, where `B2` may be empty (ie, no batch dims, resp.),\n `E2` is a shape of rank at least 1, and `F` (the feature shape) must\n have rank equal to the kernel's `feature_ndims` property. Batch shape\n must broadcast with the batch shape of `x1` and with the kernel's batch\n shape. Example shape must broadcast with example shape of `x1` (They\n don't strictly need to be equal, e.g., when `apply` is called from\n `matrix`, `x1` and `x2` each have 1's in opposing positions in their\n example shapes). `x1` and `x2` must have the same *number* of example\n dims (ie, same rank).\n example_ndims: A python integer greater than or equal to 1, the number of\n example dims in the inputs. In essence, this parameter controls how\n broadcasting of the kernel's batch shape with input batch shapes works.\n The kernel batch shape will be broadcast against everything to the left\n of the combined example and feature dimensions in the input shapes.\n\n Returns:\n `Tensor` containing the results of applying the kernel function to inputs\n `x1` and `x2`. If the kernel parameters' batch shape is `Bk` then the\n shape of the `Tensor` resulting from this method call is\n `broadcast(Bk, B1, B2) + broadcast(E1, E2)`.\n \"\"\"\n raise NotImplementedError(\n 'Subclasses must provide `_apply` implementation.')\n\n def matrix(self, x1, x2):\n \"\"\"Construct (batched) matrices from (batches of) collections of inputs.\n\n Args:\n x1: `Tensor` input to the first positional parameter of the kernel, of\n shape `B1 + [e1] + F`, where `B1` may be empty (ie, no batch dims,\n resp.), `e1` is a single integer (ie, `x1` has example ndims exactly 1),\n and `F` (the feature shape) must have rank equal to the kernel's\n `feature_ndims` property. Batch shape must broadcast with the batch\n shape of `x2` and with the kernel's batch shape.\n x2: `Tensor` input to the second positional parameter of the kernel,\n shape `B2 + [e2] + F`, where `B2` may be empty (ie, no batch dims,\n resp.), `e2` is a single integer (ie, `x2` has example ndims exactly 1),\n and `F` (the feature shape) must have rank equal to the kernel's\n `feature_ndims` property. Batch shape must broadcast with the batch\n shape of `x1` and with the kernel's batch shape.\n\n Returns:\n `Tensor` containing the matrix (possibly batched) of kernel applications\n to pairs from inputs `x1` and `x2`. If the kernel parameters' batch shape\n is `Bk` then the shape of the `Tensor` resulting from this method call is\n `broadcast(Bk, B1, B2) + [e1, e2]` (note this differs from `apply`: the\n example dimensions are concatenated, whereas in `apply` the example dims\n are broadcast together).\n\n Given inputs `x1` and `x2` of shapes\n\n ```none\n [b1, ..., bB, e1, f1, ..., fF]\n ```\n\n and\n\n ```none\n [c1, ..., cC, e2, f1, ..., fF]\n ```\n\n This method computes the batch of `e1 x e2` matrices resulting from applying\n the kernel function to all pairs of inputs from `x1` and `x2`. The shape\n of the batch of matrices is the result of broadcasting the batch shapes of\n `x1`, `x2`, and the kernel parameters (see examples below). As such, it's\n required that these shapes all be broadcast compatible. 
However, the kernel\n parameter batch shapes need not broadcast against the 'example shapes' (`e1`\n and `e2` above).\n\n When the two inputs are the (batches of) identical collections, the\n resulting matrix is the so-called Gram (or Gramian) matrix\n (https://en.wikipedia.org/wiki/Gramian_matrix).\n\n #### Examples\n\n First, consider a kernel with a single scalar parameter.\n\n ```python\n import tensorflow_probability as tfp\n\n scalar_kernel = tfp.positive_semidefinite_kernels.SomeKernel(param=.5)\n scalar_kernel.batch_shape\n # ==> []\n\n # Our inputs are two lists of 3-D vectors\n x = np.ones([5, 3], np.float32)\n y = np.ones([4, 3], np.float32)\n scalar_kernel.matrix(x, y).shape\n # ==> [5, 4]\n ```\n\n The result comes from applying the kernel to the entries in `x` and `y`\n pairwise, across all pairs:\n\n ```none\n | k(x[0], y[0]) k(x[0], y[1]) ... k(x[0], y[3]) |\n | k(x[1], y[0]) k(x[1], y[1]) ... k(x[1], y[3]) |\n | ... ... ... |\n | k(x[4], y[0]) k(x[4], y[1]) ... k(x[4], y[3]) |\n ```\n\n Now consider a kernel with batched parameters with the same inputs\n\n ```python\n batch_kernel = tfp.positive_semidefinite_kernels.SomeKernel(param=[1., .5])\n batch_kernel.batch_shape\n # ==> [2]\n\n batch_kernel.matrix(x, y).shape\n # ==> [2, 5, 4]\n ```\n\n This results in a batch of 2 matrices, one computed from the kernel with\n `param = 1.` and the other with `param = .5`.\n\n We also support batching of the inputs. First, let's look at that with\n the scalar kernel again.\n\n ```python\n # Batch of 10 lists of 5 vectors of dimension 3\n x = np.ones([10, 5, 3], np.float32)\n\n # Batch of 10 lists of 4 vectors of dimension 3\n y = np.ones([10, 4, 3], np.float32)\n\n scalar_kernel.matrix(x, y).shape\n # ==> [10, 5, 4]\n ```\n\n The result is a batch of 10 matrices built from the batch of 10 lists of\n input vectors. These batch shapes have to be broadcastable. The following\n will *not* work:\n\n ```python\n x = np.ones([10, 5, 3], np.float32)\n y = np.ones([20, 4, 3], np.float32)\n scalar_kernel.matrix(x, y).shape\n # ==> Error! [10] and [20] can't broadcast.\n ```\n\n Now let's consider batches of inputs in conjunction with batches of kernel\n parameters. We require that the input batch shapes be broadcastable with\n the kernel parameter batch shapes, otherwise we get an error:\n\n ```python\n x = np.ones([10, 5, 3], np.float32)\n y = np.ones([10, 4, 3], np.float32)\n\n batch_kernel = tfp.positive_semidefinite_kernels.SomeKernel(params=[1., .5])\n batch_kernel.batch_shape\n # ==> [2]\n batch_kernel.matrix(x, y).shape\n # ==> Error! 
[2] and [10] can't broadcast.\n    ```\n\n    The fix is to make the kernel parameter shape broadcastable with `[10]` (or\n    reshape the inputs to be broadcastable!):\n\n    ```python\n    x = np.ones([10, 5, 3], np.float32)\n    y = np.ones([10, 4, 3], np.float32)\n\n    batch_kernel = tfp.positive_semidefinite_kernels.SomeKernel(\n        params=[[1.], [.5]])\n    batch_kernel.batch_shape\n    # ==> [2, 1]\n    batch_kernel.matrix(x, y).shape\n    # ==> [2, 10, 5, 4]\n\n    # Or, make the inputs broadcastable:\n    x = np.ones([10, 1, 5, 3], np.float32)\n    y = np.ones([10, 1, 4, 3], np.float32)\n\n    batch_kernel = tfp.positive_semidefinite_kernels.SomeKernel(\n        params=[1., .5])\n    batch_kernel.batch_shape\n    # ==> [2]\n    batch_kernel.matrix(x, y).shape\n    # ==> [10, 2, 5, 4]\n    ```\n\n    Here, we have the result of applying the kernel, with 2 different\n    parameters, to each of a batch of 10 pairs of input lists.\n\n    \"\"\"\n    with self._name_scope(self._name, values=[x1, x2]):\n      x1 = tf.convert_to_tensor(x1, name='x1')\n      x2 = tf.convert_to_tensor(x2, name='x2')\n\n      return self.tensor(x1, x2, x1_example_ndims=1, x2_example_ndims=1)\n\n  def tensor(self, x1, x2, x1_example_ndims, x2_example_ndims):\n    \"\"\"Construct (batched) tensors from (batches of) collections of inputs.\n\n    Args:\n      x1: `Tensor` input to the first positional parameter of the kernel, of\n        shape `B1 + E1 + F`, where `B1` and `E1` are arbitrary shapes which may\n        be empty (ie, no batch/example dims, resp.), and `F` (the feature\n        shape) must have rank equal to the kernel's `feature_ndims` property.\n        Batch shape must broadcast with the batch shape of `x2` and with the\n        kernel's batch shape.\n      x2: `Tensor` input to the second positional parameter of the kernel,\n        shape `B2 + E2 + F`, where `B2` and `E2` are arbitrary shapes which may\n        be empty (ie, no batch/example dims, resp.), and `F` (the feature\n        shape) must have rank equal to the kernel's `feature_ndims` property.\n        Batch shape must broadcast with the batch shape of `x1` and with the\n        kernel's batch shape.\n      x1_example_ndims: A python integer greater than or equal to 0, the number\n        of example dims in the first input. This affects both the alignment of\n        batch shapes and the shape of the final output of the function.\n        Everything left of the feature shape and the example dims in `x1` is\n        considered \"batch shape\", and must broadcast as specified above.\n      x2_example_ndims: A python integer greater than or equal to 0, the number\n        of example dims in the second input. This affects both the alignment of\n        batch shapes and the shape of the final output of the function.\n        Everything left of the feature shape and the example dims in `x2` is\n        considered \"batch shape\", and must broadcast as specified above.\n\n    Returns:\n      `Tensor` containing (possibly batched) kernel applications to pairs from\n      inputs `x1` and `x2`. If the kernel parameters' batch shape is `Bk` then\n      the shape of the `Tensor` resulting from this method call is\n      `broadcast(Bk, B1, B2) + E1 + E2`. Note this differs from `apply`: the\n      example dimensions are concatenated, whereas in `apply` the example dims\n      are broadcast together. It also differs from `matrix`: the example shapes\n      are arbitrary here, and the result accrues a rank equal to the sum of the\n      ranks of the input example shapes.\n\n    #### Examples\n\n    First, consider a kernel with a single scalar parameter.\n\n    ```python\n    import tensorflow_probability as tfp\n\n    scalar_kernel = tfp.positive_semidefinite_kernels.SomeKernel(param=.5)\n    scalar_kernel.batch_shape\n    # ==> []\n\n    # Our inputs are two rank-2 collections of 3-D vectors\n    x = np.ones([5, 6, 3], np.float32)\n    y = np.ones([7, 8, 3], np.float32)\n    scalar_kernel.tensor(x, y, x1_example_ndims=2, x2_example_ndims=2).shape\n    # ==> [5, 6, 7, 8]\n\n    # Empty example shapes work too!\n    x = np.ones([3], np.float32)\n    y = np.ones([5, 3], np.float32)\n    scalar_kernel.tensor(x, y, x1_example_ndims=0, x2_example_ndims=1).shape\n    # ==> [5]\n    ```\n\n    The result contains the kernel applied to all pairings of entries from `x`\n    and `y`, indexed by the concatenated example shapes: in the rank-2 example\n    above, entry `[i1, i2, j1, j2]` equals `k(x[i1, i2], y[j1, j2])`.\n\n    Now consider a kernel with batched parameters.\n\n    ```python\n    batch_kernel = tfp.positive_semidefinite_kernels.SomeKernel(param=[1., .5])\n    batch_kernel.batch_shape\n    # ==> [2]\n\n    # Inputs are two rank-2 collections of 3-D vectors\n    x = np.ones([5, 6, 3], np.float32)\n    y = np.ones([7, 8, 3], np.float32)\n    batch_kernel.tensor(x, y, x1_example_ndims=2, x2_example_ndims=2).shape\n    # ==> [2, 5, 6, 7, 8]\n    ```\n\n    We also support batching of the inputs. First, let's look at that with\n    the scalar kernel again.\n\n    ```python\n    # Batch of 10 lists of 5x6 collections of dimension 3\n    x = np.ones([10, 5, 6, 3], np.float32)\n\n    # Batch of 10 lists of 7x8 collections of dimension 3\n    y = np.ones([10, 7, 8, 3], np.float32)\n\n    scalar_kernel.tensor(x, y, x1_example_ndims=2, x2_example_ndims=2).shape\n    # ==> [10, 5, 6, 7, 8]\n    ```\n\n    The result is a batch of 10 tensors built from the batch of 10 rank-2\n    collections of input vectors. The batch shapes have to be broadcastable.\n    The following will *not* work:\n\n    ```python\n    x = np.ones([10, 5, 3], np.float32)\n    y = np.ones([20, 4, 3], np.float32)\n    scalar_kernel.tensor(x, y, x1_example_ndims=1, x2_example_ndims=1).shape\n    # ==> Error! [10] and [20] can't broadcast.\n    ```\n\n    Now let's consider batches of inputs in conjunction with batches of kernel\n    parameters. We require that the input batch shapes be broadcastable with\n    the kernel parameter batch shapes, otherwise we get an error:\n\n    ```python\n    x = np.ones([10, 5, 6, 3], np.float32)\n    y = np.ones([10, 7, 8, 3], np.float32)\n\n    batch_kernel = tfp.positive_semidefinite_kernels.SomeKernel(params=[1., .5])\n    batch_kernel.batch_shape\n    # ==> [2]\n    batch_kernel.tensor(x, y, x1_example_ndims=2, x2_example_ndims=2).shape\n    # ==> Error! 
[2] and [10] can't broadcast.\n ```\n\n The fix is to make the kernel parameter shape broadcastable with `[10]` (or\n reshape the inputs to be broadcastable!):\n\n ```python\n x = np.ones([10, 5, 6, 3], np.float32)\n y = np.ones([10, 7, 8, 3], np.float32)\n\n batch_kernel = tfp.positive_semidefinite_kernels.SomeKernel(\n params=[[1.], [.5]])\n batch_kernel.batch_shape\n # ==> [2, 1]\n batch_kernel.tensor(x, y, x1_example_ndims=2, x2_example_ndims=2).shape\n # ==> [2, 10, 5, 6, 7, 8]\n\n # Or, make the inputs broadcastable:\n x = np.ones([10, 1, 5, 6, 3], np.float32)\n y = np.ones([10, 1, 7, 8, 3], np.float32)\n\n batch_kernel = tfp.positive_semidefinite_kernels.SomeKernel(\n params=[1., .5])\n batch_kernel.batch_shape\n # ==> [2]\n batch_kernel.tensor(x, y, x1_example_ndims=2, x2_example_ndims=2).shape\n # ==> [10, 2, 5, 6, 7, 8]\n ```\n\n \"\"\"\n with self._name_scope(self._name, values=[x1, x2]):\n x1 = tf.convert_to_tensor(x1, name='x1')\n x2 = tf.convert_to_tensor(x2, name='x2')\n\n x1 = util.pad_shape_with_ones(\n x1,\n ndims=x2_example_ndims,\n start=-(self.feature_ndims + 1))\n\n x2 = util.pad_shape_with_ones(\n x2,\n ndims=x1_example_ndims,\n start=-(self.feature_ndims + 1 + x2_example_ndims))\n\n return self.apply(\n x1, x2, example_ndims=(x1_example_ndims + x2_example_ndims))\n\n def _batch_shape(self):\n raise NotImplementedError('Subclasses must provide batch_shape property.')\n\n def _batch_shape_tensor(self):\n raise NotImplementedError(\n 'Subclasses must provide batch_shape_tensor implementation')\n\n def __add__(self, k):\n if not isinstance(k, PositiveSemidefiniteKernel):\n raise ValueError(\n \"Can't add non-kernel (of type '%s') to kernel\" % type(k))\n return _SumKernel([self, k])\n\n def __iadd__(self, k):\n return self.__add__(k)\n\n def __mul__(self, k):\n if not isinstance(k, PositiveSemidefiniteKernel):\n raise ValueError(\n \"Can't multiply by non-kernel (of type '%s') to kernel\" % type(k))\n return _ProductKernel([self, k])\n\n def __imul__(self, k):\n return self.__mul__(k)\n\n def __str__(self):\n return ('tfp.positive_semidefinite_kernels.{type_name}('\n '\"{self_name}\"'\n '{maybe_batch_shape}'\n ', feature_ndims={feature_ndims}'\n ', dtype={dtype})'.format(\n type_name=type(self).__name__,\n self_name=self.name,\n maybe_batch_shape=(', batch_shape={}'.format(self.batch_shape)\n if self.batch_shape.ndims is not None\n else ''),\n feature_ndims=self.feature_ndims,\n dtype=None if self.dtype is None else self.dtype.name))\n\n def __repr__(self):\n return ('<tfp.positive_semidefinite_kernels.{type_name} '\n '\\'{self_name}\\''\n ' batch_shape={batch_shape}'\n ' feature_ndims={feature_ndims}'\n ' dtype={dtype}>'.format(\n type_name=type(self).__name__,\n self_name=self.name,\n batch_shape=self.batch_shape,\n feature_ndims=self.feature_ndims,\n dtype=None if self.dtype is None else self.dtype.name))\n\n\ndef _flatten_summand_list(kernels):\n \"\"\"Flatten a list of kernels which may contain _SumKernel instances.\n\n Args:\n kernels: Python list of `PositiveSemidefiniteKernel` instances\n\n Returns:\n Python list containing the elements of kernels, with any _SumKernel\n instances replaced by their `kernels` property contents.\n \"\"\"\n flattened = []\n for k in kernels:\n if isinstance(k, _SumKernel):\n flattened += k.kernels\n else:\n flattened.append(k)\n return flattened\n\n\ndef _flatten_multiplicand_list(kernels):\n \"\"\"Flatten a list of kernels which may contain _ProductKernel instances.\n\n Args:\n kernels: Python list of `PositiveSemidefiniteKernel` 
instances\n\n Returns:\n Python list containing the elements of kernels, with any _ProductKernel\n instances replaced by their `kernels` property contents.\n \"\"\"\n flattened = []\n for k in kernels:\n if isinstance(k, _ProductKernel):\n flattened += k.kernels\n else:\n flattened.append(k)\n return flattened\n\n\nclass _SumKernel(PositiveSemidefiniteKernel):\n \"\"\"Kernel class representing summation over a list of kernels.\n\n Mathematically this class represents the pointwise sum of several kernels.\n Given two kernels, `k1` and `k2`, and `kp = _SumKernel([k1, k2])`, we have\n\n ```none\n kp.apply(x, y) = k1(x, y) + k2(x, y)\n ```\n\n for any `x`, `y` in the feature space (this presumes that the constituent\n kernels all act on the same feature space).\n\n That the sum is positive semi-definite follows simply from the definition of\n positive semi-definiteness of functions. If we have\n\n ```none\n sum_i sum_j (c[i]*) c[j] k1(x[i], x[j]) >= 0\n ```\n and\n\n ```none\n sum_i sum_j (c[i]*) c[j] k2(x[i], x[j]) >= 0\n ```\n\n for any finite collections `{x[1], ..., x[N]}` in S and `{c[1], ..., c[N]}` in\n the reals (or the complex plane), then we clearly also have the same for the\n sum of `k1` and `k2`.\n \"\"\"\n\n def __init__(self, kernels, name=None):\n \"\"\"Create a kernel which is the sum of `kernels`.\n\n The input list is 'flattened' in the sense that any entries which are also\n of type `_SumKernel` will have their list of kernels appended to this\n instance's list of kernels. This will reduce the stack depth when actually\n evaluating the sum over kernel applications.\n\n Args:\n kernels: Python `list` of `PositiveSemidefiniteKernel` instances.\n name: Python `str` name prefixed to Ops created by this class.\n Raises:\n ValueError: `kernels` is an empty list, or `kernels` don't all have the\n same `feature_ndims`.\n \"\"\"\n if not kernels:\n raise ValueError(\"Can't create _SumKernel over empty list.\")\n if len(set([k.feature_ndims for k in kernels])) > 1:\n raise ValueError(\n \"Can't sum kernels with different feature_ndims. 
Got:\\n%s\" %\n str([k.feature_ndims for k in kernels]))\n self._kernels = _flatten_summand_list(kernels)\n if name is None:\n name = 'SumKernel'\n # We have ensured the list is non-empty and all feature_ndims are the same.\n super(_SumKernel, self).__init__(\n feature_ndims=kernels[0].feature_ndims,\n dtype=util.maybe_get_common_dtype(\n [None if k.dtype is None else k for k in kernels]),\n name=name)\n\n @property\n def kernels(self):\n \"\"\"The list of kernels this _SumKernel sums over.\"\"\"\n return self._kernels\n\n def _apply(self, x1, x2, example_ndims=0):\n return sum([k.apply(x1, x2, example_ndims) for k in self.kernels])\n\n def _batch_shape(self):\n return functools.reduce(tf.broadcast_static_shape,\n [k.batch_shape for k in self.kernels])\n\n def _batch_shape_tensor(self):\n return functools.reduce(tf.broadcast_dynamic_shape,\n [k.batch_shape_tensor() for k in self.kernels])\n\n\nclass _ProductKernel(PositiveSemidefiniteKernel):\n \"\"\"Kernel class representing the product over a list of kernels.\n\n Mathematically this class represents the pointwise product of several kernels.\n Given two kernels, `k1` and `k2`, and `kp = _ProductKernel([k1, k2])`, we have\n\n ```none\n kp.apply(x, y) = k1(x, y) * k2(x, y)\n ```\n\n for any x, y in the feature space (this presumes that the constituent kernels\n all act on the same feature space).\n\n The fact that this product is still positive semi-definite can be shown in a\n variety of ways, many deep and all fascinating, but follows readily from the\n [Schur product theorem](https://en.wikipedia.org/wiki/Schur_product_theorem),\n which states that the Hadamard (element-wise) product of two PSD matrices is\n also PSD.\n \"\"\"\n\n def __init__(self, kernels, name=None):\n \"\"\"Create a kernel which is the product of `kernels`.\n\n The input list is 'flattened' in the sense that any entries which are also\n of type `_ProductKernel` will have their list of kernels appended to this\n instance's list of kernels. This will reduce the stack depth when actually\n evaluating the product over kernel applications.\n\n Args:\n kernels: Python `list` of `PositiveSemidefiniteKernel` instances.\n name: Python `str` name prefixed to Ops created by this class.\n Raises:\n ValueError: `kernels` is an empty list, or `kernels` don't all have the\n same `feature_ndims`.\n \"\"\"\n if not kernels:\n raise ValueError(\"Can't create _ProductKernel over empty list.\")\n if len(set([k.feature_ndims for k in kernels])) > 1:\n raise ValueError(\n \"Can't multiply kernels with different feature_ndims. Got:\\n%s\" %\n str([k.feature_ndims for k in kernels]))\n self._kernels = _flatten_multiplicand_list(kernels)\n if name is None:\n name = 'ProductKernel'\n # We have ensured the list is non-empty and all feature_ndims are the same.\n super(_ProductKernel, self).__init__(\n feature_ndims=kernels[0].feature_ndims,\n dtype=util.maybe_get_common_dtype(\n [None if k.dtype is None else k for k in kernels]),\n name=name)\n\n @property\n def kernels(self):\n \"\"\"The list of kernels this _ProductKernel multiplies over.\"\"\"\n return self._kernels\n\n def _apply(self, x1, x2, example_ndims=0):\n return functools.reduce(\n operator.mul,\n [k.apply(x1, x2, example_ndims) for k in self.kernels])\n\n def _batch_shape(self):\n return functools.reduce(tf.broadcast_static_shape,\n [k.batch_shape for k in self.kernels])\n\n def _batch_shape_tensor(self):\n return functools.reduce(tf.broadcast_dynamic_shape,\n [k.batch_shape_tensor() for k in self.kernels])\n",
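The PSD-closure arguments in the `_SumKernel` and `_ProductKernel` docstrings above are easy to check numerically. Below is a minimal NumPy sketch, separate from the library code (`rbf_gram` is a hypothetical helper invented for this illustration): the sum and the elementwise (Hadamard) product of two PSD Gram matrices remain PSD, which is exactly what makes the `__add__` and `__mul__` overloads sound.

```python
# Sanity check, assuming only NumPy: sums and Hadamard products of PSD
# Gram matrices stay PSD (the Schur product theorem covers the product case).
import numpy as np

rng = np.random.default_rng(0)
x = rng.normal(size=(6, 3))  # six 3-D feature vectors

def rbf_gram(x, length_scale):
    # Gram matrix of an RBF kernel; PSD by construction.
    sq_dists = np.sum((x[:, None, :] - x[None, :, :]) ** 2, axis=-1)
    return np.exp(-sq_dists / (2.0 * length_scale ** 2))

k1 = rbf_gram(x, 1.0)
k2 = rbf_gram(x, 0.5)

for name, gram in [('sum', k1 + k2), ('product', k1 * k2)]:
    # eigvalsh applies because the Gram matrices are symmetric.
    print(name, 'min eigenvalue:', np.linalg.eigvalsh(gram).min())  # >= 0 up to round-off
```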
"# Copyright 2018 The TensorFlow Probability Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"Numpy implementations of TensorFlow functions.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\n# Dependency imports\nimport numpy as np\n\nimport tensorflow as tf\n\nfrom tensorflow_probability.python.internal.backend.numpy import numpy_array\n\nfrom tensorflow_probability.python.internal.backend.numpy.internal import utils\nfrom tensorflow_probability.python.internal.backend.numpy.numpy_math import l2_normalize\nfrom tensorflow_probability.python.internal.backend.numpy.numpy_math import log_softmax\nfrom tensorflow_probability.python.internal.backend.numpy.numpy_math import reduce_logsumexp\nfrom tensorflow_probability.python.internal.backend.numpy.numpy_math import softmax\nfrom tensorflow_probability.python.internal.backend.numpy.numpy_math import softplus\nfrom tensorflow_probability.python.internal.backend.numpy.numpy_math import top_k\n\n\n__all__ = [\n 'l2_normalize',\n 'log_softmax',\n 'relu',\n 'softmax',\n 'softplus',\n 'sigmoid_cross_entropy_with_logits',\n 'sparse_softmax_cross_entropy_with_logits',\n 'top_k',\n]\n\n\ndef _sigmoid_cross_entropy_with_logits( # pylint: disable=invalid-name,unused-argument\n _sentinel=None,\n labels=None,\n logits=None,\n name=None):\n return (np.maximum(logits, 0)\n - logits * labels + np.log1p(np.exp(-np.abs(logits))))\n\n\ndef _sparse_softmax_cross_entropy_with_logits( # pylint: disable=invalid-name,unused-argument\n _sentinel=None,\n labels=None,\n logits=None,\n name=None):\n \"\"\"Softmax cross entropy with logits.\"\"\"\n labels_shape = labels.shape\n num_classes = logits.shape[-1]\n logits = np.reshape(logits, [-1, num_classes])\n labels = np.reshape(labels, [-1])\n\n labels = numpy_array.one_hot(labels, num_classes)\n\n cost = -np.sum(\n labels * (logits - reduce_logsumexp(logits, axis=-1, keepdims=True)),\n axis=-1)\n cost = np.reshape(cost, labels_shape)\n return cost\n\n\n# --- Begin Public Functions --------------------------------------------------\n\nl2_normalize = utils.copy_docstring(\n tf.nn.l2_normalize,\n l2_normalize)\n\n\nrelu = utils.copy_docstring(\n tf.nn.relu,\n lambda features, name=None: np.max(features, 0))\n\n\nsoftplus = utils.copy_docstring(\n tf.nn.softplus,\n lambda features, name=None: np.log(1 + np.exp(features)))\n\n\nsigmoid_cross_entropy_with_logits = utils.copy_docstring(\n tf.nn.sigmoid_cross_entropy_with_logits,\n _sigmoid_cross_entropy_with_logits)\n\n\nsparse_softmax_cross_entropy_with_logits = utils.copy_docstring(\n tf.nn.sparse_softmax_cross_entropy_with_logits,\n _sparse_softmax_cross_entropy_with_logits)\n"
] | [
[
"tensorflow.compat.v2.name_scope",
"tensorflow.compat.v2.squeeze",
"tensorflow.compat.v2.expand_dims",
"tensorflow.compat.v2.convert_to_tensor"
],
[
"numpy.reshape",
"numpy.abs",
"numpy.exp",
"numpy.max",
"numpy.maximum"
]
] |
PeterDeWeirdt/sgrna_modeler | [
"5c6cf0330cda35acf67d7e5f58d0b2ae29bf026e"
] | [
"sgrna_modeler/models.py"
] | [
"from sgrna_modeler import features as fe\nfrom sklearn.model_selection import train_test_split\nfrom sklearn import ensemble\nfrom tensorflow import keras as k\nimport pandas as pd\nimport os\nfrom joblib import load\nimport sgrna_modeler.enzymes as en\n\ndef curr_path():\n return os.path.dirname(__file__)\n\ndef get_deepcpf1_weights():\n path = os.path.join(curr_path(), 'data/saved_models/Seq_deepCpf1_weights_tf.h5')\n return path\n\ndef get_enpam_gb():\n path = os.path.join(curr_path(), 'data/saved_models/enPAM_GB.joblib')\n return path\n\ndef build_kim2018(input_shape=(34, 4)):\n \"\"\"\n Build a convolutional neural network\n\n From:\n Kim, Hui Kwon, et al. \"Deep learning improves prediction of CRISPR–Cpf1 guide RNA activity.\" \\\n Nature biotechnology 36.3 (2018): 239.\n\n :param input_shape: guide length by nts (4)\n :type input_shape: tuple\n :return: CNN architecture\n :rtype: keras Model object\n \"\"\"\n \"\"\"Build a Convolutional neural network model from Kim 2018\n\n Parmeters\n ---------\n input_shape: tuple, optional (default (34, 4)\n shape of the first layer of the model\n\n Returns\n -------\n model: keras model object\n \"\"\"\n Input_SEQ = k.layers.Input(shape=input_shape)\n C1 = k.layers.Convolution1D(80, 5, activation='relu')(Input_SEQ)\n P1 = k.layers.AveragePooling1D(2)(C1)\n F = k.layers.Flatten()(P1)\n DO1 = k.layers.Dropout(0.3)(F)\n D1 = k.layers.Dense(80, activation='relu')(DO1)\n DO2 = k.layers.Dropout(0.3)(D1)\n D2 = k.layers.Dense(40, activation='relu')(DO2)\n DO3 = k.layers.Dropout(0.3)(D2)\n D3 = k.layers.Dense(40, activation='relu')(DO3)\n DO4 = k.layers.Dropout(0.3)(D3)\n Output = k.layers.Dense(1, activation='linear')(DO4)\n model = k.models.Model(inputs = Input_SEQ, outputs = Output)\n return model\n\nclass KerasSgrnaModel(object):\n \"\"\"This class is for creating, training, and predicting guide activity with a Keras model\n\n :param random_state: set random state in train/test split for reproducibility\n :type random_stat: int\n :param val_frac: amount of data to use for early stopping\n :type val_frac: float\n :param base_arc: base architecture to build neural network, defaults to build_kim2018\n :type base_arc: function, which takes an input shape and returns a keras model\n\n :Example:\n\n >>> from sgrna_modeler import datasets as da\n >>> from sgrna_modeler import models as sg\n >>> train_data = da.load_kim_2018_train()\n >>> train_model = sg.KerasSgrnaModel()\n >>> train_model.fit(train_data)\n >>> test_data = da.load_kim_2018_test()\n >>> test_predictions = train_model.predict(test_data)\n \"\"\"\n def __init__(self, random_state = 7, val_frac = 0.1, base_arc = None):\n \"\"\"Constructor\n \"\"\"\n self.base_name = 'Keras_CNN'\n self.val_frac = val_frac\n self.random_state = random_state\n if base_arc is None:\n self.base_arc = build_kim2018\n else:\n self.base_arc = base_arc\n self.train_dataset = None\n self.enzyme = None\n self.model = None\n self.model_history = None\n self.train_name = None\n\n def load_weights(self, weights, enzyme, name):\n \"\"\"Load previously trained weights\n\n :param enzyme: cas9 or cas12a\n :type enyme: dict\n :param weights: filepath to weights\n :type weights: str\n :param name: name of the model\n :type name:str\n \"\"\"\n if weights is None:\n weights = get_deepcpf1_weights()\n self.train_name = 'Seq-DeepCpf1'\n self.enzyme = en.cas12a\n else:\n self.train_name = name\n self.enzyme = enzyme\n model = self.base_arc(input_shape = (self.enzyme['context_length'],4))\n model.load_weights(weights)\n self.model = 
model\n return self\n\n def fit(self, train_dataset):\n \"\"\" Fit a model to the training data\n\n :param train_dataset: training data\n :type train_dataset: :class:`sgrna_modeler.datasets.ActivityData`\n :return: self\n \"\"\"\n self.train_dataset = train_dataset\n self.train_name = train_dataset.name\n self.enzyme = train_dataset.enzyme\n train_val_x, y = train_dataset.get_xy()\n encoded_train_val_x = fe.encode_seqs(train_val_x)\n train_x, val_x, train_y, val_y = train_test_split(encoded_train_val_x, y, test_size=self.val_frac,\n random_state=self.random_state)\n model = self.base_arc(input_shape = (self.enzyme['context_length'],4))\n model.compile(optimizer='RMSprop',loss='mse',metrics=['mae'])\n self.model_history = model.fit(train_x, train_y, epochs = 200,\n validation_data = (val_x, val_y),\n callbacks = [k.callbacks.EarlyStopping(patience=20,restore_best_weights=True),\n k.callbacks.History()],\n verbose = 0)\n self.model = model\n return self\n\n def predict(self, test_dataset):\n \"\"\"Predict activity of test data\n\n :param test_dataset: testing data\n :type test_dataset: :class:`sgrna_modeler.datasets.ActivityData`\n :return: dataframe of predictions and other meta information\n :rtype: pandas dataframe\n \"\"\"\n x, y = test_dataset.get_xy()\n encoded_x = fe.encode_seqs(x)\n predictions = self.model.predict(encoded_x)\n out_data = pd.DataFrame({'kmer': x, 'y': y})\n if test_dataset.group_column:\n out_data['group'] = test_dataset.data[test_dataset.group_column]\n else:\n out_data['group'] = ''\n out_data['prediction'] = predictions\n out_data['model'] = self.base_name\n out_data['training_data'] = self.train_name\n out_data['test_data'] = test_dataset.name\n return out_data\n\n def predict_seqs(self, seqs):\n \"\"\" Predict from sequences\n\n :param seqs: sequences to predict\n :return: numeric vector of predcitions\n \"\"\"\n featurized_x = fe.encode_seqs(seqs)\n predictions = self.model.predict(featurized_x).flatten()\n return predictions\n\nclass SklearnSgrnaModel(object):\n \"\"\"scikit-learn gradient boosting for modeling sgRNA activity\n\n :param random_state: set random state in train/test split for reproducibility\n :type random_state: int\n :param val_frac: amount of data to use for early stopping\n :type val_frac: float\n :param model: base model\n :type model: sklearn GradientBoostingRegressor\n :param features: features to model\n :type features: list\n\n :Example:\n >>> from sgrna_modeler import datasets as da\n >>> from sgrna_modeler import models as sg\n >>> train_model = sg.SklearnSgrnaModel()\n >>> rs2_data = da.load_doench_2016()\n >>> train_model.fit(rs2_data)\n \"\"\"\n def __init__(self, random_state = 7, val_frac = 0.1, model = None, features = None):\n \"\"\"Constructor\n \"\"\"\n self.base_name = 'Sklearn_GB'\n self.val_frac = val_frac\n self.random_state = random_state\n if model is None:\n # Gradient boosted model\n self.model = ensemble.GradientBoostingRegressor(n_iter_no_change=20,\n validation_fraction = self.val_frac,\n random_state=self.random_state)\n else:\n self.model = model\n if features is None:\n # Default features for RuleSet2\n self.features = ['Pos. Ind. 1mer', 'Pos. Ind. 2mer', 'Pos. Dep. 1mer', 'Pos. Dep. 
2mer', 'GC content', 'Tm']\n else:\n self.features = features\n self.enzyme = None\n self.train_dataset = None\n self.train_name = None\n\n def load_model(self, model, enzyme, name):\n \"\"\"Load previously trained model\n\n :param enzyme: cas9 or cas12a\n :type enyme: dict\n :param model: filepath to trained model\n :type model: str (*.joblib)\n :param name: name of the model\n :type name:str\n \"\"\"\n self.enzyme = enzyme\n self.model = load(model)\n self.train_name = name\n return self\n\n def fit(self, train_dataset):\n \"\"\" Fit a model to the training data\n\n :param train_dataset: training data\n :type train_dataset: :class:`sgrna_modeler.datasets.ActivityData`\n :return: self\n \"\"\"\n self.train_name = train_dataset.name\n self.enzyme = train_dataset.enzyme\n train_val_x, y = train_dataset.get_xy()\n featurized_train_val_x = fe.featurize_guides(train_val_x, features=self.features,\n guide_start = self.enzyme['guide_start'],\n guide_length = self.enzyme['guide_length'])\n self.model.fit(featurized_train_val_x, y)\n return self\n\n def predict(self, test_dataset):\n \"\"\"Predict activity of test data\n\n :param test_dataset: testing data\n :type test_dataset: :class:`sgrna_modeler.datasets.ActivityData`\n :return: dataframe of predictions and other meta information\n :rtype: pandas dataframe\n \"\"\"\n x, y = test_dataset.get_xy()\n featurized_x = fe.featurize_guides(x, features=self.features,\n guide_start=test_dataset.enzyme['guide_start'],\n guide_length=test_dataset.enzyme['guide_length'])\n predictions = self.model.predict(featurized_x)\n out_data = pd.DataFrame({'kmer': x, 'y': y})\n if test_dataset.group_column:\n out_data['group'] = test_dataset.data[test_dataset.group_column]\n else:\n out_data['group'] = ''\n out_data['prediction'] = predictions\n out_data['model'] = self.base_name\n out_data['training_data'] = self.train_name\n out_data['test_data'] = test_dataset.name\n return out_data\n\n def predict_seqs(self, seqs):\n \"\"\" Predict from sequences\n\n :param seqs: sequences to predict\n :return: numeric vector of predcitions\n \"\"\"\n featurized_x = fe.featurize_guides(seqs, features=self.features,\n guide_start=self.enzyme['guide_start'],\n guide_length=self.enzyme['guide_length'])\n predictions = self.model.predict(featurized_x)\n return predictions\n\n"
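For a quick sense of the tensor shapes flowing through `build_kim2018`, the sketch below builds the CNN and scores a batch of eight 34-nt context sequences. It assumes TensorFlow and the package layout above are installed; the random one-hot encoding is a stand-in for `fe.encode_seqs`, used here only for illustration.

```python
import numpy as np
from sgrna_modeler.models import build_kim2018

model = build_kim2018(input_shape=(34, 4))

# Eight one-hot encoded 34-nt sequences over the ACGT alphabet.
rng = np.random.default_rng(0)
x = np.eye(4, dtype=np.float32)[rng.integers(0, 4, size=(8, 34))]

print(x.shape)                 # (8, 34, 4)
print(model.predict(x).shape)  # (8, 1): one predicted activity per guide
```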
] | [
[
"tensorflow.keras.layers.Flatten",
"tensorflow.keras.layers.Dropout",
"sklearn.ensemble.GradientBoostingRegressor",
"tensorflow.keras.callbacks.History",
"pandas.DataFrame",
"tensorflow.keras.layers.AveragePooling1D",
"tensorflow.keras.models.Model",
"tensorflow.keras.callbacks.EarlyStopping",
"tensorflow.keras.layers.Dense",
"tensorflow.keras.layers.Convolution1D",
"sklearn.model_selection.train_test_split",
"tensorflow.keras.layers.Input"
]
] |
kagemeka/competitive-programming | [
"c70fe481bcd518f507b885fc9234691d8ce63171",
"c70fe481bcd518f507b885fc9234691d8ce63171"
] | [
"src/atcoder/abc212/g/sol_8.py",
"src/atcoder/abc016/d/sol_1.py"
] | [
"import typing\nimport numpy as np\nimport numba as nb\n\n\n\n\[email protected]\ndef find_divisors(\n n: int,\n) -> np.array:\n i = np.arange(int(n ** .5))\n i += 1\n i = i[n % i == 0]\n i = np.hstack((i, n // i))\n return np.unique(i)\n\n\n\[email protected]\ndef gpf(\n n: int = 1 << 20,\n) -> np.array:\n s = np.arange(n)\n s[:2] = -1\n i = 0\n while i * i < n - 1:\n i += 1\n if s[i] == i: s[i::i] = i\n return s\n\n\[email protected]\ndef lpf(\n n: int = 1 << 20,\n) -> np.array:\n s = np.arange(n)\n s[:2] = -1\n i = 0\n while i * i < n - 1:\n i += 1\n if s[i] != i: continue\n j = np.arange(i, n, i)\n s[j][s[j] == j] = i\n return s\n\n\[email protected]\ndef sieve_of_eratosthenes(\n n: int = 1 << 20,\n) -> np.array:\n return gpf(n) == np.arange(n)\n\n\n\[email protected]\ndef prime_numbers(\n n: int = 1 << 20,\n) -> np.array:\n s = sieve_of_eratosthenes(n)\n return np.flatnonzero(s)\n\n\n\[email protected]\ndef euler_totient(\n n: int,\n prime_numbers: np.array,\n) -> int:\n c = n\n for p in prime_numbers:\n if p * p > n: break\n if n % p: continue\n c = c // p * (p - 1)\n while not n % p: n //= p\n if n > 1:\n c = c // n * (n - 1)\n return c\n\n\[email protected](\n (nb.i8, ),\n cache=True,\n)\ndef solve(\n p: int,\n) -> typing.NoReturn:\n n = p - 1\n divs = find_divisors(n) \n pn = prime_numbers(1 << 20)\n mod = 998244353\n c = 1\n for d in divs:\n e = euler_totient(d, pn)\n e %= mod\n d %= mod\n c += e * d % mod \n c %= mod\n print(c)\n\n\ndef main() -> typing.NoReturn:\n p = int(input())\n solve(p)\n\n\nmain()",
"import typing\nimport sys \nimport numpy as np \nimport numba as nb \n\n\[email protected] \ndef cross(x0: int, y0: int, x1: int, y1: int) -> int:\n return x0 * y1 - x1 * y0\n\n\n\[email protected]((nb.i8, ) * 4 + (nb.i8[:, :], ), cache=True)\ndef solve(\n x0: int,\n y0: int,\n x1: int,\n y1: int,\n xy: np.ndarray,\n) -> typing.NoReturn:\n n = len(xy)\n xy = np.vstack((xy, xy[:1]))\n x, y = xy[:, 0], xy[:, 1]\n x2, y2 = x[:-1], y[:-1]\n x3, y3 = x[1:], y[1:]\n p0 = cross(x1 - x0, y1 - y0, x2 - x0, y2 - y0) \n p0 *= cross(x1 - x0, y1 - y0, x3 - x0, y3 - y0)\n p1 = cross(x0 - x2, y0 - y2, x3 - x2, y3 - y2)\n p1 *= cross(x1 - x2, y1 - y2, x3 - x2, y3 - y2)\n cnt = 1 + np.count_nonzero((p0 < 0) & (p1 < 0)) // 2\n print(cnt)\n \n\n\ndef main() -> typing.NoReturn:\n x0, y0, x1, y1 = map(int, input().split())\n n = int(input())\n xy = np.array(\n sys.stdin.read().split(),\n dtype=np.int64,\n ).reshape(n, 2)\n solve(x0, y0, x1, y1, xy)\n\n\nmain()"
] | [
[
"numpy.arange",
"numpy.hstack",
"numpy.flatnonzero",
"numpy.unique"
],
[
"numpy.vstack",
"numpy.count_nonzero"
]
] |
tobiasmaier/pytorch-lightning | [
"7f352cb69a8202e3f829419657597697ca5d99e2"
] | [
"pytorch_lightning/core/lightning.py"
] | [
"# Copyright The PyTorch Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"nn.Module with additional great features.\"\"\"\n\nimport collections\nimport copy\nimport inspect\nimport os\nimport re\nimport tempfile\nfrom abc import ABC\nfrom argparse import Namespace\nfrom pathlib import Path\nfrom typing import Any, Callable, Dict, List, Mapping, Optional, Sequence, Tuple, Union\n\nimport torch\nfrom torch import ScriptModule, Tensor\nfrom torch.nn import Module\nfrom torch.optim.optimizer import Optimizer\n\nfrom pytorch_lightning import _logger as log\nfrom pytorch_lightning.core.grads import GradInformation\nfrom pytorch_lightning.core.hooks import CheckpointHooks, DataHooks, ModelHooks\nfrom pytorch_lightning.core.memory import ModelSummary\nfrom pytorch_lightning.core.optimizer import LightningOptimizer\nfrom pytorch_lightning.core.saving import ALLOWED_CONFIG_TYPES, ModelIO, PRIMITIVE_TYPES\nfrom pytorch_lightning.core.step_result import Result\nfrom pytorch_lightning.utilities import rank_zero_warn, TPU_AVAILABLE\nfrom pytorch_lightning.utilities.device_dtype_mixin import DeviceDtypeModuleMixin\nfrom pytorch_lightning.utilities.exceptions import MisconfigurationException\nfrom pytorch_lightning.utilities.parsing import AttributeDict, collect_init_args, get_init_args\n\nif TPU_AVAILABLE:\n import torch_xla.core.xla_model as xm\n\n\nclass LightningModule(\n ABC,\n DeviceDtypeModuleMixin,\n GradInformation,\n ModelIO,\n ModelHooks,\n DataHooks,\n CheckpointHooks,\n Module,\n):\n # Below is for property support of JIT in PyTorch 1.7\n # since none of them is important when using JIT, we are going to ignore them.\n __jit_unused_properties__ = [\n \"datamodule\",\n \"example_input_array\",\n \"hparams\",\n \"hparams_initial\",\n \"on_gpu\",\n \"current_epoch\",\n \"global_step\",\n ] + DeviceDtypeModuleMixin.__jit_unused_properties__\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n # see (https://github.com/pytorch/pytorch/blob/3e6bb5233f9ca2c5aa55d9cda22a7ee85439aa6e/\n # torch/nn/modules/module.py#L227)\n torch._C._log_api_usage_once(f\"lightning.module.{self.__class__.__name__}\")\n\n self.exp_save_path = None\n\n self.loaded_optimizer_states_dict = {}\n\n #: Pointer to the trainer object\n self.trainer = None\n\n #: Pointer to the logger object\n self.logger = None\n\n #: True if using dp\n self.use_dp = False\n\n #: True if using ddp\n self.use_ddp = False\n\n #: True if using ddp2\n self.use_ddp2 = False\n\n # True if on tpu\n self.use_tpu = False\n\n #: True if using amp\n self.use_amp = False\n\n #: The precision used\n self.precision = 32\n\n # optionally can be set by user\n self._example_input_array = None\n self._datamodule = None\n self._results: Optional[Result] = None\n self._current_fx_name = ''\n self._running_manual_backward = False\n self._current_hook_fx_name = None\n self._current_dataloader_idx = None\n self._automatic_optimization: bool = True\n\n def optimizers(self, use_pl_optimizer: bool = True) -> 
Union[Optimizer, List[Optimizer], List[LightningOptimizer]]:\n        if use_pl_optimizer:\n            opts = list(self.trainer.lightning_optimizers.values())\n        else:\n            opts = self.trainer.optimizers\n\n        # single optimizer\n        if isinstance(opts, list) and len(opts) == 1 and isinstance(opts[0], Optimizer):\n            return opts[0]\n        # multiple opts\n        return opts\n\n    @property\n    def example_input_array(self) -> Any:\n        return self._example_input_array\n\n    @property\n    def current_epoch(self) -> int:\n        \"\"\"The current epoch\"\"\"\n        return self.trainer.current_epoch if self.trainer else 0\n\n    @property\n    def global_step(self) -> int:\n        \"\"\"Total training batches seen across all epochs\"\"\"\n        return self.trainer.global_step if self.trainer else 0\n\n    @example_input_array.setter\n    def example_input_array(self, example: Any) -> None:\n        self._example_input_array = example\n\n    @property\n    def datamodule(self) -> Any:\n        return self._datamodule\n\n    @datamodule.setter\n    def datamodule(self, datamodule: Any) -> None:\n        self._datamodule = datamodule\n\n    @property\n    def on_gpu(self):\n        \"\"\"\n        True if your model is currently running on GPUs.\n        Useful to set flags around the LightningModule for different CPU vs GPU behavior.\n        \"\"\"\n        return self.device.type == \"cuda\"\n\n    @property\n    def automatic_optimization(self) -> bool:\n        \"\"\"\n        If False you are responsible for calling .backward, .step, zero_grad.\n        \"\"\"\n        return self._automatic_optimization\n\n    @automatic_optimization.setter\n    def automatic_optimization(self, automatic_optimization: bool) -> None:\n        self._automatic_optimization = automatic_optimization\n\n    def print(self, *args, **kwargs) -> None:\n        r\"\"\"\n        Prints only from process 0. Use this in any distributed mode to log only once.\n\n        Args:\n            *args: The thing to print. Will be passed to Python's built-in print function.\n            **kwargs: Will be passed to Python's built-in print function.\n\n        Example:\n\n            .. code-block:: python\n\n                def forward(self, x):\n                    self.print(x, 'in forward')\n\n        \"\"\"\n        if self.trainer.is_global_zero:\n            print(*args, **kwargs)\n\n    def log(\n        self,\n        name: str,\n        value: Any,\n        prog_bar: bool = False,\n        logger: bool = True,\n        on_step: Optional[bool] = None,\n        on_epoch: Optional[bool] = None,\n        reduce_fx: Callable = torch.mean,\n        tbptt_reduce_fx: Callable = torch.mean,\n        tbptt_pad_token: int = 0,\n        enable_graph: bool = False,\n        sync_dist: bool = False,\n        sync_dist_op: Union[Any, str] = 'mean',\n        sync_dist_group: Optional[Any] = None,\n    ):\n        \"\"\"\n        Log a key, value pair\n\n        Example::\n\n            self.log('train_loss', loss)\n\n        The default behavior per hook is as follows\n\n        .. csv-table:: ``*`` also applies to the test loop\n           :header: \"LightningModule Hook\", \"on_step\", \"on_epoch\", \"prog_bar\", \"logger\"\n           :widths: 20, 10, 10, 10, 10\n\n           \"training_step\", \"T\", \"F\", \"F\", \"T\"\n           \"training_step_end\", \"T\", \"F\", \"F\", \"T\"\n           \"training_epoch_end\", \"F\", \"T\", \"F\", \"T\"\n           \"validation_step*\", \"F\", \"T\", \"F\", \"T\"\n           \"validation_step_end*\", \"F\", \"T\", \"F\", \"T\"\n           \"validation_epoch_end*\", \"F\", \"T\", \"F\", \"T\"\n\n        Args:\n            name: key name\n            value: the value to log\n            prog_bar: if True logs to the progress bar\n            logger: if True logs to the logger\n            on_step: if True logs at this step. None auto-logs at the training_step but not validation/test_step\n            on_epoch: if True logs epoch accumulated metrics. None auto-logs at the val/test step but not training_step\n            reduce_fx: reduction function over step values for end of epoch. torch.mean by default\n            tbptt_reduce_fx: function to reduce on truncated back prop\n            tbptt_pad_token: token to use for padding\n            enable_graph: if True, will not auto detach the graph\n            sync_dist: if True, reduces the metric across GPUs/TPUs\n            sync_dist_op: the op to sync across GPUs/TPUs\n            sync_dist_group: the ddp group\n        \"\"\"\n        if self._results is not None:\n            # in any epoch end can't log step metrics (only epoch metric)\n            if 'epoch_end' in self._current_fx_name and on_step:\n                m = f'on_step=True cannot be used on {self._current_fx_name} method'\n                raise MisconfigurationException(m)\n\n            if 'epoch_end' in self._current_fx_name and on_epoch is False:\n                m = f'on_epoch cannot be False when called from the {self._current_fx_name} method'\n                raise MisconfigurationException(m)\n\n            # add log_dict\n            # TODO: if logged twice fail with crash\n\n            # set the default depending on the fx_name\n            on_step = self.__auto_choose_log_on_step(on_step)\n            on_epoch = self.__auto_choose_log_on_epoch(on_epoch)\n\n            if self._current_hook_fx_name is not None:\n                self.trainer.logger_connector.check_logging_in_callbacks(\n                    self._current_hook_fx_name,\n                    on_step=on_step,\n                    on_epoch=on_epoch\n                )\n\n            # make sure user doesn't introduce logic for multi-dataloaders\n            if \"/dataloader_idx_\" in name:\n                raise MisconfigurationException(\n                    f\"Logged key: {name} should not contain information about dataloader_idx.\")\n\n            accelerator = self.trainer.accelerator_backend\n\n            self._results.log(\n                name,\n                value,\n                prog_bar,\n                logger,\n                on_step,\n                on_epoch,\n                reduce_fx,\n                tbptt_reduce_fx,\n                tbptt_pad_token,\n                enable_graph,\n                sync_dist,\n                sync_dist_op,\n                sync_dist_group,\n                accelerator.sync_tensor,\n                self._current_dataloader_idx,\n                self.device,\n            )\n\n    def log_dict(\n        self,\n        dictionary: dict,\n        prog_bar: bool = False,\n        logger: bool = True,\n        on_step: Optional[bool] = None,\n        on_epoch: Optional[bool] = None,\n        reduce_fx: Callable = torch.mean,\n        tbptt_reduce_fx: Callable = torch.mean,\n        tbptt_pad_token: int = 0,\n        enable_graph: bool = False,\n        sync_dist: bool = False,\n        sync_dist_op: Union[Any, str] = 'mean',\n        sync_dist_group: Optional[Any] = None,\n    ):\n        \"\"\"\n        Log a dictionary of values at once\n\n        Example::\n\n            values = {'loss': loss, 'acc': acc, ..., 'metric_n': metric_n}\n            self.log_dict(values)\n\n        Args:\n            dictionary: key value pairs (str, tensors)\n            prog_bar: if True logs to the progress bar\n            logger: if True logs to the logger\n            on_step: if True logs at this step. None auto-logs for training_step but not validation/test_step\n            on_epoch: if True logs epoch accumulated metrics. None auto-logs for val/test step but not training_step\n            reduce_fx: reduction function over step values for end of epoch. torch.mean by default\n            tbptt_reduce_fx: function to reduce on truncated back prop\n            tbptt_pad_token: token to use for padding\n            enable_graph: if True, will not auto detach the graph\n            sync_dist: if True, reduces the metric across GPUs/TPUs\n            sync_dist_op: the op to sync across GPUs/TPUs\n            sync_dist_group: the ddp group\n        \"\"\"\n        for k, v in dictionary.items():\n            self.log(\n                name=k,\n                value=v,\n                prog_bar=prog_bar,\n                logger=logger,\n                on_step=on_step,\n                on_epoch=on_epoch,\n                reduce_fx=reduce_fx,\n                enable_graph=enable_graph,\n                sync_dist=sync_dist,\n                sync_dist_group=sync_dist_group,\n                sync_dist_op=sync_dist_op,\n                tbptt_pad_token=tbptt_pad_token,\n                tbptt_reduce_fx=tbptt_reduce_fx,\n            )\n\n    def write_prediction(self, name, value, filename='predictions.pt'):\n        self.trainer.evaluation_loop.predictions._add_prediction(name, value, filename)\n\n    def write_prediction_dict(self, predictions_dict, filename='predictions.pt'):\n        for k, v in predictions_dict.items():\n            self.write_prediction(k, v, filename)\n\n    def __auto_choose_log_on_step(self, on_step):\n        if on_step is None:\n            if self._current_fx_name in {'training_step', 'training_step_end'}:\n                on_step = True\n            elif self._current_fx_name in {'evaluation_step', 'evaluation_step_end',\n                                           'evaluation_epoch_end', 'training_epoch_end'}:\n                on_step = False\n            else:\n                on_step = False\n\n        return on_step\n\n    def __auto_choose_log_on_epoch(self, on_epoch):\n        if on_epoch is None:\n            if self._current_fx_name in {'training_step', 'training_step_end'}:\n                on_epoch = False\n            elif self._current_fx_name in {'evaluation_step', 'evaluation_step_end',\n                                           'evaluation_epoch_end', 'training_epoch_end'}:\n                on_epoch = True\n            else:\n                on_epoch = True\n\n        return on_epoch\n\n    def all_gather(self, tensor: torch.Tensor, group: Optional[Any] = None, sync_grads: bool = False):\n        r\"\"\"\n        Allows users to call ``self.all_gather()`` from the LightningModule, thus making\n        the ``all_gather`` operation accelerator agnostic.\n\n        ``all_gather`` is a function provided by accelerators to gather a tensor from several\n        distributed processes\n\n        Args:\n            tensor: tensor of shape (batch, ...)\n            group: the process group to gather results from. Defaults to all processes (world)\n            sync_grads: flag that allows users to synchronize gradients for all_gather op\n\n        Return:\n            A tensor of shape (world_size, batch, ...)\n        \"\"\"\n        return self.trainer.accelerator_backend.all_gather(tensor, group=group, sync_grads=sync_grads)\n\n    def forward(self, *args, **kwargs):\n        r\"\"\"\n        Same as :meth:`torch.nn.Module.forward()`, however in Lightning you want this to define\n        the operations you want to use for prediction (i.e.: on a server or as a feature extractor).\n\n        Normally you'd call ``self()`` from your :meth:`training_step` method.\n        This makes it easy to write a complex system for training with the outputs\n        you'd want in a prediction setting.\n\n        You may also find the :func:`~pytorch_lightning.core.decorators.auto_move_data` decorator useful\n        when using the module outside Lightning in a production setting.\n\n        Args:\n            *args: Whatever you decide to pass into the forward method.\n            **kwargs: Keyword arguments are also possible.\n\n        Return:\n            Predicted output\n\n        Examples:\n            .. code-block:: python\n\n                # example if we were using this model as a feature extractor\n                def forward(self, x):\n                    feature_maps = self.convnet(x)\n                    return feature_maps\n\n                def training_step(self, batch, batch_idx):\n                    x, y = batch\n                    feature_maps = self(x)\n                    logits = self.classifier(feature_maps)\n\n                    # ...\n                    return loss\n\n                # splitting it this way allows the model to be used as a feature extractor\n                model = MyModelAbove()\n\n                inputs = server.get_request()\n                results = model(inputs)\n                server.write_results(results)\n\n                # -------------\n                # This is in stark contrast to torch.nn.Module where normally you would have this:\n                def forward(self, batch):\n                    x, y = batch\n                    feature_maps = self.convnet(x)\n                    logits = self.classifier(feature_maps)\n                    return logits\n\n        \"\"\"\n        return super().forward(*args, **kwargs)\n\n    def training_step(self, *args, **kwargs):\n        r\"\"\"\n        Here you compute and return the training loss and some additional metrics for e.g.\n        the progress bar or logger.\n\n        Args:\n            batch (:class:`~torch.Tensor` | (:class:`~torch.Tensor`, ...) | [:class:`~torch.Tensor`, ...]):\n                The output of your :class:`~torch.utils.data.DataLoader`. A tensor, tuple or list.\n            batch_idx (int): Integer displaying index of this batch\n            optimizer_idx (int): When using multiple optimizers, this argument will also be present.\n            hiddens(:class:`~torch.Tensor`): Passed in if\n                :paramref:`~pytorch_lightning.trainer.trainer.Trainer.truncated_bptt_steps` > 0.\n\n        Return:\n            Any of.\n\n            - :class:`~torch.Tensor` - The loss tensor\n            - `dict` - A dictionary. Can include any keys, but must include the key 'loss'\n            - `None` - Training will skip to the next batch\n\n        In this step you'd normally do the forward pass and calculate the loss for a batch.\n        You can also do fancier things like multiple forward passes or something model specific.\n\n        Example::\n\n            def training_step(self, batch, batch_idx):\n                x, y, z = batch\n                out = self.encoder(x)\n                loss = self.loss(out, x)\n                return loss\n\n        If you define multiple optimizers, this step will be called with an additional\n        ``optimizer_idx`` parameter.\n\n        .. code-block:: python\n\n            # Multiple optimizers (e.g.: GANs)\n            def training_step(self, batch, batch_idx, optimizer_idx):\n                if optimizer_idx == 0:\n                    # do training_step with encoder\n                if optimizer_idx == 1:\n                    # do training_step with decoder\n\n\n        If you add truncated back propagation through time you will also get an additional\n        argument with the hidden states of the previous step.\n\n        .. code-block:: python\n\n            # Truncated back-propagation through time\n            def training_step(self, batch, batch_idx, hiddens):\n                # hiddens are the hidden states from the previous truncated backprop step\n                ...\n                out, hiddens = self.lstm(data, hiddens)\n                ...\n                return {'loss': loss, 'hiddens': hiddens}\n\n        Note:\n            The loss value shown in the progress bar is smoothed (averaged) over the last values,\n            so it differs from the actual loss returned in train/validation step.\n        \"\"\"\n        rank_zero_warn(\n            \"`training_step` must be implemented to be used with the Lightning Trainer\"\n        )\n\n    def training_step_end(self, *args, **kwargs):\n        \"\"\"\n        Use this when training with dp or ddp2 because :meth:`training_step`\n        will operate on only part of the batch. However, this is still optional\n        and only needed for things like softmax or NCE loss.\n\n        Note:\n            If you later switch to ddp or some other mode, this will still be called\n            so that you don't have to change your code.\n\n        .. code-block:: python\n\n            # pseudocode\n            sub_batches = split_batches_for_dp(batch)\n            batch_parts_outputs = [training_step(sub_batch) for sub_batch in sub_batches]\n            training_step_end(batch_parts_outputs)\n\n        Args:\n            batch_parts_outputs: What you return in `training_step` for each batch part.\n\n        Return:\n            Anything\n\n        When using dp/ddp2 distributed backends, only a portion of the batch is inside the training_step:\n\n        .. code-block:: python\n\n            def training_step(self, batch, batch_idx):\n                # batch is 1/num_gpus big\n                x, y = batch\n\n                out = self(x)\n\n                # softmax uses only a portion of the batch in the denominator\n                loss = self.softmax(out)\n                loss = nce_loss(loss)\n                return loss\n\n        If you wish to do something with all the parts of the batch, then use this method to do it:\n\n        .. code-block:: python\n\n            def training_step(self, batch, batch_idx):\n                # batch is 1/num_gpus big\n                x, y = batch\n\n                out = self.encoder(x)\n                return {'pred': out}\n\n            def training_step_end(self, training_step_outputs):\n                gpu_0_pred = training_step_outputs[0]['pred']\n                gpu_1_pred = training_step_outputs[1]['pred']\n                gpu_n_pred = training_step_outputs[n]['pred']\n\n                # this softmax now uses the full batch\n                loss = nce_loss([gpu_0_pred, gpu_1_pred, gpu_n_pred])\n                return loss\n\n        See Also:\n            See the :ref:`multi_gpu` guide for more details.\n        \"\"\"\n\n    def training_epoch_end(self, outputs: List[Any]) -> None:\n        \"\"\"\n        Called at the end of the training epoch with the outputs of all training steps.\n        Use this in case you need to do something with all the outputs for every training_step.\n\n        .. code-block:: python\n\n            # the pseudocode for these calls\n            train_outs = []\n            for train_batch in train_data:\n                out = training_step(train_batch)\n                train_outs.append(out)\n            training_epoch_end(train_outs)\n\n        Args:\n            outputs: List of outputs you defined in :meth:`training_step`, or if there are\n                multiple dataloaders, a list containing a list of outputs for each dataloader.\n\n        Return:\n            None\n\n        Note:\n            If this method is not overridden, this won't be called.\n\n        Example::\n\n            def training_epoch_end(self, training_step_outputs):\n                # do something with all training_step outputs\n                return result\n\n        With multiple dataloaders, ``outputs`` will be a list of lists. The outer list contains\n        one entry per dataloader, while the inner list contains the individual outputs of\n        each training step for that dataloader.\n\n        .. code-block:: python\n\n            def training_epoch_end(self, training_step_outputs):\n                for out in training_step_outputs:\n                    # do something here\n        \"\"\"\n\n    def validation_step(self, *args, **kwargs):\n        r\"\"\"\n        Operates on a single batch of data from the validation set.\n        In this step you might generate examples or calculate anything of interest like accuracy.\n\n        .. code-block:: python\n\n            # the pseudocode for these calls\n            val_outs = []\n            for val_batch in val_data:\n                out = validation_step(val_batch)\n                val_outs.append(out)\n            validation_epoch_end(val_outs)\n\n        Args:\n            batch (:class:`~torch.Tensor` | (:class:`~torch.Tensor`, ...) | [:class:`~torch.Tensor`, ...]):\n                The output of your :class:`~torch.utils.data.DataLoader`. A tensor, tuple or list.\n            batch_idx (int): The index of this batch\n            dataloader_idx (int): The index of the dataloader that produced this batch\n                (only if multiple val dataloaders used)\n\n        Return:\n            Any of.\n\n            - Any object or value\n            - `None` - Validation will skip to the next batch\n\n        .. 
code-block:: python\n\n # pseudocode of order\n out = validation_step()\n if defined('validation_step_end'):\n out = validation_step_end(out)\n out = validation_epoch_end(out)\n\n\n .. code-block:: python\n\n # if you have one val dataloader:\n def validation_step(self, batch, batch_idx)\n\n # if you have multiple val dataloaders:\n def validation_step(self, batch, batch_idx, dataloader_idx)\n\n Examples:\n .. code-block:: python\n\n # CASE 1: A single validation dataset\n def validation_step(self, batch, batch_idx):\n x, y = batch\n\n # implement your own\n out = self(x)\n loss = self.loss(out, y)\n\n # log 6 example images\n # or generated text... or whatever\n sample_imgs = x[:6]\n grid = torchvision.utils.make_grid(sample_imgs)\n self.logger.experiment.add_image('example_images', grid, 0)\n\n # calculate acc\n labels_hat = torch.argmax(out, dim=1)\n val_acc = torch.sum(y == labels_hat).item() / (len(y) * 1.0)\n\n # log the outputs!\n self.log_dict({'val_loss': loss, 'val_acc': val_acc})\n\n If you pass in multiple val dataloaders, :meth:`validation_step` will have an additional argument.\n\n .. code-block:: python\n\n # CASE 2: multiple validation dataloaders\n def validation_step(self, batch, batch_idx, dataloader_idx):\n # dataloader_idx tells you which dataset this is.\n\n Note:\n If you don't need to validate you don't need to implement this method.\n\n Note:\n When the :meth:`validation_step` is called, the model has been put in eval mode\n and PyTorch gradients have been disabled. At the end of validation,\n the model goes back to training mode and gradients are enabled.\n \"\"\"\n\n def validation_step_end(self, *args, **kwargs):\n \"\"\"\n Use this when validating with dp or ddp2 because :meth:`validation_step`\n will operate on only part of the batch. However, this is still optional\n and only needed for things like softmax or NCE loss.\n\n Note:\n If you later switch to ddp or some other mode, this will still be called\n so that you don't have to change your code.\n\n .. code-block:: python\n\n # pseudocode\n sub_batches = split_batches_for_dp(batch)\n batch_parts_outputs = [validation_step(sub_batch) for sub_batch in sub_batches]\n validation_step_end(batch_parts_outputs)\n\n Args:\n batch_parts_outputs: What you return in :meth:`validation_step`\n for each batch part.\n\n Return:\n None or anything\n\n .. code-block:: python\n\n # WITHOUT validation_step_end\n # if used in DP or DDP2, this batch is 1/num_gpus large\n def validation_step(self, batch, batch_idx):\n # batch is 1/num_gpus big\n x, y = batch\n\n out = self.encoder(x)\n loss = self.softmax(out)\n loss = nce_loss(loss)\n self.log('val_loss', loss)\n\n # --------------\n # with validation_step_end to do softmax over the full batch\n def validation_step(self, batch, batch_idx):\n # batch is 1/num_gpus big\n x, y = batch\n\n out = self(x)\n return out\n\n def validation_step_end(self, val_step_outputs):\n for out in val_step_outputs:\n # do something with these\n\n See Also:\n See the :ref:`multi_gpu` guide for more details.\n \"\"\"\n\n def validation_epoch_end(self, outputs: List[Any]) -> None:\n \"\"\"\n Called at the end of the validation epoch with the outputs of all validation steps.\n\n .. 
code-block:: python\n\n # the pseudocode for these calls\n val_outs = []\n for val_batch in val_data:\n out = validation_step(val_batch)\n val_outs.append(out)\n validation_epoch_end(val_outs)\n\n Args:\n outputs: List of outputs you defined in :meth:`validation_step`, or if there\n are multiple dataloaders, a list containing a list of outputs for each dataloader.\n\n Return:\n None\n\n Note:\n If you didn't define a :meth:`validation_step`, this won't be called.\n\n Examples:\n With a single dataloader:\n\n .. code-block:: python\n\n def validation_epoch_end(self, val_step_outputs):\n for out in val_step_outputs:\n # do something\n\n With multiple dataloaders, `outputs` will be a list of lists. The outer list contains\n one entry per dataloader, while the inner list contains the individual outputs of\n each validation step for that dataloader.\n\n .. code-block:: python\n\n def validation_epoch_end(self, outputs):\n for dataloader_output_result in outputs:\n dataloader_outs = dataloader_output_result.dataloader_i_outputs\n\n self.log('final_metric', final_value)\n \"\"\"\n\n def test_step(self, *args, **kwargs):\n r\"\"\"\n Operates on a single batch of data from the test set.\n In this step you'd normally generate examples or calculate anything of interest\n such as accuracy.\n\n .. code-block:: python\n\n # the pseudocode for these calls\n test_outs = []\n for test_batch in test_data:\n out = test_step(test_batch)\n test_outs.append(out)\n test_epoch_end(test_outs)\n\n Args:\n batch (:class:`~torch.Tensor` | (:class:`~torch.Tensor`, ...) | [:class:`~torch.Tensor`, ...]):\n The output of your :class:`~torch.utils.data.DataLoader`. A tensor, tuple or list.\n batch_idx (int): The index of this batch.\n dataloader_idx (int): The index of the dataloader that produced this batch\n (only if multiple test dataloaders used).\n\n Return:\n Any of.\n\n - Any object or value\n - `None` - Testing will skip to the next batch\n\n .. code-block:: python\n\n # if you have one test dataloader:\n def test_step(self, batch, batch_idx)\n\n # if you have multiple test dataloaders:\n def test_step(self, batch, batch_idx, dataloader_idx)\n\n Examples:\n .. code-block:: python\n\n # CASE 1: A single test dataset\n def test_step(self, batch, batch_idx):\n x, y = batch\n\n # implement your own\n out = self(x)\n loss = self.loss(out, y)\n\n # log 6 example images\n # or generated text... or whatever\n sample_imgs = x[:6]\n grid = torchvision.utils.make_grid(sample_imgs)\n self.logger.experiment.add_image('example_images', grid, 0)\n\n # calculate acc\n labels_hat = torch.argmax(out, dim=1)\n test_acc = torch.sum(y == labels_hat).item() / (len(y) * 1.0)\n\n # log the outputs!\n self.log_dict({'test_loss': loss, 'test_acc': test_acc})\n\n If you pass in multiple test dataloaders, :meth:`test_step` will have an additional\n argument.\n\n .. code-block:: python\n\n # CASE 2: multiple test dataloaders\n def test_step(self, batch, batch_idx, dataloader_idx):\n # dataloader_idx tells you which dataset this is.\n\n Note:\n If you don't need to test you don't need to implement this method.\n\n Note:\n When the :meth:`test_step` is called, the model has been put in eval mode and\n PyTorch gradients have been disabled. At the end of the test epoch, the model goes back\n to training mode and gradients are enabled.\n \"\"\"\n\n def test_step_end(self, *args, **kwargs):\n \"\"\"\n Use this when testing with dp or ddp2 because :meth:`test_step` will operate\n on only part of the batch. 
However, this is still optional\n and only needed for things like softmax or NCE loss.\n\n Note:\n If you later switch to ddp or some other mode, this will still be called\n so that you don't have to change your code.\n\n .. code-block:: python\n\n # pseudocode\n sub_batches = split_batches_for_dp(batch)\n batch_parts_outputs = [test_step(sub_batch) for sub_batch in sub_batches]\n test_step_end(batch_parts_outputs)\n\n Args:\n batch_parts_outputs: What you return in :meth:`test_step` for each batch part.\n\n Return:\n None or anything\n\n .. code-block:: python\n\n # WITHOUT test_step_end\n # if used in DP or DDP2, this batch is 1/num_gpus large\n def test_step(self, batch, batch_idx):\n # batch is 1/num_gpus big\n x, y = batch\n\n out = self(x)\n loss = self.softmax(out)\n self.log('test_loss', loss)\n\n # --------------\n # with test_step_end to do softmax over the full batch\n def test_step(self, batch, batch_idx):\n # batch is 1/num_gpus big\n x, y = batch\n\n out = self.encoder(x)\n return out\n\n def test_step_end(self, output_results):\n # this out is now the full size of the batch\n all_test_step_outs = output_results.out\n loss = nce_loss(all_test_step_outs)\n self.log('test_loss', loss)\n\n See Also:\n See the :ref:`multi_gpu` guide for more details.\n \"\"\"\n\n def test_epoch_end(\n self, outputs: List[Any]\n ) -> None:\n \"\"\"\n Called at the end of a test epoch with the output of all test steps.\n\n .. code-block:: python\n\n # the pseudocode for these calls\n test_outs = []\n for test_batch in test_data:\n out = test_step(test_batch)\n test_outs.append(out)\n test_epoch_end(test_outs)\n\n Args:\n outputs: List of outputs you defined in :meth:`test_step_end`, or if there\n are multiple dataloaders, a list containing a list of outputs for each dataloader\n\n Return:\n None\n\n Note:\n If you didn't define a :meth:`test_step`, this won't be called.\n\n Examples:\n With a single dataloader:\n\n .. code-block:: python\n\n def test_epoch_end(self, outputs):\n # do something with the outputs of all test batches\n all_test_preds = test_step_outputs.predictions\n\n some_result = calc_all_results(all_test_preds)\n self.log(some_result)\n\n With multiple dataloaders, `outputs` will be a list of lists. The outer list contains\n one entry per dataloader, while the inner list contains the individual outputs of\n each test step for that dataloader.\n\n .. code-block:: python\n\n def test_epoch_end(self, outputs):\n final_value = 0\n for dataloader_outputs in outputs:\n for test_step_out in dataloader_outputs:\n # do something\n final_value += test_step_out\n\n self.log('final_metric', final_value)\n \"\"\"\n\n def configure_optimizers(\n self,\n ):\n r\"\"\"\n Choose what optimizers and learning-rate schedulers to use in your optimization.\n Normally you'd need one. But in the case of GANs or similar you might have multiple.\n\n Return:\n Any of these 6 options.\n\n - Single optimizer.\n - List or Tuple - List of optimizers.\n - Two lists - The first list has multiple optimizers, the second a list of LR schedulers (or lr_dict).\n - Dictionary, with an 'optimizer' key, and (optionally) a 'lr_scheduler'\n key whose value is a single LR scheduler or lr_dict.\n - Tuple of dictionaries as described, with an optional 'frequency' key.\n - None - Fit will run without any optimizer.\n\n Note:\n The 'frequency' value is an int corresponding to the number of sequential batches\n optimized with the specific optimizer. 
It should be given to none or to all of the optimizers.\n There is a difference between passing multiple optimizers in a list,\n and passing multiple optimizers in dictionaries with a frequency of 1:\n In the former case, all optimizers will operate on the given batch in each optimization step.\n In the latter, only one optimizer will operate on the given batch at every step.\n\n The lr_dict is a dictionary which contains the scheduler and its associated configuration.\n The default configuration is shown below.\n\n .. code-block:: python\n\n {\n 'scheduler': lr_scheduler, # The LR scheduler instance (required)\n 'interval': 'epoch', # The unit of the scheduler's step size\n 'frequency': 1, # The frequency of the scheduler\n 'reduce_on_plateau': False, # For ReduceLROnPlateau scheduler\n 'monitor': 'val_loss', # Metric for ReduceLROnPlateau to monitor\n 'strict': True, # Whether to crash the training if `monitor` is not found\n 'name': None, # Custom name for LearningRateMonitor to use\n }\n\n Only the ``scheduler`` key is required, the rest will be set to the defaults above.\n\n Examples:\n .. code-block:: python\n\n # most cases\n def configure_optimizers(self):\n opt = Adam(self.parameters(), lr=1e-3)\n return opt\n\n # multiple optimizer case (e.g.: GAN)\n def configure_optimizers(self):\n generator_opt = Adam(self.model_gen.parameters(), lr=0.01)\n discriminator_opt = Adam(self.model_disc.parameters(), lr=0.02)\n return generator_opt, discriminator_opt\n\n # example with learning rate schedulers\n def configure_optimizers(self):\n generator_opt = Adam(self.model_gen.parameters(), lr=0.01)\n discriminator_opt = Adam(self.model_disc.parameters(), lr=0.02)\n discriminator_sched = CosineAnnealing(discriminator_opt, T_max=10)\n return [generator_opt, discriminator_opt], [discriminator_sched]\n\n # example with step-based learning rate schedulers\n def configure_optimizers(self):\n gen_opt = Adam(self.model_gen.parameters(), lr=0.01)\n dis_opt = Adam(self.model_disc.parameters(), lr=0.02)\n gen_sched = {'scheduler': ExponentialLR(gen_opt, 0.99),\n 'interval': 'step'} # called after each training step\n dis_sched = CosineAnnealing(dis_opt, T_max=10) # called every epoch\n return [gen_opt, dis_opt], [gen_sched, dis_sched]\n\n # example with optimizer frequencies\n # see training procedure in `Improved Training of Wasserstein GANs`, Algorithm 1\n # https://arxiv.org/abs/1704.00028\n def configure_optimizers(self):\n gen_opt = Adam(self.model_gen.parameters(), lr=0.01)\n dis_opt = Adam(self.model_disc.parameters(), lr=0.02)\n n_critic = 5\n return (\n {'optimizer': dis_opt, 'frequency': n_critic},\n {'optimizer': gen_opt, 'frequency': 1}\n )\n\n Note:\n\n Some things to know:\n\n - Lightning calls ``.backward()`` and ``.step()`` on each optimizer\n and learning rate scheduler as needed.\n\n - If you use 16-bit precision (``precision=16``), Lightning will automatically\n handle the optimizers for you.\n\n - If you use multiple optimizers, :meth:`training_step` will have an additional\n ``optimizer_idx`` parameter.\n\n - If you use LBFGS, Lightning handles the closure function automatically for you.\n\n - If you use multiple optimizers, gradients will be calculated only\n for the parameters of the current optimizer at each training step.\n\n - If you need to control how often those optimizers step or override the\n default ``.step()`` schedule, override the :meth:`optimizer_step` hook.\n\n - If you only want to call a learning rate scheduler every ``x`` step or epoch,\n or want to monitor a 
custom metric, you can specify these in a lr_dict:\n\n .. code-block:: python\n\n {\n 'scheduler': lr_scheduler,\n 'interval': 'step', # or 'epoch'\n 'monitor': 'val_f1',\n 'frequency': x,\n }\n\n \"\"\"\n rank_zero_warn(\n \"`configure_optimizers` must be implemented to be used with the Lightning Trainer\"\n )\n\n def manual_backward(self, loss: Tensor, optimizer: Optimizer, *args, **kwargs) -> None:\n \"\"\"\n Call this directly from your training_step when doing optimizations manually.\n By using this, we can ensure that all the proper scaling when using 16-bit etc. has been done for you.\n\n This function forwards all args to the .backward() call as well.\n\n .. tip:: In manual mode we still automatically clip grads if Trainer(gradient_clip_val=x) is set\n\n .. tip:: In manual mode we still automatically accumulate grad over batches if\n Trainer(accumulate_grad_batches=x) is set and you use `optimizer.step()`\n\n Example::\n\n def training_step(...):\n (opt_a, opt_b) = self.optimizers()\n loss = ...\n # automatically applies scaling, etc...\n self.manual_backward(loss, opt_a)\n opt_a.step()\n \"\"\"\n # make sure we're using manual opt\n self._verify_is_manual_optimization('manual_backward')\n\n # backward\n self._running_manual_backward = True\n self.trainer.train_loop.backward(loss, optimizer, -1, *args, **kwargs)\n self._running_manual_backward = False\n\n def backward(self, loss: Tensor, optimizer: Optimizer, optimizer_idx: int, *args, **kwargs) -> None:\n \"\"\"\n Override backward with your own implementation if you need to.\n\n Args:\n loss: Loss is already scaled by accumulated grads\n optimizer: Current optimizer being used\n optimizer_idx: Index of the current optimizer being used\n\n Called to perform backward step.\n Feel free to override as needed.\n The loss passed in has already been scaled for accumulated gradients if requested.\n\n Example::\n\n def backward(self, loss, optimizer, optimizer_idx):\n loss.backward()\n\n \"\"\"\n if self.trainer.train_loop.automatic_optimization or self._running_manual_backward:\n loss.backward(*args, **kwargs)\n\n def toggle_optimizer(self, optimizer: Optimizer, optimizer_idx: int):\n \"\"\"\n Makes sure only the gradients of the current optimizer's parameters are calculated\n in the training step to prevent dangling gradients in a multiple-optimizer setup.\n\n .. note:: Only called when using multiple optimizers\n\n Override for your own behavior\n\n Args:\n optimizer:\n optimizer_idx:\n \"\"\"\n for param in self.parameters():\n param.requires_grad = False\n\n for group in optimizer.param_groups:\n for param in group['params']:\n param.requires_grad = True\n\n def optimizer_step(\n self,\n epoch: int = None,\n batch_idx: int = None,\n optimizer: Optimizer = None,\n optimizer_idx: int = None,\n optimizer_closure: Optional[Callable] = None,\n on_tpu: bool = None,\n using_native_amp: bool = None,\n using_lbfgs: bool = None,\n ) -> None:\n r\"\"\"\n Override this method to adjust the default way the\n :class:`~pytorch_lightning.trainer.trainer.Trainer` calls each optimizer.\n By default, Lightning calls ``step()`` and ``zero_grad()`` as shown in the example\n once per optimizer.\n\n .. tip:: With `Trainer(enable_pl_optimizer=True)`, you can use `optimizer.step()` directly and it will handle zero_grad, accumulated gradients, AMP, TPU and more automatically for you.\n\n Warning:\n If you are overriding this method, make sure that you pass the ``optimizer_closure`` parameter\n to ``optimizer.step()`` function as shown in the examples. 
This ensures that\n ``train_step_and_backward_closure`` is called within\n :meth:`~pytorch_lightning.trainer.training_loop.TrainLoop.run_training_batch`.\n\n Args:\n epoch: Current epoch\n batch_idx: Index of current batch\n optimizer: A PyTorch optimizer\n optimizer_idx: If you used multiple optimizers this indexes into that list.\n optimizer_closure: closure for all optimizers\n on_tpu: true if TPU backward is required\n using_native_amp: True if using native amp\n using_lbfgs: True if the matching optimizer is lbfgs\n\n Examples:\n .. code-block:: python\n\n # DEFAULT\n def optimizer_step(self, epoch, batch_idx, optimizer, optimizer_idx,\n optimizer_closure, on_tpu, using_native_amp, using_lbfgs):\n optimizer.step(closure=optimizer_closure)\n\n # Alternating schedule for optimizer steps (i.e.: GANs)\n def optimizer_step(self, epoch, batch_idx, optimizer, optimizer_idx,\n optimizer_closure, on_tpu, using_native_amp, using_lbfgs):\n # update generator opt every 2 steps\n if optimizer_idx == 0:\n if batch_idx % 2 == 0 :\n optimizer.step(closure=optimizer_closure)\n optimizer.zero_grad()\n\n # update discriminator opt every 4 steps\n if optimizer_idx == 1:\n if batch_idx % 4 == 0 :\n optimizer.step(closure=optimizer_closure)\n optimizer.zero_grad()\n\n # ...\n # add as many optimizers as you want\n\n\n Here's another example showing how to use this for more advanced things such as\n learning rate warm-up:\n\n .. code-block:: python\n\n # learning rate warm-up\n def optimizer_step(self, epoch, batch_idx, optimizer, optimizer_idx,\n optimizer_closure, on_tpu, using_native_amp, using_lbfgs):\n # warm up lr\n if self.trainer.global_step < 500:\n lr_scale = min(1., float(self.trainer.global_step + 1) / 500.)\n for pg in optimizer.param_groups:\n pg['lr'] = lr_scale * self.learning_rate\n\n # update params\n optimizer.step(closure=optimizer_closure)\n optimizer.zero_grad()\n\n \"\"\"\n optimizer.step(closure=optimizer_closure)\n\n def optimizer_zero_grad(\n self, epoch: int, batch_idx: int, optimizer: Optimizer, optimizer_idx: int\n ):\n optimizer.zero_grad()\n\n def tbptt_split_batch(self, batch: Tensor, split_size: int) -> list:\n r\"\"\"\n When using truncated backpropagation through time, each batch must be split along the\n time dimension. Lightning handles this by default, but for custom behavior override\n this function.\n\n Args:\n batch: Current batch\n split_size: The size of the split\n\n Return:\n List of batch splits. Each split will be passed to :meth:`training_step` to enable truncated\n back propagation through time. The default implementation splits root level Tensors and\n Sequences at dim=1 (i.e. time dim). It assumes that each time dim is the same length.\n\n Examples:\n .. 
code-block:: python\n\n def tbptt_split_batch(self, batch, split_size):\n splits = []\n for t in range(0, time_dims[0], split_size):\n batch_split = []\n for i, x in enumerate(batch):\n if isinstance(x, torch.Tensor):\n split_x = x[:, t:t + split_size]\n elif isinstance(x, collections.Sequence):\n split_x = [None] * len(x)\n for batch_idx in range(len(x)):\n split_x[batch_idx] = x[batch_idx][t:t + split_size]\n\n batch_split.append(split_x)\n\n splits.append(batch_split)\n\n return splits\n\n Note:\n Called in the training loop after\n :meth:`~pytorch_lightning.callbacks.base.Callback.on_batch_start`\n if :paramref:`~pytorch_lightning.trainer.Trainer.truncated_bptt_steps` > 0.\n Each returned batch split is passed separately to :meth:`training_step`.\n\n \"\"\"\n time_dims = [\n len(x[0])\n for x in batch\n if isinstance(x, (torch.Tensor, collections.Sequence))\n ]\n assert len(time_dims) >= 1, \"Unable to determine batch time dimension\"\n assert all(\n x == time_dims[0] for x in time_dims\n ), \"Batch time dimension length is ambiguous\"\n\n splits = []\n for t in range(0, time_dims[0], split_size):\n batch_split = []\n for i, x in enumerate(batch):\n if isinstance(x, torch.Tensor):\n split_x = x[:, t: t + split_size]\n elif isinstance(x, collections.Sequence):\n split_x = [None] * len(x)\n for batch_idx in range(len(x)):\n split_x[batch_idx] = x[batch_idx][t: t + split_size]\n\n batch_split.append(split_x)\n\n splits.append(batch_split)\n\n return splits\n\n def summarize(self, mode: Optional[str] = ModelSummary.MODE_DEFAULT) -> Optional[ModelSummary]:\n model_summary = None\n\n if mode in ModelSummary.MODES:\n model_summary = ModelSummary(self, mode=mode)\n log.info(\"\\n\" + str(model_summary))\n elif mode is not None:\n raise MisconfigurationException(\n f\"`mode` can be None, {', '.join(ModelSummary.MODES)}, got {mode}\"\n )\n\n return model_summary\n\n def freeze(self) -> None:\n r\"\"\"\n Freeze all params for inference.\n\n Example:\n .. code-block:: python\n\n model = MyLightningModule(...)\n model.freeze()\n\n \"\"\"\n for param in self.parameters():\n param.requires_grad = False\n\n self.eval()\n\n def unfreeze(self) -> None:\n \"\"\"\n Unfreeze all parameters for training.\n\n .. code-block:: python\n\n model = MyLightningModule(...)\n model.unfreeze()\n\n \"\"\"\n for param in self.parameters():\n param.requires_grad = True\n\n self.train()\n\n def get_progress_bar_dict(self) -> Dict[str, Union[int, str]]:\n r\"\"\"\n Implement this to override the default items displayed in the progress bar.\n By default it includes the average loss value, split index of BPTT (if used)\n and the version of the experiment when using a logger.\n\n .. code-block::\n\n Epoch 1: 4%|▎ | 40/1095 [00:03<01:37, 10.84it/s, loss=4.501, v_num=10]\n\n Here is an example how to override the defaults:\n\n .. 
code-block:: python\n\n def get_progress_bar_dict(self):\n # don't show the version number\n items = super().get_progress_bar_dict()\n items.pop(\"v_num\", None)\n return items\n\n Return:\n Dictionary with the items to be displayed in the progress bar.\n \"\"\"\n # call .item() only once but store elements without graphs\n running_train_loss = self.trainer.train_loop.running_loss.mean()\n avg_training_loss = None\n if running_train_loss is not None:\n avg_training_loss = running_train_loss.cpu().item()\n elif self.trainer.train_loop.automatic_optimization:\n avg_training_loss = float('NaN')\n\n tqdm_dict = {}\n if avg_training_loss is not None:\n tqdm_dict[\"loss\"] = f\"{avg_training_loss:.3g}\"\n\n if self.trainer.truncated_bptt_steps is not None:\n tqdm_dict[\"split_idx\"] = self.trainer.split_idx\n\n if self.trainer.logger is not None and self.trainer.logger.version is not None:\n version = self.trainer.logger.version\n # show last 4 places of long version strings\n version = version[-4:] if isinstance(version, str) else version\n tqdm_dict[\"v_num\"] = version\n\n return tqdm_dict\n\n def _verify_is_manual_optimization(self, fn_name):\n if self.trainer.train_loop.automatic_optimization:\n raise MisconfigurationException(\n f'to use {fn_name}, please disable automatic optimization:'\n ' set model property `automatic_optimization` as False'\n )\n\n @classmethod\n def _auto_collect_arguments(cls, frame=None) -> Tuple[Dict, Dict]:\n \"\"\"\n Collect all module arguments in the current constructor and all child constructors.\n The child constructors are all the ``__init__`` methods that reach the current class through\n (chained) ``super().__init__()`` calls.\n\n Args:\n frame: instance frame\n\n Returns:\n self_arguments: arguments dictionary of the first instance\n parents_arguments: arguments dictionary of the parent's instances\n \"\"\"\n if not frame:\n frame = inspect.currentframe()\n\n frame_args = collect_init_args(frame.f_back, [])\n self_arguments = frame_args[-1]\n\n # set hyper_parameters in child\n self_arguments = self_arguments\n parents_arguments = {}\n\n # add all arguments from parents\n for args in frame_args[:-1]:\n parents_arguments.update(args)\n return self_arguments, parents_arguments\n\n def save_hyperparameters(self, *args, frame=None) -> None:\n \"\"\"Save all model arguments.\n\n Args:\n args: single object of `dict`, `Namespace` or `OmegaConf`\n or string names or arguments from class `__init__`\n\n >>> from collections import OrderedDict\n >>> class ManuallyArgsModel(LightningModule):\n ... def __init__(self, arg1, arg2, arg3):\n ... super().__init__()\n ... # manually assign arguments\n ... self.save_hyperparameters('arg1', 'arg3')\n ... def forward(self, *args, **kwargs):\n ... ...\n >>> model = ManuallyArgsModel(1, 'abc', 3.14)\n >>> model.hparams\n \"arg1\": 1\n \"arg3\": 3.14\n\n >>> class AutomaticArgsModel(LightningModule):\n ... def __init__(self, arg1, arg2, arg3):\n ... super().__init__()\n ... # equivalent automatic\n ... self.save_hyperparameters()\n ... def forward(self, *args, **kwargs):\n ... ...\n >>> model = AutomaticArgsModel(1, 'abc', 3.14)\n >>> model.hparams\n \"arg1\": 1\n \"arg2\": abc\n \"arg3\": 3.14\n\n >>> class SingleArgModel(LightningModule):\n ... def __init__(self, params):\n ... super().__init__()\n ... # manually assign single argument\n ... self.save_hyperparameters(params)\n ... def forward(self, *args, **kwargs):\n ... 
...\n >>> model = SingleArgModel(Namespace(p1=1, p2='abc', p3=3.14))\n >>> model.hparams\n \"p1\": 1\n \"p2\": abc\n \"p3\": 3.14\n \"\"\"\n if not frame:\n frame = inspect.currentframe().f_back\n init_args = get_init_args(frame)\n assert init_args, \"failed to inspect the self init\"\n if not args:\n # take all arguments\n hp = init_args\n self._hparams_name = \"kwargs\" if hp else None\n else:\n # take only the listed arguments in `save_hyperparameters`\n isx_non_str = [i for i, arg in enumerate(args) if not isinstance(arg, str)]\n if len(isx_non_str) == 1:\n hp = args[isx_non_str[0]]\n cand_names = [k for k, v in init_args.items() if v == hp]\n self._hparams_name = cand_names[0] if cand_names else None\n else:\n hp = {arg: init_args[arg] for arg in args if isinstance(arg, str)}\n self._hparams_name = \"kwargs\"\n\n # `hparams` are expected here\n if hp:\n self._set_hparams(hp)\n # make a deep copy so no other runtime changes are reflected\n self._hparams_initial = copy.deepcopy(self._hparams)\n\n def _set_hparams(self, hp: Union[dict, Namespace, str]) -> None:\n if isinstance(hp, Namespace):\n hp = vars(hp)\n if isinstance(hp, dict):\n hp = AttributeDict(hp)\n elif isinstance(hp, PRIMITIVE_TYPES):\n raise ValueError(f\"Primitives {PRIMITIVE_TYPES} are not allowed.\")\n elif not isinstance(hp, ALLOWED_CONFIG_TYPES):\n raise ValueError(f\"Unsupported config type of {type(hp)}.\")\n\n if isinstance(hp, dict) and isinstance(self.hparams, dict):\n self.hparams.update(hp)\n else:\n self._hparams = hp\n\n @torch.no_grad()\n def to_onnx(\n self,\n file_path: Union[str, Path],\n input_sample: Optional[Any] = None,\n **kwargs,\n ):\n \"\"\"\n Saves the model in ONNX format.\n\n Args:\n file_path: The path of the file the onnx model should be saved to.\n input_sample: An input for tracing. Default: None (Use self.example_input_array)\n **kwargs: Will be passed to torch.onnx.export function.\n\n Example:\n >>> class SimpleModel(LightningModule):\n ... def __init__(self):\n ... super().__init__()\n ... self.l1 = torch.nn.Linear(in_features=64, out_features=4)\n ...\n ... def forward(self, x):\n ... return torch.relu(self.l1(x.view(x.size(0), -1)))\n\n >>> with tempfile.NamedTemporaryFile(suffix='.onnx', delete=False) as tmpfile:\n ... model = SimpleModel()\n ... input_sample = torch.randn((1, 64))\n ... model.to_onnx(tmpfile.name, input_sample, export_params=True)\n ... 
os.path.isfile(tmpfile.name)\n True\n \"\"\"\n mode = self.training\n\n if input_sample is None:\n if self.example_input_array is None:\n raise ValueError(\n \"Could not export to ONNX since neither `input_sample` nor\"\n \" `model.example_input_array` attribute is set.\"\n )\n input_sample = self.example_input_array\n\n input_sample = self.transfer_batch_to_device(input_sample)\n\n if \"example_outputs\" not in kwargs:\n self.eval()\n kwargs[\"example_outputs\"] = self(input_sample)\n\n torch.onnx.export(self, input_sample, file_path, **kwargs)\n self.train(mode)\n\n @torch.no_grad()\n def to_torchscript(\n self,\n file_path: Optional[Union[str, Path]] = None,\n method: Optional[str] = 'script',\n example_inputs: Optional[Any] = None,\n **kwargs,\n ) -> Union[ScriptModule, Dict[str, ScriptModule]]:\n \"\"\"\n By default compiles the whole model to a :class:`~torch.jit.ScriptModule`.\n If you want to use tracing, please provide the argument `method='trace'` and make sure that either the\n example_inputs argument is provided, or the model has self.example_input_array set.\n If you would like to customize the modules that are scripted, you should override this method.\n In case you want to return multiple modules, we recommend using a dictionary.\n\n Args:\n file_path: Path where to save the torchscript. Default: None (no file saved).\n method: Whether to use TorchScript's script or trace method. Default: 'script'\n example_inputs: An input to be used to do tracing when method is set to 'trace'.\n Default: None (Use self.example_input_array)\n **kwargs: Additional arguments that will be passed to the :func:`torch.jit.script` or\n :func:`torch.jit.trace` function.\n\n Note:\n - Requires the implementation of the\n :meth:`~pytorch_lightning.core.lightning.LightningModule.forward` method.\n - The exported script will be set to evaluation mode.\n - It is recommended that you install the latest supported version of PyTorch\n to use this feature without limitations. See also the :mod:`torch.jit`\n documentation for supported features.\n\n Example:\n >>> class SimpleModel(LightningModule):\n ... def __init__(self):\n ... super().__init__()\n ... self.l1 = torch.nn.Linear(in_features=64, out_features=4)\n ...\n ... def forward(self, x):\n ... return torch.relu(self.l1(x.view(x.size(0), -1)))\n ...\n >>> model = SimpleModel()\n >>> torch.jit.save(model.to_torchscript(), \"model.pt\") # doctest: +SKIP\n >>> os.path.isfile(\"model.pt\") # doctest: +SKIP\n >>> torch.jit.save(model.to_torchscript(file_path=\"model_trace.pt\", method='trace', # doctest: +SKIP\n ... 
example_inputs=torch.randn(1, 64))) # doctest: +SKIP\n >>> os.path.isfile(\"model_trace.pt\") # doctest: +SKIP\n True\n\n Return:\n This LightningModule as a torchscript, regardless of whether file_path is\n defined or not.\n \"\"\"\n mode = self.training\n\n if method == 'script':\n torchscript_module = torch.jit.script(self.eval(), **kwargs)\n elif method == 'trace':\n # if no example inputs are provided, try to see if model has example_input_array set\n if example_inputs is None:\n if self.example_input_array is None:\n raise ValueError(\n 'Choosing method=`trace` requires either `example_inputs`'\n ' or `model.example_input_array` to be defined'\n )\n example_inputs = self.example_input_array\n\n # automatically send example inputs to the right device and use trace\n example_inputs = self.transfer_batch_to_device(example_inputs)\n torchscript_module = torch.jit.trace(func=self.eval(), example_inputs=example_inputs, **kwargs)\n else:\n raise ValueError(\"The 'method' parameter only supports 'script' or 'trace',\"\n f\" but value given was: {method}\")\n\n self.train(mode)\n\n if file_path is not None:\n torch.jit.save(torchscript_module, file_path)\n\n return torchscript_module\n\n @property\n def hparams(self) -> Union[AttributeDict, dict, Namespace]:\n if not hasattr(self, \"_hparams\"):\n self._hparams = AttributeDict()\n return self._hparams\n\n @property\n def hparams_initial(self) -> AttributeDict:\n if not hasattr(self, \"_hparams_initial\"):\n return AttributeDict()\n # prevent any change\n return copy.deepcopy(self._hparams_initial)\n\n @hparams.setter\n def hparams(self, hp: Union[dict, Namespace, Any]):\n # TODO: remove this method in v1.3.0.\n rank_zero_warn(\n \"The setter for self.hparams in LightningModule is deprecated since v1.1.0 and will be\"\n \" removed in v1.3.0. Replace the assignment `self.hparams = hparams` with \"\n \" `self.save_hyperparameters()`.\",\n DeprecationWarning\n )\n hparams_assignment_name = self.__get_hparams_assignment_variable()\n self._hparams_name = hparams_assignment_name\n self._set_hparams(hp)\n # this resolves the case when the user does not use `save_hyperparameters` and does a hard assignment in init\n if not hasattr(self, \"_hparams_initial\"):\n self._hparams_initial = copy.deepcopy(self._hparams)\n\n def __get_hparams_assignment_variable(self):\n \"\"\"\n Looks at the code of the class to figure out what the user named self.hparams.\n This only happens when the user explicitly sets self.hparams.\n \"\"\"\n try:\n class_code = inspect.getsource(self.__class__)\n lines = class_code.split(\"\\n\")\n for line in lines:\n line = re.sub(r\"\\s+\", \"\", line, flags=re.UNICODE)\n if \".hparams=\" in line:\n return line.split(\"=\")[1]\n except Exception:\n return \"hparams\"\n\n return None\n"
] | [
[
"torch.jit.save",
"torch.no_grad",
"torch.onnx.export",
"torch._C._log_api_usage_once"
]
] |
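The record above documents the LightningModule hook contract (training_step, validation_step, the *_step_end/*_epoch_end aggregation hooks, and configure_optimizers) but never shows the hooks assembled into a working module. As a minimal sketch of how they fit together — assuming only that `torch` and `pytorch_lightning` (of the same era as this record) are installed; the `TinyClassifier` name, the random stand-in dataset, and every hyperparameter value here are invented for illustration, not taken from the record:

.. code-block:: python

    # Minimal sketch, not from the dataset row above: a LightningModule that
    # exercises the documented hooks. All sizes and names are arbitrary.
    import torch
    from torch import nn
    from torch.utils.data import DataLoader, TensorDataset
    import pytorch_lightning as pl


    class TinyClassifier(pl.LightningModule):
        def __init__(self, in_features: int = 64, n_classes: int = 4, lr: float = 1e-3):
            super().__init__()
            self.save_hyperparameters()  # records in_features / n_classes / lr
            self.l1 = nn.Linear(in_features, n_classes)

        def forward(self, x):
            return self.l1(x)

        def training_step(self, batch, batch_idx):
            x, y = batch
            loss = nn.functional.cross_entropy(self(x), y)
            self.log("train_loss", loss)
            return loss

        def validation_step(self, batch, batch_idx):
            x, y = batch
            acc = (self(x).argmax(dim=1) == y).float().mean()
            self.log("val_acc", acc)

        def configure_optimizers(self):
            return torch.optim.Adam(self.parameters(), lr=self.hparams.lr)


    if __name__ == "__main__":
        # random tensors stand in for a real dataset
        ds = TensorDataset(torch.randn(256, 64), torch.randint(0, 4, (256,)))
        trainer = pl.Trainer(max_epochs=1)
        trainer.fit(TinyClassifier(), DataLoader(ds, batch_size=32), DataLoader(ds, batch_size=32))

Note that training_step_end and training_epoch_end are deliberately omitted: per the docstrings above, they are optional and only needed when a loss must see the full batch under dp/ddp2 or when all step outputs must be post-processed at once.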
HotMaps/renovation_effect | [
"5b1fb81102b3c6ee531b719d8136ed9a343c2598"
] | [
"cm/app/api_v1/my_calculation_module_directory/CM/__delete_if_tested__/CEDM/create_csv_results.py"
] | [
"\nimport numpy as np\nimport os\nimport time\nimport sys\n\npath = os.path.dirname(os.path.dirname(os.path.dirname(os.path.\n abspath(__file__))))\nif path not in sys.path:\n sys.path.append(path)\n \nimport CM_intern.CEDM.modules.cyf.create_density_map as CDM\nimport CM_intern.CEDM.modules.Subfunctions as SF\nfrom CM_intern.common_modules.exportLayerDict import export_layer as expLyr\nimport CM_intern.common_modules.cliprasterlayer as CRL\n\nimport pickle\n\nTARGET_RESOLUTION = 100\n\ndef load_reference_raster_lyr(NUTS3_vector_path, strd_raster_path_full, outputpath, NUTS3_feat_id_LIST\n , MOST_RECENT_CUT=\"\"):\n \n datatype_int = 'uint32'\n #self.datatype_int16 = 'uint16'\n datatype = \"float32\"\n # common parameters\n noDataValue = 0\n \n #SaveLayerDict = {}\n # Get current extent -> Use the Population 1x1km raster as reference Layer\n key_field = \"NUTS_ID\" \n REFERENCE_RASTER_LAYER_COORD, Layer_is_uncut = CRL.create_reference_raster_layer_origin_extent_of_vctr_feat(strd_raster_path_full\n , NUTS3_vector_path, NUTS3_feat_id_LIST\n , Vctr_key_field=key_field)\n (REFERENCE_geotransform_obj, REFERENCE_RasterSize\n , REFERENCE_RESOLUTION, REFERENCE_extent) = REFERENCE_RASTER_LAYER_COORD\n \n REFERENCE_RasterResolution = REFERENCE_geotransform_obj[1]\n \n gto_hr = list(REFERENCE_geotransform_obj)\n gto_hr[1] = TARGET_RESOLUTION\n gto_hr[5] = -TARGET_RESOLUTION\n HighRes_gt_obj = tuple(gto_hr)\n \n SaveLayerDict = {}\n SaveLayerDict[\"Reference\"] = [\"%s/REFERENCE.tif\" % outputpath, REFERENCE_geotransform_obj\n , datatype_int\n , np.ones((REFERENCE_RasterSize), dtype=datatype_int) , noDataValue]\n \n \n # If data are the same as previous cut, then loading data can be done\n LOAD_DATA_PREVIOUS = False\n filename = MOST_RECENT_CUT\n if os.path.exists(MOST_RECENT_CUT):\n try:\n with open(MOST_RECENT_CUT, 'rb') as fobject:\n PREV_CUT = pickle.load(fobject)\n fobject.close()\n if PREV_CUT == REFERENCE_RASTER_LAYER_COORD:\n LOAD_DATA_PREVIOUS = True\n except Exception as e:\n print(\"Cannot import %s\"%MOST_RECENT_CUT)\n print(e)\n \n \n if LOAD_DATA_PREVIOUS != True:\n\n with open(filename, 'wb') as fobject:\n pickle.dump(REFERENCE_RASTER_LAYER_COORD, fobject, protocol=2)\n fobject.close()\n SaveLayerDict = expLyr(SaveLayerDict)\n \n return (REFERENCE_RasterResolution, HighRes_gt_obj, LOAD_DATA_PREVIOUS, Layer_is_uncut, REFERENCE_geotransform_obj, REFERENCE_RasterSize)\n\n\n\ndef main(main_path, path_in_raw, preproccessed_input_path, prj_path_output): \n st = time.time()\n \n data_type = \"uint8\"\n \n MOST_RECENT_CUT = main_path + prj_path_output + \"/MOST_RECENT_CUT.pk\" \n prepro_path = main_path + preproccessed_input_path\n org_data_path = main_path + path_in_raw\n p_ = org_data_path\n pi_ = org_data_path + \"/vector_input_data/\"\n NUTS3_vector_path = pi_ + \"/NUTS3.shp\"\n strd_raster_path_full = \"%s/%s\" %(org_data_path, \"Population.tif\")\n temp_path = \"/home/simulant/workspace/project/Hotmaps_DATA/heat_density_map/output_2/\" + os.sep + \"Temp\"\n SoilSeal_path_full = \"%s/%s\" %(org_data_path, \"_____ESM100m_final.tif\")\n \n \n \n #p_ = \"/home/simulant/workspace/project/Hotmaps_DATA/heat_density_map/output/\"\n \n \n \n sd = \"\"\n print(os.path.exists(p_))\n print(os.path.exists(pi_))\n fn = []\n NUTS3_feat_id_LIST = range(12000)\n (REFERENCE_RasterResolution, HighRes_gt_obj, LOAD_DATA_PREVIOUS\n , Ref_layer_is_uncut, REFERENCE_geotransform_obj, REFERENCE_RasterSize) = \\\n load_reference_raster_lyr(NUTS3_vector_path,\n strd_raster_path_full, \n temp_path, 
NUTS3_feat_id_LIST\n , MOST_RECENT_CUT)\n \n \n for f_ in os.listdir(\"%s/%s\" %(p_, sd)):\n if f_.endswith(\".tif\"):\n fn.append(\"%s/%s/%s\" %(p_, sd, f_))\n print(f_)\n if \"g100_clc12_v18_5\" in f_.lower():\n data, geotransform_obj = CRL.clip_raster_layer(fn[-1]\n , REFERENCE_geotransform_obj\n , REFERENCE_RasterSize)\n data2 = np.zeros((data.shape),dtype=\"f4\")\n data3 = np.zeros_like(data2)\n data4 = np.ones_like(data2) * 10.0 # 1000 m2\n data2[data <= 21] = 10.0\n data3[data <= 6] = 10.0\n data3[data == 9] = 10.0\n data3[data == 10] = 10.0\n data3[data == 11] = 10.0\n data3[data == 20] = 10.0\n print(np.sum(data2))\n print(np.sum(data3))\n print(np.sum(data4))\n \n \n elif \"ESM100m_final\" in f_: \n data5, geotransform_obj = CRL.clip_raster_layer(fn[-1]\n , REFERENCE_geotransform_obj\n , REFERENCE_RasterSize)\n data5 *= 10.0/100.0 # in 1000 m2, data5 unit = %\n print(np.sum(data5))\n \n \n \n print(time.time() - st)\n ARR_NUTS_ID_NUMBER, geotransform_obj = SF.rrl(\"%s/%s_id_number.tif\" %(prepro_path, \"NUTS3\"), data_type=\"uint16\")\n print(time.time() - st)\n ARR_LAU2_ID_NUMBER, geotransform_obj = SF.rrl(\"%s/%s_id_number.tif\" %(prepro_path, \"LAU2\"), data_type=\"uint32\")\n print(time.time() - st)\n \n \n \n #num_fn = len(fn)\n num_fn = 4\n \n RES_Table_NUTS = np.zeros((np.max(ARR_NUTS_ID_NUMBER)+1, num_fn+1), \"f4\") \n RES_Table_LAU = np.zeros((np.max(ARR_LAU2_ID_NUMBER)+1, num_fn+1), \"f4\") \n RES_Table_NUTS[:,0] = np.arange(RES_Table_NUTS.shape[0])\n RES_Table_LAU[:,0] = np.arange(RES_Table_LAU.shape[0])\n \n header = [\"DI\"]\n #for i, f_ in enumerate(fn):\n for i in range(num_fn):\n #print(f_)\n \n if i == 0:\n data = data2.copy()\n fn = \"dauersiedlungsraum\"\n elif i == 1:\n data = data3.copy()\n fn = \"dauersiedlungsraum_eng\" \n elif i == 2:\n data = data4.copy()\n fn = \"flaeche\"\n else:\n data = data5.copy()\n fn = \"ESM100m_final\"\n print(fn)\n header.append(fn) \n print(np.sum(data))\n #header.append(f_.split(\"/\")[-1]) \n #data, geotransform_obj = SF.rrl(f_, data_type=data_type)\n \n TABLE_RESULTS_NUTS = CDM.CreateResultsTableperIndicator(data, ARR_NUTS_ID_NUMBER) \n print(time.time() - st)\n TABLE_RESULTS_LAU = CDM.CreateResultsTableperIndicator(data, ARR_LAU2_ID_NUMBER) \n del data\n print(time.time() - st)\n RES_Table_NUTS[:, i+1] = TABLE_RESULTS_NUTS[:,-1]\n RES_Table_LAU[:, i+1] = TABLE_RESULTS_LAU[:,-1]\n #break\n \n header = \",\".join(header)\n np.savetxt(\"%s/%s.csv\" %(prepro_path, \"__TABLE_RES_LAU2\"), np.round(RES_Table_LAU, 3), delimiter=\",\", header=header, comments=\"\")\n np.savetxt(\"%s/%s.csv\" %(prepro_path, \"__TABLE_RES_NUTS\"), np.round(RES_Table_NUTS, 3), delimiter=\",\", header=header, comments=\"\")\n \n print(\"DONE\")"
] | [
[
"numpy.ones",
"numpy.sum",
"numpy.zeros_like",
"numpy.zeros",
"numpy.ones_like",
"numpy.arange",
"numpy.max",
"numpy.round"
]
] |
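The HotMaps record reduces each indicator raster to per-region totals via `CDM.CreateResultsTableperIndicator(data, id_raster)`, with the NUTS3/LAU2 id rasters acting as zone maps. That helper's implementation is not part of the record; a plausible pure-NumPy equivalent of the zonal aggregation it performs — the `zonal_sum` name and the toy rasters below are invented, and only `numpy` (already in the record's API list) is required — might look like:

.. code-block:: python

    # Hypothetical re-implementation of the per-zone sum used above:
    # accumulate a value raster over every zone id in an id raster.
    import numpy as np

    def zonal_sum(values: np.ndarray, zone_ids: np.ndarray) -> np.ndarray:
        """Return t where t[i] is the sum of `values` over all cells with zone id i."""
        flat_ids = zone_ids.ravel().astype(np.int64)
        flat_vals = values.ravel().astype(np.float64)
        # np.bincount with weights does the whole aggregation in one vectorized pass
        return np.bincount(flat_ids, weights=flat_vals,
                           minlength=int(flat_ids.max()) + 1)

    # toy 2x3 rasters; zone id 0 plays the role of the record's noDataValue
    zones = np.array([[0, 1, 1],
                      [2, 2, 1]], dtype=np.uint16)
    area = np.array([[10.0, 10.0, 0.0],
                     [10.0, 5.0, 10.0]], dtype=np.float32)
    print(zonal_sum(area, zones))  # -> [10. 20. 15.]

This mirrors the shape of the record's result tables: row index = zone id (hence the `np.max(id_raster) + 1` sizing there), one aggregated value per indicator column.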
RiccardoNanni/bigbang | [
"70b9890fcd615ccb21a3685a9b33d79226e6fb36"
] | [
"bigbang/listserv.py"
] | [
"import datetime\nimport email\nimport email.parser\nimport glob\nimport mailbox\nimport os\nimport re\nimport subprocess\nimport time\nimport urllib\nimport warnings\nfrom email.header import Header\nfrom email.message import Message\nfrom email.mime.text import MIMEText\nfrom typing import Dict, List, Optional, Tuple, Union\n\nimport numpy as np\nimport pandas as pd\nimport requests\nimport yaml\nfrom bs4 import BeautifulSoup\n\n\nclass ListservMessageWarning(BaseException):\n \"\"\"Base class for Archive class specific exceptions\"\"\"\n\n pass\n\n\nclass ListservListWarning(BaseException):\n \"\"\"Base class for Archive class specific exceptions\"\"\"\n\n pass\n\n\nclass ListservArchiveWarning(BaseException):\n \"\"\"Base class for Archive class specific exceptions\"\"\"\n\n pass\n\n\nclass ListservMessage:\n \"\"\"\n Parameters\n ----------\n body\n subject\n fromname\n fromaddr\n toname\n toaddr\n date\n contenttype\n messageid\n\n Methods\n -------\n from_url\n get_header_from_html\n get_body_from_html\n get_header_from_listserv_file\n get_body_from_listserv_file\n get_name\n get_addr\n get_date\n remove_unwanted_header_content\n to_dict\n to_mbox\n\n Example\n -------\n msg = ListservMessage.from_url(\n list_name=\"3GPP_TSG_CT_WG6\",\n url=url_message,\n fields=\"total\",\n )\n \"\"\"\n\n empty_header = {\n \"subject\": None,\n \"fromname\": None,\n \"fromaddr\": None,\n \"toname\": None,\n \"toaddr\": None,\n \"date\": None,\n \"contenttype\": None,\n }\n\n def __init__(\n self,\n body: str,\n subject: str,\n fromname: str,\n fromaddr: str,\n toname: str,\n toaddr: str,\n date: str,\n contenttype: str,\n messageid: Optional[str] = None,\n ):\n self.body = body\n self.subject = subject\n self.fromname = fromname\n self.fromaddr = fromaddr\n self.toname = toname\n self.toaddr = toaddr\n self.date = date\n self.contenttype = contenttype\n\n @classmethod\n def from_url(\n cls,\n list_name: str,\n url: str,\n fields: str = \"total\",\n url_login: str = \"https://list.etsi.org/scripts/wa.exe?LOGON\",\n login: Optional[Dict[str, str]] = {\"username\": None, \"password\": None},\n session: Optional[str] = None,\n ) -> \"ListservMessage\":\n \"\"\"\n Args:\n \"\"\"\n # TODO implement field selection, e.g. 
return only header, body, etc.\n if session is None:\n session = get_auth_session(url_login, **login)\n soup = get_website_content(url, session=session)\n if fields in [\"header\", \"total\"]:\n header = ListservMessage.get_header_from_html(soup)\n else:\n header = cls.empty_header\n if fields in [\"body\", \"total\"]:\n body = ListservMessage.get_body_from_html(list_name, url, soup)\n else:\n body = None\n return cls(body, **header)\n\n @classmethod\n def from_listserv_file(\n cls,\n list_name: str,\n file_path: str,\n header_start_line_nr: int,\n fields: str = \"total\",\n ) -> \"ListservMessage\":\n file = open(file_path, \"r\")\n fcontent = file.readlines()\n file.close()\n header_end_line_nr = cls.get_header_end_line_nr(\n fcontent, header_start_line_nr\n )\n if fields in [\"header\", \"total\"]:\n header = cls.get_header_from_listserv_file(\n fcontent, header_start_line_nr, header_end_line_nr\n )\n else:\n header = cls.empty_header\n if fields in [\"body\", \"total\"]:\n body = cls.get_body_from_listserv_file(\n fcontent, header_end_line_nr\n )\n else:\n body = None\n return cls(body, **header)\n\n @classmethod\n def get_header_end_line_nr(\n cls,\n content: List[str],\n header_start_line_nr: int,\n ) -> List[int]:\n \"\"\"\n The header ends with the first empty line encountered.\n\n Args:\n content: The content of one LISTSERV-file.\n \"\"\"\n for lnr, lcont in enumerate(content[header_start_line_nr:]):\n if len(lcont) <= 1:\n header_end_line_nr = header_start_line_nr + lnr\n break\n return header_end_line_nr\n\n @classmethod\n def get_header_from_listserv_file(\n cls,\n content: List[str],\n header_start_line_nr: int,\n header_end_line_nr: int,\n ) -> Dict[str, str]:\n \"\"\"\n Args:\n content:\n \"\"\"\n content = content[header_start_line_nr:header_end_line_nr]\n # collect important info from LISTSERV header\n header = {}\n for lnr in range(len(content)):\n line = content[lnr]\n # get header keyword and value\n if re.match(r\"\\S+:\\s+\\S+\", line):\n key = line.split(\":\")[0]\n value = line.replace(key + \":\", \"\").strip().rstrip(\"\\n\")\n # if not at the end of header\n if lnr < len(content) - 1:\n # if header-keyword value is split over two lines\n if not re.match(r\"\\S+:\\s+\\S+\", content[lnr + 1]):\n value += \" \" + content[lnr + 1].strip().rstrip(\"\\n\")\n header[key.lower()] = value\n\n header = cls.format_header_content(header)\n header = cls.remove_unwanted_header_content(header)\n return header\n\n @classmethod\n def get_body_from_listserv_file(\n cls,\n content: List[str],\n header_end_line_nr: int,\n ) -> str:\n \"\"\"\"\"\"\n found = False\n # find body 'position' in file\n for line_nr, line in enumerate(content[header_end_line_nr:]):\n if \"=\" * 73 in line:\n body_end_line_nr = line_nr + header_end_line_nr\n found = True\n break\n if not found:\n body_end_line_nr = -1\n # get body content\n body = content[header_end_line_nr:body_end_line_nr]\n # remove empty lines and join into one string\n body = (\"\").join([line for line in body if len(line) > 1])\n return body\n\n @classmethod\n def get_header_from_html(cls, soup: BeautifulSoup) -> Dict[str, str]:\n \"\"\"\"\"\"\n text = soup.find(\n \"b\",\n text=re.compile(r\"^\\bSubject\\b\"),\n ).parent.parent.parent.parent.text\n # collect important info from LISTSERV header\n header = {}\n for field in text.split(\"Parts/Attachments:\")[0].splitlines():\n if len(field) == 0:\n continue\n field_name = field.split(\":\")[0].strip()\n field_body = field.replace(field_name + \":\", \"\").strip()\n 
header[field_name.lower()] = field_body\n\n header = cls.format_header_content(header)\n header = cls.remove_unwanted_header_content(header)\n return header\n\n @staticmethod\n def get_body_from_html(\n list_name: str, url: str, soup: BeautifulSoup\n ) -> str:\n \"\"\"\"\"\"\n url_root = (\"/\").join(url.split(\"/\")[:-2])\n a_tags = soup.select(f'a[href*=\"A3=\"][href*=\"{list_name}\"]')\n href_plain_text = [\n tag.get(\"href\") for tag in a_tags if \"Fplain\" in tag.get(\"href\")\n ][0]\n body_soup = get_website_content(\n urllib.parse.urljoin(url_root, href_plain_text)\n )\n return body_soup.find(\"pre\").text\n\n @classmethod\n def format_header_content(cls, header: Dict[str, str]) -> Dict[str, str]:\n header[\"fromname\"] = cls.get_name(header[\"from\"]).strip()\n header[\"fromaddr\"] = cls.get_addr(header[\"from\"])\n header[\"toname\"] = cls.get_name(header[\"reply-to\"]).strip()\n header[\"toaddr\"] = cls.get_addr(header[\"reply-to\"])\n header[\"date\"] = cls.get_date(header[\"date\"])\n header[\"contenttype\"] = header[\"content-type\"]\n return header\n\n @classmethod\n def remove_unwanted_header_content(\n cls, header: Dict[str, str]\n ) -> Dict[str, str]:\n for key in list(header.keys()):\n if key not in list(cls.empty_header.keys()):\n del header[key]\n return header\n\n @staticmethod\n def get_name(line: str) -> str:\n # get string in between < and >\n email_of_sender = re.findall(r\"\\<(.*)\\>\", line)\n if email_of_sender:\n # remove email_of_sender from line\n name = line.replace(\"<\" + email_of_sender[0] + \">\", \"\")\n # remove special characters\n name = re.sub(r\"[^a-zA-Z0-9]+\", \" \", name)\n else:\n name = line\n return name\n\n @staticmethod\n def get_addr(line: str) -> str:\n # get string in between < and >\n email_of_sender = re.findall(r\"\\<(.*)\\>\", line)\n if email_of_sender:\n email_of_sender = email_of_sender[0]\n else:\n email_of_sender = None\n return email_of_sender\n\n @staticmethod\n def get_date(line: str) -> str:\n line = (\" \").join(line.split(\" \")[:-1]).lstrip()\n # convert format to local version of date and time\n date_time_obj = datetime.datetime.strptime(\n line, \"%a, %d %b %Y %H:%M:%S\"\n )\n return date_time_obj.strftime(\"%c\")\n\n @staticmethod\n def create_message_id(\n date: str,\n from_address: str,\n ) -> str:\n message_id = (\".\").join([date, from_address])\n # remove special characters\n message_id = re.sub(r\"[^a-zA-Z0-9]+\", \"\", message_id)\n return message_id\n\n def to_dict(self) -> Dict[str, str]:\n dic = {\n \"Body\": self.body,\n \"Subject\": self.subject,\n \"FromName\": self.fromname,\n \"FromAddr\": self.fromaddr,\n \"ToName\": self.toname,\n \"ToAddr\": self.toaddr,\n \"Date\": self.date,\n \"ContentType\": self.contenttype,\n }\n return dic\n\n def to_mbox(self, filepath: str, mode: str = \"w\"):\n \"\"\"\n Save mail list to .mbox files.\n \"\"\"\n message_id = ListservMessage.create_message_id(\n self.date,\n self.fromaddr,\n )\n f = open(filepath, mode, encoding=\"utf-8\")\n f.write(\"\\n\")\n # check that header was selected\n if self.subject is not None:\n f.write(f\"From b'{self.fromaddr}' {self.date}\\n\")\n f.write(f\"Content-Type: {self.contenttype}\\n\")\n f.write(f\"MIME-Version: 1.0\\n\")\n f.write(f\"In-Reply-To: {self.toname} <b'{self.toaddr}'>\\n\")\n f.write(f\"From: {self.fromname} <b'{self.fromaddr}'>\\n\")\n f.write(f\"Subject: b'{self.subject}\\n\")\n f.write(f\"Message-ID: <{message_id}>'\\n\")\n f.write(f\"Date: {self.date}'\\n\")\n f.write(\"\\n\")\n # check that body was selected\n if 
self.body is not None:\n f.write(self.body)\n f.write(\"\\n\")\n f.close()\n\n\nclass ListservList:\n \"\"\"\n This class handles a single mailing list of a public archive in the\n LISTSERV 16.5 format.\n\n Parameters\n ----------\n name\n The name of the list (e.g. 3GPP_COMMON_IMS_XFER, IEEESCO-DIFUSION, ...)\n source\n Contains the information of the location of the mailing list.\n It can be either a URL where the list is hosted or a path to the file(s).\n msgs\n List of ListservMessage objects\n\n Methods\n -------\n from_url\n from_messages\n from_listserv_files\n from_listserv_directories\n get_messages_from_url\n get_period_urls\n get_line_numbers_of_header_starts\n get_index_of_elements_in_selection\n to_dict\n to_pandas_dataframe\n to_mbox\n\n Example\n -------\n mlist = ListservList.from_url(\n \"3GPP_TSG_CT_WG6\",\n url=\"https://list.etsi.org/scripts/wa.exe?A0=3GPP_TSG_CT_WG6\",\n select={\n \"years\": (2020, 2021),\n \"months\": \"January\",\n \"weeks\": [1,5],\n \"fields\": \"header\",\n },\n )\n \"\"\"\n\n def __init__(\n self,\n name: str,\n source: Union[List[str], str],\n msgs: List[ListservMessage],\n ):\n self.name = name\n self.source = source\n self.messages = msgs\n\n def __len__(self) -> int:\n return len(self.messages)\n\n def __iter__(self):\n return iter(self.messages)\n\n def __getitem__(self, index) -> ListservMessage:\n return self.messages[index]\n\n @classmethod\n def from_url(\n cls,\n name: str,\n url: str,\n select: dict,\n url_login: str = \"https://list.etsi.org/scripts/wa.exe?LOGON\",\n login: Optional[Dict[str, str]] = {\"username\": None, \"password\": None},\n session: Optional[str] = None,\n ) -> \"ListservList\":\n \"\"\"\n Args:\n name: Name of the list of messages, e.g. '3GPP_TSG_SA_WG2_UPCON'\n url: URL to the LISTSERV list.\n select: Selection criteria that can filter messages by:\n - content, i.e. header and/or body\n - period, i.e. written in a certain year, month, week-of-month\n \"\"\"\n if session is None:\n session = get_auth_session(url_login, **login)\n if \"fields\" not in list(select.keys()):\n select[\"fields\"] = \"total\"\n msgs = cls.get_messages_from_url(name, url, select, session)\n return cls.from_messages(name, url, msgs)\n\n @classmethod\n def from_messages(\n cls,\n name: str,\n url: str,\n messages: List[Union[str, ListservMessage]],\n fields: str = \"total\",\n url_login: str = \"https://list.etsi.org/scripts/wa.exe?LOGON\",\n login: Optional[Dict[str, str]] = {\"username\": None, \"password\": None},\n session: Optional[str] = None,\n ) -> \"ListservList\":\n \"\"\"\n Args:\n messages: Can either be a list of URLs to specific LISTSERV messages\n or a list of `ListservMessage` objects.\n \"\"\"\n if not messages:\n # create empty ListservList for ListservArchive\n msgs = messages\n elif isinstance(messages[0], str):\n # create ListservList from message URLs\n if session is None:\n session = get_auth_session(url_login, **login)\n msgs = []\n for idx, url in enumerate(messages):\n msgs.append(\n ListservMessage.from_url(\n list_name=name,\n url=url,\n fields=fields,\n session=session,\n )\n )\n else:\n # create ListservList from list of ListservMessages\n msgs = messages\n return cls(name, url, msgs)\n\n @classmethod\n def from_listserv_directories(\n cls,\n name: str,\n directorypaths: List[str],\n filedsc: str,\n select: Optional[dict] = None,\n ) -> \"ListservList\":\n \"\"\"\n Args:\n name: Name of the list of messages, e.g. 
'3GPP_TSG_SA_WG2_UPCON'.\n directorypaths: List of directory paths where LISTSERV formatted\n messages are.\n filedsc: A description of the relevant files, e.g. *.LOG?????\n select: Selection criteria that can filter messages by:\n - content, i.e. header and/or body\n - period, i.e. written in a certain year, month, week-of-month\n \"\"\"\n _filepaths = []\n # run through directories and collect all filepaths\n for directorypath in directorypaths:\n _filepaths.append(\n get_all_file_from_directory(directorypath, filedsc)\n )\n # flatten list of lists\n filepaths = [fp for li in _filepaths for fp in li]\n return cls.from_listserv_files(name, filepaths, select)\n\n @classmethod\n def from_listserv_files(\n cls,\n name: str,\n filepaths: List[str],\n select: Optional[dict] = None,\n ) -> \"ListservList\":\n \"\"\"\n Args:\n name: Name of the list of messages, e.g. '3GPP_TSG_SA_WG2_UPCON'\n filepaths: List of file paths where LISTSERV formatted messages are.\n Such files can have a file extension of the form: *.LOG1405D\n select: Selection criteria that can filter messages by:\n - content, i.e. header and/or body\n - period, i.e. written in a certain year, month, week-of-month\n \"\"\"\n if select is None:\n select = {\"fields\": \"total\"}\n msgs = []\n for filepath in filepaths:\n # TODO: implement selection filter\n file = open(filepath, \"r\")\n fcontent = file.readlines()\n # get positions of all Emails in file\n header_start_line_nrs = cls.get_line_numbers_of_header_starts(\n fcontent\n )\n file.close()\n # run through all messages in file\n for msg_nr in header_start_line_nrs:\n msgs.append(\n ListservMessage.from_listserv_file(\n name,\n filepath,\n msg_nr,\n select[\"fields\"],\n )\n )\n return cls(name, filepaths, msgs)\n\n @classmethod\n def get_messages_from_url(\n cls,\n name: str,\n url: str,\n select: Optional[dict] = None,\n session: Optional[dict] = None,\n ) -> List[ListservMessage]:\n \"\"\"\n Generator that yields all messages within a certain period\n (e.g. January 2021, Week 5).\n\n Args:\n name: Name of the list of messages, e.g. '3GPP_TSG_SA_WG2_UPCON'\n url: URL to the LISTSERV list.\n select: Selection criteria that can filter messages by:\n - content, i.e. header and/or body\n - period, i.e. written in a certain year, month, week-of-month\n session: AuthSession\n \"\"\"\n if select is None:\n select = {\"fields\": \"total\"}\n msgs = []\n # run through periods\n for period_url in ListservList.get_period_urls(url, select):\n # run through messages within period\n for msg_url in ListservList.get_messages_urls(name, period_url):\n msgs.append(\n ListservMessage.from_url(\n name,\n msg_url,\n select[\"fields\"],\n session=session,\n )\n )\n # wait between loading messages, for politeness\n time.sleep(1)\n return msgs\n\n @classmethod\n def get_period_urls(\n cls, url: str, select: Optional[dict] = None\n ) -> List[str]:\n \"\"\"\n All messages within a certain period\n (e.g. 
January 2021, Week 5).\n \"\"\"\n url_root = (\"/\").join(url.split(\"/\")[:-2])\n # create dictionary with key indicating period and values the url\n periods, urls_of_periods = cls.get_all_periods_and_their_urls(\n url_root, get_website_content(url)\n )\n\n if any(\n period in list(select.keys())\n for period in [\"years\", \"months\", \"weeks\"]\n ):\n for key, value in select.items():\n if key == \"years\":\n cond = lambda x: int(re.findall(r\"\\d{4}\", x)[0])\n elif key == \"months\":\n cond = lambda x: x.split(\" \")[0]\n elif key == \"weeks\":\n cond = lambda x: int(x.split(\" \")[-1])\n else:\n continue\n\n periodquants = [cond(period) for period in periods]\n\n indices = ListservList.get_index_of_elements_in_selection(\n periodquants,\n urls_of_periods,\n value,\n )\n\n periods = [periods[idx] for idx in indices]\n urls_of_periods = [urls_of_periods[idx] for idx in indices]\n return urls_of_periods\n\n @staticmethod\n def get_all_periods_and_their_urls(\n url_root: str,\n soup: BeautifulSoup,\n ) -> Tuple[List[str], List[str]]:\n periods = [list_tag.find(\"a\").text for list_tag in soup.find_all(\"li\")]\n urls_of_periods = [\n urllib.parse.urljoin(url_root, list_tag.find(\"a\").get(\"href\"))\n for list_tag in soup.find_all(\"li\")\n ]\n return periods, urls_of_periods\n\n @staticmethod\n def get_index_of_elements_in_selection(\n times: List[Union[int, str]],\n urls: List[str],\n filtr: Union[tuple, list, int, str],\n ) -> List[int]:\n \"\"\"\n Filter out messages that were in a specific period. Period here is a set\n containing units of year, month, and week-of-month which can have the following\n example elements:\n - years: (1992, 2010), [2000, 2008], 2021\n - months: [\"January\", \"July\"], \"November\"\n - weeks: (1, 4), [1, 5], 2\n\n Args:\n times: A list containing information of the period for each\n group of ListservMessage.\n urls: Corresponding URLs of each group of ListservMessage of which the\n period info is contained in `times`.\n filtr: Containing info on what should be filtered.\n\n Returns:\n Indices of the elements in `times`/`urls`.\n \"\"\"\n if isinstance(filtr, tuple):\n # filter year or week in range\n cond = lambda x: (np.min(filtr) <= x <= np.max(filtr))\n if isinstance(filtr, list):\n # filter in year, week, or month in list\n cond = lambda x: x in filtr\n if isinstance(filtr, int):\n # filter specific year or week\n cond = lambda x: x == filtr\n if isinstance(filtr, str):\n # filter specific month\n cond = lambda x: x == filtr\n return [idx for idx, time in enumerate(times) if cond(time)]\n\n @classmethod\n def get_messages_urls(cls, name: str, url: str) -> List[str]:\n \"\"\"\n Args:\n name: Name of the `ListservList`\n url: URL to group of messages that are within the same period.\n\n Returns:\n List of URLs from which `ListservMessage` can be initialized.\n \"\"\"\n url_root = (\"/\").join(url.split(\"/\")[:-2])\n soup = get_website_content(url)\n a_tags = soup.select(f'a[href*=\"A2=\"][href*=\"{name}\"]')\n if a_tags:\n a_tags = [\n urllib.parse.urljoin(url_root, url.get(\"href\"))\n for url in a_tags\n ]\n return a_tags\n\n @classmethod\n def get_line_numbers_of_header_starts(\n cls, content: List[str]\n ) -> List[int]:\n \"\"\"\n By definition LISTSERV logs separate new messages by a row\n of 73 equal signs.\n\n Args:\n content: The content of one LISTSERV-file.\n\n Returns:\n List of line numbers where header starts\n \"\"\"\n return [\n line_nr for line_nr, line in enumerate(content) if \"=\" * 73 in line\n ]\n\n def to_dict(self) -> 
Dict[str, List[str]]:\n \"\"\"\n Place all message into a dictionary of the form:\n dic = {\n \"Subject\": [messages[0], ... , messages[n]],\n .\n .\n .\n \"ContentType\": [messages[0], ... , messages[n]]\n }\n \"\"\"\n # initialize dictionary\n dic = {}\n for key in list(self.messages[0].to_dict().keys()):\n dic[key] = []\n # run through messages\n for msg in self.messages:\n # run through message attributes\n for key, value in msg.to_dict().items():\n dic[key].append(value)\n return dic\n\n def to_pandas_dataframe(self) -> pd.DataFrame:\n return pd.DataFrame.from_dict(self.to_dict())\n\n def to_mbox(self, dir_out: str, filename: Optional[str] = None):\n \"\"\"\n Safe mail list to .mbox files.\n\n Args:\n \"\"\"\n if filename is None:\n filepath = f\"{dir_out}/{self.name}.mbox\"\n else:\n filepath = f\"{dir_out}/{filename}.mbox\"\n first = True\n for msg in self.messages:\n if first:\n msg.to_mbox(filepath, mode=\"w\")\n first = False\n else:\n msg.to_mbox(filepath, mode=\"a\")\n\n\nclass ListservArchive(object):\n \"\"\"\n This class handles a public mailing list archive that uses the\n LISTSERV 16.5 format.\n An archive is a list of ListservList elements.\n\n Parameters\n ----------\n name\n The of whom the archive is (e.g. 3GPP, IEEE, ...)\n url\n The URL where the archive lives\n lists\n A list containing the mailing lists as `ListservList` types\n\n Methods\n -------\n from_url\n from_mailing_lists\n get_lists\n get_sections\n to_dict\n to_pandas_dataframe\n to_mbox\n\n Example\n -------\n arch = ListservArchive.from_url(\n \"3GPP\",\n \"https://list.etsi.org/scripts/wa.exe?\",\n \"https://list.etsi.org/scripts/wa.exe?HOME\",\n select={\n \"years\": (2020, 2021),\n \"months\": \"January\",\n \"weeks\": [1,5],\n \"fields\": \"header\",\n },\n )\n \"\"\"\n\n def __init__(self, name: str, url: str, lists: List[ListservList]):\n self.name = name\n self.url = url\n self.lists = lists\n\n def __len__(self):\n return len(self.lists)\n\n def __iter__(self):\n return iter(self.lists)\n\n def __getitem__(self, index):\n return self.lists[index]\n\n @classmethod\n def from_url(\n cls,\n name: str,\n url_root: str,\n url_home: str,\n select: dict,\n url_login: str = \"https://list.etsi.org/scripts/wa.exe?LOGON\",\n login: Optional[Dict[str, str]] = {\"username\": None, \"password\": None},\n session: Optional[str] = None,\n ) -> \"ListservArchive\":\n \"\"\"\n Create ListservArchive from a given URL.\n\n Args:\n name:\n url_root:\n url_home:\n select:\n \"\"\"\n session = get_auth_session(url_login, **login)\n lists = cls.get_lists_from_url(url_root, url_home, select, session)\n return cls.from_mailing_lists(name, url_root, lists, select)\n\n @classmethod\n def from_mailing_lists(\n cls,\n name: str,\n url_root: str,\n url_mailing_lists: Union[List[str], List[ListservList]],\n select: dict,\n url_login: str = \"https://list.etsi.org/scripts/wa.exe?LOGON\",\n login: Optional[Dict[str, str]] = {\"username\": None, \"password\": None},\n session: Optional[str] = None,\n ) -> \"ListservArchive\":\n \"\"\"\n Create ListservArchive from a given list of 'ListservList'.\n\n Args:\n name:\n url_root:\n url_mailing_lists:\n\n \"\"\"\n if isinstance(url_mailing_lists[0], str):\n if session is None:\n session = get_auth_session(url_login, **login)\n lists = []\n for idx, url in enumerate(url_mailing_lists):\n lists.append(\n ListservList.from_url(\n name=idx,\n url=url,\n select=select,\n session=session,\n )\n )\n else:\n lists = url_mailing_lists\n return cls(name, url_root, lists)\n\n @staticmethod\n def 
get_lists_from_url(\n url_root: str,\n url_home: str,\n select: dict,\n session: Optional[str] = None,\n ) -> List[ListservList]:\n \"\"\"\n Created dictionary of all lists in the archive.\n\n Args:\n\n Returns:\n archive_dict: the keys are the names of the lists and the value their url\n \"\"\"\n archive = []\n # run through archive sections\n for url in list(\n ListservArchive.get_sections(url_root, url_home).keys()\n )[:1]:\n soup = get_website_content(url)\n a_tags_in_section = soup.select(\n 'a[href*=\"A0=\"][onmouseover*=\"showDesc\"][onmouseout*=\"hideDesc\"]',\n )\n\n # run through archive lists in section\n for a_tag in a_tags_in_section:\n value = urllib.parse.urljoin(url_root, a_tag.get(\"href\"))\n key = value.split(\"A0=\")[-1]\n mlist = ListservList.from_url(\n name=key,\n url=value,\n select=select,\n session=session,\n )\n if len(mlist) != 0:\n archive.append(mlist)\n return archive\n\n def get_sections(url_root: str, url_home: str) -> int:\n \"\"\"\n Get different sections of archive. On the website they look like:\n [3GPP] [3GPP–AT1] [AT2–CONS] [CONS–EHEA] [EHEA–ERM_] ...\n\n Returns:\n If sections exist, it returns their urls and names. Otherwise it returns\n the url_home.\n \"\"\"\n soup = get_website_content(url_home)\n sections = soup.select(\n 'a[href*=\"INDEX=\"][href*=\"p=\"]',\n )\n archive_sections_dict = {}\n if sections:\n for sec in sections:\n key = urllib.parse.urljoin(url_root, sec.get(\"href\"))\n value = sec.text\n if value in [\"Next\", \"Previous\"]:\n continue\n archive_sections_dict[key] = value\n # TODO check that p=1 is included\n else:\n archive_sections_dict[url_home] = \"Home\"\n return archive_sections_dict\n\n def to_dict(self) -> Dict[str, List[str]]:\n \"\"\"\n Place all message in all lists into a dictionary of the form:\n dic = {\n \"Subject\": [messages[0], ... , messages[n]],\n .\n .\n .\n \"ListName\": [messages[0], ... 
, messages[n]]\n }\n \"\"\"\n # initialize dictionary\n dic = {}\n for key in list(self.lists[0].messages[0].to_dict().keys()):\n dic[key] = []\n dic[\"ListName\"] = []\n # run through lists\n for mlist in self.lists:\n # run through messages\n for msg in mlist.messages:\n # run through message attributes\n for key, value in msg.to_dict().items():\n dic[key].append(value)\n dic[\"ListName\"].append(mlist.name)\n return dic\n\n def to_pandas_dataframe(self) -> pd.DataFrame:\n return pd.DataFrame.from_dict(self.to_dict())\n\n def to_mbox(self, dir_out: str):\n \"\"\"\n Save Archive content to .mbox files\n \"\"\"\n for llist in self.lists:\n llist.to_mbox(dir_out)\n\n\ndef get_auth_session(\n url_login: str, username: str, password: str\n) -> requests.Session:\n \"\"\" Create AuthSession \"\"\"\n # ask user for login keys\n username, password = get_login_from_terminal(username, password)\n if username is None or password is None:\n # continue without authentication\n return None\n else:\n # Start the AuthSession\n session = requests.Session()\n # Create the payload\n payload = {\n \"LOGIN1\": \"\",\n \"Y\": username,\n \"p\": password,\n \"X\": \"\",\n }\n # Post the payload to the site to log in\n session.post(url_login, data=payload)\n return session\n\n\ndef get_login_from_terminal(\n username: Union[str, None],\n password: Union[str, None],\n file_auth: str = \"../config/authentication.yaml\",\n) -> Tuple[Union[str, None]]:\n \"\"\"\n Get login key from user during run time if 'username' and/or 'password' is 'None'.\n Return 'None' if no reply within 15 sec.\n \"\"\"\n if username is None or password is None:\n record = True\n else:\n record = False\n if username is None:\n username = ask_for_input(\"Enter your Email: \")\n if password is None:\n password = ask_for_input(\"Enter your Password: \")\n if record and isinstance(username, str) and isinstance(password, str):\n loginkey_to_file(username, password, file_auth)\n return username, password\n\n\ndef ask_for_input(request: str) -> Union[str, None]:\n timeout = 15\n end_time = time.time() + timeout\n while time.time() < end_time:\n reply = input(request)\n try:\n assert isinstance(reply, str)\n break\n except Exception:\n reply = None\n continue\n return reply\n\n\ndef loginkey_to_file(\n username: str,\n password: str,\n file_auth: str,\n) -> None:\n \"\"\" Safe login key to yaml \"\"\"\n file = open(file_auth, \"w\")\n file.write(f\"username: '{username}'\\n\")\n file.write(f\"password: '{password}'\")\n file.close()\n\n\ndef get_website_content(\n url: str,\n session: Optional[requests.Session] = None,\n) -> BeautifulSoup:\n \"\"\" Get HTML code from website \"\"\"\n # TODO: include option to change BeautifulSoup args\n if session is None:\n sauce = requests.get(url)\n assert sauce.status_code == 200\n soup = BeautifulSoup(sauce.content, \"lxml\")\n else:\n sauce = session.get(url)\n soup = BeautifulSoup(sauce.text, \"lxml\")\n return soup\n\n\ndef get_all_file_from_directory(directory: str, file_dsc: str) -> List[str]:\n \"\"\" Get paths of all files matching file_dsc in directory \"\"\"\n template = f\"{directory}{file_dsc}\"\n file_paths = glob.glob(template)\n return file_paths\n"
] | [
[
"numpy.max",
"numpy.min"
]
] |
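The only tagged APIs in the row above, `numpy.min` and `numpy.max`, appear in the `filtr` type-dispatch of `get_index_of_elements_in_selection`. A minimal standalone sketch of that selection logic (the function name and data are mine, not from the repo):

```python
import numpy as np

def indices_in_selection(times, filtr):
    """Tuple selects a closed range, list selects members, scalar selects equality."""
    if isinstance(filtr, tuple):
        cond = lambda x: np.min(filtr) <= x <= np.max(filtr)
    elif isinstance(filtr, list):
        cond = lambda x: x in filtr
    else:
        cond = lambda x: x == filtr
    return [idx for idx, t in enumerate(times) if cond(t)]

print(indices_in_selection([2019, 2020, 2021, 2022], (2020, 2021)))  # [1, 2]
print(indices_in_selection(["January", "July"], "July"))             # [1]
```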
bhigy/discrete-repr | [
"3d4a4fc3833df3a1fa287c78c7402ce6df09abd4"
] | [
"metrics.py"
] | [
"from collections import Counter\nfrom itertools import groupby\nfrom math import log2\nimport numpy as np\n\n\ndef segments_start(array):\n return [i for i in range(len(array)) if i == 0 or array[i] != array[i-1]]\n\n\ndef split_sequences(array, start):\n end = start[1:] + [len(array)]\n return [array[s:e] for s, e in zip(start, end)]\n\n\ndef coverage_top_1(labels, codes):\n '''\n Computes the coverage of label segments by the most frequent co-occurring\n code.\n '''\n start = segments_start(labels)\n segments = split_sequences(codes, start)\n return [sorted(Counter(s).values())[-1] / len(s) for s in segments]\n\n\ndef compute_joint_probability(x, y):\n labels_x = np.unique(x)\n idx_x = {v: i for i, v in enumerate(labels_x)}\n labels_y = np.unique(y)\n idx_y = {v: i for i, v in enumerate(labels_y)}\n counts_xy = np.zeros([len(labels_x), len(labels_y)])\n for xi, yi in zip(x, y):\n counts_xy[idx_x[xi], idx_y[yi]] += 1\n return labels_x, labels_y, counts_xy / len(x)\n\n\ndef conditional_entropy(x, y):\n labels_x, labels_y, p_xy = compute_joint_probability(x, y)\n p_y = np.sum(p_xy, axis=0)\n h_x_y = 0\n for i_x in range(len(labels_x)):\n for i_y in range(len(labels_y)):\n if p_xy[i_x, i_y] > 0:\n h_x_y -= p_xy[i_x, i_y] * log2(p_xy[i_x, i_y] / p_y[i_y])\n return h_x_y\n\n\ndef count_repetitions(array):\n return [len(list(v)) for _, v in groupby(array)]\n"
] | [
[
"numpy.sum",
"numpy.unique"
]
] |
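A worked check of the two main metrics in this row: `conditional_entropy(x, y)` estimates H(x|y) from co-occurrence counts, so it is zero exactly when every code occurs under a single label, while `coverage_top_1` reports, per label segment, the share claimed by the most frequent code. A small run, assuming the file above is saved as metrics.py:

```python
# assumes the file above is saved as metrics.py on the import path
from metrics import conditional_entropy, coverage_top_1

labels = [0, 0, 0, 1, 1, 1]
codes = [5, 5, 7, 3, 3, 3]  # code 7 intrudes into the first label segment

print(coverage_top_1(labels, codes))       # [0.666..., 1.0]
print(conditional_entropy(labels, codes))  # 0.0: each code maps to one label
```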
shivammalviya712/Real-Time-Trigger-Word-Detection | [
"7ad9144d31ef407f7326750633471dcb30cb5e46"
] | [
"code/realtime.py"
] | [
"\"\"\"Implement the model in real time.\"\"\"\n\n# Third party modules\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport sounddevice as sd\nfrom pydub import AudioSegment\nfrom pydub.playback import play\n\n\nclass Realtime:\n \"\"\"Implement the model in real time.\"\"\"\n def __init__(self, settings):\n \"\"\"Initialise the attributes.\"\"\"\n self.Ty = settings.Ty\n self.Tx = settings.Tx\n self.Tnew = settings.Tnew\n self.n_freq = settings.n_freq\n self.fs = settings.fs\n self.duration = settings.duration\n self.threshold = settings.threshold\n self.new_x = None\n self.chime = AudioSegment.from_wav(\n './dataset/activate/chime/chime.wav')\n self.x = np.zeros((1, self.Tx, self.n_freq))\n self.new_audio = np.zeros(shape=(int(self.Tnew * self.fs), 2))\n\n sd.default.samplerate = self.fs\n sd.default.channels = 2\n\n \n def refresh_audio(self):\n \"\"\"It adds the spectrogram of new audio\n to x.\n \"\"\"\n self.new_audio = sd.rec(frames=int(self.Tnew * self.fs))\n sd.wait()\n self.new_x = self.spectrogram(self.new_audio).T\n self.x[0, :self.Tx-len(self.new_x)] = self.x[0, len(self.new_x):]\n self.x[0, self.Tx-len(self.new_x):] = self.new_x\n\n\n\n def spectrogram(self, sound, plotting=False):\n \"\"\"It generates the spectrogram\n of the sound given.\n \n # Arguments\n sound: ndarray\n The recorded sound.\n\n # Returns\n x: ndarray\n The spectrogram of the sound.\n \"\"\"\n nfft = 200\n noverlap = 120\n nchannels = sound.ndim\n if nchannels == 1:\n x, freqs, bins, im = plt.specgram(\n x=sound, NFFT=nfft, Fs=self.fs, noverlap=noverlap)\n elif nchannels == 2:\n x, freqs, bins, im = plt.specgram(\n x=sound[:, 0], NFFT=nfft, Fs=self.fs, noverlap=noverlap)\n else:\n print('The audio has more than 2 channels') \n \n if plotting==True:\n plt.show(block=False)\n plt.pause(0.001)\n\n return x\n\n\n def check_trigger(self, y):\n \"\"\"It checks if the wake word is\n predicted or not. If the wake\n word is present then it produces\n a chime sound.\n \n # Arguments\n y: ndarray\n Prediction of our model for\n Realtime.x as the input.\n \"\"\"\n for i in range(self.Ty-1, -1, -1):\n if y[0, i] > self.threshold:\n play(self.chime)\n break "
] | [
[
"matplotlib.pyplot.specgram",
"matplotlib.pyplot.show",
"numpy.zeros",
"matplotlib.pyplot.pause"
]
] |
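The `spectrogram` method above delegates everything to `matplotlib.pyplot.specgram` with `NFFT=200` and `noverlap=120`; with those settings the frequency axis has NFFT/2 + 1 = 101 bins. A quick headless check on fake audio (the sample rate is illustrative, not from the repo's settings):

```python
import matplotlib
matplotlib.use("Agg")  # headless backend, no window needed
import matplotlib.pyplot as plt
import numpy as np

fs = 44100
sound = np.random.randn(fs)  # one second of fake mono audio
x, freqs, bins, im = plt.specgram(x=sound, NFFT=200, Fs=fs, noverlap=120)
print(x.shape)  # (101, ...) -> (n_freq, time steps); transposed before use as model input
```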
mfarthin/PyDMD | [
"ac2c800cfa9fb23ab110d2b2957b5681e2aa5055"
] | [
"pydmd/hodmd.py"
] | [
"\"\"\"\nDerived module from dmdbase.py for higher order dmd.\n\nReference:\n- S. L Clainche, J. M. Vega, Higher Order Dynamic Mode Decomposition.\nJournal on Applied Dynamical Systems, 16(2), 882-925, 2017.\n\"\"\"\nimport numpy as np\n\nfrom .dmdbase import DMDBase\nfrom .utils import compute_tlsq\n\n\nclass HODMD(DMDBase):\n \"\"\"\n Higher Order Dynamic Mode Decomposition\n\n :param svd_rank: the rank for the truncation; If 0, the method computes the\n optimal rank and uses it for truncation; if positive interger, the\n method uses the argument for the truncation; if float between 0 and 1,\n the rank is the number of the biggest singular values that are needed\n to reach the 'energy' specified by `svd_rank`; if -1, the method does\n not compute truncation.\n :type svd_rank: int or float\n :param int tlsq_rank: rank truncation computing Total Least Square. Default\n is 0, that means no truncation.\n :param bool exact: flag to compute either exact DMD or projected DMD.\n Default is False.\n :param opt: argument to control the computation of DMD modes amplitudes. See\n :class:`DMDBase`. Default is False.\n :type opt: bool or int\n :param rescale_mode: Scale Atilde as shown in\n 10.1016/j.jneumeth.2015.10.010 (section 2.4) before computing its\n eigendecomposition. None means no rescaling, 'auto' means automatic\n rescaling using singular values, otherwise the scaling factors.\n :type rescale_mode: {'auto'} or None or numpy.ndarray\n :param bool forward_backward: If True, the low-rank operator is computed\n like in fbDMD (reference: https://arxiv.org/abs/1507.02264). Default is\n False.\n :param int d: the new order for spatial dimension of the input snapshots.\n Default is 1.\n :param sorted_eigs: Sort eigenvalues (and modes/dynamics accordingly) by\n magnitude if `sorted_eigs='abs'`, by real part (and then by imaginary\n part to break ties) if `sorted_eigs='real'`. Default: False.\n :type sorted_eigs: {'real', 'abs'} or False\n :param reconstruction_method: Due to how HODMD is defined, we have several\n versions of the same snapshot. 
The parameter `reconstruction_method`\n allows changing how these versions are combined in `reconstructed_data`.\n If `'first'`, only the first version is selected (default behavior);\n if `'mean'` we take the mean of all the versions; if the parameter is an\n array of floats of size `d`, the return value is the weighted average\n of the versions.\n :type reconstruction_method: {'first', 'mean'} or array-like\n \"\"\"\n\n def __init__(self, svd_rank=0, tlsq_rank=0, exact=False, opt=False,\n rescale_mode=None, forward_backward=False, d=1, sorted_eigs=False,\n reconstruction_method='first'):\n super(HODMD, self).__init__(svd_rank=svd_rank, tlsq_rank=tlsq_rank,\n exact=exact, opt=opt, rescale_mode=rescale_mode,\n sorted_eigs=sorted_eigs)\n self._d = d\n\n if isinstance(reconstruction_method, list):\n if len(reconstruction_method) != d:\n raise ValueError('The length of the array of weights must be equal to d')\n elif isinstance(reconstruction_method, np.ndarray):\n if reconstruction_method.ndim > 1 or reconstruction_method.shape[0] != d:\n raise ValueError('The length of the array of weights must be equal to d')\n self._reconstruction_method = reconstruction_method\n\n @property\n def d(self):\n return self._d\n\n def reconstructions_of_timeindex(self, timeindex=None):\n rec = super(HODMD, self).reconstructed_data\n space_dim = rec.shape[0] // self.d\n time_instants = rec.shape[1] + self.d - 1\n\n # for each time instance, we take the mean of all its appearences.\n # each snapshot appears at most d times (for instance, the first and the\n # last appear only once).\n reconstructed_snapshots = np.full((time_instants, self.d, space_dim), np.nan, dtype=np.complex128)\n\n for time_slice_idx in range(rec.shape[1]):\n time_slice = rec[:, time_slice_idx]\n\n for i in range(self.d):\n mx = time_slice[space_dim * i : space_dim * (i + 1)]\n if not np.ma.is_masked(mx):\n reconstructed_snapshots[time_slice_idx + i, i] = mx\n\n if timeindex is None:\n return reconstructed_snapshots\n else:\n return reconstructed_snapshots[timeindex]\n\n @property\n def reconstructed_data(self):\n rec = self.reconstructions_of_timeindex()\n rec = np.ma.array(rec, mask=np.isnan(rec))\n\n if self._reconstruction_method == 'first':\n return rec[:,0].T\n elif self._reconstruction_method == 'mean':\n return np.mean(rec, axis=1).T\n elif (isinstance(self._reconstruction_method, list) or\n isinstance(self._reconstruction_method, np.ndarray)):\n return np.average(rec, axis=1, weights=self._reconstruction_method).T\n else:\n raise ValueError(\"The reconstruction method wasn't recognized: {}\"\n .format(self._reconstruction_method))\n\n def fit(self, X):\n \"\"\"\n Compute the Dynamic Modes Decomposition to the input data.\n\n :param X: the input snapshots.\n :type X: numpy.ndarray or iterable\n \"\"\"\n snp, self._snapshots_shape = self._col_major_2darray(X)\n self._snapshots = np.concatenate(\n [\n snp[:, i:snp.shape[1] - self.d + i + 1]\n for i in range(self.d)\n ],\n axis=0)\n\n n_samples = self._snapshots.shape[1]\n X = self._snapshots[:, :-1]\n Y = self._snapshots[:, 1:]\n\n X, Y = compute_tlsq(X, Y, self.tlsq_rank)\n U, s, V = self.operator.compute_operator(X,Y)\n\n # Default timesteps\n self.original_time = {'t0': 0, 'tend': n_samples - 1, 'dt': 1}\n self.dmd_time = {'t0': 0, 'tend': n_samples - 1, 'dt': 1}\n\n self._b = self._compute_amplitudes()\n\n return self\n"
] | [
[
"numpy.ma.is_masked",
"numpy.isnan",
"numpy.full",
"numpy.average",
"numpy.mean"
]
] |
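The heart of HODMD's `fit` above is stacking `d` time-shifted copies of the snapshot matrix before the DMD step, which is why each snapshot later reappears in up to `d` reconstructions. The same `np.concatenate` construction on a toy matrix makes the Hankel-like layout visible:

```python
import numpy as np

d = 3
snapshots = np.arange(10).reshape(1, 10)  # one spatial dim, 10 time steps
stacked = np.concatenate(
    [snapshots[:, i:snapshots.shape[1] - d + i + 1] for i in range(d)],
    axis=0)
print(stacked.shape)  # (3, 8): row k is the series shifted by k steps
print(stacked[:, 0])  # [0 1 2] -> each column is a length-d sliding window
```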
martahal/DeepLearning | [
"c3a70a117c2f3417832c7caecd3baf6cd9862ae2"
] | [
"GenerativeModelling/gen_autoencoder_routine.py"
] | [
"from GenerativeModelling.Autoencoder import Autoencoder\nfrom GenerativeModelling.Encoder import Encoder\nfrom GenerativeModelling.Decoder import Decoder\nfrom GenerativeModelling.verification_net import VerificationNet\nfrom SemiSupervisedLearning import visualisations\nfrom GenerativeModelling.Trainer import Trainer\nfrom GenerativeModelling.stacked_mnist import StackedMNISTData, DataMode\nfrom GenerativeModelling import utils\n\nimport torch\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nimport pathlib\n\n\nclass Generative_AE_Routine:\n\n def __init__(\n self,\n data,\n autoencoder_learning_rate: float,\n autoencoder_loss_function: str,\n autoencoder_optimizer: str,\n autoencoder_epochs: int,\n\n\n latent_vector_size: int,\n batch_size: int,\n num_samples: int,\n save_path: str\n\n ):\n self.data = utils.get_data_to_tensors(data, batch_size)\n self.image_dimensions = (data.test_images.shape[-1], data.test_images.shape[-2], data.test_images.shape[-3])\n self.num_samples = num_samples\n self.batch_size = batch_size\n self.latent_vector_size = latent_vector_size\n\n\n self.encoder = Encoder(\n input_shape=self.image_dimensions,\n num_filters=16,\n last_conv_layer_dim= (16,10, 10), #(32, 4, 4),\n output_vector_size=latent_vector_size)\n\n self.decoder = Decoder(\n input_size=latent_vector_size,\n encoder_last_layer_dim=self.encoder.last_conv_layer_dim,\n hidden_filters=self.encoder.num_filters,\n output_size=self.image_dimensions)\n\n self.autoencoder = Autoencoder(self.encoder, self.decoder, self.image_dimensions)\n\n self.autoencoder_trainer = Trainer(\n batch_size=batch_size,\n lr=autoencoder_learning_rate,\n epochs=autoencoder_epochs,\n model=self.autoencoder,\n data=self.data,\n loss_function=autoencoder_loss_function,\n optimizer=autoencoder_optimizer,\n early_stop_count = 4,\n model_save_path=save_path,\n )\n\n def train_autoencoder(self):\n #self.autoencoder_trainer.load_best_model()\n self.autoencoder_trainer.do_autoencoder_train()\n self.plot_autoencoder_training(self.autoencoder_trainer)\n\n def reconstruct_test_data(self, load_model_path=None):\n if load_model_path is not None:\n # self.vae_trainer.load_best_model() Does not return the model but sets the self.model in trainer to be best model\n # see if we can do:\n self.autoencoder.load_state_dict(torch.load(pathlib.Path(load_model_path).joinpath(\"best.ckpt\")))\n print(f'Loaded model from {load_model_path}')\n #selecting a fixed sample of the test data we like to visualize\n visualisation_data = self.data[1]\n images, reconstructions, labels = utils.make_reconstructions(\n self.autoencoder,\n visualisation_data,\n num_images=25,\n batch_size=self.batch_size,\n image_dimensions=self.image_dimensions,\n title=f'AE_z_size:{self.latent_vector_size}_lr_{self.autoencoder_trainer.lr}_epochs:{self.autoencoder_trainer.epochs}'\n )\n # checking quality of reproduced images\n return images, reconstructions, labels\n\n def anomaly_detection(self, k, load_model_path=None):\n if load_model_path is not None:\n # self.vae_trainer.load_best_model() Does not return the model but sets the self.model in trainer to be best model\n # see if we can do:\n self.autoencoder.load_state_dict(torch.load(pathlib.Path(load_model_path).joinpath(\"best.ckpt\")))\n print(f'Loaded model from {load_model_path}')\n # Calculate reconstruction loss (MSE) for test data\n # plot the k most anomalous images\n images, reconstructions, losses = self.autoencoder_trainer.ae_detect_anomaly_by_loss()\n\n worst_indices = np.argsort(losses)[-1:-(k + 1):-1]\n 
print(\"Anomaly loss values:\", [losses[index] for index in worst_indices])\n anomalies = np.array([images[index] for index in worst_indices])\n visualisations.show_images_and_reconstructions(anomalies, f'AE_Anomalies_latent_size:{self.latent_vector_size}_lr_{self.autoencoder_trainer.lr}_epochs:{self.autoencoder_trainer.epochs}')\n\n\n\n def generate_samples(self, load_model_path=None):\n if load_model_path is not None:\n # self.vae_trainer.load_best_model() Does not return the model but sets the self.model in trainer to be best model\n # see if we can do:\n self.autoencoder.load_state_dict(torch.load(pathlib.Path(load_model_path).joinpath(\"best.ckpt\")))\n print(f'Loaded model from {load_model_path}')\n Z = self.get_latent_vector_and_classes(self.autoencoder.encoder, self.num_samples)#, self.dataloaders)\n generated_images = utils.generate_images_from_Z(Z, self.autoencoder.decoder, self.image_dimensions, title=\"Gen_AE_generated_images\")\n return generated_images\n\n def check_autoencoder_performance(self, verification_net, tolerance, images, labels=None, load_model_path=None):\n if load_model_path is not None:\n # self.vae_trainer.load_best_model() Does not return the model but sets the self.model in trainer to be best model\n # see if we can do:\n self.autoencoder.load_state_dict(torch.load(pathlib.Path(load_model_path).joinpath(\"best.ckpt\")))\n print(f'Loaded model from {load_model_path}')\n coverage = verification_net.check_class_coverage(\n data=images,\n tolerance=tolerance\n )\n print(f\"Coverage: {100 * coverage:.2f}%\")\n if labels is not None:\n #if coverage != 0.0:\n predictability, accuracy = verification_net.check_predictability(\n data=images,\n correct_labels=labels,\n tolerance=tolerance\n )\n print(f\"Predictability: {100 * predictability:.2f}%\")\n print(f\"Accuracy: {100 * accuracy:.2f}%\")\n else:\n if coverage != 0.0:\n predictability, accuracy = verification_net.check_predictability(\n data=images,\n tolerance=tolerance\n )\n print(f\"Predictability: {100 * predictability:.2f}%\")\n\n @staticmethod\n def get_latent_vector_and_classes(encoder, n_samples):\n \"\"\"\n samples a random distribution of the latent vectors, Z\n :param encoder: The encoder that produces the latent vectors\n :param n_samples: number of samples from Z\n :return: a random sample of Z from the standard normal distribution\n \"\"\"\n p = torch.distributions.Normal(torch.zeros(encoder.output_vector_size), torch.ones(encoder.output_vector_size))\n temp_tensor = torch.ones(n_samples)\n Z = p.sample(sample_shape=temp_tensor.shape) # Wow, so ugly, but my brain hurts now\n return Z\n\n @staticmethod\n def plot_autoencoder_training(autoencoder_trainer):\n plt.figure(figsize=(10, 8))\n plt.title('Autoencoder loss')\n visualisations.plot_metric(autoencoder_trainer.train_history['loss'], label='Autoencoder training loss',\n averaged_plot=True)\n visualisations.plot_metric(autoencoder_trainer.validation_history['loss'], label='Autoencoder validation loss',\n averaged_plot=False)\n # plt.ylim(bottom=0, top=1)\n plt.legend()\n plt.savefig(f'figures/autoencoder_{autoencoder_trainer.loss_function}_{autoencoder_trainer.epochs}_training.png')\n\n\n\ndef main():\n torch.manual_seed(0)\n \"\"\" GENERATIVE AUTOENCODER ROUTINE\"\"\"\n batch_size = 16\n data_object = StackedMNISTData(mode=DataMode.MONO_FLOAT_COMPLETE, default_batch_size=batch_size)\n #instantiate verification network\n net = VerificationNet(force_learn=False)\n net.train(generator=data_object, epochs=5) # gen=data_object, makes sure we test on the 
same type of data as the model was trained on\n verification_tolerance = 0.8 if data_object.channels == 1 else 0.5\n\n autoencoder_learning_rate = 0.0002\n autoencoder_loss_function = 'MSE' #'binary_cross_entropy' # AVAILABLE 'binary_cross_entropy'\n autoencoder_optimizer = 'adam'#'SGD'# # AVAILABLE 'SGD' # #\n autoencoder_epochs = 1 # Optimal for MNIST: 3\n\n num_samples = 2000\n latent_vector_size = 64 # recommended for MNIST between 16 and 64\n gen_name = 'Test_gen_AE'\n gen_ae_save_path = f'checkpoints/gen_AE/{gen_name}'\n gen_autoencoder = Generative_AE_Routine(\n data_object,\n autoencoder_learning_rate,\n autoencoder_loss_function,\n autoencoder_optimizer,\n autoencoder_epochs,\n\n latent_vector_size,\n batch_size,\n num_samples,\n gen_ae_save_path\n )\n gen_autoencoder.train_autoencoder()\n images, reconstructions, labels = gen_autoencoder.reconstruct_test_data()\n #Check quality of reconstructions\n gen_autoencoder.check_autoencoder_performance(net, verification_tolerance, reconstructions, labels)\n#\n ##Generate samples\n #generated_images = gen_autoencoder.generate_samples()\n#\n ##check quality of generated images\n #gen_autoencoder.check_autoencoder_performance(net, verification_tolerance, generated_images)\n#\n #\"\"\" ANOMALY DETECTOR AUTOENCODER ROUTINE\"\"\"\n #data_object = StackedMNISTData(mode=DataMode.MONO_FLOAT_MISSING, default_batch_size=batch_size)\n #number_anom_images_to_show = 16\n #anom_name = 'Test_anom_AE'\n #anom_ae_save_path = f'checkpoints/anom_AE/{anom_name}/'\n #anom_autoencoder = Generative_AE_Routine(\n # data_object,\n # autoencoder_learning_rate,\n # autoencoder_loss_function,\n # autoencoder_optimizer,\n # autoencoder_epochs,\n #\n # latent_vector_size,\n # batch_size,\n # num_samples,\n # anom_ae_save_path\n #)\n #anom_autoencoder.train_autoencoder()\n#\n #anom_autoencoder.anomaly_detection(number_anom_images_to_show)\nif __name__ == '__main__':\n main()"
] | [
[
"torch.ones",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.figure",
"torch.manual_seed",
"matplotlib.pyplot.savefig",
"numpy.argsort",
"torch.zeros",
"matplotlib.pyplot.title",
"numpy.array"
]
] |
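`get_latent_vector_and_classes` above samples the latent prior through `torch.distributions.Normal`; the `temp_tensor` detour the author apologises for is equivalent to passing a `torch.Size` directly, as this minimal sketch shows:

```python
import torch

latent_size, n_samples = 64, 5
p = torch.distributions.Normal(torch.zeros(latent_size), torch.ones(latent_size))
Z = p.sample(sample_shape=torch.Size([n_samples]))  # no helper tensor needed
print(Z.shape)  # torch.Size([5, 64])
```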
mlepori1/Representations_Of_Syntax | [
"7a09004a1e443618ee6b1645e54224766c3965f1"
] | [
"Natural_and_Artificial/MARCC/test_aug500_const_1.py"
] | [
"import sys\n\nif not sys.warnoptions:\n import warnings\n warnings.simplefilter(\"ignore\")\n\nimport numpy as np\nimport csv\nimport copy\n\nfrom torch import optim\nfrom torch.nn import BCELoss\nfrom torch.optim import Adam\nimport torch\n\nfrom random import shuffle\nimport random\n\nimport models\nimport pickle\n\n\nembed_matrix = pickle.load(open('./data/embed_matrix.pkl', 'rb'))\nword2idx = pickle.load(open('./data/word2idx.pkl', 'rb'))\n\nany_attractors = pickle.load(open('./data/final_any_attractors.pkl', 'rb'))\none_attractor = pickle.load(open('./data/final_one_attractor.pkl', 'rb'))\ntwo_attractors = pickle.load(open('./data/final_two_attractors.pkl', 'rb'))\nthree_attractors = pickle.load(open('./data/final_three_attractors.pkl', 'rb'))\nfour_attractors = pickle.load(open('./data/final_four_attractors.pkl', 'rb'))\nno_attractors = pickle.load(open('./data/final_no_attractors.pkl', 'rb'))\n\nmodel = models.TreeLSTMClassifier(100, 100, len(word2idx.keys()), 'constituency', pretrained_embeddings=embed_matrix)\nmodel.load_state_dict(torch.load('./augmented_models/aug_const_1_500_model'))\n\nprint(\"No Attractors: \" + str(len(no_attractors)))\nprint(\"Any Attractors: \" + str(len(any_attractors)))\nprint(\"One Attractor: \" + str(len(one_attractor)))\nprint(\"Two Attractors: \" + str(len(two_attractors)))\nprint(\"Three Attractors: \" + str(len(three_attractors)))\nprint(\"Four Attractors: \" + str(len(four_attractors)))\n\n\n############################ Test on No Attractors Test Set\n\nprint('Running on No Attractors Set')\ncorrect = 0\n\nnot_processed = 0\nfor element in no_attractors:\n\n seq = element[0]\n const_tree = element[1]\n dep_tags = element[2]\n dep_tree = element[3]\n label = torch.FloatTensor(element[4])\n try:\n output = model(const_tree, dep_tree, dep_tags, seq)\n if (output > .5 and label == 1) or (output < .5 and label == 0):\n correct += 1\n except:\n not_processed += 1\n\nif not_processed != 0:\n print('Not Processed: ' + str(not_processed))\nprint('Accuracy on No Attractors: ' + str(correct/(len(no_attractors) - not_processed)))\n\n############################ Test on Attractors Test Set\n\nprint('Running on Any Attractors Test Set')\ncorrect = 0\n\nnot_processed = 0\nfor element in any_attractors:\n\n seq = element[0]\n const_tree = element[1]\n dep_tags = element[2]\n dep_tree = element[3]\n label = torch.FloatTensor(element[4])\n\n try:\n output = model(const_tree, dep_tree, dep_tags, seq)\n if (output > .5 and label == 1) or (output < .5 and label == 0):\n correct += 1\n except:\n not_processed += 1\n\nif not_processed != 0:\n print('Not Processed: ' + str(not_processed))\nprint('Accuracy on Any Attractors Test: ' + str(correct/(len(any_attractors) - not_processed)))\n\n\n############################ Test on One Attractors Test Set\n\nprint('Running on One Attractor Test Set')\ncorrect = 0\n\nnot_processed = 0\nfor element in one_attractor:\n\n seq = element[0]\n const_tree = element[1]\n dep_tags = element[2]\n dep_tree = element[3]\n label = torch.FloatTensor(element[4])\n\n try:\n output = model(const_tree, dep_tree, dep_tags, seq)\n if (output > .5 and label == 1) or (output < .5 and label == 0):\n correct += 1\n except:\n not_processed += 1\n\nif not_processed != 0:\n print(\"Not Processed: \" + str(not_processed))\nprint('Accuracy on One Attractor Test: ' + str(correct/(len(one_attractor) - not_processed)))\n\n\n############################ Test on Two Attractors Test Set\n\nprint('Running on Two Attractors Test Set')\ncorrect = 0\nnot_processed = 
0\n\nfor element in two_attractors:\n\n seq = element[0]\n const_tree = element[1]\n dep_tags = element[2]\n dep_tree = element[3]\n label = torch.FloatTensor(element[4])\n\n try:\n output = model(const_tree, dep_tree, dep_tags, seq)\n if (output > .5 and label == 1) or (output < .5 and label == 0):\n correct += 1\n except:\n not_processed += 1\n\nif len(two_attractors) != 0:\n\n if not_processed != 0:\n print(\"Not Processed: \" + str(not_processed))\n print('Accuracy on Two Attractors Test: ' + str(correct/(len(two_attractors) - not_processed)))\n\n############################ Test on Three Attractors Test Set\n\nprint('Running on Three Attractors Test Set')\ncorrect = 0\nnot_processed = 0\n\nfor element in three_attractors:\n\n seq = element[0]\n const_tree = element[1]\n dep_tags = element[2]\n dep_tree = element[3]\n label = torch.FloatTensor(element[4])\n\n try:\n output = model(const_tree, dep_tree, dep_tags, seq)\n if (output > .5 and label == 1) or (output < .5 and label == 0):\n correct += 1\n except:\n not_processed += 1\n\nif len(three_attractors) != 0:\n\n if not_processed != 0:\n print(\"Not Processed: \" + str(not_processed))\n print('Accuracy on Three Attractors Test: ' + str(correct/(len(three_attractors) - not_processed)))\n\n\n############################ Test on Four Attractors Test Set\n\nprint('Running on Four Attractors Test Set')\ncorrect = 0\nnot_processed = 0\n\nfor element in four_attractors:\n\n seq = element[0]\n const_tree = element[1]\n dep_tags = element[2]\n dep_tree = element[3]\n label = torch.FloatTensor(element[4])\n\n try:\n output = model(const_tree, dep_tree, dep_tags, seq)\n if (output > .5 and label == 1) or (output < .5 and label == 0):\n correct += 1\n except:\n not_processed += 1\n\nif len(four_attractors) != 0:\n if not_processed != 0:\n print(\"Not Processed: \" + str(not_processed))\n print('Accuracy on Four Attractors Test: ' + str(correct/(len(four_attractors) - not_processed)))"
] | [
[
"torch.FloatTensor",
"torch.load"
]
] |
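The script above repeats one evaluation block six times, once per attractor set. A hypothetical refactor of that block into a function (the names are mine, the five-field `element` layout and the 0.5 decision threshold follow the script):

```python
import torch

def accuracy_on(dataset, model):
    """Mirror of the repeated per-set loop: count thresholded hits, skip failures."""
    correct = not_processed = 0
    for seq, const_tree, dep_tags, dep_tree, raw_label in dataset:
        label = torch.FloatTensor(raw_label)
        try:
            output = model(const_tree, dep_tree, dep_tags, seq)
            if (output > .5 and label == 1) or (output < .5 and label == 0):
                correct += 1
        except Exception:
            not_processed += 1
    return correct / max(len(dataset) - not_processed, 1)
```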
VitaliyPavlyukov/AutoMLWhitebox | [
"4acd55624490707a7fbf036631533e29123bb1bd"
] | [
"autowoe/lib/types_handler/types_handler.py"
] | [
"import collections\n\nimport pandas as pd\n\nfrom typing import Dict, Hashable, Optional, Any\nfrom copy import deepcopy\n\nfrom .features_checkers_handlers import dates_handler, dates_checker, cat_checker\n\n\nclass TypesHandler:\n \"\"\"\n Class for automatic detection of feature types.\n Basic implementation of the processing order:\n\n 0.\n 0.a) Parse what the user specified\n 0.b) Dates are parsed WITH the seasonality given (\"m\", \"d\", \"wd\", \"h\", \"min\")\n (month, day, day of week, hour, minute)\n 1.\n If it is a string, then it is a category\n 2.\n If the ratio of shape[1] to the number of unique values is >> 5, then it is a category\n \"\"\"\n\n def __init__(self,\n train: pd.DataFrame,\n public_features_type: Dict[Hashable, Any],\n max_bin_count: Dict[Hashable, Optional[int]] = None,\n features_monotone_constraints: Optional[dict] = None):\n \"\"\"\n\n Args:\n train:\n public_features_type:\n max_bin_count:\n features_monotone_constraints:\n \"\"\"\n self.__train = deepcopy(train)\n self.__public_features_type = deepcopy(public_features_type)\n self.__private_features_type = dict()\n\n if max_bin_count is None:\n max_bin_count = {}\n self.__max_bin_count = collections.defaultdict(lambda: None, max_bin_count)\n\n if features_monotone_constraints is None:\n features_monotone_constraints = {}\n self.__features_monotone_constraints = collections.defaultdict(lambda: \"0\", features_monotone_constraints)\n\n @property\n def train(self):\n \"\"\"\n Read only\n\n Return:\n\n \"\"\"\n return self.__train\n\n @property\n def public_features_type(self):\n \"\"\"\n Read only\n\n Return:\n\n \"\"\"\n return self.__public_features_type\n\n @property\n def private_features_type(self):\n \"\"\"\n Read only\n\n Returns:\n\n \"\"\"\n return self.__private_features_type\n\n @property\n def max_bin_count(self):\n \"\"\"\n\n Returns:\n\n \"\"\"\n return self.__max_bin_count\n\n @property\n def features_monotone_constraints(self):\n \"\"\"\n\n Returns:\n\n \"\"\"\n return self.__features_monotone_constraints\n\n def __feature_handler(self, feature_name):\n \"\"\"\n\n Args:\n feature_name:\n\n Returns:\n\n \"\"\"\n if dates_checker(self.__train[feature_name]):\n new_features, feature_type = dates_handler(self.__train[feature_name])\n self.__public_features_type[feature_name] = feature_type\n for new_feature_name, new_feature in new_features:\n self.__train[new_feature_name] = new_feature\n self.__max_bin_count[new_feature_name] = self.max_bin_count[feature_name]\n self.__private_features_type[new_feature_name] = \"real\"\n self.__features_monotone_constraints[new_feature_name] = \\\n self.features_monotone_constraints[feature_name]\n\n elif cat_checker(self.__train[feature_name]):\n self.__public_features_type[feature_name] = \"cat\"\n self.__private_features_type[feature_name] = \"cat\"\n self.__features_monotone_constraints[feature_name] = \"1\"\n else:\n self.__public_features_type[feature_name] = \"real\"\n self.__private_features_type[feature_name] = \"real\"\n\n def transform(self):\n \"\"\"\n The main method of this class.\n If feature_type[feature] == None, the feature type is parsed.\n Otherwise the specified types are processed.\n Possible feature types:\n \"cat\"\n \"real\"\n (\"%Y%d%m\", (\"m\", \"d\", \"wd\", \"h\", \"min\"))\n\n Returns:\n\n \"\"\"\n for feature_name in self.public_features_type:\n if not self.public_features_type[feature_name]:\n self.__feature_handler(feature_name)\n elif isinstance(self.public_features_type[feature_name], tuple): # date format info was passed\n new_features, _ = 
dates_handler(self.train[feature_name], self.public_features_type[feature_name])\n for new_feature_name, new_feature in new_features:\n self.__train[new_feature_name] = new_feature\n self.__max_bin_count[new_feature_name] = self.max_bin_count[feature_name]\n self.__private_features_type[new_feature_name] = \"real\"\n self.__features_monotone_constraints[new_feature_name] = \\\n self.__features_monotone_constraints[feature_name]\n\n elif self.public_features_type[feature_name] == \"cat\":\n self.__private_features_type[feature_name] = \"cat\"\n self.__features_monotone_constraints[feature_name] = \"1\"\n\n elif self.public_features_type[feature_name] == \"real\":\n self.__private_features_type[feature_name] = \"real\"\n self.__train[feature_name] = pd.to_numeric(self.train[feature_name], errors=\"coerce\")\n\n else:\n raise ValueError(\"The specified data type is not supported\")\n\n return (self.train, self.public_features_type, self.private_features_type,\n self.max_bin_count, self.features_monotone_constraints)\n"
] | [
[
"pandas.to_numeric"
]
] |
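Two library behaviours the handler above leans on, shown in isolation: a `defaultdict` built from a lambda so unseen features get a default constraint or bin count, and `pd.to_numeric(..., errors="coerce")` turning unparseable entries into NaN. A minimal sketch (the feature names are mine):

```python
import collections
import pandas as pd

max_bin_count = collections.defaultdict(lambda: None, {"age": 10})
print(max_bin_count["age"], max_bin_count["income"])  # 10 None

s = pd.Series(["1", "2", "oops"])
print(pd.to_numeric(s, errors="coerce").tolist())  # [1.0, 2.0, nan]
```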
bombrun/GaiaLQSO | [
"b4d787a4d80732cbb5a3762c34298f2430dd0540"
] | [
"lens/sie/random.py"
] | [
"import numpy as np\nimport pandas as pd\nimport astropy.units as u\nimport healpy as hp\n\nfrom lens.sie.plot import *\n\ndef angle2pixel(ra_deg,dec_deg):\n \"\"\" return HEALPix pixel index (nside=4096, i.e. level 12, nested scheme)\"\"\"\n phi = ra_deg * np.pi / 180\n theta = np.pi/2 - (dec_deg * np.pi/180)\n return hp.ang2pix(4096,theta,phi,nest=True)\n\n\ndef lensedQSO(f,scale,w,y,dy,gy):\n \"\"\" to generate a lensed QSO\n f : SIE lens ellipticity parameter\n scale : a scale parameter (TODO link it to some physical parameter of the lens)\n w : lens orientation\n y : source position relative to the lens\n dy : source proper motion relative to the lens\n gy : source magnitude (assume that the magnitude is defined as 2.5 log10(flux))\n \"\"\"\n # locations of lens images in the source plane\n xs,phis = sie.solve(f,y[0],y[1])\n \n # compute image positions, proper motions and magnitudes \n ra = []\n dec = []\n pmra = []\n pmdec = []\n g = []\n R = np.array([[np.cos(w),np.sin(w)],[-np.sin(w),np.cos(w)]])\n for phi,x in zip(phis,xs) :\n dx = np.dot(R,np.dot(np.linalg.inv(sie.A(x,phi,f)),dy))\n ra.append(x*np.cos(phi+w)*scale)\n dec.append(x*np.sin(phi+w)*scale)\n pmra.append(dx[0]*scale)\n pmdec.append(dx[1]*scale)\n g.append(gy-2.5*np.log10(np.abs(sie.magnification(x,phi,f))))\n \n # set a pandas data frame to store the result\n res = pd.DataFrame()\n res['ra'] = ra\n res['dec'] = dec\n res['pmra'] = pmra\n res['pmdec'] = pmdec\n res['phot_g_mean_mag'] = g\n return res\n\ndef getSourceId(ra_rad,dec_rad):\n x = np.asarray(ra_rad)\n y = np.asarray(dec_rad)\n s=34359738368\n sourceid = angle2pixel(x*u.rad.to(u.deg),y*u.rad.to(u.deg))*s\n if x.size==1 :\n return sourceid + np.int64(np.random.uniform(0,s))\n else :\n return sourceid + np.int64(np.random.uniform(0,s,x.size))\n\ndef randomLQSO(verbose=False):\n \"\"\" a dummy random lensed QSO generator \"\"\"\n \n #scale \n scale = np.random.uniform(1,2)\n \n # lens parameter\n f = np.random.uniform()\n \n # relative source-lens position\n y = np.random.uniform(-0.5,0.5,2)\n \n # relative source-lens proper motion\n dy = np.random.normal(0,0.1,2)\n \n # source magnitude\n gy = np.random.uniform(18,20)\n \n # random lens orientation\n w = np.random.uniform(0,2*np.pi)\n \n # wrap the data\n data = f,scale,w,y,dy,gy\n \n # to visualise the lens\n if verbose :\n print(data)\n plotLensSourceImage(f,y[0],y[1])\n \n res = lensedQSO(*data)\n \n # sky location\n ra = np.random.uniform(0,2*np.pi)\n dec = np.random.uniform(-np.pi/2+0.1,np.pi/2-0.1) # a bit wrong as we exclude the pole\n while(np.abs(dec) < 10*u.deg.to(u.rad)) :\n dec = np.random.uniform(-np.pi/2+0.1,np.pi/2-0.1) # a bit wrong as we exclude the pole\n res['ra'] = ra + res.ra*u.arcsecond.to(u.rad)\n res['dec'] = dec + res.dec*u.arcsecond.to(u.rad)\n res['source_id'] = getSourceId(res.ra,res.dec)\n res.index=res.source_id\n res['qsoid'] = res.phot_g_mean_mag.idxmin()\n return res\n\ndef generateLQSO(n):\n \"\"\"return n random QSO in a pandas DataFrame\"\"\"\n res = [randomLQSO() for i in range(0,n)]\n return pd.concat(res)"
] | [
[
"numpy.random.uniform",
"pandas.DataFrame",
"numpy.abs",
"numpy.asarray",
"numpy.cos",
"pandas.concat",
"numpy.random.normal",
"numpy.sin"
]
] |
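`getSourceId` above follows the Gaia convention of packing a level-12 HEALPix index into the most significant bits of `source_id`, leaving 2**35 values per pixel for a random offset. The construction in isolation, assuming healpy is installed (the coordinates are illustrative):

```python
import numpy as np
import healpy as hp

nside = 4096  # HEALPix level 12, the resolution encoded in Gaia source_ids
ra_deg, dec_deg = 150.0, 2.0
phi = np.deg2rad(ra_deg)
theta = np.pi / 2 - np.deg2rad(dec_deg)  # colatitude, as healpy expects
pix = hp.ang2pix(nside, theta, phi, nest=True)

s = 34359738368  # 2**35 ids available per pixel
source_id = pix * s + np.int64(np.random.uniform(0, s))
print(source_id)
```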
DongChengdongHangZhou/CycleGAN-tiff | [
"e13a4d702ac6ce3e13af4946a1bc6657c1a2089e"
] | [
"util/visualizer.py"
] | [
"import numpy as np\nimport os\nimport sys\nimport ntpath\nimport time\nfrom . import util, html\nfrom subprocess import Popen, PIPE\nimport tifffile as tiff\n\n\nif sys.version_info[0] == 2:\n VisdomExceptionBase = Exception\nelse:\n VisdomExceptionBase = ConnectionError\n\n\ndef save_images(webpage, visuals, image_path, aspect_ratio=1.0, width=256):\n \"\"\"Save images to the disk.\n\n Parameters:\n webpage (the HTML class) -- the HTML webpage class that stores these imaegs (see html.py for more details)\n visuals (OrderedDict) -- an ordered dictionary that stores (name, images (either tensor or numpy) ) pairs\n image_path (str) -- the string is used to create image paths\n aspect_ratio (float) -- the aspect ratio of saved images\n width (int) -- the images will be resized to width x width\n\n This function will save images stored in 'visuals' to the HTML file specified by 'webpage'.\n \"\"\"\n image_dir = webpage.get_image_dir()\n short_path = ntpath.basename(image_path[0])\n name = os.path.splitext(short_path)[0]\n\n webpage.add_header(name)\n ims, txts, links = [], [], []\n\n for label, im_data in visuals.items():\n im = util.tensor2im(im_data)\n image_name = '%s_%s.png' % (name, label)\n save_path = os.path.join(image_dir, image_name)\n util.save_image(im, save_path, aspect_ratio=aspect_ratio)\n ims.append(image_name)\n txts.append(label)\n links.append(image_name)\n webpage.add_images(ims, txts, links, width=width)\n\n\ndef save_images_test(save_dir,visuals, image_path):\n \"\"\"Save images to the disk.\n\n Parameters:\n webpage (the HTML class) -- the HTML webpage class that stores these imaegs (see html.py for more details)\n visuals (OrderedDict) -- an ordered dictionary that stores (name, images (either tensor or numpy) ) pairs\n image_path (str) -- the string is used to create image paths\n aspect_ratio (float) -- the aspect ratio of saved images\n width (int) -- the images will be resized to width x width\n\n This function will save images stored in 'visuals' to the HTML file specified by 'webpage'.\n \"\"\"\n image_dir = save_dir\n short_path = ntpath.basename(image_path[0])\n name = os.path.splitext(short_path)[0]\n\n for label, im_data in visuals.items():\n im = (im_data[0][0]).cpu().numpy()\n image_name = '%s_%s.tiff' % (name, label)\n save_path = os.path.join(image_dir, image_name)\n tiff.imsave(save_path,im)\n\n\n\nclass Visualizer():\n \"\"\"This class includes several functions that can display/save images and print/save logging information.\n\n It uses a Python library 'visdom' for display, and a Python library 'dominate' (wrapped in 'HTML') for creating HTML files with images.\n \"\"\"\n\n def __init__(self, opt):\n \"\"\"Initialize the Visualizer class\n\n Parameters:\n opt -- stores all the experiment flags; needs to be a subclass of BaseOptions\n Step 1: Cache the training/test options\n Step 2: connect to a visdom server\n Step 3: create an HTML object for saveing HTML filters\n Step 4: create a logging file to store training losses\n \"\"\"\n self.opt = opt # cache the option\n self.display_id = opt.display_id\n self.use_html = opt.isTrain and not opt.no_html\n self.win_size = opt.display_winsize\n self.name = opt.name\n self.port = opt.display_port\n self.saved = False\n if self.display_id > 0: # connect to a visdom server given <display_port> and <display_server>\n import visdom\n self.ncols = opt.display_ncols\n self.vis = visdom.Visdom(server=opt.display_server, port=opt.display_port, env=opt.display_env)\n if not self.vis.check_connection():\n 
self.create_visdom_connections()\n\n if self.use_html: # create an HTML object at <checkpoints_dir>/web/; images will be saved under <checkpoints_dir>/web/images/\n self.web_dir = os.path.join(opt.checkpoints_dir, opt.name, 'web')\n self.img_dir = os.path.join(self.web_dir, 'images')\n print('create web directory %s...' % self.web_dir)\n util.mkdirs([self.web_dir, self.img_dir])\n # create a logging file to store training losses\n self.log_name = os.path.join(opt.checkpoints_dir, opt.name, 'loss_log.txt')\n with open(self.log_name, \"a\") as log_file:\n now = time.strftime(\"%c\")\n log_file.write('================ Training Loss (%s) ================\\n' % now)\n\n def reset(self):\n \"\"\"Reset the self.saved status\"\"\"\n self.saved = False\n\n def create_visdom_connections(self):\n \"\"\"If the program could not connect to Visdom server, this function will start a new server at port < self.port > \"\"\"\n cmd = sys.executable + ' -m visdom.server -p %d &>/dev/null &' % self.port\n print('\\n\\nCould not connect to Visdom server. \\n Trying to start a server....')\n print('Command: %s' % cmd)\n Popen(cmd, shell=True, stdout=PIPE, stderr=PIPE)\n\n def display_current_results(self, visuals, epoch, save_result):\n \"\"\"Display current results on visdom; save current results to an HTML file.\n\n Parameters:\n visuals (OrderedDict) - - dictionary of images to display or save\n epoch (int) - - the current epoch\n save_result (bool) - - if save the current results to an HTML file\n \"\"\"\n if self.display_id > 0: # show images in the browser using visdom\n ncols = self.ncols\n if ncols > 0: # show all the images in one visdom panel\n ncols = min(ncols, len(visuals))\n h, w = next(iter(visuals.values())).shape[:2]\n table_css = \"\"\"<style>\n table {border-collapse: separate; border-spacing: 4px; white-space: nowrap; text-align: center}\n table td {width: % dpx; height: % dpx; padding: 4px; outline: 4px solid black}\n </style>\"\"\" % (w, h) # create a table css\n # create a table of images.\n title = self.name\n label_html = ''\n label_html_row = ''\n images = []\n idx = 0\n for label, image in visuals.items():\n image_numpy = util.tensor2im(image)\n label_html_row += '<td>%s</td>' % label\n images.append(image_numpy.transpose([2, 0, 1]))\n idx += 1\n if idx % ncols == 0:\n label_html += '<tr>%s</tr>' % label_html_row\n label_html_row = ''\n white_image = np.ones_like(image_numpy.transpose([2, 0, 1])) * 255\n while idx % ncols != 0:\n images.append(white_image)\n label_html_row += '<td></td>'\n idx += 1\n if label_html_row != '':\n label_html += '<tr>%s</tr>' % label_html_row\n try:\n self.vis.images(images, nrow=ncols, win=self.display_id + 1,\n padding=2, opts=dict(title=title + ' images'))\n label_html = '<table>%s</table>' % label_html\n self.vis.text(table_css + label_html, win=self.display_id + 2,\n opts=dict(title=title + ' labels'))\n except VisdomExceptionBase:\n self.create_visdom_connections()\n\n else: # show each image in a separate visdom panel;\n idx = 1\n try:\n for label, image in visuals.items():\n image_numpy = util.tensor2im(image)\n self.vis.image(image_numpy.transpose([2, 0, 1]), opts=dict(title=label),\n win=self.display_id + idx)\n idx += 1\n except VisdomExceptionBase:\n self.create_visdom_connections()\n\n if self.use_html and (save_result or not self.saved): # save images to an HTML file if they haven't been saved.\n self.saved = True\n # save images to the disk\n for label, image in visuals.items():\n image_numpy = util.tensor2im(image)\n img_path = 
os.path.join(self.img_dir, 'epoch%.3d_%s.png' % (epoch, label))\n util.save_image(image_numpy, img_path)\n\n # update website\n webpage = html.HTML(self.web_dir, 'Experiment name = %s' % self.name, refresh=1)\n for n in range(epoch, 0, -1):\n webpage.add_header('epoch [%d]' % n)\n ims, txts, links = [], [], []\n\n for label, image_numpy in visuals.items():\n image_numpy = util.tensor2im(image)\n img_path = 'epoch%.3d_%s.png' % (n, label)\n ims.append(img_path)\n txts.append(label)\n links.append(img_path)\n webpage.add_images(ims, txts, links, width=self.win_size)\n webpage.save()\n\n def plot_current_losses(self, epoch, counter_ratio, losses):\n \"\"\"display the current losses on visdom display: dictionary of error labels and values\n\n Parameters:\n epoch (int) -- current epoch\n counter_ratio (float) -- progress (percentage) in the current epoch, between 0 to 1\n losses (OrderedDict) -- training losses stored in the format of (name, float) pairs\n \"\"\"\n if not hasattr(self, 'plot_data'):\n self.plot_data = {'X': [], 'Y': [], 'legend': list(losses.keys())}\n self.plot_data['X'].append(epoch + counter_ratio)\n self.plot_data['Y'].append([losses[k] for k in self.plot_data['legend']])\n try:\n self.vis.line(\n X=np.stack([np.array(self.plot_data['X'])] * len(self.plot_data['legend']), 1),\n Y=np.array(self.plot_data['Y']),\n opts={\n 'title': self.name + ' loss over time',\n 'legend': self.plot_data['legend'],\n 'xlabel': 'epoch',\n 'ylabel': 'loss'},\n win=self.display_id)\n except VisdomExceptionBase:\n self.create_visdom_connections()\n\n # losses: same format as |losses| of plot_current_losses\n def print_current_losses(self, epoch, iters, losses, t_comp, t_data):\n \"\"\"print current losses on console; also save the losses to the disk\n\n Parameters:\n epoch (int) -- current epoch\n iters (int) -- current training iteration during this epoch (reset to 0 at the end of every epoch)\n losses (OrderedDict) -- training losses stored in the format of (name, float) pairs\n t_comp (float) -- computational time per data point (normalized by batch_size)\n t_data (float) -- data loading time per data point (normalized by batch_size)\n \"\"\"\n message = '(epoch: %d, iters: %d, time: %.3f, data: %.3f) ' % (epoch, iters, t_comp, t_data)\n for k, v in losses.items():\n message += '%s: %.3f ' % (k, v)\n\n print(message) # print the message\n with open(self.log_name, \"a\") as log_file:\n log_file.write('%s\\n' % message) # save the message\n"
] | [
[
"numpy.array"
]
] |
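`save_images_test` above writes raw tensors out with `tifffile` rather than PNG, which keeps float pixel values intact. A minimal round-trip under that assumption (the file name is illustrative; the row's code uses the older `imsave` alias of `imwrite`):

```python
import numpy as np
import tifffile as tiff

im = np.random.rand(64, 64).astype(np.float32)  # fake single-channel float image
tiff.imwrite("sample.tiff", im)
back = tiff.imread("sample.tiff")
print(back.dtype, np.allclose(im, back))  # float32 True
```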
pjgao/Deep-Forest | [
"0fdec38b671ababfcc3476807fe512aa993d4fd4"
] | [
"tests/test_buffer.py"
] | [
"import os\nimport pytest\nimport numpy as np\n\nfrom deepforest import _io as io\n\n\nopen_buffer = io.Buffer(use_buffer=True,\n buffer_dir=\"./\",\n store_est=True,\n store_pred=True,\n store_data=True)\n\n\nclose_buffer = io.Buffer(use_buffer=False)\n\nX = np.zeros((42, 42), dtype=np.uint8)\n\n\ndef test_buffer_name():\n name = open_buffer.name\n assert isinstance(name, str)\n\n name = close_buffer.name\n assert name is None\n\n\ndef test_store_data_close_buffer():\n \"\"\"When `store_data` is False, the buffer directly returns the array.\"\"\"\n ret = close_buffer.cache_data(0, X)\n assert isinstance(ret, np.ndarray)\n\n\ndef test_store_data_open_buffer():\n \"\"\"\n When `store_data` is True, the buffer returns the memmap object of the\n dumped array.\n \"\"\"\n layer_idx = 0\n ret = open_buffer.cache_data(layer_idx, X, is_training_data=True)\n assert isinstance(ret, np.memmap)\n assert os.path.exists(os.path.join(\n open_buffer.data_dir_, \"joblib_train_{}.mmap\".format(layer_idx)))\n\n ret = open_buffer.cache_data(layer_idx, X, is_training_data=False)\n assert isinstance(ret, np.memmap)\n assert os.path.exists(os.path.join(\n open_buffer.data_dir_, \"joblib_test_{}.mmap\".format(layer_idx)))\n\n\ndef test_load_estimator_missing():\n err_msg = \"Missing estimator in the path: unknown.est.\"\n with pytest.raises(FileNotFoundError, match=err_msg):\n open_buffer.load_estimator(\"unknown.est\")\n\n\ndef test_load_predictor_missing():\n err_msg = \"Missing predictor in the path: unknown.est.\"\n with pytest.raises(FileNotFoundError, match=err_msg):\n open_buffer.load_predictor(\"unknown.est\")\n"
] | [
[
"numpy.zeros"
]
] |
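The open buffer under test dumps arrays to disk (via joblib inside the library) and hands back `np.memmap` views, which is what the assertions above check. The raw-numpy equivalent of that cache-and-reload pattern, with a path of my own choosing:

```python
import os.path
from tempfile import mkdtemp
import numpy as np

path = os.path.join(mkdtemp(), "joblib_train_0.mmap")
X = np.zeros((42, 42), dtype=np.uint8)

fp = np.memmap(path, dtype=X.dtype, mode="w+", shape=X.shape)
fp[:] = X[:]  # write the array through the map
fp.flush()

ret = np.memmap(path, dtype=X.dtype, mode="r", shape=X.shape)
print(isinstance(ret, np.memmap), ret.shape)  # True (42, 42)
```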
qwertpi/techdiff-textgen | [
"fd7578a24e11b96d86a92d2935b6153b1bea73f8"
] | [
"train.py"
] | [
"from json import dump\nfrom math import ceil\nfrom random import randint\nimport string\n\nfrom keras.layers import Input, Dense, Embedding\n#uncomment if using CPU\n##from keras.layers import LSTM\n#comment out the line below if using CPU\nfrom keras.layers import CuDNNLSTM as LSTM\nfrom keras.models import Model, load_model\nfrom keras.metrics import categorical_accuracy\nfrom keras.utils import to_categorical, plot_model\nimport numpy as np\n\ndef DataGenerator(x_data, y_data, batch_size, lookback_length):\n '''\n A generator that yields batches of training x and y data\n :param x_data: list, the input data that batches should be drawn from\n :param y_data: list, the output data that batches should be drawn from\n :param batch_size: int, the number of datapoints that should be yielded in each batch\n :param lookback_length: int, the length that the model expects every datapoint to be\n :returns: numpy array, an x batch\n :returns: numpy array, a y batch\n '''\n indexes = np.arange(len(x_data))\n while True:\n batch_indexes = np.random.choice(indexes, batch_size)\n\n X = []\n Y = []\n i = 0\n for i in batch_indexes:\n curr_X = x_data[i]\n if len(curr_X) >= 1:\n #cuts off a random number of words from the start of the datapoint as a form of dropout\n curr_X = curr_X[randint(0, len(curr_X) - 1):]\n #pads with 0s until the datapoint is lookback_length long\n while len(curr_X) < lookback_length:\n curr_X.append(0)\n X.append(curr_X)\n Y.append(y_data[i])\n\n X = np.array(X)\n Y = np.array(Y)\n yield X, Y\n \n#this is what will be removed from words\npunctuation = list(string.punctuation)+[\" \"]+[\"\"]\n\nlines = []\nwith open(\"data.txt\", \"r\", encoding=\"ascii\", errors=\"ignore\") as f:\n for line in f.read().splitlines():\n curr_line = \"\"\n #we aren't interested in blank lines\n if line != \"\":\n for word in line.split(\" \"):\n #there's a problem in the bash download pipeline that means the filenames get scattered through the data file\n if \".en\" not in word:\n for char in word:\n #removes punctuation characters\n if char not in string.punctuation:\n curr_line += char\n curr_line += \" \"\n lines.append(curr_line.lower())\n\n#generates a list of words which appear frequently enough to be tokenized\nvalid_words = []\nword_counts = {}\nfor line in lines:\n for word in line.split(\" \"):\n if word not in valid_words and word not in punctuation:\n try:\n word_counts[word] += 1\n #the threshold is currently set at 45 occurrences over the entire file but this is by no means definitely the best value\n if word_counts[word] > 45:\n valid_words.append(word)\n del word_counts[word]\n except KeyError:\n word_counts[word] = 1\n\n#how many words the model will take as input\n#I felt an input of 20 words struck a good balance but feel free to change\nmax_len = 20\n\nX = []\nY = []\n\nword_to_token = {}\n\n#generates the dictionary for word token lookups\ni = 2\nfor word in valid_words:\n word_to_token[word] = i\n i += 1\nword_count = max(word_to_token.values())\nprint(word_count)\n\ndef to_token(word):\n '''\n Takes a word and outputs the corresponding token\n :param word: string, the word to be tokenized\n :returns: int, the token\n '''\n word = word.lower()\n if word in word_to_token:\n return word_to_token[word]\n return 1\n\n#generates the x and y data by tokenizing segments of each line\n#the best analogy for what this does is it slides a window of size max_len words along each line with a stride of 1\n#and then adds the tokenized contents of the window to the X list\n#and then adds the 
tokenized word after the end of the window to the Y list\nfor line in lines:\n line = line.split(\" \")\n try:\n i = 1\n j = -1*(max_len - 1)\n while True:\n y_tokenized = [to_token(line[i])]\n if y_tokenized != [1] and y_tokenized != [None]:\n tokenized = list(map(to_token, line[j:i]))\n X.append(tokenized)\n Y.append(y_tokenized)\n i += 1\n j += 1\n except IndexError:\n pass\n\n#makes the Y data one-hot encoded\nY = to_categorical(np.array(Y))\n\n#creates an inverse dictionary for going from token to word\ntoken_to_word = {}\nfor key, value in zip(word_to_token.keys(), word_to_token.values()):\n token_to_word[value] = key\n\n#saves each token dictionary to a json file\ndump(word_to_token, open(\"word_to_token.json\", 'w'))\ndump(token_to_word, open(\"token_to_word.json\", 'w'))\n\n#tries to resume training if a model file already exists\ntry:\n open(\"model.h5\").close()\n model = load_model(\"model.h5\")\nexcept FileNotFoundError:\n print(\"Creating new models\")\n inp = Input((max_len,))\n #embedding size is 2 times the cube root of the word count\n embedding = Embedding(word_count, 2*ceil(word_count**(1/3)))(inp)\n lstm = LSTM(512, return_sequences=True)(embedding)\n lstm = LSTM(256)(lstm)\n dense_out = Dense(Y.shape[-1], activation=\"softmax\")(lstm)\n model = Model(inp, dense_out)\n #mse is used because we want to capture the probability distribution\n model.compile(\"adam\", \"mse\", metrics=[categorical_accuracy])\n plot_model(model, \"model.png\", show_shapes=True, expand_nested=True)\n\nbatch_size = 256\nepoch = 0\nnum_samples = len(X)\nDataGen = DataGenerator(X, Y, batch_size, 20)\ntarget_epoch = 0\n#I found training stagnated at around epoch 200\nwhile target_epoch < 250:\n x, y = next(DataGen)\n loss, acc = model.train_on_batch(x, y)\n #if we have gone past the epoch which we are looking for\n if (epoch*batch_size)//num_samples > target_epoch:\n #gives a rough estimate of the number of passes over the dataset\n print(\"Epoch\", (epoch*batch_size)//num_samples)\n print(f\"Accuracy: {acc} Loss: {loss}\")\n model.save(\"model.h5\")\n target_epoch += 10\n epoch += 1\n"
] | [
[
"numpy.array",
"numpy.random.choice"
]
] |
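Note: the X/Y construction in the code field above is a stride-1 sliding window over each tokenized line. A minimal standalone sketch of the same idea (the helper name make_windows is hypothetical, not from the repo):

def make_windows(tokens, max_len=20):
    # Slide a window of up to max_len tokens along the sequence with stride 1;
    # the token just past the window becomes the prediction target.
    X, Y = [], []
    for i in range(1, len(tokens)):
        X.append(tokens[max(0, i - max_len):i])
        Y.append(tokens[i])
    return X, Y

# Example: with max_len=3, [5, 6, 7, 8] yields windows [5], [5, 6], [5, 6, 7]
# and targets 6, 7, 8.
print(make_windows([5, 6, 7, 8], max_len=3))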
Mithrillion/BiQA | [
"f61bea95521f5b2ffd838aa60aecaad568de6564"
] | [
"scripts/data_utils.py"
] | [
"import numpy as np\nimport re\nimport torch.utils.data as tud\nimport torch\nimport shutil\n\n\ndef get_word_ids(doc, rnn_encode=True, max_length=100,\n nr_unk=100, nr_var=600, rev_dic=None, relabel=True, ent_dict=None):\n queue = list(doc)\n X = np.zeros(max_length, dtype='int32')\n # M = np.zeros(max_length, dtype='int32')\n V = np.zeros(max_length, dtype='int32')\n words = []\n if ent_dict is None:\n ent_dict = {}\n k = 0\n while len(words) <= max_length and queue:\n word = queue.pop(0)\n if rnn_encode or (not word.is_punct and not word.is_space):\n words.append(word)\n words.sort()\n for j, token in enumerate(words):\n if token.text == '@placeholder':\n X[j] = 1\n V[j] = 1\n elif token.text[:7] == '@entity':\n # temporary dix\n # TODO: properly fix entity replacement\n try:\n num = int(re.search(r'\\d+', token.text[7:]).group(0))\n if 0 <= num < nr_var:\n if relabel:\n if num not in ent_dict.keys():\n ent_dict[num] = k\n k += 1\n X[j] = ent_dict[num] + 2\n V[j] = ent_dict[num] + 2\n else:\n X[j] = num + 2\n V[j] = num + 2\n except AttributeError:\n X[j] = (token.shape % nr_unk) + 2 + nr_var\n elif token.text in rev_dic.keys():\n X[j] = rev_dic[token.text] + nr_unk + nr_var + 2\n # M[j] = 1\n else:\n # X: [null; ph; vars; unks; vocab]\n X[j] = (token.shape % nr_unk) + 2 + nr_var\n if j >= max_length - 1:\n break\n return X, V, ent_dict\n\n\nclass QADataset(tud.Dataset):\n def __init__(self, data_df, nlp, rev_dic, relabel=True, lang_id=None):\n self.data_df = data_df\n self.nlp = nlp\n self.rev_dic = rev_dic\n self.relabel = relabel\n self.lang_id = lang_id\n\n def __len__(self):\n return self.data_df.shape[0]\n\n def __getitem__(self, i):\n\n story = self.nlp(self.data_df['story'].iloc[i].lower(), parse=False, tag=False, entity=False)\n s, s_var, ent_dict = get_word_ids(story, max_length=2000, rev_dic=self.rev_dic, relabel=self.relabel)\n s_len = np.sum(s != 0)\n\n question = self.nlp(self.data_df['question'].iloc[i].lower(), parse=False, tag=False, entity=False)\n q, q_var, ent_dict = get_word_ids(question, max_length=50, rev_dic=self.rev_dic, relabel=self.relabel,\n ent_dict=ent_dict)\n q_len = np.sum(q != 0)\n\n if self.relabel:\n answer = ent_dict[int(re.search(r'\\d+', self.data_df['answer'].iloc[i]).group(0))]\n else:\n answer = int(re.search(r'\\d+', self.data_df['answer'].iloc[i]).group(0))\n\n if self.lang_id is not None:\n return self.lang_id, s, q, s_len, q_len, s_var, q_var, answer\n else:\n return s, q, s_len, q_len, s_var, q_var, answer\n\n\nclass BiQADataset(tud.Dataset):\n def __init__(self, data_df_1, data_df_2, nlp_1, nlp_2, rev_dic_1, rev_dic_2, relabel=True, l2_supersample=5):\n self.data_df_1 = data_df_1\n self.data_df_2 = data_df_2\n self.nlp_1 = nlp_1\n self.nlp_2 = nlp_2\n self.rev_dic_1 = rev_dic_1\n self.rev_dic_2 = rev_dic_2\n self.relabel = relabel\n self.l2_supersample = l2_supersample\n\n def __len__(self):\n return self.data_df_1.shape[0] + self.data_df_2.shape[0] * self.l2_supersample\n\n def __getitem__(self, i):\n\n if i < self.data_df_1.shape[0]:\n story = self.nlp_1(self.data_df_1['story'].iloc[i].lower(), parse=False, tag=False, entity=False)\n s, s_var, ent_dict = get_word_ids(story, max_length=2000, rev_dic=self.rev_dic_1, relabel=self.relabel)\n s_len = np.sum(s != 0)\n\n question = self.nlp_1(self.data_df_1['question'].iloc[i].lower(), parse=False, tag=False, entity=False)\n q, q_var, ent_dict = get_word_ids(question, max_length=50, rev_dic=self.rev_dic_1, relabel=self.relabel,\n ent_dict=ent_dict)\n q_len = np.sum(q != 0)\n\n if self.relabel:\n 
answer = ent_dict[int(re.search(r'\\d+', self.data_df_1['answer'].iloc[i]).group(0))]\n else:\n answer = int(re.search(r'\\d+', self.data_df_1['answer'].iloc[i]).group(0))\n\n return 0, s, q, s_len, q_len, s_var, q_var, answer\n\n else:\n i = (i - self.data_df_1.shape[0]) % self.data_df_2.shape[0]\n story = self.nlp_2(self.data_df_2['story'].iloc[i].lower(), parse=False, tag=False, entity=False)\n s, s_var, ent_dict = get_word_ids(story, max_length=2000, rev_dic=self.rev_dic_2, relabel=self.relabel)\n s_len = np.sum(s != 0)\n\n question = self.nlp_2(self.data_df_2['question'].iloc[i].lower(), parse=False, tag=False, entity=False)\n q, q_var, ent_dict = get_word_ids(question, max_length=50, rev_dic=self.rev_dic_2, relabel=self.relabel,\n ent_dict=ent_dict)\n q_len = np.sum(q != 0)\n\n if self.relabel:\n answer = ent_dict[int(re.search(r'\\d+', self.data_df_2['answer'].iloc[i]).group(0))]\n else:\n answer = int(re.search(r'\\d+', self.data_df_2['answer'].iloc[i]).group(0))\n\n return 1, s, q, s_len, q_len, s_var, q_var, answer\n\n\ndef get_embeddings(f, nr_unk=100, nr_var=600, meta=None):\n if meta is None:\n nr_vector, ndim = f.readline().split(\" \")\n else:\n nr_vector, ndim = meta.split(\" \")\n nr_vector = int(nr_vector)\n ndim = int(ndim)\n vectors = np.zeros((nr_vector + nr_unk + nr_var + 2, ndim), dtype='float32')\n dic = dict()\n i = 0\n line = f.readline()\n while line:\n parts = line.split(\" \")\n if len(parts) != ndim + 1 and len(parts) != ndim + 2:\n print(line)\n raise ValueError(\"Vector size mismatch! Got {0}, expected {1} (+1)!\".\n format(len(parts), ndim + 1))\n else:\n word = parts[0]\n vec = np.array(parts[1: 1 + ndim]).astype(np.float32)\n vectors[i + nr_unk + nr_var + 2, :] = vec / np.linalg.norm(vec)\n dic[i] = word\n i += 1\n line = f.readline()\n rev_dic = {v: k for k, v, in dic.items()}\n return vectors, dic, rev_dic\n\n\ndef save_checkpoint(state, is_best, filename='checkpoint.en.packed.pth.tar',\n best_name='model_best.en.packed.pth.tar'):\n torch.save(state, filename)\n if is_best:\n shutil.copyfile(filename, best_name)\n\n\ndef sort_batch(batch, sort_ind=3, pack=True):\n if pack:\n _, orders = torch.sort(batch[sort_ind], dim=0, descending=True)\n return [x[orders] for x in batch]\n else:\n return batch\n\n"
] | [
[
"numpy.sum",
"numpy.zeros",
"torch.save",
"numpy.array",
"numpy.linalg.norm",
"torch.sort"
]
] |
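Note: sort_batch in the code field above reorders an entire batch by descending sequence length, the layout PyTorch's pack_padded_sequence expects by default. A minimal sketch of that trick (sort_by_length is a hypothetical name, not the repo's API):

import torch

def sort_by_length(tensors, lengths):
    # One permutation from sorting the lengths, applied to every tensor in the batch.
    _, order = torch.sort(lengths, dim=0, descending=True)
    return [t[order] for t in tensors], order

lengths = torch.tensor([3, 7, 5])
ids = torch.arange(3)
tensors, order = sort_by_length([ids], lengths)
print(order, tensors[0])  # tensor([1, 2, 0]) tensor([1, 2, 0])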
dbseorms16/drnxgaze | [
"c7b84189c263456c648829bc399a5edb2ec17bb8"
] | [
"estimate_gaze_standalone.py"
] | [
"#!/usr/bin/env python\n\n# Licensed under Creative Commons Attribution-NonCommercial-ShareAlike 4.0 International (https://creativecommons.org/licenses/by-nc-sa/4.0/legalcode)\n\nfrom __future__ import print_function, division, absolute_import\n\nimport argparse\nimport os\nimport sys\nimport time\n\nimport cv2\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom tqdm import tqdm\n\nfrom rt_gene.extract_landmarks_method_base import LandmarkMethodBase\nfrom rt_gene.gaze_tools import get_phi_theta_from_euler, limit_yaw\nfrom rt_gene.gaze_tools_standalone import euler_from_matrix\n\nfrom previous_state import PeopleState\n\n\n\n\n\nscript_path = os.path.dirname(os.path.realpath(__file__))\n\n\ndef load_camera_calibration(calibration_file):\n import yaml\n with open(calibration_file, 'r') as f:\n cal = yaml.safe_load(f)\n\n dist_coefficients = np.array(cal['distortion_coefficients']['data'], dtype='float32').reshape(1, 5)\n camera_matrix = np.array(cal['camera_matrix']['data'], dtype='float32').reshape(3, 3)\n\n return dist_coefficients, camera_matrix\n\n\ndef extract_eye_image_patches(subjects):\n for subject in subjects:\n le_c, re_c, leftcenter_coor, rightcenter_coor, _, _ = subject.get_eye_image_from_landmarks(subject, landmark_estimator.eye_image_size)\n subject.left_eye_color = le_c\n subject.right_eye_color = re_c\n subject.leftcenter_coor = leftcenter_coor\n subject.rightcenter_coor = rightcenter_coor\n\n\n\n\ndef init_previous(num,bbox_l_list):\n people_list.clear()\n\n for i in range(num):\n people_list.append(PeopleState(bbox_l_list[i]))\n\ndef append_people(faceboxes):\n for facebox in faceboxes:\n append_flag =True\n for people in people_list:\n if people.isTheSame(facebox[0]):\n append_flag = False\n break\n if append_flag:\n people_list.append(PeopleState(facebox[0]))\n\n\ndef del_people(faceboxes):\n i=0\n while i < len(people_list):\n del_flag = True\n for facebox in faceboxes:\n if people_list[i].isTheSame(facebox[0]):\n del_flag =False\n break\n if del_flag:\n del people_list[i]\n else:\n i+=1\n\ndef check_people(faceboxes):\n append_people(faceboxes)\n del_people(faceboxes)\n\ndef get_people(facebox_l):\n for idx in range(len(people_list)):\n if people_list[idx].isTheSame(facebox_l):\n return idx\n\n\n# head_theta =[]\n# head_phi =[]\n# gaze_theta = []\n# gaze_phi =[]\npeople_list =[]\nFPS = \"0\"\ngaze_error =[]\nheadpose_error =[]\n\nEVAL =False\nSHIFT =False\ndef estimate_gaze(base_name, color_img, dist_coefficients, camera_matrix ,label=False):\n global FPS\n\n\n # cv2.putText(color_img, \"FPS : \"+FPS, (10,30), cv2.FONT_HERSHEY_SIMPLEX, 1, (0,255,0), 2, cv2.LINE_AA)\n # start = time.time()\n\n #face box의 위치를 반환.(모든 대상 list로 반환) -[left_x, top_y, right_x, bottom_y]\n # faceboxes = landmark_estimator.get_face_bb(color_img)\n\n # if len(faceboxes) == 0:\n faceboxes = [[0,0,120,36]]\n # tqdm.write('Could not find faces in the image')\n # # if EVAL and not SHIFT:\n # # head_phi.append(-100)\n # # head_theta.append(-100)\n # # gaze_phi.append(-100)\n # # gaze_theta.append(-100)\n # return\n\n check_people(faceboxes)\n\n subjects = landmark_estimator.get_subjects_from_faceboxes(color_img, faceboxes)\n extract_eye_image_patches(subjects)\n\n input_r_list = []\n input_l_list = []\n input_head_list = []\n valid_subject_list = []\n\n people_count = 1;\n frame_img =color_img\n\n for idx, subject in enumerate(subjects):\n # people_idx = get_people(faceboxes[idx][0])\n # people_list[people_idx].set_bbox_l(faceboxes[idx][0])\n\n if subject.left_eye_color is None or 
subject.right_eye_color is None:\n tqdm.write('Failed to extract eye image patches')\n continue\n\n success, rotation_vector, _ = cv2.solvePnP(landmark_estimator.model_points,\n subject.landmarks.reshape(len(subject.landmarks), 1, 2),\n cameraMatrix=camera_matrix,\n distCoeffs=dist_coefficients, flags=cv2.SOLVEPNP_DLS)\n\n if not success:\n tqdm.write('Not able to extract head pose for subject {}'.format(idx))\n continue\n\n _rotation_matrix, _ = cv2.Rodrigues(rotation_vector)\n _rotation_matrix = np.matmul(_rotation_matrix, np.array([[0, 1, 0], [0, 0, -1], [-1, 0, 0]]))\n _m = np.zeros((4, 4))\n _m[:3, :3] = _rotation_matrix\n _m[3, 3] = 1\n # Go from camera space to ROS space\n _camera_to_ros = [[0.0, 0.0, 1.0, 0.0],\n [-1.0, 0.0, 0.0, 0.0],\n [0.0, -1.0, 0.0, 0.0],\n [0.0, 0.0, 0.0, 1.0]]\n roll_pitch_yaw = list(euler_from_matrix(np.dot(_camera_to_ros, _m)))\n roll_pitch_yaw = limit_yaw(roll_pitch_yaw)\n\n phi_head, theta_head = get_phi_theta_from_euler(roll_pitch_yaw)\n # if EVAL:\n # head_phi.append(phi_head)\n # head_theta.append(theta_head)\n # face_image_resized = cv2.resize(subject.face_color, dsize=(224, 224), interpolation=cv2.INTER_CUBIC)\n # head_pose_image = landmark_estimator.visualize_headpose_result(face_image_resized, (phi_head, theta_head))\n\n #color_image의 facebox에 headpose vector를 그림.\n if EVAL:\n head_pose_image, headpose_err = landmark_estimator.visualize_headpose_result(frame_img,faceboxes[idx], (phi_head, theta_head), people_list[people_idx],label)\n print(headpose_err)\n else:\n head_pose_image, headpose_err = landmark_estimator.visualize_headpose_result(frame_img,faceboxes[idx], (phi_head, theta_head), people_list[people_idx])\n\n frame_img = head_pose_image\n if EVAL:\n headpose_error.append(headpose_err)\n print(\"head pose error:\",headpose_err)\n\n if args.mode =='image':\n #show headpose\n # if args.vis_headpose:\n # plt.axis(\"off\")\n # plt.imshow(cv2.cvtColor(head_pose_image, cv2.COLOR_BGR2RGB))\n # plt.show()\n\n if args.save_headpose:\n cv2.imwrite(os.path.join(args.output_path, os.path.splitext(base_name)[0]+str(people_count) + '_headpose.jpg'), head_pose_image)\n people_count +=1\n #size 등 format 변경.\n input_r_list.append(gaze_estimator.input_from_image(subject.right_eye_color))\n input_l_list.append(gaze_estimator.input_from_image(subject.left_eye_color))\n input_head_list.append([theta_head, phi_head])\n valid_subject_list.append(idx)\n\n\n # if args.mode =='video':\n # # plt.axis(\"off\")\n # # plt.imshow(cv2.cvtColor(head_pose_image, cv2.COLOR_BGR2RGB))\n # # plt.show()\n # headpose_out_video.write(frame_img)\n\n if len(valid_subject_list) == 0:\n return\n\n # returns [subject : [gaze_pose]]\n gaze_est = gaze_estimator.estimate_gaze_twoeyes(inference_input_left_list=input_l_list,\n inference_input_right_list=input_r_list,\n\n inference_headpose_list=input_head_list)\n people_count = 1\n for subject_id, gaze, headpose in zip(valid_subject_list, gaze_est.tolist(), input_head_list):\n subject = subjects[subject_id]\n facebox = faceboxes[subject_id]\n people_idx = get_people(facebox[0])\n # Build visualizations\n # r_gaze_img = gaze_estimator.visualize_eye_result(subject.right_eye_color, gaze)\n # l_gaze_img = gaze_estimator.visualize_eye_result(subject.left_eye_color, gaze)\n # if EVAL:\n # gaze_theta.append(gaze[0])\n # gaze_phi.append(gaze[1])\n if EVAL:\n r_gaze_img, r_gaze_err = gaze_estimator.visualize_eye_result(frame_img, gaze, subject.leftcenter_coor, facebox,people_list[people_idx], \"gaze_r\", label)\n l_gaze_img, l_gaze_err = 
gaze_estimator.visualize_eye_result(r_gaze_img, gaze, subject.rightcenter_coor, facebox,people_list[people_idx], \"gaze_l\", label)\n else:\n r_gaze_img, r_gaze_err = gaze_estimator.visualize_eye_result(frame_img, gaze, subject.leftcenter_coor, facebox,people_list[people_idx], \"gaze_r\")\n l_gaze_img, l_gaze_err = gaze_estimator.visualize_eye_result(r_gaze_img, gaze, subject.rightcenter_coor, facebox,people_list[people_idx], \"gaze_l\")\n\n frame_img = l_gaze_img\n if EVAL:\n print(\"right gaze error:\",r_gaze_err)\n print(\"left gaze error:\",l_gaze_err)\n gaze_error.append(r_gaze_err)\n gaze_error.append(l_gaze_err)\n\n #show gaze image\n # if args.vis_gaze:\n # plt.axis(\"off\")\n # plt.imshow(cv2.cvtColor(s_gaze_img, cv2.COLOR_BGR2RGB))\n # plt.show()\n if args.mode =='image':\n if args.save_gaze:\n cv2.imwrite(os.path.join(args.output_path, os.path.splitext(base_name)[0]+str(people_count) + '_gaze.jpg'), frame_img)\n # cv2.imwrite(os.path.join(args.output_path, os.path.splitext(base_name)[0] + '_left.jpg'), subject.left_eye_color)\n # cv2.imwrite(os.path.join(args.output_path, os.path.splitext(base_name)[0] + '_right.jpg'), subject.right_eye_color)\n\n\n if args.save_estimate:\n with open(os.path.join(args.output_path, os.path.splitext(base_name)[0] + '_output.txt'), 'w+') as f:\n f.write(os.path.splitext(base_name)[0] + ', [' + str(headpose[1]) + ', ' + str(headpose[0]) + ']' +\n\n ', [' + str(gaze[1]) + ', ' + str(gaze[0]) + ']' + '\\n')\n people_count +=1\n if args.mode =='video':\n out_video.write(frame_img)\n # end = time.time()\n # delay_time = end-start\n # FPS = str(int(1/delay_time))\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='Estimate gaze from images')\n parser.add_argument('im_path', type=str, default=os.path.abspath(os.path.join(script_path, './samples_gaze/')),\n nargs='?', help='Path to an image or a directory containing images')\n parser.add_argument('video_path', type=str, default=os.path.abspath(os.path.join(script_path, './samples_video/')),\n nargs='?', help='Path to an video or a directory containing videos')\n parser.add_argument('--calib-file', type=str, dest='calib_file', default=None, help='Camera calibration file')\n parser.add_argument('--vis-headpose', dest='vis_headpose', action='store_true', help='Display the head pose images')\n parser.add_argument('--no-vis-headpose', dest='vis_headpose', action='store_false', help='Do not display the head pose images')\n parser.add_argument('--save-headpose', dest='save_headpose', action='store_true', help='Save the head pose images')\n parser.add_argument('--no-save-headpose', dest='save_headpose', action='store_false', help='Do not save the head pose images')\n parser.add_argument('--vis-gaze', dest='vis_gaze', action='store_true', help='Display the gaze images')\n parser.add_argument('--no-vis-gaze', dest='vis_gaze', action='store_false', help='Do not display the gaze images')\n parser.add_argument('--save-gaze', dest='save_gaze', action='store_true', help='Save the gaze images')\n parser.add_argument('--save-estimate', dest='save_estimate', action='store_true', help='Save the predictions in a text file')\n parser.add_argument('--no-save-gaze', dest='save_gaze', action='store_false', help='Do not save the gaze images')\n parser.add_argument('--gaze_backend', choices=['tensorflow', 'pytorch'], default='pytorch')\n parser.add_argument('--mode', choices=['video', 'image'], default='image')\n parser.add_argument('--output_path', type=str, 
default=os.path.abspath(os.path.join(script_path, './samples_gaze/out')),\n help='Output directory for head pose and gaze images')\n parser.add_argument('--models', nargs='+', type=str, default=[os.path.abspath(os.path.join(script_path, '../model_nets/Model_allsubjects1.h5'))],\n help='List of gaze estimators')\n parser.add_argument('--device-id-facedetection', dest=\"device_id_facedetection\", type=str, default='cuda:0', help='Pytorch device id. Set to \"cpu:0\" to disable cuda')\n\n parser.set_defaults(vis_gaze=True)\n parser.set_defaults(save_gaze=True)\n parser.set_defaults(vis_headpose=False)\n parser.set_defaults(save_headpose=True)\n parser.set_defaults(save_estimate=False)\n\n args = parser.parse_args()\n\n\n image_path_list = []\n video_path_list = []\n\n\n if args.mode == 'image':\n if os.path.isfile(args.im_path):\n image_path_list.append(os.path.split(args.im_path)[1])\n args.im_path = os.path.split(args.im_path)[0]\n elif os.path.isdir(args.im_path):\n for image_file_name in sorted(os.listdir(args.im_path)):\n if image_file_name.endswith('.jpg') or image_file_name.endswith('.png'):\n if '_gaze' not in image_file_name and '_headpose' not in image_file_name:\n image_path_list.append(image_file_name)\n else:\n tqdm.write('Provide either a path to an image or a path to a directory containing images')\n sys.exit(1)\n else:\n args.output_path = os.path.abspath(os.path.join(script_path, './samples_video/out'))\n if os.path.isfile(args.video_path):\n video_path_list.append(os.path.split(args.video_path)[1])\n args.video_path_list = os.path.split(video_path_list)[0]\n elif os.path.isdir(args.video_path):\n for video_file_name in sorted(os.listdir(args.video_path)):\n if video_file_name.endswith('.mp4') or video_file_name.endswith('.avi'):\n if '_gaze' not in video_path_list and '_headpose' not in video_path_list:\n video_path_list.append(video_file_name)\n else:\n tqdm.write('Provide either a path to an video or a path to a directory containing videos')\n sys.exit(1)\n print(\"========================video list==================\")\n print(video_path_list)\n tqdm.write('Loading networks')\n landmark_estimator = LandmarkMethodBase(device_id_facedetection=args.device_id_facedetection,\n checkpoint_path_face=os.path.abspath(os.path.join(script_path, \"rt_gene/model_nets/SFD/s3fd_facedetector.pth\")),\n checkpoint_path_landmark=os.path.abspath(\n os.path.join(script_path, \"rt_gene/model_nets/phase1_wpdc_vdc.pth.tar\")),\n model_points_file=os.path.abspath(os.path.join(script_path, \"rt_gene/model_nets/face_model_68.txt\")))\n\n if args.gaze_backend == \"tensorflow\":\n from rt_gene.estimate_gaze_tensorflow import GazeEstimator\n\n gaze_estimator = GazeEstimator(\"/gpu:0\", args.models)\n elif args.gaze_backend == \"pytorch\":\n from rt_gene.estimate_gaze_pytorch import GazeEstimator\n\n gaze_estimator = GazeEstimator(\"cuda:0\", args.models)\n else:\n raise ValueError(\"Incorrect gaze_base backend, choices are: tensorflow or pytorch\")\n\n if not os.path.isdir(args.output_path):\n os.makedirs(args.output_path)\n\n if args.mode == 'image':\n for image_file_name in tqdm(image_path_list):\n tqdm.write('Estimate gaze on ' + image_file_name)\n image = cv2.imread(os.path.join(args.im_path, image_file_name))\n if image is None:\n tqdm.write('Could not load ' + image_file_name + ', skipping this image.')\n continue\n\n if args.calib_file is not None:\n _dist_coefficients, _camera_matrix = load_camera_calibration(args.calib_file)\n else:\n im_width, im_height = image.shape[1], image.shape[0]\n # 
tqdm.write('WARNING!!! You should provide the camera calibration file, otherwise you might get bad results. Using a crude approximation!')\n _dist_coefficients, _camera_matrix = np.zeros((1, 5)), np.array(\n [[im_height, 0.0, im_width / 2.0], [0.0, im_height, im_height / 2.0], [0.0, 0.0, 1.0]])\n\n estimate_gaze(image_file_name, image, _dist_coefficients, _camera_matrix)\n else:\n print(\"=-------------------------video path list--------------------\")\n print(video_path_list)\n\n allVideo_total_error = 0\n for video_file_name in tqdm(video_path_list):\n tqdm.write('Estimate gaze on ' + video_file_name)\n\n video = cv2.VideoCapture(os.path.join(args.video_path, video_file_name))\n width = video.get(cv2.CAP_PROP_FRAME_WIDTH)\n height = video.get(cv2.CAP_PROP_FRAME_HEIGHT)\n fps = video.get(cv2.CAP_PROP_FPS)\n fourcc = cv2.VideoWriter_fourcc(*'DIVX') # 코덱 정의\n\n #head pose + gaze\n out_path =os.path.join(args.output_path, video_file_name)\n out_video = cv2.VideoWriter(out_path, fourcc, fps, (int(width), int(height))) # VideoWriter 객체 정의\n\n #head pose와 gaze를 각각 출력하고 싶을 때.\n # gaze_out_path = os.path.join(args.output_path, 'gaze_'+video_file_name)\n # headpose_out_path = os.path.join(args.output_path, 'headpose'+video_file_name)\n # gaze_out_video = cv2.VideoWriter(gaze_out_path, fourcc, fps, (int(width), int(height))) # VideoWriter 객체 정의\n # headpose_out_video = cv2.VideoWriter(headpose_out_path, fourcc, fps, (int(width), int(height))) # VideoWriter 객체 정의\n\n label = []\n if EVAL:\n label_path = \"s000_label_combined.txt\"\n if SHIFT:\n # Shift video name :Shift_{num}.mp4\n # 해당하는 라벨을 고정으로 넘김.\n image_name = video_file_name.split(\"_\")[1]\n image_name = image_name.split(\".\")[0]\n with open(label_path, \"r\") as f:\n for line in f:\n line = line.split(\",\")\n if line[0] != image_name:\n continue\n else:\n label = line\n print(f\"======= label found {line[0]} ======\")\n break\n\n else:\n f = open(label_path, \"r\")\n #header 제거\n f.readline()\n\n count=0\n while video.isOpened():\n ret, frame = video.read()\n if not ret:\n break\n #RT_GENE DataSet -> 한줄씩 읽어와 넘김.\n if EVAL:\n if not SHIFT:\n label = f.readline()\n label = label.split(\",\")\n\n for i in range(1,5):\n label[i] = float(label[i])\n\n\n count+=1\n print(\"frame\",count)\n\n # 대상의 수에 맞게 people_list 초기화\n if count == 1:\n num, bboxes_l = landmark_estimator.get_init_value(frame)\n init_previous(num, bboxes_l)\n if not ret:\n print(\"Error:: Frame Road Fail\")\n break\n\n\n if args.calib_file is not None:\n _dist_coefficients, _camera_matrix = load_camera_calibration(args.calib_file)\n\n else:\n im_width, im_height = frame.shape[1], frame.shape[0]\n # tqdm.write(\n # 'WARNING!!! You should provide the camera calibration file, otherwise you might get bad results. 
Using a crude approximation!')\n _dist_coefficients, _camera_matrix = np.zeros((1, 5)), np.array(\n [[im_height, 0.0, im_width / 2.0], [0.0, im_height, im_height / 2.0], [0.0, 0.0, 1.0]])\n print(label)\n estimate_gaze(video_file_name, frame, _dist_coefficients, _camera_matrix,label)\n\n if EVAL:\n average_headpose_err = 0\n average_gaze_err =0\n average_total_err = 0\n frame_num = len(headpose_error)\n\n for error in headpose_error:\n average_headpose_err+= error\n for error in gaze_error:\n average_gaze_err+= error\n\n average_headpose_err /=frame_num\n average_gaze_err/= (frame_num*2)\n total_error = (average_headpose_err +average_gaze_err) /2\n allVideo_total_error+= total_error\n\n print(\"==================Average Error=================\")\n\n print(\"frame :\",frame_num)\n print(\"Average Headpose Error :\",average_headpose_err)\n print(\"Average Gaze Error :\",average_gaze_err)\n print(\"Average Error(Total) :\", total_error)\n\n video.release()\n out_video.release()\n\n if EVAL:\n f.close()\n headpose_error.clear()\n gaze_error.clear()\n if EVAL:\n print(\"all Video Average Error :\",allVideo_total_error/len(video_path_list))\n\n\n\n\n\n # if EVAL:\n # label_path = \"s000_label_combined.txt\"\n # average_head_phi=0\n # average_head_theta =0\n # average_gaze_phi=0\n # average_gaze_theta=0\n #\n # frame_num = len(head_theta)\n # skip_frame =0\n #\n # image_name = video_file_name.split(\"_\")[1]\n # image_name = image_name.split(\".\")[0]\n\n # with open(label_path,\"r\") as f:\n # label=[]\n #\n # for line in f:\n # line = line.split(\",\")\n # if line[0] != image_name:\n # continue\n # else:\n # label = np.copy(line)\n # print(\"======= label found ======\")\n # break\n # for i in range(frame_num):\n # if (head_phi[i] == -100):\n # skip_frame += 1\n # continue\n # average_head_phi += abs(head_phi[i] - float(label[1].strip()))\n # average_head_theta += abs(head_phi[i] - float(label[2].strip()))\n # average_gaze_phi += abs(head_phi[i] - float(label[3].strip()))\n # average_gaze_theta += abs(head_phi[i] - float(label[4].strip()))\n\n\n\n # with open(label_path,\"r\") as f:\n # #header 제거\n # line = f.readline()\n # for i in range(frame_num):\n # line = f.readline()\n # if(head_phi[i] == -100):\n # skip_frame+=1\n # continue\n # line = line.split(\",\")\n #\n # average_head_phi += abs(head_phi[i] - float(line[1].strip()))\n # average_head_theta += abs(head_phi[i] - float(line[2].strip()))\n # average_gaze_phi += abs(head_phi[i] - float(line[3].strip()))\n # average_gaze_theta += abs(head_phi[i] - float(line[4].strip()))\n # average_head_phi /=frame_num\n # average_head_theta /=frame_num\n # average_gaze_phi /=frame_num\n # average_gaze_theta /=frame_num\n #\n # total_error = (average_head_theta + average_head_phi +average_gaze_theta +average_gaze_phi) /4\n #\n # print(\"============================= Average Error ==========================\")\n # print(\"evaluate frame :\",frame_num-skip_frame)\n # print(\"Head_phi Error =\",average_head_phi)\n # print(\"Head_theta Error =\",average_head_theta)\n # print(\"Gaze_phi Error =\",average_gaze_phi)\n # print(\"Gaze_theta Error =\",average_gaze_theta)\n # print(\"Total Error =\",total_error)\n\n\n\n\n\n"
] | [
[
"numpy.array",
"numpy.dot",
"numpy.zeros"
]
] |
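Note: when no --calib-file is given, the script above falls back to a crude pinhole approximation of the camera. A standalone sketch of that fallback (approximate_camera is a hypothetical name, not the repo's API):

import numpy as np

def approximate_camera(im_width, im_height):
    # Focal length approximated by the image height, principal point at the
    # image center, and zero lens distortion.
    camera_matrix = np.array([[im_height, 0.0, im_width / 2.0],
                              [0.0, im_height, im_height / 2.0],
                              [0.0, 0.0, 1.0]])
    dist_coefficients = np.zeros((1, 5))
    return dist_coefficients, camera_matrix

print(approximate_camera(640, 480)[1])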
mbmccoy/jax | [
"74346f464bc8369d81964305fcf05f95f43fb2d3"
] | [
"jaxlib/pocketfft.py"
] | [
"# Copyright 2020 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# flatbuffers needs importlib.util but fails to import it itself.\nimport importlib.util # noqa: F401\nfrom typing import List\n\nimport jaxlib.mlir.ir as ir\nimport jaxlib.mlir.dialects.mhlo as mhlo\n\n\nfrom . import _pocketfft\nfrom . import pocketfft_flatbuffers_py_generated as pd\nimport numpy as np\n\nimport flatbuffers\nfrom jaxlib import xla_client\n\nfor _name, _value in _pocketfft.registrations().items():\n xla_client.register_custom_call_target(_name, _value, platform=\"cpu\")\n\nFftType = xla_client.FftType\n\nflatbuffers_version_2 = hasattr(flatbuffers, \"__version__\")\n\n\ndef _pocketfft_descriptor(shape: List[int], dtype, fft_type: FftType,\n fft_lengths: List[int]) -> bytes:\n n = len(shape)\n assert len(fft_lengths) >= 1\n assert len(fft_lengths) <= n, (fft_lengths, n)\n\n builder = flatbuffers.Builder(128)\n\n forward = fft_type in (FftType.FFT, FftType.RFFT)\n if fft_type == FftType.RFFT:\n pocketfft_type = pd.PocketFftType.R2C\n\n assert dtype in (np.float32, np.float64), dtype\n out_dtype = np.dtype(np.complex64 if dtype == np.float32 else np.complex128)\n pocketfft_dtype = (\n pd.PocketFftDtype.COMPLEX64\n if dtype == np.float32 else pd.PocketFftDtype.COMPLEX128)\n\n assert shape[-len(fft_lengths):] == fft_lengths, (shape, fft_lengths)\n out_shape = list(shape)\n out_shape[-1] = out_shape[-1] // 2 + 1\n\n elif fft_type == FftType.IRFFT:\n pocketfft_type = pd.PocketFftType.C2R\n assert np.issubdtype(dtype, np.complexfloating), dtype\n\n out_dtype = np.dtype(np.float32 if dtype == np.complex64 else np.float64)\n pocketfft_dtype = (\n pd.PocketFftDtype.COMPLEX64\n if dtype == np.complex64 else pd.PocketFftDtype.COMPLEX128)\n\n assert shape[-len(fft_lengths):-1] == fft_lengths[:-1]\n out_shape = list(shape)\n out_shape[-1] = fft_lengths[-1]\n assert (out_shape[-1] // 2 + 1) == shape[-1]\n else:\n pocketfft_type = pd.PocketFftType.C2C\n\n assert np.issubdtype(dtype, np.complexfloating), dtype\n out_dtype = dtype\n pocketfft_dtype = (\n pd.PocketFftDtype.COMPLEX64\n if dtype == np.complex64 else pd.PocketFftDtype.COMPLEX128)\n\n assert shape[-len(fft_lengths):] == fft_lengths, (shape, fft_lengths)\n out_shape = shape\n\n # PocketFft does not allow size 0 dimensions.\n if 0 in shape or 0 in out_shape:\n return b\"\", out_dtype, out_shape\n\n # Builds a PocketFftDescriptor flatbuffer. 
This descriptor is passed to the\n # C++ kernel to describe the FFT to perform.\n pd.PocketFftDescriptorStartShapeVector(builder, n)\n for d in reversed(shape if fft_type != FftType.IRFFT else out_shape):\n builder.PrependUint64(d)\n if flatbuffers_version_2:\n pocketfft_shape = builder.EndVector()\n else:\n pocketfft_shape = builder.EndVector(n)\n\n pd.PocketFftDescriptorStartStridesInVector(builder, n)\n stride = dtype.itemsize\n for d in reversed(shape):\n builder.PrependUint64(stride)\n stride *= d\n if flatbuffers_version_2:\n strides_in = builder.EndVector()\n else:\n strides_in = builder.EndVector(n)\n pd.PocketFftDescriptorStartStridesOutVector(builder, n)\n stride = out_dtype.itemsize\n for d in reversed(out_shape):\n builder.PrependUint64(stride)\n stride *= d\n if flatbuffers_version_2:\n strides_out = builder.EndVector()\n else:\n strides_out = builder.EndVector(n)\n\n pd.PocketFftDescriptorStartAxesVector(builder, len(fft_lengths))\n for d in range(len(fft_lengths)):\n builder.PrependUint32(n - d - 1)\n if flatbuffers_version_2:\n axes = builder.EndVector()\n else:\n axes = builder.EndVector(len(fft_lengths))\n\n scale = 1. if forward else (1. / np.prod(fft_lengths))\n pd.PocketFftDescriptorStart(builder)\n pd.PocketFftDescriptorAddDtype(builder, pocketfft_dtype)\n pd.PocketFftDescriptorAddFftType(builder, pocketfft_type)\n pd.PocketFftDescriptorAddShape(builder, pocketfft_shape)\n pd.PocketFftDescriptorAddStridesIn(builder, strides_in)\n pd.PocketFftDescriptorAddStridesOut(builder, strides_out)\n pd.PocketFftDescriptorAddAxes(builder, axes)\n pd.PocketFftDescriptorAddForward(builder, forward)\n pd.PocketFftDescriptorAddScale(builder, scale)\n descriptor = pd.PocketFftDescriptorEnd(builder)\n builder.Finish(descriptor)\n return builder.Output(), out_dtype, out_shape\n\n\ndef pocketfft_mhlo(a, dtype, *, fft_type: FftType, fft_lengths: List[int]):\n \"\"\"PocketFFT kernel for CPU.\"\"\"\n a_type = ir.RankedTensorType(a.type)\n n = len(a_type.shape)\n\n fft_lengths = list(fft_lengths)\n descriptor_bytes, out_dtype, out_shape = _pocketfft_descriptor(\n list(a_type.shape), dtype, fft_type, fft_lengths)\n\n if out_dtype == np.float32:\n out_type = ir.F32Type.get()\n elif out_dtype == np.float64:\n out_type = ir.F64Type.get()\n elif out_dtype == np.complex64:\n out_type = ir.ComplexType.get(ir.F32Type.get())\n elif out_dtype == np.complex128:\n out_type = ir.ComplexType.get(ir.F64Type.get())\n else:\n raise ValueError(f\"Unknown output type {out_dtype}\")\n\n if 0 in a_type.shape or 0 in out_shape:\n zero = mhlo.ConstOp(ir.RankedTensorType.get([], out_type),\n ir.DenseElementsAttr.get(np.array(0, dtype=out_dtype),\n type=out_type))\n return mhlo.BroadcastOp(\n ir.RankedTensorType.get(out_shape, out_type),\n zero,\n ir.DenseElementsAttr.get(np.asarray(out_shape, np.int64))).result\n\n u8_type = ir.IntegerType.get_unsigned(8)\n descriptor = mhlo.ConstOp(\n ir.RankedTensorType.get([len(descriptor_bytes)], u8_type),\n ir.DenseElementsAttr.get(np.frombuffer(descriptor_bytes, dtype=np.uint8),\n type=u8_type))\n layout = ir.DenseIntElementsAttr.get(np.arange(n - 1, -1, -1),\n type=ir.IndexType.get())\n return mhlo.CustomCallOp(\n [ir.RankedTensorType.get(out_shape, out_type)],\n [descriptor, a],\n call_target_name = ir.StringAttr.get(\"pocketfft\"),\n has_side_effect=ir.BoolAttr.get(False),\n backend_config=ir.StringAttr.get(\"\"),\n api_version=ir.IntegerAttr.get(ir.IntegerType.get_signless(32), 2),\n called_computations=ir.ArrayAttr.get([]),\n operand_layouts=ir.ArrayAttr.get([\n 
ir.DenseIntElementsAttr.get(np.array([0], np.int64),\n type=ir.IndexType.get()),\n layout,\n ]),\n result_layouts=ir.ArrayAttr.get([layout])).result\n"
] | [
[
"numpy.dtype",
"numpy.issubdtype",
"numpy.asarray",
"numpy.arange",
"numpy.prod",
"numpy.array",
"numpy.frombuffer"
]
] |
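Note: the shape bookkeeping in _pocketfft_descriptor above follows the usual real-FFT convention, which numpy can illustrate directly: an RFFT over the last axis produces n // 2 + 1 complex bins, and the IRFFT needs the original length to invert it.

import numpy as np

x = np.zeros((4, 10), dtype=np.float32)
spectrum = np.fft.rfft(x, axis=-1)
print(spectrum.shape)   # (4, 6) == (4, 10 // 2 + 1)
recovered = np.fft.irfft(spectrum, n=10, axis=-1)
print(recovered.shape)  # (4, 10)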
Qiza-lyhm/mmcv-1 | [
"362a90f8bfffe62d5802925944f540ed16b2731e"
] | [
"tests/test_ops/test_bbox.py"
] | [
"# Copyright (c) OpenMMLab. All rights reserved.\nimport numpy as np\nimport pytest\nimport torch\n\nfrom mmcv.device.mlu import IS_MLU_AVAILABLE\nfrom mmcv.utils import IS_CUDA_AVAILABLE\n\n\nclass TestBBox(object):\n\n def _test_bbox_overlaps(self, device, dtype=torch.float):\n from mmcv.ops import bbox_overlaps\n b1 = torch.tensor([[1.0, 1.0, 3.0, 4.0], [2.0, 2.0, 3.0, 4.0],\n [7.0, 7.0, 8.0, 8.0]]).to(device).type(dtype)\n b2 = torch.tensor([[0.0, 2.0, 2.0, 5.0], [2.0, 1.0, 3.0,\n 3.0]]).to(device).type(dtype)\n should_output = np.array([[0.33333334, 0.5], [0.2, 0.5], [0.0, 0.0]])\n out = bbox_overlaps(b1, b2, offset=1)\n assert np.allclose(out.cpu().numpy(), should_output, 1e-2)\n\n b1 = torch.tensor([[1.0, 1.0, 3.0, 4.0], [2.0, 2.0, 3.0,\n 4.0]]).to(device).type(dtype)\n b2 = torch.tensor([[0.0, 2.0, 2.0, 5.0], [2.0, 1.0, 3.0,\n 3.0]]).to(device).type(dtype)\n should_output = np.array([0.33333334, 0.5])\n out = bbox_overlaps(b1, b2, aligned=True, offset=1)\n assert np.allclose(out.cpu().numpy(), should_output, 1e-2)\n\n b1 = torch.tensor([[0.0, 0.0, 3.0, 3.0]]).to(device).type(dtype)\n b2 = torch.tensor([[4.0, 0.0, 5.0, 3.0], [3.0, 0.0, 4.0, 3.0],\n [2.0, 0.0, 3.0, 3.0], [1.0, 0.0, 2.0,\n 3.0]]).to(device).type(dtype)\n should_output = np.array([0, 0.2, 0.5, 0.5])\n out = bbox_overlaps(b1, b2, offset=1)\n assert np.allclose(out.cpu().numpy(), should_output, 1e-2)\n\n @pytest.mark.parametrize('device', [\n pytest.param(\n 'cuda',\n marks=pytest.mark.skipif(\n not IS_CUDA_AVAILABLE, reason='requires CUDA support')),\n pytest.param(\n 'mlu',\n marks=pytest.mark.skipif(\n not IS_MLU_AVAILABLE, reason='requires MLU support'))\n ])\n def test_bbox_overlaps_float(self, device):\n self._test_bbox_overlaps(device, dtype=torch.float)\n\n @pytest.mark.parametrize('device', [\n pytest.param(\n 'cuda',\n marks=pytest.mark.skipif(\n not IS_CUDA_AVAILABLE, reason='requires CUDA support')),\n pytest.param(\n 'mlu',\n marks=pytest.mark.skipif(\n not IS_MLU_AVAILABLE, reason='requires MLU support'))\n ])\n def test_bbox_overlaps_half(self, device):\n self._test_bbox_overlaps(device, dtype=torch.half)\n"
] | [
[
"numpy.array",
"torch.tensor"
]
] |
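Note: the expected values in the test above follow the offset=1 IoU convention, where a box's width is x2 - x1 + 1 (inclusive pixel coordinates). A plain-Python sketch reproducing the first expected value (iou is a hypothetical helper, not mmcv's API):

def iou(b1, b2, offset=1):
    # Intersection extents, clamped at zero when the boxes do not overlap.
    ix = max(0.0, min(b1[2], b2[2]) - max(b1[0], b2[0]) + offset)
    iy = max(0.0, min(b1[3], b2[3]) - max(b1[1], b2[1]) + offset)
    inter = ix * iy
    area1 = (b1[2] - b1[0] + offset) * (b1[3] - b1[1] + offset)
    area2 = (b2[2] - b2[0] + offset) * (b2[3] - b2[1] + offset)
    return inter / (area1 + area2 - inter)

print(iou([1.0, 1.0, 3.0, 4.0], [0.0, 2.0, 2.0, 5.0]))  # 0.3333..., matching the test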
ZenanLin1999/FPGA_accerator_with_mnist_dataset | [
"1db3d698ebe3cf57050af9465e0b83ffef717d25"
] | [
"int16_version/tensorflow_mnist/mnist_int16.py"
] | [
"# -*- coding: utf-8 -*-\nimport input_data\nimport tensorflow as tf\nimport numpy as np\nfrom tf_fix import *\n\nmnist = input_data.read_data_sets('MNIST_data', one_hot=True)\nsess = tf.InteractiveSession()\n\nwith tf.name_scope('input'): \n\tx = tf.placeholder(\"float\", shape=[None, 784])\n\ty_ = tf.placeholder(\"float\", shape=[None, 10])\n\ndef weight_variable(shape):\n\tinitial = tf.truncated_normal(shape, stddev=0.1);\n\treturn tf.Variable(initial)\n\ndef conv2d(x, W):\n\treturn tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')\n\ndef max_pool_4x4(x):\n\treturn tf.nn.max_pool(x, ksize=[1, 4, 4, 1], strides=[1, 4, 4,1], padding='SAME')\n\n#First Convolutional Layer\nwith tf.name_scope('1st_CNN'): \n\tW_conv1 = weight_variable([3, 3, 1, 32])\n\tx_image = tf.reshape(x, [-1,28,28,1])\n\th_conv1 = conv2d(x_image, W_conv1) #[28,28,32]\n\th_pool1 = max_pool_4x4(h_conv1) #[7,7,32]\n\n#Densely Connected Layer\nwith tf.name_scope('Densely_NN'): \n\tW_fc1 = weight_variable([ 7* 7* 32, 256])\n\th_pool2_flat = tf.reshape(h_pool1, [-1, 7* 7* 32])\n\th_fc1= tf.matmul(h_pool2_flat , W_fc1) # [256]\n\n#Dropout\nwith tf.name_scope('Dropout'):\n\tkeep_prob = tf.placeholder(\"float\")\n\th_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)\n\n#Readout Layer\nwith tf.name_scope('Softmax'):\n\tW_fc2 = weight_variable([256, 10])\n\th_fc2 = tf.matmul(h_fc1_drop, W_fc2)\n\ty_conv=tf.nn.softmax(h_fc2)\n\nwith tf.name_scope('Loss'):\n\tcross_entropy = -tf.reduce_sum(y_*tf.log(y_conv))\n\nwith tf.name_scope('Train'):\n\ttrain_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)\n\t#train_step = tf.train.AdamOptimizer(5e-5).minimize(cross_entropy)\n\nwith tf.name_scope('Accuracy'):\n\tcorrect_prediction = tf.equal(tf.argmax(y_conv ,1), tf.argmax(y_,1))\n\taccuracy = tf.reduce_mean(tf.cast(correct_prediction , \"float\"))\n\ntf.initialize_all_variables().run()\n\nfor i in range(1000):\n\tbatch = mnist.train.next_batch(400);\n\tif i%200 == 0:\n\t\ttrain_accuracy = accuracy.eval(feed_dict={x:batch[0], y_: batch[1], keep_prob:1.0});\n\t\tprint(\"step %d, training accuracy %g\"%(i, train_accuracy));\n\ttrain_step.run(feed_dict={x: batch[0], y_: batch[1], keep_prob:0.5});\n\nprint(\"test accuracy %g\"%accuracy.eval(feed_dict={x: mnist.test.images, y_: mnist.test.labels, keep_prob: 1.0}))\n\nprint(\"=================================================\")\nf_cfg = open('./record/MNIST_LARGE_cfg.py', 'w')\n\nGet_Feature_Fraction_Part(x,\"img\",{x: mnist.test.images, y_: mnist.test.labels, keep_prob: 1.0},f_cfg)\nRecord_Weight(W_conv1,\"W_conv1\",f_cfg)\n#print(W_conv1.eval())\nGet_Feature_Fraction_Part(h_conv1,\"h_conv1\",{x: mnist.test.images, y_: mnist.test.labels, keep_prob: 1.0},f_cfg)\nGet_Feature_Fraction_Part(h_pool1,\"h_pool1\",{x: mnist.test.images, y_: mnist.test.labels, keep_prob: 1.0},f_cfg)\n\t\nRecord_Weight(tf.reshape(W_fc1,[7,7,32,256]),\"W_fc1\",f_cfg)\nGet_Feature_Fraction_Part(h_fc1,\"h_fc1\",{x: mnist.test.images, y_: mnist.test.labels, keep_prob: 1.0},f_cfg)\n\t\nRecord_Weight(tf.reshape(W_fc2,[1,1,256,10]),\"W_fc2\",f_cfg)\nGet_Feature_Fraction_Part(h_fc2,\"h_fc2\",{x: mnist.test.images, y_: mnist.test.labels, keep_prob: 1.0},f_cfg)\t\t\n\nf_cfg.close();\nprint(\"=================================================\")\n\nsess.close()\n"
] | [
[
"tensorflow.initialize_all_variables",
"tensorflow.placeholder",
"tensorflow.nn.max_pool",
"tensorflow.reshape",
"tensorflow.truncated_normal",
"tensorflow.train.AdamOptimizer",
"tensorflow.nn.conv2d",
"tensorflow.InteractiveSession",
"tensorflow.matmul",
"tensorflow.name_scope",
"tensorflow.cast",
"tensorflow.argmax",
"tensorflow.Variable",
"tensorflow.log",
"tensorflow.nn.softmax",
"tensorflow.nn.dropout"
]
] |
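Note: the 7*7*32 constant in the dense layer above comes from simple shape arithmetic: a 'SAME'-padded 3x3 conv keeps the 28x28 input size, and the 4x4/stride-4 max pool divides each side by 4.

conv_out = (28, 28, 32)            # 3x3 conv, padding SAME, 32 filters
pool_out = (28 // 4, 28 // 4, 32)  # 4x4 max pool, stride 4
flat = pool_out[0] * pool_out[1] * pool_out[2]
print(pool_out, flat)              # (7, 7, 32) 1568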
granatumx/gbox-py | [
"b3e264a22bc6a041f2dd631d952eae29c0ecae21",
"b3e264a22bc6a041f2dd631d952eae29c0ecae21"
] | [
"sample_coloring.py",
"scanpy_normalization.py"
] | [
"#!/usr/bin/env python\n\nimport math\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom scipy.spatial import ConvexHull\nfrom colour import Color\nfrom matplotlib.patches import Polygon\nimport statistics as st\n\nfrom granatum_sdk import Granatum\n\nCOLORS = [\"#3891ea\", \"#29ad19\", \"#ac2d58\", \"#db7580\", \"#ed2310\", \"#ca2dc2\", \"#5f7575\", \"#7cc1b5\", \"#c3bd78\", \"#4ffa24\"]\n\ndef main():\n gn = Granatum()\n sample_coords = gn.get_import(\"viz_data\")\n value = gn.get_import(\"value\")\n# print(value)\n coloring_type = gn.get_arg(\"coloring_type\")\n bounding_stdev = gn.get_arg(\"bounding_stdev\")\n\n coords = sample_coords.get(\"coords\")\n dim_names = sample_coords.get(\"dimNames\")\n\n df = pd.DataFrame(\n {\"x\": [a[0] for a in coords.values()], \"y\": [a[1] for a in coords.values()], \"value\": pd.Series(value)},\n index=coords.keys()\n )\n\n# print(df)\n\n if coloring_type == \"categorical\":\n uniq = df[\"value\"].unique();\n num = uniq.shape[0]\n COLORS2 = plt.get_cmap('gist_rainbow')\n carr = [0]*df.shape[0]\n listcats = list(df[\"value\"]) \n miny = min(list(df[\"y\"]))\n maxy = max(list(df[\"y\"]))\n scaley = (maxy-miny)/650\n print(\"Scaley = {}\".format(scaley))\n\n for i, cat in enumerate(df[\"value\"].unique()):\n dff = df[df[\"value\"] == cat]\n xs = list(dff[\"x\"])\n ys = list(dff[\"y\"])\n #avgx = sum(dff[\"x\"]) / len(dff[\"x\"]) \n #avgy = sum(dff[\"y\"]) / len(dff[\"y\"]) \n #plt.scatter(x=dff[\"x\"], y=dff[\"y\"], s=5000 / df.shape[0], c=COLORS[i].hex_l, label=cat)\n #plt.scatter(x=dff[\"x\"], y=dff[\"y\"], s=5000 / df.shape[0], c=[abs(hash(cat)) % 256]*len(dff[\"x\"]), cmap=COLORS2, label=cat)\n #plt.scatter(x=dff[\"x\"], y=dff[\"y\"], s=5000 / df.shape[0], c=abs(hash(cat)) % 256, cmap=COLORS2, label=cat)\n colorindex = abs(hash(cat)) % 256\n craw = COLORS2(colorindex/255.0)\n color = (craw[0], craw[1], craw[2], 0.2)\n whitetransparent = (1, 1, 1, 0.5)\n coloropaque = (craw[0], craw[1], craw[2], 1.0)\n if len(xs)>3:\n pts = list(zip(xs, ys))\n cent = np.mean(pts, axis=0)\n lengs = list(map(lambda p: math.sqrt((p[0]-cent[0])*(p[0]-cent[0])+(p[1]-cent[1])*(p[1]-cent[1])), pts))\n avgleng = st.mean(lengs)\n stdleng = st.stdev(lengs)*bounding_stdev\n rpts = []\n if(stdleng > 0.0):\n for j, ln in enumerate(lengs):\n if(ln - avgleng < stdleng):\n rpts.append(pts[j])\n pts = rpts\n cent = np.mean(pts, axis=0)\n hull = ConvexHull(pts)\n ptslist = []\n for pt in hull.simplices:\n ptslist.append(pts[pt[0]])\n ptslist.append(pts[pt[1]])\n ptslist.sort(key=lambda p: np.arctan2(p[1]-cent[1], p[0]-cent[0]))\n ptslist = ptslist[0::2]\n ptslist.insert(len(ptslist), ptslist[0])\n lowestpt = ptslist[0]\n for pt in ptslist:\n if(pt[1] < lowestpt[1]):\n lowestpt = pt\n poly = Polygon(1.1*(np.array(ptslist)-cent)+cent, facecolor=color)\n poly.set_capstyle('round')\n plt.gca().add_patch(poly)\n plt.text(lowestpt[0], lowestpt[1]-scaley*10, cat, fontsize=6, ha=\"center\", va=\"center\", color=\"black\", bbox=dict(boxstyle=\"round\",fc=whitetransparent,ec=coloropaque))\n for j,x in enumerate(listcats):\n if x == cat:\n carr[j] = int(abs(hash(cat)) % 256)\n \n plt.scatter(x=df[\"x\"], y=df[\"y\"], s=5000 / df.shape[0], c=carr, cmap=COLORS2)\n lgd = plt.legend(markerscale=6, loc='upper center', bbox_to_anchor=(0.5, -0.05), ncol=5)\n#60 / (5000 / df.shape[0])\n elif coloring_type == \"continuous\":\n plt.scatter(x=df[\"x\"], y=df[\"y\"], s=5000 / df.shape[0], c=df[\"value\"], cmap=\"Reds\")\n plt.colorbar()\n\n plt.xlabel(dim_names[0])\n 
plt.ylabel(dim_names[1])\n # plt.tight_layout()\n\n gn.add_current_figure_to_results(\n \"Scatter-plot\",\n dpi=75,\n width=750,\n height=650,\n# savefig_kwargs={'bbox_extra_artists': (lgd,), 'bbox_inches': 'tight'}\n savefig_kwargs={'bbox_inches': 'tight'}\n )\n\n gn.commit()\n\n\nif __name__ == \"__main__\":\n main()\n",
"from itertools import combinations\n\nimport multiprocessing\nimport scanpy.api as sc\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom sklearn.preprocessing import quantile_transform\nfrom scipy.sparse import csc_matrix\n\nfrom granatum_sdk import Granatum\n\n# import pandas as pd\n# import seaborn as sns\n\n\nnans = np.array([np.nan, np.nan])\nzeros = np.array([0, 0])\n\n\ndef trim_extreme(x, a, b):\n low = np.percentile(x, a)\n high = np.percentile(x, b)\n filtered = x[(x > low) & (x < high)]\n return filtered.copy()\n\n\ndef make_plot(adata, log_trans=False):\n violin_data = []\n for cell in adata.X:\n filtered = cell.toarray().flatten()\n #filtered = trim_extreme(filtered, 5, 95)\n if log_trans:\n #cell = np.log1p(cell)\n filtered = np.log1p(filtered)\n if filtered.shape[0] == 0:\n #cell = zeros\n filtered = zeros\n\n violin_data.append(filtered)\n\n plt.figure()\n plt.boxplot(violin_data)\n plt.xlabel('Cells')\n plt.ylabel('Expression lvl (log transformed)')\n plt.tight_layout()\n\ndef quantile_normalization(mat):\n # double argsort for getting the corresponding ranks for\n # each element in the vector\n\n rank_mat = np.argsort(np.argsort(mat, 1), 1)\n medians = np.median(np.sort(mat, 1), 0)\n normalized = np.zeros_like(mat)\n\n for i in range(rank_mat.shape[0]):\n normalized[i, :] = medians[rank_mat[i, :]]\n\n # normalized = quantile_transform(mat, copy=False)\n\n #return normalized.tolist()\n return sc.AnnData(csc_matrix(normalized))\n\n\ndef main():\n gn = Granatum()\n\n adata = gn.ann_data_from_assay(gn.get_import('assay'))\n num_cells_to_sample = gn.get_arg('num_cells_to_sample')\n method = gn.get_arg('method')\n log_trans_when_plot = gn.get_arg('log_trans_when_plot')\n\n if num_cells_to_sample > adata.shape[0]:\n num_cells_to_sample = adata.shape[0]\n\n sampled_cells_idxs = np.sort(np.random.choice(adata.shape[0], num_cells_to_sample, replace=False))\n\n make_plot(adata[sampled_cells_idxs, :], log_trans=log_trans_when_plot)\n gn.add_current_figure_to_results(\n 'Before normalization: Each bar in the box plot represents one cell. Only cells between the 5th and 95th percentile are shown.',\n height=350,\n dpi=75 * 40 / max(40, num_cells_to_sample)\n )\n\n if method == 'quantile':\n adata = quantile_normalization(adata.X.toarray())\n elif method == 'scanpy':\n sc.pp.normalize_total(adata)\n else:\n raise ValueError()\n\n make_plot(adata[sampled_cells_idxs, :], log_trans=log_trans_when_plot)\n gn.add_current_figure_to_results(\n 'After normalization: Each bar in the box plot represents one cell. Only cells between the 5th and 95th percentile are shown.',\n height=350,\n dpi=75 * 40 / max(40, num_cells_to_sample)\n )\n\n gn.export_statically(gn.assay_from_ann_data(adata), 'Normalized assay')\n\n gn.commit()\n\n\nif __name__ == '__main__':\n main()\n"
] | [
[
"numpy.arctan2",
"scipy.spatial.ConvexHull",
"matplotlib.pyplot.legend",
"pandas.Series",
"matplotlib.pyplot.gca",
"matplotlib.pyplot.get_cmap",
"matplotlib.pyplot.ylabel",
"numpy.mean",
"matplotlib.pyplot.colorbar",
"numpy.array",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.scatter"
],
[
"numpy.zeros_like",
"matplotlib.pyplot.boxplot",
"numpy.sort",
"numpy.log1p",
"scipy.sparse.csc_matrix",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.tight_layout",
"numpy.argsort",
"numpy.random.choice",
"numpy.percentile",
"matplotlib.pyplot.ylabel",
"numpy.array",
"matplotlib.pyplot.xlabel"
]
] |
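Note: quantile_normalization in the second file above uses the double-argsort rank trick. A minimal numpy sketch of the same computation (quantile_normalize is a hypothetical name): each row is replaced by the column-wise medians of the row-sorted matrix, indexed by that row's own ranks, so every row ends up with an identical value distribution.

import numpy as np

def quantile_normalize(mat):
    # argsort of argsort yields each element's rank within its row.
    ranks = np.argsort(np.argsort(mat, axis=1), axis=1)
    # Median of each sorted column defines the shared reference distribution.
    medians = np.median(np.sort(mat, axis=1), axis=0)
    return medians[ranks]

mat = np.array([[5.0, 2.0, 3.0],
                [4.0, 1.0, 9.0]])
print(quantile_normalize(mat))  # both rows now hold the values 1.5, 3.5, 7.0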
Bheshaj-Kumar/Transformer-Grapheme-to-Phoneme-Conversion | [
"cc1ff53498cf9d178e1880b5d074ec91559ac95a"
] | [
"model/new_models.py"
] | [
"import sys\nimport numpy as np\nimport tensorflow as tf\nfrom model.transformer_utils import create_encoder_padding_mask, create_mel_padding_mask, create_look_ahead_mask\n#from preprocessing.text import Pipeline\nfrom model.layers import PreBottleNeckDecoder, Encoder, Decoder, SpeakerModule\nfrom utils.losses import model_loss, crossentropy_loss\nimport data_utils \n\nclass Transformer(tf.keras.models.Model):\n\n def __init__(self,\n encoder_model_dimension : int,\n decoder_model_dimension : int,\n encoder_num_heads : int,\n decoder_num_heads : int,\n encoder_num_layers : int,\n decoder_num_layers : int,\n encoder_maximum_position_encoding : int,\n decoder_maximum_position_encoding : int,\n encoder_feed_forward_dimension : int,\n decoder_feed_forward_dimension : int,\n dropout_rate : int,\n encoder_vocab_size : int,\n decoder_vocab_size : int,\n debug : bool,\n diagonal_bandwidth_b : int,\n diagonal_rate_regul_coeff : float,\n layernorm : bool,\n Ldc : bool,\n buckets: list,\n training: bool,\n epoch_path: str,\n **kwargs\n ):\n \n super(Transformer, self).__init__(**kwargs)\n self.Ldc = Ldc\n self.buckets = buckets\n self.b = diagonal_bandwidth_b\n self.lamda = diagonal_rate_regul_coeff\n self.isTraining = training\n if self.isTraining:\n try:\n print(\"\\n\\n\")\n with open(epoch_path, 'r') as f:\n self.epoch = int(f.readlines()[0])\n print(\"Successfully loaded epoch count.\")\n except:\n print(\"Could not load epoch path : %s\" % epoch_path)\n print(\"Creating a file with default epoch = 1\")\n with open(epoch_path, 'w+') as f:\n f.write(str(1))\n print(\"Successfully created epoch file.\")\n self.epoch = 1\n self.epoch_path = epoch_path\n\n self.encoder = Encoder(d_model = encoder_model_dimension,\n num_heads = encoder_num_heads,\n num_layers = encoder_num_layers,\n dff = encoder_feed_forward_dimension,\n input_vocab_size = encoder_vocab_size, #change\n maximum_position_encoding = encoder_maximum_position_encoding,\n layernorm= layernorm,\n rate = dropout_rate,\n name = 'Encoder')\n\n self.decoder = Decoder(d_model = decoder_model_dimension,\n num_heads = decoder_num_heads,\n dff = decoder_feed_forward_dimension,\n maximum_position_encoding = decoder_maximum_position_encoding,\n output_vocab_size = decoder_vocab_size, #change\n num_layers = decoder_num_layers,\n rate = dropout_rate,\n name = 'Decoder')\n \n self.decoder_layers = decoder_num_layers\n self.regul_coeff = diagonal_rate_regul_coeff\n self.loss_bandwidth = diagonal_bandwidth_b\n self.decoder_model_dim = decoder_model_dimension\n self.linear = tf.keras.layers.Dense(decoder_vocab_size, name= 'linear')\n ## remaining ##\n self.training_input_signature = [\n tf.TensorSpec(shape=(None, None), dtype = tf.int32),\n tf.TensorSpec(shape=(None), dtype = tf.int64),\n tf.TensorSpec(shape=(None, None), dtype = tf.int32),\n tf.TensorSpec(shape=(None), dtype = tf.int64),\n\n ]\n\n self.forward_input_signature = [\n tf.TensorSpec(shape=(None, None), dtype= tf.int32),\n tf.TensorSpec(shape=(None, 1), dtype= tf.int32),\n #tf.TensorSpec(shape=(None, None, mel_channels), dtype = tf.float32)\n ]\n\n self.encoder_signature = [\n tf.TensorSpec(shape=(None, None), dtype=tf.int32)\n ]\n\n self.decoder_signature = [\n tf.TensorSpec(shape=(None, None, encoder_model_dimension), dtype = tf.float32),\n tf.TensorSpec(shape=(None, None), dtype=tf.float32),\n tf.TensorSpec(shape=(None, None, None, None), dtype=tf.float32)\n ]\n\n self.debug = debug\n self._apply_all_signatures()\n\n @property\n def step(self):\n return int(self.optimizer.iterations)\n\n 
def _apply_signature(self, function, signature):\n if self.debug:\n return function\n else:\n return tf.function(input_signature=signature)(function)\n def increment_epoch(self):\n self.epoch += 1\n with open(self.epoch_path,'w') as f:\n f.write(str(self.epoch))\n\n def call(self):\n self._apply_all_signature()\n\n def _apply_all_signatures(self):\n #self.forward = self._apply_signature(self._forward, self.forward_input_signature)\n self.train_step = self._apply_signature(self._train_step, self.training_input_signature)\n self.val_step = self._apply_signature(self._val_step, self.training_input_signature)\n self.forward_encoder = self._apply_signature(self._forward_encoder, self.encoder_signature)\n self.forward_decoder = self._apply_signature(self._forward_decoder, self.decoder_signature)\n\n def _call_encoder(self, inputs, training):\n padding_mask = create_encoder_padding_mask(inputs)\n enc_input = inputs \n enc_output, attn_weights = self.encoder(enc_input,\n training= training,\n mask = padding_mask)\n return enc_output, padding_mask, attn_weights\n\n def _call_decoder(self, dec_input, enc_output, enc_padding_mask, training):\n dec_target_padding_mask = create_mel_padding_mask(dec_input)\n look_ahead_mask = create_look_ahead_mask(tf.shape(dec_input)[1])\n combined_mask = tf.maximum(dec_target_padding_mask, look_ahead_mask)\n dec_output, attention_weights = self.decoder(x = dec_input,\n enc_output = enc_output,\n training = training,\n look_ahead_mask = combined_mask,\n padding_mask = enc_padding_mask\n )\n linear = self.linear(dec_output) \n model_out = {'linear':linear, 'decoder_attention':attention_weights, 'decoder_output':dec_output}\n return model_out\n\n def _forward(self, inp, output): # not getting used\n model_out = self.__call__(inputs = inp,\n speaker_input = sp_id,\n targets = output,\n training = False) \n \n return model_out\n\n def _forward_encoder(self, inputs):\n return self._call_encoder(inputs, training = False)\n \n def _forward_decoder(self, encoder_output, targets, encoder_padding_mask):\n return self._call_decoder(targets, encoder_output, encoder_padding_mask, training = False) \n\n def _gta_forward(self, encoder_inputs, seq_len, decoder_inputs, seq_len_target, training):\n tar_inp = decoder_inputs[:,:-1]\n tar_real = decoder_inputs[:,1:]\n\n seq_len = int(tf.shape(tar_inp)[1]) \n\n with tf.GradientTape() as tape:\n model_out = self.__call__(inputs= encoder_inputs,\n targets = tar_inp,\n training = training)\n\n loss = model_loss(tar_real, \n model_out['linear']\n )\n model_out.update({'loss' : loss})\n model_out.update({'target': tar_inp}) \n return model_out, tape\n\n def _train_step(self, encoder_inputs, seq_len, _decoder_inputs, seq_len_target): \n model_out, tape = self._gta_forward(encoder_inputs, seq_len, _decoder_inputs, seq_len_target, training= True)\n gradients = tape.gradient(model_out['loss'], self.trainable_variables)\n self.optimizer.apply_gradients(zip(gradients, self.trainable_variables))\n\n return model_out\n\n def _val_step(self, encoder_inputs, seq_len, decoder_inputs, seq_len_target):\n model_out, _ = self._gta_forward(encoder_inputs, seq_len, decoder_inputs, seq_len_target, training= False)\n return model_out\n\n def _compile(self, optimizer):\n self.compile(loss = crossentropy_loss,\n optimizer = optimizer)\n\n def call(self, inputs, targets, training): \n encoder_output, encoder_padding_mask, encoder_attention = self._call_encoder(inputs, training=training) \n model_out = self._call_decoder(targets, encoder_output, encoder_padding_mask, 
training= training)\n model_out.update({'encoder_attention' : encoder_attention})\n return model_out\n\n def _compute_centroid(self, attention, win_c, s):\n attention = tf.reduce_mean(attention, axis=1)\n if s >= attention.shape[-1]:\n return win_c\n C_s = tf.cast(tf.reduce_sum(attention, 1).numpy()[:, s], tf.int32)\n if win_c + 3 * self.r >= C_s:\n return win_c + 3*self.r\n return win_c\n\n def predict(self, encoder_input, max_length = 20, verbose = True):\n #print(inp.shape)\n start_vec = tf.convert_to_tensor(tf.constant([data_utils.GO_ID]), dtype= tf.int32)\n inp = tf.cast(tf.expand_dims(encoder_inp, 0), tf.int32) \n output = tf.cast(tf.expand_dims(start_vec, 0), tf.int32)\n output_concat = tf.cast(tf.expand_dims(start_vec, 0), tf.int32) \n out_dict = {}\n encoder_output, encoder_padding_mask, encoder_attention = self.forward_encoder(encoder_inp) \n \n for i in range(max_length + 1):\n model_out = self.forward_decoder(encoder_output, output, encoder_padding_mask)\n output = tf.concat([output, model_out['mel_linear'][:1, -1:, :]], axis=-2) \n output_concat = tf.concat([tf.cast(output_concat, tf.int32), model_out['mel_linear'][:1, -1:, :]],\n axis=-2) ####### UNCLEAR -SELF.R ##########\n out_dict = {'linear': output_concat[0, 1:, :],\n 'decoder_attention': model_out['decoder_attention'],\n 'encoder_attention': encoder_attention}\n predictions = model_out['mel_linear'][:,-1:,:]\n prediction_id = tf.cast(tf.argmax(predictions, axis = -1), dtype= tf.int32)\n if verbose:\n sys.stdout.write(f'\\rpred word phoneme: {i}')\n if prediction_id == data_utils.EOS_ID:\n if verbose:\n print('Stopping')\n break\n \n return out_dict\n\n def set_constants(self, learning_rate: float= None):\n if learning_rate is not None:\n self.optimizer.lr.assign(learning_rate)\n\n def get_batch(self, data, bucket_id=None):\n \"\"\"Prepare minibatch from given data.\n Args:\n data: A list of datapoints (all from same bucket).\n bucket_id: Bucket ID of data. This is irrevelant for training but\n for evaluation we can limit the padding by the bucket size.\n Returns:\n Batched input IDs, input sequence length, output IDs & output\n sequence length\n \"\"\"\n #if not self.isTraining:\n # # During evaluation the bucket size limits the amount of padding\n # _, decoder_size = self.buckets[bucket_id]\n \n _, decoder_size = tf.cond(tf.math.equal(self.isTraining, False), lambda:self.buckets[bucket_id])\n encoder_inputs, decoder_inputs = [], []\n batch_size = len(data)\n\n #seq_len = np.zeros((batch_size), dtype=np.int64)\n #seq_len_target = np.zeros((batch_size), dtype=np.int64)\n\n seq_len = tf.zeros((batch_size), dtype=tf.int64)\n seq_len_target = tf.zeros((batch_size), dtype=tf.int64)\n \n for i, sample in enumerate(data):\n encoder_input, decoder_input = sample\n seq_len[i] = len(encoder_input)\n if not self.isTraining:\n seq_len_target[i] = decoder_size\n else:\n # 1 is added to output sequence length because the EOS token is\n # crucial to \"halt\" the decoder. Consider it the punctuation\n # mark of a English sentence. 
Both are necessary.\n seq_len_target[i] = len(decoder_input) + 1\n\n # Maximum input and output lengths, which bound the padding\n max_len_source = max(seq_len)\n max_len_target = max(seq_len_target)\n\n for i, sample in enumerate(data):\n encoder_input, decoder_input = sample\n # Encoder inputs are padded up to the batch maximum\n encoder_pad_size = max_len_source - len(encoder_input)\n encoder_pad = [data_utils.PAD_ID] * encoder_pad_size\n # The original seq2seq recipe reversed the encoder input - https://arxiv.org/abs/1409.3215\n #encoder_inputs.append(list(reversed(encoder_input)) + encoder_pad)\n\n encoder_inputs.append(encoder_input + encoder_pad) # reversal removed\n # 1 is added to decoder_input because GO_ID is considered a part of\n # decoder input. While EOS_ID is also added, it's really used by\n # the target tensor (self.tensor) in the core code above.\n decoder_pad_size = max_len_target - (len(decoder_input) + 1)\n decoder_inputs.append([data_utils.GO_ID] +\n decoder_input +\n [data_utils.EOS_ID] +\n [data_utils.PAD_ID] * decoder_pad_size)\n\n # Both id sequences stay batch-major (the original time-major transpose was removed)\n encoder_inputs = np.asarray(encoder_inputs, dtype=np.int32)\n decoder_inputs = np.asarray(decoder_inputs, dtype=np.int32)\n return (tf.convert_to_tensor(encoder_inputs, dtype= tf.int32),\n tf.convert_to_tensor(seq_len, dtype= tf.int64),\n tf.convert_to_tensor(decoder_inputs, dtype= tf.int32),\n tf.convert_to_tensor(seq_len_target, dtype= tf.int64))\n"
] | [
[
"tensorflow.math.equal",
"tensorflow.zeros",
"tensorflow.shape",
"tensorflow.function",
"tensorflow.reduce_mean",
"numpy.asarray",
"tensorflow.expand_dims",
"tensorflow.cast",
"tensorflow.GradientTape",
"tensorflow.convert_to_tensor",
"tensorflow.keras.layers.Dense",
"tensorflow.TensorSpec",
"tensorflow.concat",
"tensorflow.argmax",
"tensorflow.constant",
"tensorflow.reduce_sum",
"tensorflow.maximum"
]
] |
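The decoder in the file above combines a target padding mask with a look-ahead (causal) mask via tf.maximum, so each position can only attend to itself and earlier positions. The mask helpers (create_look_ahead_mask, create_mel_padding_mask, create_encoder_padding_mask) are not shown in this fragment; a minimal sketch of the look-ahead helper, assuming the standard band_part formulation from the TensorFlow transformer tutorial:

import tensorflow as tf

def create_look_ahead_mask(size):
    # Strictly upper-triangular ones: position i may not attend to positions j > i.
    return 1 - tf.linalg.band_part(tf.ones((size, size)), -1, 0)

print(create_look_ahead_mask(3))  # [[0,1,1],[0,0,1],[0,0,0]]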
cvanoort/differentiable-plasticity | [
"28c53765ed38f80fd5a5c49e3e62a0e6555eb669"
] | [
"maze/plotfigure.py"
] | [
"# Code for making a figure\n#\n# Copyright (c) 2018 Uber Technologies, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport numpy as np\nimport glob\nimport matplotlib.pyplot as plt\nimport scipy\nfrom scipy import stats\n\ncolorz = ['r', 'b', 'g', 'c', 'm', 'y', 'orange', 'k']\n\ngroupnames = glob.glob('./loss_*rngseed_0.txt')\n# groupnames = glob.glob('./loss_*new*eplen_250*rngseed_0.txt')\n\nplt.rc('font', size=14)\n\n\ndef mavg(x, N):\n cumsum = np.cumsum(np.insert(x, 0, 0))\n return (cumsum[N:] - cumsum[:-N]) / N\n\n\nplt.ion()\n# plt.figure(figsize=(5,4)) # Smaller figure = relative larger fonts\nplt.figure()\n\nallmedianls = []\nalllosses = []\nposcol = 0\nminminlen = 999999\nfor numgroup, groupname in enumerate(groupnames):\n if \"lstm\" in groupname:\n continue\n g = groupname[:-6] + \"*\"\n print(\"====\", groupname)\n fnames = glob.glob(g)\n fulllosses = []\n losses = []\n lgts = []\n for fn in fnames:\n z = np.loadtxt(fn)\n\n # For each run, we average the losses over K successive episodes - otherwise figure is unreadable due to noise!\n z = mavg(z, 10)\n\n z = z[::10] # Decimation - speed things up!\n\n z = z[:2001]\n\n if len(z) < 1000:\n print(fn)\n continue\n # z = z[:90]\n lgts.append(len(z))\n fulllosses.append(z)\n minlen = min(lgts)\n if minlen < minminlen:\n minminlen = minlen\n print(minlen)\n # if minlen < 1000:\n # continue\n for z in fulllosses:\n losses.append(z[:minlen])\n\n losses = np.array(losses)\n alllosses.append(losses)\n\n meanl = np.mean(losses, axis=0)\n stdl = np.std(losses, axis=0)\n # cil = stdl / np.sqrt(losses.shape[0]) * 1.96 # 95% confidence interval - assuming normality\n cil = stdl / np.sqrt(\n losses.shape[0]) * 2.5 # 95% confidence interval - approximated with the t-distribution for 7 d.f. 
(?)\n\n medianl = np.median(losses, axis=0)\n allmedianls.append(medianl)\n q1l = np.percentile(losses, 25, axis=0)\n q3l = np.percentile(losses, 75, axis=0)\n\n highl = np.max(losses, axis=0)\n lowl = np.min(losses, axis=0)\n # highl = meanl+stdl\n # lowl = meanl-stdl\n\n xx = range(len(meanl))\n\n # xticks and labels\n xt = range(0, len(meanl), 500)\n xtl = [str(10 * 10 * i) for i in\n xt] # Because of decimation above, and only every 10th loss is recorded in the files\n\n if \"plastic\" in groupname:\n lbl = \"Plastic\"\n elif \"rnn\" in groupname:\n lbl = \"Non-plastic\"\n else:\n lbl = groupname # fallback so lbl is always defined for the plot call below\n\n # plt.plot(mavg(meanl, 100), label=g) #, color='blue')\n # plt.fill_between(xx, lowl, highl, alpha=.2)\n # plt.fill_between(xx, q1l, q3l, alpha=.1)\n # plt.plot(meanl) #, color='blue')\n ####plt.plot(mavg(medianl, 100), label=g) #, color='blue') # mavg changes the number of points !\n # plt.plot(mavg(q1l, 100), label=g, alpha=.3) #, color='blue')\n # plt.plot(mavg(q3l, 100), label=g, alpha=.3) #, color='blue')\n # plt.fill_between(xx, q1l, q3l, alpha=.2)\n # plt.plot(medianl, label=g) #, color='blue')\n\n AVGSIZE = 1\n\n xlen = len(mavg(q1l, AVGSIZE))\n plt.plot(mavg(medianl, AVGSIZE), color=colorz[poscol % len(colorz)],\n label=lbl) # mavg changes the number of points !\n plt.fill_between(range(xlen), mavg(q1l, AVGSIZE), mavg(q3l, AVGSIZE), alpha=.2, color=colorz[poscol % len(colorz)])\n\n # xlen = len(mavg(meanl, AVGSIZE))\n # plt.plot(mavg(meanl, AVGSIZE), label=g, color=colorz[poscol % len(colorz)]) # mavg changes the number of points !\n # plt.fill_between( range(xlen), mavg(meanl - cil, AVGSIZE), mavg(meanl + cil, AVGSIZE), alpha=.2, color=colorz[poscol % len(colorz)])\n\n poscol += 1\n\n # plt.fill_between( range(xlen), mavg(lowl, 100), mavg(highl, 100), alpha=.2, color=colorz[numgroup % len(colorz)])\n\n # plt.plot(mavg(losses[0], 1000), label=g, color=colorz[numgroup % len(colorz)])\n # for curve in losses[1:]:\n # plt.plot(mavg(curve, 1000), color=colorz[numgroup % len(colorz)])\n\nps = []\n# Adapt for varying lengths across groups\n# for n in range(0, alllosses[0].shape[1], 3):\nfor n in range(0, minminlen):\n ps.append(scipy.stats.ranksums(alllosses[0][:, n], alllosses[1][:, n]).pvalue)\nps = np.array(ps)\n# report how often the group difference is significant over the final 500 recorded points\n# (the bare expressions here previously discarded their results when run as a script)\nprint('fraction of last 500 points with p < .05:', np.mean(ps[-500:] < .05))\nprint('fraction of last 500 points with p < .01:', np.mean(ps[-500:] < .01))\n\nplt.legend(loc='best', fontsize=14)\n# plt.xlabel('Loss (sum square diff. b/w final output and target)')\nplt.xlabel('Number of Episodes')\nplt.ylabel('Reward')\nplt.xticks(xt, xtl)\n# plt.tight_layout()\n"
] | [
[
"numpy.sqrt",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.rc",
"matplotlib.pyplot.xticks",
"matplotlib.pyplot.figure",
"numpy.std",
"numpy.median",
"numpy.insert",
"numpy.percentile",
"numpy.max",
"matplotlib.pyplot.ylabel",
"numpy.min",
"numpy.loadtxt",
"scipy.stats.ranksums",
"numpy.array",
"matplotlib.pyplot.ion",
"matplotlib.pyplot.xlabel",
"numpy.mean"
]
] |
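The script above compares the two groups with a per-timestep Wilcoxon rank-sum test and then summarizes how often the difference is significant over the last 500 recorded points. A self-contained sketch of that test on synthetic reward samples (the variable names and distributions here are illustrative, not from the experiment):

import numpy as np
from scipy import stats

rng = np.random.default_rng(0)
plastic = rng.normal(1.0, 0.5, size=8)      # hypothetical final rewards, 8 plastic runs
non_plastic = rng.normal(0.6, 0.5, size=8)  # hypothetical final rewards, 8 non-plastic runs

# Two-sided rank-sum test, as applied at each recorded timestep in the script.
result = stats.ranksums(plastic, non_plastic)
print(result.statistic, result.pvalue)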
greenelab/phenoplier | [
"95f04b17f0b5227560fcf32ac0a85b2c5aa9001f"
] | [
"nbs/13_consensus_clustering/py/030_03-analysis-coassociation.py"
] | [
"# ---\n# jupyter:\n# jupytext:\n# cell_metadata_filter: all,-execution,-papermill,-trusted\n# formats: ipynb,py//py:percent\n# text_representation:\n# extension: .py\n# format_name: percent\n# format_version: '1.3'\n# jupytext_version: 1.7.1\n# kernelspec:\n# display_name: Python 3\n# language: python\n# name: python3\n# ---\n\n# %% [markdown] tags=[]\n# # Description\n\n# %% [markdown] tags=[]\n# It analyzes how clusters of traits were grouped across the ensemble partitions. For example, a stable cluster (obtained from consensus partitions) of cardiovascular diseases can show that all traits were always grouped together across all partitions of the ensemble; another cluster might show that some traits were clustered more often than others, representing a less stable group of traits.\n\n# %% [markdown] tags=[]\n# # Modules loading\n\n# %% tags=[]\n# %load_ext autoreload\n# %autoreload 2\n\n# %% tags=[]\nfrom IPython.display import display\nfrom pathlib import Path\n\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\nfrom utils import generate_result_set_name\nimport conf\n\n# %% [markdown] tags=[]\n# # Settings\n\n# %% tags=[]\nCONSENSUS_CLUSTERING_DIR = Path(\n conf.RESULTS[\"CLUSTERING_DIR\"], \"consensus_clustering\"\n).resolve()\n\ndisplay(CONSENSUS_CLUSTERING_DIR)\n\n# %% [markdown] tags=[]\n# ## Load data\n\n# %% tags=[]\nINPUT_SUBSET = \"umap\"\n\n# %% tags=[]\nINPUT_STEM = \"z_score_std-projection-smultixcan-efo_partial-mashr-zscores\"\n\n# %% tags=[]\nDR_OPTIONS = {\n \"n_components\": 5,\n \"metric\": \"euclidean\",\n \"n_neighbors\": 15,\n \"random_state\": 0,\n}\n\n# %% tags=[]\ninput_filepath = Path(\n conf.RESULTS[\"DATA_TRANSFORMATIONS_DIR\"],\n INPUT_SUBSET,\n generate_result_set_name(\n DR_OPTIONS, prefix=f\"{INPUT_SUBSET}-{INPUT_STEM}-\", suffix=\".pkl\"\n ),\n).resolve()\ndisplay(input_filepath)\n\nassert input_filepath.exists(), \"Input file does not exist\"\n\ninput_filepath_stem = input_filepath.stem\ndisplay(input_filepath_stem)\n\n# %% tags=[]\ndata_umap = pd.read_pickle(input_filepath)\n\n# %% tags=[]\ndata_umap.shape\n\n# %% tags=[]\ndata_umap.head()\n\n# %% [markdown] tags=[]\n# # Load best partitions\n\n# %% tags=[]\ninput_file = Path(CONSENSUS_CLUSTERING_DIR, \"best_partitions_by_k.pkl\").resolve()\ndisplay(input_file)\n\n# %% tags=[]\nbest_partitions = pd.read_pickle(input_file)\n\n# %% tags=[]\nbest_partitions.shape\n\n# %% tags=[]\nbest_partitions.head()\n\n# %% [markdown] tags=[]\n# # Load coassociation matrix\n\n# %% tags=[]\ninput_file = Path(CONSENSUS_CLUSTERING_DIR, \"ensemble_coassoc_matrix.npy\").resolve()\ndisplay(input_file)\n\n# %% tags=[]\ncoassoc_matrix = np.load(input_file)\n\n# %% tags=[]\ncoassoc_matrix = pd.DataFrame(\n data=1.0 - coassoc_matrix,\n index=data_umap.index.copy(),\n columns=data_umap.index.copy(),\n)\n\n# %% tags=[]\ncoassoc_matrix.shape\n\n# %% tags=[]\ncoassoc_matrix.head()\n\n# %% [markdown] tags=[]\n# The coassociation matrix shows the percentage of times a pair of traits was clustered together across the ensemble partitions.\n\n# %% [markdown] tags=[]\n# ## Stats\n\n# %% [markdown] tags=[]\n# Here I show some general stats of the coassociation matrix, useful to compare results below. 
For instance, if a pair of traits got clustered together 61% of the time, how strong is that?\n\n# %% tags=[]\ndf = coassoc_matrix.where(np.triu(np.ones(coassoc_matrix.shape)).astype(bool))\ndf = df.stack().reset_index()\n\ncoassoc_matrix_stats = df[0].describe(\n percentiles=[0.25, 0.50, 0.75, 0.80, 0.90, 0.95, 0.99]\n)\n\n# %% tags=[]\ncoassoc_matrix_stats.apply(str)\n\n# %% [markdown] tags=[]\n# On average, a pair of traits appears together in 45% of the partitions in the ensemble (the median is 48%). That makes sense, since for some partitions the resolution (number of clusters) might not be enough to get smaller clusters.\n\n# %% [markdown] tags=[]\n# # Plot coassociation values\n\n# %% [markdown] tags=[]\n# ## Functions\n\n# %% tags=[]\nfrom IPython.display import HTML\n\n\n# %% tags=[]\ndef plot_cluster(data, partition, cluster_number, figsize=None):\n k = np.unique(partition).shape[0]\n\n display(HTML(f\"<h3>Cluster {k}.{cluster_number}</h3>\"))\n\n k_traits = data.loc[partition == cluster_number].index\n\n with sns.plotting_context(\"paper\"):\n f, ax = plt.subplots(figsize=figsize) # (figsize=(8, 8))\n\n display(\n sns.heatmap(\n data=coassoc_matrix.loc[k_traits, k_traits],\n vmin=coassoc_matrix_stats[\"50%\"],\n vmax=1.0,\n annot=True,\n fmt=\".2f\",\n square=True,\n )\n )\n\n\n# %% tags=[]\nk = 29\ndisplay(HTML(f\"<h2>k: {k}</h2>\"))\ndisplay(best_partitions.loc[k])\n\npart = best_partitions.loc[k, \"partition\"]\npart_stats = pd.Series(part).value_counts()\ndisplay(part_stats)\n\n# %% tags=[]\nplot_cluster(data_umap, part, 10)\n\n# %% [markdown] tags=[]\n# The plot above shows that these 8 keratometry measurements (such as 3mm weak meridian left) were always clustered together in all partitions of the ensemble, representing a very strong/stable grouping.\n\n# %% tags=[]\nplot_cluster(data_umap, part, 15, figsize=(10, 10))\n\n# %% [markdown] tags=[]\n# The \"heel bone mineral density\" cluster is not as strong as the keratometry one, since some trait pairs have a coassociation value of 0.89. However, 0.89 is well above the 99th percentile of the coassociation values (which is 0.69).\n\n# %% tags=[]\n"
] | [
[
"numpy.load",
"pandas.read_pickle",
"pandas.Series",
"numpy.ones",
"matplotlib.pyplot.subplots",
"numpy.unique"
]
] |
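The notebook above extracts the upper triangle of the symmetric coassociation matrix before computing percentiles, so each trait pair is counted once. A small sketch of that idiom on toy data (using the builtin bool rather than the deprecated np.bool alias; the names here are illustrative):

import numpy as np
import pandas as pd

m = np.random.default_rng(0).random((4, 4))
coassoc = pd.DataFrame((m + m.T) / 2)  # symmetric, like a coassociation matrix

# Boolean mask keeping the upper triangle (including the diagonal), then stack to long form.
mask = np.triu(np.ones(coassoc.shape, dtype=bool))
pairs = coassoc.where(mask).stack()
print(pairs.describe(percentiles=[0.25, 0.5, 0.75, 0.9, 0.99]))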
tensorleap/tensorflow-onnx | [
"56f6070828928bbb0f30890b2229eec8b663213d",
"56f6070828928bbb0f30890b2229eec8b663213d"
] | [
"tests/test_tf_shape_inference.py",
"tests/test_cudnn_compatible_gru.py"
] | [
"# SPDX-License-Identifier: Apache-2.0\n\n\n\"\"\"Unit Tests for Tensorflow shape inference.\"\"\"\n\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport os\nimport numpy as np\nimport tensorflow as tf\n\nfrom tensorflow.python.ops import variables as variables_lib\nfrom tensorflow.python.ops import init_ops\n\nfrom backend_test_base import Tf2OnnxBackendTestBase\nfrom common import * # pylint: disable=wildcard-import, unused-wildcard-import\nfrom tf2onnxnightly import utils\nfrom tf2onnxnightly.tf_utils import get_tf_tensor_shape\nfrom tf2onnxnightly.shape_inference import infer_shape_for_graph\nfrom tf2onnxnightly.tf_loader import tf_reset_default_graph, tf_session, tf_placeholder, tf_optimize\n\n# pylint: disable=missing-docstring\n\n\nclass TFShapeInferenceTests(Tf2OnnxBackendTestBase):\n def _run_test_case(self, input_names_with_port, output_names_with_port):\n try:\n tf.compat.v1.disable_eager_execution()\n except: # pylint: disable=bare-except\n pass\n graph_def = None\n with tf_session() as sess:\n # freeze graph\n origin_graph = sess.graph\n variables_lib.global_variables_initializer().run()\n output_name_without_port = [n.split(':')[0] for n in output_names_with_port]\n graph_def = tf.graph_util.convert_variables_to_constants(\n sess, sess.graph_def,\n output_name_without_port\n )\n\n tf_reset_default_graph()\n tf.import_graph_def(graph_def, name='')\n\n # optimize graph\n graph_def = tf_optimize(input_names_with_port, output_names_with_port, sess.graph_def, True)\n\n with tf_session() as sess:\n if self.config.is_debug_mode:\n if not os.path.exists(self.test_data_directory):\n os.makedirs(self.test_data_directory)\n model_path = os.path.join(self.test_data_directory, self._testMethodName + \"_after_tf_optimize.pb\")\n utils.save_protobuf(model_path, graph_def)\n self.logger.debug(\"created file %s\", model_path)\n\n tf_reset_default_graph()\n tf.import_graph_def(graph_def, name='')\n\n with tf_session() as sess:\n inferred_graph = infer_shape_for_graph(sess.graph)\n # compare each operation\n for op in origin_graph.get_operations():\n inferred_op = None\n try:\n inferred_op = inferred_graph.get_operation_by_name(op.name)\n except KeyError:\n continue\n self._compare_shape_for_op(op, inferred_op)\n\n def _compare_shape_for_op(self, op1, op2):\n \"\"\"Align outputs of op2 to op1.\"\"\"\n for out1, out2 in zip(op1.outputs, op2.outputs):\n expected_shape = get_tf_tensor_shape(out1)\n if out1 is not None:\n actual_shape = get_tf_tensor_shape(out2)\n self.assertTrue(utils.are_shapes_compatible(expected_shape, actual_shape))\n\n @check_tf_max_version(\"1.15\", \"_run_test_case needs to supported tf-2\")\n def test_while_loop_with_ta_read_and_write(self):\n i = tf_placeholder(tf.int32, (), name=\"input_1\")\n inputs = tf_placeholder(tf.float32, (10,), name=\"input_2\")\n\n inputs_2 = tf.identity(inputs)\n input_ta = tf.TensorArray(dtype=tf.float32, size=0, dynamic_size=True).unstack(inputs_2)\n output_ta = tf.TensorArray(dtype=tf.float32, size=0, dynamic_size=True)\n\n c = lambda i, *_: tf.logical_and(tf.less(i, 10), i >= 0)\n\n def b(i, out_ta):\n new_i = tf.add(i, 1)\n x = input_ta.read(i)\n x = x + 3\n out_ta_new = out_ta.write(i, x)\n return new_i, out_ta_new\n\n i_final, out_final = tf.while_loop(c, b, [i, output_ta])\n _ = tf.identity(i_final, name=\"i\")\n _ = tf.identity(out_final.stack(), name=\"output_ta\")\n input_names_with_port = [\"input_1:0\", \"input_2:0\"]\n\n output_names_with_port = [\"i:0\", 
\"output_ta:0\"]\n self._run_test_case(input_names_with_port, output_names_with_port)\n\n @check_tf_max_version(\"1.15\", \"_run_test_case needs to supported tf-2\")\n def test_map_fn(self):\n def fn0(elem):\n res = elem + elem * elem\n return res\n\n def fn1(elem):\n res = elem[0] * elem[1] + elem[0]\n return res\n\n x_val = 100 * np.random.random_sample([2, 10]).astype(np.float32)\n y_val = 100 * np.random.random_sample([2, 10]).astype(np.float32)\n\n # test fn0\n x = tf_placeholder(tf.float32, shape=x_val.shape, name=\"input_0\")\n x_ = tf.identity(x)\n res_ = tf.map_fn(fn0, x_, dtype=tf.float32)\n _ = tf.identity(res_, name=\"output_0\")\n input_names_with_port = [\"input_0:0\"]\n output_names_with_port = [\"output_0:0\"]\n self._run_test_case(input_names_with_port, output_names_with_port)\n tf_reset_default_graph()\n\n # test fn1\n x = tf_placeholder(tf.float32, shape=x_val.shape, name=\"input_0\")\n y = tf_placeholder(tf.float32, shape=y_val.shape, name=\"input_1\")\n x_ = tf.identity(x)\n y_ = tf.identity(y)\n res_ = tf.map_fn(fn1, (x_, y_), dtype=tf.float32)\n _ = tf.identity(res_, name=\"output_0\")\n input_names_with_port = [\"input_0:0\", \"input_1:0\"]\n output_names_with_port = [\"output_0:0\"]\n self._run_test_case(input_names_with_port, output_names_with_port)\n\n @check_tf_max_version(\"1.15\", \"_run_test_case needs to supported tf-2\")\n def test_bidrectional_attention_wrapper_lstm_encoder(self):\n size = 30\n time_step = 3\n input_size = 4\n attn_size = size\n batch_size = 9\n\n # shape [batch size, time step, size]\n # attention_state: usually the output of an RNN encoder.\n # This tensor should be shaped `[batch_size, max_time, ...]`\n encoder_time_step = time_step\n encoder_x_val = np.random.randn(encoder_time_step, input_size).astype('f')\n encoder_x_val = np.stack([encoder_x_val] * batch_size)\n encoder_x = tf_placeholder(tf.float32, encoder_x_val.shape, name=\"input_1\")\n encoder_cell = tf.nn.rnn_cell.LSTMCell(size)\n attention_states, _ = tf.nn.dynamic_rnn(encoder_cell, encoder_x, dtype=tf.float32)\n # [9, 3, 30], [9, 30]\n attention_mechanism = tf.contrib.seq2seq.BahdanauAttention(attn_size,\n attention_states)\n\n match_input_fn = lambda curr_input, state: tf.concat([curr_input, state], axis=-1)\n cell = tf.nn.rnn_cell.LSTMCell(size)\n match_cell_fw = tf.contrib.seq2seq.AttentionWrapper(cell,\n attention_mechanism,\n attention_layer_size=attn_size,\n cell_input_fn=match_input_fn,\n output_attention=False)\n match_cell_bk = tf.contrib.seq2seq.AttentionWrapper(cell,\n attention_mechanism,\n attention_layer_size=attn_size,\n cell_input_fn=match_input_fn,\n output_attention=False)\n\n decoder_time_step = 6\n decoder_x_val = np.random.randn(decoder_time_step, batch_size, input_size).astype('f')\n\n decoder_x = tf_placeholder(tf.float32, decoder_x_val.shape, name=\"input_2\")\n seq_length = tf_placeholder(tf.int32, (batch_size), name=\"input_3\")\n (match_output_fw, match_output_bk), (match_state_fw, match_state_bk) = \\\n tf.nn.bidirectional_dynamic_rnn(cell_fw=match_cell_fw,\n cell_bw=match_cell_bk,\n inputs=decoder_x,\n sequence_length=tf.identity(seq_length),\n dtype=tf.float32,\n time_major=True)\n\n matched_output = tf.concat([match_output_fw, match_output_bk], axis=-1)\n matched_state = tf.concat([match_state_fw.cell_state, match_state_bk.cell_state], -1)\n\n _ = tf.identity(matched_output, name=\"output_0\")\n _ = tf.identity(matched_state, name=\"final_state\")\n\n input_names_with_port = [\"input_1:0\", \"input_2:0\", \"input_3:0\"]\n output_names_with_port 
= [\"output_0:0\", \"final_state:0\"]\n self._run_test_case(input_names_with_port, output_names_with_port)\n\n @check_tf_max_version(\"1.15\", \"_run_test_case needs to supported tf-2\")\n def test_dynamic_decode_normal_stop(self):\n batch_size = 2\n num_units = 4\n vocab_size = 5\n embedding_size = 3\n go_token = 0\n end_token = 1\n\n embedding = tf.constant(np.ones([vocab_size, embedding_size], dtype=np.float32))\n state_val = np.reshape([np.ones([num_units], dtype=np.float32) * i for i in range(batch_size)],\n [batch_size, num_units])\n encoder_state = tf.nn.rnn_cell.LSTMStateTuple(state_val, state_val)\n\n cell_initializer = init_ops.constant_initializer(\n np.array([[-0.9592235, 0.42451382, 0.7437744, -0.54485345, -0.80763197,\n 0.19663906, -0.22738314, 0.7762785, 0.7464578, 0.27227187,\n 0.7661047, 0.3596425, -0.8528242, -0.89316916, -0.48946142,\n 0.87882376],\n [0.86586094, -0.75018406, 0.25992537, -0.69368935, 0.2515502,\n -0.26379275, 0.8954313, 0.5759742, -0.7753072, -0.4388857,\n 0.95751476, -0.82085776, -0.9467752, -0.37055635, -0.18570113,\n -0.86504984],\n [0.02305841, 0.3850248, 0.893692, -0.6866486, -0.83703446,\n -0.9828961, 0.3989377, -0.59993076, 0.5330808, 0.6916566,\n 0.98468065, -0.6047034, 0.10823512, 0.34599304, -0.7834821,\n -0.7852347],\n [0.81643987, 0.31507468, -0.51369476, -0.12273741, 0.9701307,\n -0.79669356, -0.34496522, -0.88750815, -0.17995334, 0.34707904,\n -0.09201193, 0.5363934, -0.87229705, -0.5073328, -0.95894027,\n 0.5481839],\n [-0.84093595, -0.2341497, -0.86047816, 0.43370056, -0.39073753,\n 0.37730122, 0.48026466, 0.3004985, -0.60727096, 0.9043884,\n -0.37619448, 0.22490788, -0.03739262, 0.61672115, 0.478899,\n -0.40780973],\n [0.31202435, -0.22045255, -0.6087918, 0.95115066, 0.00199413,\n -0.688287, -0.1103518, 0.4169519, 0.7913246, -0.9844644,\n -0.6193857, 0.38659644, -0.4726901, -0.44781208, -0.5174744,\n -0.605911],\n [0.66771054, 0.34912825, 0.22297978, -0.4990945, 0.24057317,\n -0.5540829, 0.92277217, 0.74939895, -0.35278273, -0.21587133,\n -0.28613377, -0.8794241, -0.40119147, 0.67175174, -0.22741508,\n 0.37898326]], dtype=np.float32))\n dense_initializer = init_ops.constant_initializer(\n np.array([[0.56177187, -0.6233454, 0.73997784, 0.35032558, 0.6479795],\n [0.6831174, -0.34233975, 0.39330363, 0.45177555, -0.49649096],\n [-0.98890066, 0.6175642, 0.09800482, -0.6721206, 0.48805737],\n [0.19671416, 0.2623148, 0.742548, 0.13555217, 0.56009054]], dtype=np.float32))\n\n cell = tf.nn.rnn_cell.LSTMCell(\n num_units=num_units,\n initializer=cell_initializer,\n state_is_tuple=True)\n\n helper = tf.contrib.seq2seq.GreedyEmbeddingHelper(\n embedding=embedding,\n start_tokens=tf.tile([go_token], [batch_size]),\n end_token=end_token)\n\n output_layer = tf.layers.Dense(vocab_size, kernel_initializer=dense_initializer)\n decoder = tf.contrib.seq2seq.BasicDecoder(\n cell=cell,\n helper=helper,\n initial_state=encoder_state,\n output_layer=output_layer)\n\n outputs, state, sequence_lengths = tf.contrib.seq2seq.dynamic_decode(\n decoder=decoder,\n maximum_iterations=6)\n\n _ = tf.identity(outputs.rnn_output, name=\"rnn_output\")\n _ = tf.identity(outputs.sample_id, name=\"sample_id\")\n _ = tf.identity(state, name=\"state\")\n _ = tf.identity(sequence_lengths, name=\"sequence_lengths\")\n\n output_names_with_port = [\n \"rnn_output:0\",\n # \"sample_id:0\", # incomplete type support for Transpose on onnxruntime 0.2.1\n \"state:0\",\n ]\n\n self._run_test_case([], output_names_with_port)\n\n @check_tf_max_version(\"1.15\", \"_run_test_case needs to 
supported tf-2\")\n def test_while_loop_in_cond(self):\n x_val = np.array([1, 2, 3], dtype=np.float32)\n y_val = np.array([4, 5, 6], dtype=np.float32)\n x = tf_placeholder(tf.float32, x_val.shape, name=\"input_1\")\n y = tf_placeholder(tf.float32, y_val.shape, name=\"input_2\")\n\n def cond_graph():\n b = tf.constant(np.array([0], dtype=np.int32), dtype=tf.int32)\n # while_loop\n c = lambda y: tf.reduce_any(tf.less(y, 10))\n b = lambda i: tf.add(y, 1)\n return tf.while_loop(c, b, [y])\n\n res = tf.cond(x[0] < y[0], lambda: x, cond_graph, name=\"test_cond\")\n _ = tf.identity(res, name=\"output\")\n\n input_names_with_port = [\"input_1:0\", \"input_2:0\"]\n output_names_with_port = [\"output:0\"]\n self._run_test_case(input_names_with_port, output_names_with_port)\n\n @check_tf_max_version(\"1.15\", \"_run_test_case needs to supported tf-2\")\n def test_cond_in_while_loop(self):\n i = tf.placeholder(tf.int32, (), name=\"input_1\")\n inputs = tf.placeholder(tf.float32, (10,), name=\"input_2\")\n\n inputs_2 = tf.identity(inputs)\n input_ta = tf.TensorArray(dtype=tf.float32, size=0, dynamic_size=True).unstack(inputs_2)\n output_ta = tf.TensorArray(dtype=tf.float32, size=0, dynamic_size=True)\n\n c = lambda i, *_: tf.logical_and(tf.less(i, 10), i >= 0)\n\n def b(i, out_ta):\n new_i = tf.add(i, 1)\n x = input_ta.read(i)\n x = tf.cond(x > 0, lambda: x - 1, lambda: x + 3)\n out_ta_new = out_ta.write(i, x)\n return new_i, out_ta_new\n\n i_final, out_final = tf.while_loop(c, b, [i, output_ta])\n _ = tf.identity(i_final, name=\"i\")\n _ = tf.identity(out_final.stack(), name=\"output_ta\")\n input_names_with_port = [\"input_1:0\", \"input_2:0\"]\n\n output_names_with_port = [\"i:0\", \"output_ta:0\"]\n self._run_test_case(input_names_with_port, output_names_with_port)\n\n\nif __name__ == \"__main__\":\n unittest_main()\n",
"# SPDX-License-Identifier: Apache-2.0\n\n\n\"\"\"Unit Tests for gru.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport numpy as np\nimport tensorflow as tf\n\nfrom tensorflow.python.ops import init_ops\nfrom tensorflow.python.ops import variable_scope\nfrom backend_test_base import Tf2OnnxBackendTestBase\nfrom common import unittest_main, check_gru_count, check_tf_max_version, check_opset_after_tf_version\nfrom tf2onnxnightly.tf_loader import is_tf2\n\n\n# pylint: disable=missing-docstring,invalid-name,unused-argument,using-constant-test,cell-var-from-loop\n\nif is_tf2():\n MultiRNNCell = tf.compat.v1.nn.rnn_cell.MultiRNNCell\n dynamic_rnn = tf.compat.v1.nn.dynamic_rnn\n bidirectional_dynamic_rnn = tf.compat.v1.nn.bidirectional_dynamic_rnn\nelse:\n GRUBlockCell = tf.contrib.rnn.GRUBlockCell\n MultiRNNCell = tf.contrib.rnn.MultiRNNCell\n CudnnCompatibleGRUCell = tf.contrib.cudnn_rnn.CudnnCompatibleGRUCell\n dynamic_rnn = tf.nn.dynamic_rnn\n bidirectional_dynamic_rnn = tf.nn.bidirectional_dynamic_rnn\n\n\n# TODO: as a workaround, set batch_size to 1 for now to bypass a onnxruntime bug, revert it when the bug is fixed\nclass CudnnCompatibleGRUTests(Tf2OnnxBackendTestBase):\n @check_tf_max_version(\"1.15\", \"no CudnnCompatibleGRUCell in tf-2.x\")\n def test_single_dynamic_gru(self):\n units = 5\n batch_size = 1\n x_val = np.array([[1., 1.], [2., 2.], [3., 3.], [4., 4.]], dtype=np.float32)\n x_val = np.stack([x_val] * batch_size)\n\n def func(x):\n # no scope\n cell = CudnnCompatibleGRUCell(units)\n outputs, cell_state = dynamic_rnn(\n cell,\n x,\n dtype=tf.float32)\n\n return tf.identity(outputs, name=\"output\"), tf.identity(cell_state, name=\"cell_state\")\n\n input_names_with_port = [\"input_1:0\"]\n feed_dict = {\"input_1:0\": x_val}\n output_names_with_port = [\"output:0\", \"cell_state:0\"]\n self.run_test_case(func, feed_dict, input_names_with_port, output_names_with_port, rtol=1e-03, atol=1e-06,\n graph_validator=lambda g: check_gru_count(g, 1))\n\n @check_tf_max_version(\"1.15\", \"no CudnnCompatibleGRUCell in tf-2.x\")\n def test_multiple_dynamic_gru(self):\n units = 5\n batch_size = 1\n x_val = np.array([[1., 1.], [2., 2.], [3., 3.], [4., 4.]], dtype=np.float32)\n x_val = np.stack([x_val] * batch_size)\n\n def func(x):\n gru_output_list = []\n gru_cell_state_list = []\n # no scope\n cell = CudnnCompatibleGRUCell(units)\n outputs, cell_state = dynamic_rnn(\n cell,\n x,\n dtype=tf.float32)\n gru_output_list.append(outputs)\n gru_cell_state_list.append(cell_state)\n\n # given scope\n cell = CudnnCompatibleGRUCell(units)\n with variable_scope.variable_scope(\"root1\") as scope:\n outputs, cell_state = dynamic_rnn(\n cell,\n x,\n dtype=tf.float32,\n sequence_length=[4],\n scope=scope)\n gru_output_list.append(outputs)\n gru_cell_state_list.append(cell_state)\n\n return tf.identity(gru_output_list, name=\"output\"), tf.identity(gru_cell_state_list, name=\"cell_state\")\n\n feed_dict = {\"input_1:0\": x_val}\n input_names_with_port = [\"input_1:0\"]\n output_names_with_port = [\"output:0\", \"cell_state:0\"]\n self.run_test_case(func, feed_dict, input_names_with_port, output_names_with_port, rtol=1e-3, atol=1e-06)\n # graph_validator=lambda g: check_gru_count(g, 2))\n\n @check_tf_max_version(\"1.15\", \"no CudnnCompatibleGRUCell in tf-2.x\")\n def test_single_dynamic_gru_seq_length_is_const(self):\n units = 5\n batch_size = 1\n x_val = np.array([[1., 1.], [2., 2.], 
[3., 3.], [4., 4.], [5., 5.]], dtype=np.float32)\n x_val = np.stack([x_val] * batch_size)\n def func(x):\n initializer = init_ops.constant_initializer(0.5)\n\n # no scope\n cell = CudnnCompatibleGRUCell(\n units,\n kernel_initializer=initializer)\n outputs, cell_state = dynamic_rnn(\n cell,\n x,\n dtype=tf.float32,\n sequence_length=[5])\n\n return tf.identity(outputs, name=\"output\"), tf.identity(cell_state, name=\"cell_state\")\n\n feed_dict = {\"input_1:0\": x_val}\n input_names_with_port = [\"input_1:0\"]\n output_names_with_port = [\"output:0\", \"cell_state:0\"]\n self.run_test_case(func, feed_dict, input_names_with_port, output_names_with_port, rtol=1e-3, atol=1e-06,\n graph_validator=lambda g: check_gru_count(g, 1))\n\n @check_tf_max_version(\"1.15\", \"no CudnnCompatibleGRUCell in tf-2.x\")\n def test_single_dynamic_gru_seq_length_is_not_const(self):\n for np_dtype in [np.int32, np.int64, np.float32]:\n units = 5\n batch_size = 1\n x_val = np.array([[1., 1.], [2., 2.], [3., 3.], [4., 4.], [5., 5.]], dtype=np.float32)\n x_val = np.stack([x_val] * batch_size)\n y_val = np.array([5], dtype=np_dtype)\n\n def func(x, seq_length):\n initializer = init_ops.constant_initializer(0.5)\n # no scope\n cell = CudnnCompatibleGRUCell(\n units,\n kernel_initializer=initializer)\n outputs, cell_state = dynamic_rnn(\n cell,\n x,\n dtype=tf.float32,\n sequence_length=tf.identity(seq_length))\n\n return tf.identity(outputs, name=\"output\"), tf.identity(cell_state, name=\"cell_state\")\n\n feed_dict = {\"input_1:0\": x_val, \"input_2:0\": y_val}\n input_names_with_port = [\"input_1:0\", \"input_2:0\"]\n output_names_with_port = [\"output:0\", \"cell_state:0\"]\n self.run_test_case(func, feed_dict, input_names_with_port, output_names_with_port, rtol=1e-03, atol=1e-06,\n graph_validator=lambda g: check_gru_count(g, 1))\n\n @check_tf_max_version(\"1.15\", \"no CudnnCompatibleGRUCell in tf-2.x\")\n def test_single_dynamic_gru_placeholder_input(self):\n units = 5\n x_val = np.array([[1., 1.], [2., 2.], [3., 3.], [4., 4.]], dtype=np.float32)\n x_val = np.stack([x_val] * 1)\n def func(x):\n initializer = init_ops.constant_initializer(0.5)\n # no scope\n cell = CudnnCompatibleGRUCell(\n units,\n kernel_initializer=initializer)\n outputs, cell_state = dynamic_rnn(\n cell,\n x,\n dtype=tf.float32) # by default zero initializer is used\n\n return tf.identity(outputs, name=\"output\"), tf.identity(cell_state, name=\"cell_state\")\n\n feed_dict = {\"input_1:0\": x_val}\n input_names_with_port = [\"input_1:0\"]\n output_names_with_port = [\"output:0\", \"cell_state:0\"]\n self.run_test_case(func, feed_dict, input_names_with_port, output_names_with_port, rtol=1e-03, atol=1e-06,\n graph_validator=lambda g: check_gru_count(g, 1))\n\n @check_tf_max_version(\"1.15\", \"no CudnnCompatibleGRUCell in tf-2.x\")\n def test_single_dynamic_gru_ch_zero_state_initializer(self):\n units = 5\n batch_size = 1\n x_val = np.array([[1., 1.], [2., 2.], [3., 3.], [4., 4.], [5., 5.]], dtype=np.float32)\n x_val = np.stack([x_val] * batch_size)\n def func(x):\n initializer = init_ops.constant_initializer(0.5)\n # no scope\n cell = CudnnCompatibleGRUCell(\n units,\n kernel_initializer=initializer)\n\n # defining initial state\n initial_state = cell.zero_state(batch_size, dtype=tf.float32)\n outputs, cell_state = dynamic_rnn(\n cell,\n x,\n initial_state=initial_state,\n dtype=tf.float32)\n\n return tf.identity(outputs, name=\"output\"), tf.identity(cell_state, name=\"cell_state\")\n\n feed_dict = {\"input_1:0\": x_val}\n 
input_names_with_port = [\"input_1:0\"]\n output_names_with_port = [\"output:0\", \"cell_state:0\"]\n self.run_test_case(func, feed_dict, input_names_with_port, output_names_with_port, rtol=1e-03, atol=1e-06,\n graph_validator=lambda g: check_gru_count(g, 1))\n\n @check_tf_max_version(\"1.15\", \"no CudnnCompatibleGRUCell in tf-2.x\")\n def test_single_dynamic_gru_random_weights(self):\n hidden_size = 5\n batch_size = 1\n x_val = np.array([[1., 1.], [2., 2.], [3., 3.], [4., 4.]], dtype=np.float32)\n x_val = np.stack([x_val] * batch_size)\n\n def func(x):\n initializer = tf.random_uniform_initializer(-1.0, 1.0)\n\n # no scope\n cell = CudnnCompatibleGRUCell(\n hidden_size,\n kernel_initializer=initializer)\n\n outputs, cell_state = dynamic_rnn(\n cell,\n x,\n dtype=tf.float32)\n\n return tf.identity(outputs, name=\"output\"), tf.identity(cell_state, name=\"cell_state\")\n\n feed_dict = {\"input_1:0\": x_val}\n input_names_with_port = [\"input_1:0\"]\n output_names_with_port = [\"output:0\", \"cell_state:0\"]\n self.run_test_case(func, feed_dict, input_names_with_port, output_names_with_port, 0.0001,\n graph_validator=lambda g: check_gru_count(g, 1))\n\n @check_tf_max_version(\"1.15\", \"no CudnnCompatibleGRUCell in tf-2.x\")\n def test_single_dynamic_gru_random_weights2(self):\n hidden_size = 128\n batch_size = 1\n x_val = np.random.randn(1, 133).astype('f')\n x_val = np.stack([x_val] * batch_size)\n\n def func(x):\n initializer = tf.random_uniform_initializer(0.0, 1.0)\n # no scope\n cell = CudnnCompatibleGRUCell(\n hidden_size,\n kernel_initializer=initializer)\n\n outputs, cell_state = dynamic_rnn(\n cell,\n x,\n dtype=tf.float32)\n\n return tf.identity(outputs, name=\"output\"), tf.identity(cell_state, name=\"cell_state\")\n\n feed_dict = {\"input_1:0\": x_val}\n input_names_with_port = [\"input_1:0\"]\n output_names_with_port = [\"output:0\", \"cell_state:0\"]\n self.run_test_case(func, feed_dict, input_names_with_port, output_names_with_port, 0.01,\n graph_validator=lambda g: check_gru_count(g, 1))\n\n @check_tf_max_version(\"1.15\", \"no CudnnCompatibleGRUCell in tf-2.x\")\n def test_dynamic_gru_output_consumed_only(self):\n units = 5\n batch_size = 6\n x_val = np.array([[1., 1.], [2., 2.], [3., 3.]], dtype=np.float32)\n x_val = np.stack([x_val] * batch_size)\n def func(x):\n initializer = tf.random_uniform_initializer(-1.0, 1.0)\n cell1 = CudnnCompatibleGRUCell(\n units,\n kernel_initializer=initializer)\n\n outputs, _ = dynamic_rnn(\n cell1,\n x,\n dtype=tf.float32)\n\n return tf.identity(outputs, name=\"output\")\n\n feed_dict = {\"input_1:0\": x_val}\n input_names_with_port = [\"input_1:0\"]\n output_names_with_port = [\"output:0\"]\n self.run_test_case(func, feed_dict, input_names_with_port, output_names_with_port, 0.0001,\n graph_validator=lambda g: check_gru_count(g, 1))\n\n @check_tf_max_version(\"1.15\", \"no CudnnCompatibleGRUCell in tf-2.x\")\n def test_dynamic_gru_state_consumed_only(self):\n units = 5\n batch_size = 6\n x_val = np.array([[1., 1.], [2., 2.], [3., 3.]], dtype=np.float32)\n x_val = np.stack([x_val] * batch_size)\n\n def func(x):\n initializer = tf.random_uniform_initializer(-1.0, 1.0)\n cell1 = CudnnCompatibleGRUCell(\n units,\n kernel_initializer=initializer)\n\n _, cell_state = dynamic_rnn(\n cell1,\n x,\n dtype=tf.float32)\n\n return tf.identity(cell_state, name=\"cell_state\")\n\n feed_dict = {\"input_1:0\": x_val}\n input_names_with_port = [\"input_1:0\"]\n output_names_with_port = [\"cell_state:0\"]\n self.run_test_case(func, feed_dict, 
input_names_with_port, output_names_with_port, rtol=0.0001, atol=1e-06,\n graph_validator=lambda g: check_gru_count(g, 1))\n\n @check_opset_after_tf_version(\"1.15\", 10, \"might need ReverseV2\")\n @check_tf_max_version(\"1.15\", \"no CudnnCompatibleGRUCell in tf-2.x\")\n def test_dynamic_bigru(self):\n units = 5\n batch_size = 1\n x_val = np.array([[1., 1.], [2., 2.], [3., 3.]], dtype=np.float32)\n x_val = np.stack([x_val] * batch_size)\n\n def func(x):\n initializer = init_ops.constant_initializer(0.5)\n\n # bigru, no scope\n cell1 = CudnnCompatibleGRUCell(\n units,\n kernel_initializer=initializer)\n cell2 = CudnnCompatibleGRUCell(\n units,\n kernel_initializer=initializer)\n outputs, cell_state = bidirectional_dynamic_rnn(\n cell1,\n cell2,\n x,\n dtype=tf.float32)\n\n return tf.identity(outputs, name=\"output\"), tf.identity(cell_state, name=\"cell_state\")\n\n feed_dict = {\"input_1:0\": x_val}\n input_names_with_port = [\"input_1:0\"]\n output_names_with_port = [\"output:0\", \"cell_state:0\"]\n self.run_test_case(func, feed_dict, input_names_with_port, output_names_with_port, rtol=1e-3, atol=1e-06,\n graph_validator=lambda g: check_gru_count(g, 1))\n\n @check_opset_after_tf_version(\"1.15\", 10, \"might need ReverseV2\")\n @check_tf_max_version(\"1.15\", \"no CudnnCompatibleGRUCell in tf-2.x\")\n def test_dynamic_bigru_output_consumed_only(self):\n units = 5\n batch_size = 1\n x_val = np.array([[1., 1.], [2., 2.], [3., 3.]], dtype=np.float32)\n x_val = np.stack([x_val] * batch_size)\n\n def func(x):\n initializer = init_ops.constant_initializer(0.5)\n\n # bigru, no scope\n cell1 = CudnnCompatibleGRUCell(\n units,\n kernel_initializer=initializer)\n cell2 = CudnnCompatibleGRUCell(\n units,\n kernel_initializer=initializer)\n outputs, _ = bidirectional_dynamic_rnn(\n cell1,\n cell2,\n x,\n dtype=tf.float32)\n\n return tf.identity(outputs, name=\"output\")\n\n feed_dict = {\"input_1:0\": x_val}\n input_names_with_port = [\"input_1:0\"]\n output_names_with_port = [\"output:0\"]\n self.run_test_case(func, feed_dict, input_names_with_port, output_names_with_port, rtol=1e-3, atol=1e-06,\n graph_validator=lambda g: check_gru_count(g, 1))\n\n @check_opset_after_tf_version(\"1.15\", 10, \"might need ReverseV2\")\n @check_tf_max_version(\"1.15\", \"no CudnnCompatibleGRUCell in tf-2.x\")\n def test_dynamic_bigru_state_consumed_only(self):\n units = 5\n batch_size = 1\n x_val = np.array([[1., 1.], [2., 2.], [3., 3.]], dtype=np.float32)\n x_val = np.stack([x_val] * batch_size)\n\n def func(x):\n initializer = init_ops.constant_initializer(0.5)\n\n # bigru, no scope\n cell1 = CudnnCompatibleGRUCell(\n units,\n kernel_initializer=initializer)\n cell2 = CudnnCompatibleGRUCell(\n units,\n kernel_initializer=initializer)\n _, cell_state = bidirectional_dynamic_rnn(\n cell1,\n cell2,\n x,\n dtype=tf.float32)\n\n return tf.identity(cell_state, name=\"cell_state\")\n\n feed_dict = {\"input_1:0\": x_val}\n input_names_with_port = [\"input_1:0\"]\n output_names_with_port = [\"cell_state:0\"]\n self.run_test_case(func, feed_dict, input_names_with_port, output_names_with_port, rtol=1e-3, atol=1e-06,\n graph_validator=lambda g: check_gru_count(g, 1))\n\n @check_opset_after_tf_version(\"1.15\", 10, \"might need ReverseV2\")\n @check_tf_max_version(\"1.15\", \"no CudnnCompatibleGRUCell in tf-2.x\")\n def test_dynamic_bidirectional_but_one_gru(self):\n units = 5\n batch_size = 1\n x_val = np.array([[1., 1.], [2., 2.], [3., 3.]], dtype=np.float32)\n x_val = np.stack([x_val] * batch_size)\n\n def func(x):\n 
initializer = init_ops.constant_initializer(0.5)\n\n # bigru, no scope\n cell = CudnnCompatibleGRUCell(\n units,\n kernel_initializer=initializer)\n outputs, cell_state = bidirectional_dynamic_rnn(\n cell,\n cell,\n x,\n dtype=tf.float32)\n\n return tf.identity(outputs, name=\"output\"), tf.identity(cell_state, name=\"cell_state\")\n\n feed_dict = {\"input_1:0\": x_val}\n input_names_with_port = [\"input_1:0\"]\n output_names_with_port = [\"output:0\", \"cell_state:0\"]\n self.run_test_case(func, feed_dict, input_names_with_port, output_names_with_port, rtol=1e-3, atol=1e-06,\n graph_validator=lambda g: check_gru_count(g, 1))\n\n @check_opset_after_tf_version(\"1.15\", 10, \"might need ReverseV2\")\n @check_tf_max_version(\"1.15\", \"no CudnnCompatibleGRUCell in tf-2.x\")\n def test_dynamic_bidirectional_but_one_gru_and_output_consumed_only(self):\n units = 5\n batch_size = 1\n x_val = np.array([[1., 1.], [2., 2.], [3., 3.]], dtype=np.float32)\n x_val = np.stack([x_val] * batch_size)\n def func(x):\n # bigru, no scope\n cell = CudnnCompatibleGRUCell(\n units)\n outputs, _ = bidirectional_dynamic_rnn(\n cell,\n cell,\n x,\n dtype=tf.float32)\n\n return tf.identity(outputs, name=\"output\")\n\n feed_dict = {\"input_1:0\": x_val}\n input_names_with_port = [\"input_1:0\"]\n output_names_with_port = [\"output:0\"]\n self.run_test_case(func, feed_dict, input_names_with_port, output_names_with_port, rtol=1e-3, atol=1e-06,\n graph_validator=lambda g: check_gru_count(g, 1))\n\n @check_opset_after_tf_version(\"1.15\", 10, \"might need ReverseV2\")\n @check_tf_max_version(\"1.15\", \"no CudnnCompatibleGRUCell in tf-2.x\")\n def test_dynamic_bidirectional_but_one_gru_and_state_consumed_only(self):\n units = 5\n batch_size = 1\n x_val = np.array([[1., 1.], [2., 2.], [3., 3.]], dtype=np.float32)\n x_val = np.stack([x_val] * batch_size)\n\n def func(x):\n\n # bigru, no scope\n cell = CudnnCompatibleGRUCell(\n units)\n _, cell_state = bidirectional_dynamic_rnn(\n cell,\n cell,\n x,\n dtype=tf.float32)\n\n return tf.identity(cell_state, name=\"cell_state\")\n\n feed_dict = {\"input_1:0\": x_val}\n input_names_with_port = [\"input_1:0\"]\n output_names_with_port = [\"cell_state:0\"]\n self.run_test_case(func, feed_dict, input_names_with_port, output_names_with_port, rtol=1e-3, atol=1e-06,\n graph_validator=lambda g: check_gru_count(g, 1))\n\n\nif __name__ == '__main__':\n unittest_main()\n"
] | [
[
"numpy.ones",
"tensorflow.nn.dynamic_rnn",
"tensorflow.concat",
"numpy.stack",
"tensorflow.nn.rnn_cell.LSTMStateTuple",
"tensorflow.identity",
"tensorflow.contrib.seq2seq.dynamic_decode",
"tensorflow.compat.v1.disable_eager_execution",
"tensorflow.nn.rnn_cell.LSTMCell",
"tensorflow.less",
"tensorflow.import_graph_def",
"tensorflow.contrib.seq2seq.AttentionWrapper",
"tensorflow.cond",
"tensorflow.layers.Dense",
"tensorflow.contrib.seq2seq.BahdanauAttention",
"tensorflow.contrib.seq2seq.BasicDecoder",
"tensorflow.python.ops.variables.global_variables_initializer",
"tensorflow.while_loop",
"tensorflow.tile",
"tensorflow.graph_util.convert_variables_to_constants",
"numpy.random.random_sample",
"tensorflow.placeholder",
"tensorflow.map_fn",
"numpy.random.randn",
"tensorflow.add",
"tensorflow.TensorArray",
"numpy.array"
],
[
"numpy.stack",
"tensorflow.identity",
"numpy.random.randn",
"tensorflow.python.ops.variable_scope.variable_scope",
"tensorflow.random_uniform_initializer",
"numpy.array",
"tensorflow.python.ops.init_ops.constant_initializer"
]
] |
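test_while_loop_with_ta_read_and_write above exercises the TensorArray read/write pattern inside tf.while_loop. The same pattern in eager TF2, stripped of the TF1 test harness (a sketch under that assumption; add_three is a hypothetical name, not part of the test suite):

import tensorflow as tf

def add_three(values):
    # Read each element from one TensorArray, add 3, write into another.
    n = tf.shape(values)[0]
    in_ta = tf.TensorArray(tf.float32, size=n).unstack(values)
    out_ta = tf.TensorArray(tf.float32, size=n)

    def cond(i, _):
        return i < n

    def body(i, ta):
        return i + 1, ta.write(i, in_ta.read(i) + 3.0)

    _, out_ta = tf.while_loop(cond, body, [0, out_ta])
    return out_ta.stack()

print(add_three(tf.constant([1.0, 2.0, 3.0])))  # [4. 5. 6.]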
cnheider/xgboost | [
"e7fbc8591fa7277ee4c474b7371c48c11b34cbde"
] | [
"tests/python/test_training_continuation.py"
] | [
"import xgboost as xgb\nimport testing as tm\nimport numpy as np\nimport unittest\n\nrng = np.random.RandomState(1337)\n\n\nclass TestTrainingContinuation(unittest.TestCase):\n num_parallel_tree = 3\n\n xgb_params_01 = {\n 'silent': 1,\n 'nthread': 1,\n }\n\n xgb_params_02 = {\n 'silent': 1,\n 'nthread': 1,\n 'num_parallel_tree': num_parallel_tree\n }\n\n xgb_params_03 = {\n 'silent': 1,\n 'nthread': 1,\n 'num_class': 5,\n 'num_parallel_tree': num_parallel_tree\n }\n\n def test_training_continuation(self):\n tm._skip_if_no_sklearn()\n from sklearn.datasets import load_digits\n from sklearn.metrics import mean_squared_error\n\n digits_2class = load_digits(2)\n digits_5class = load_digits(5)\n\n X_2class = digits_2class['data']\n y_2class = digits_2class['target']\n\n X_5class = digits_5class['data']\n y_5class = digits_5class['target']\n\n dtrain_2class = xgb.DMatrix(X_2class, label=y_2class)\n dtrain_5class = xgb.DMatrix(X_5class, label=y_5class)\n\n gbdt_01 = xgb.train(self.xgb_params_01, dtrain_2class, num_boost_round=10)\n ntrees_01 = len(gbdt_01.get_dump())\n assert ntrees_01 == 10\n\n gbdt_02 = xgb.train(self.xgb_params_01, dtrain_2class, num_boost_round=0)\n gbdt_02.save_model('xgb_tc.model')\n\n gbdt_02a = xgb.train(self.xgb_params_01, dtrain_2class, num_boost_round=10, xgb_model=gbdt_02)\n gbdt_02b = xgb.train(self.xgb_params_01, dtrain_2class, num_boost_round=10, xgb_model=\"xgb_tc.model\")\n ntrees_02a = len(gbdt_02a.get_dump())\n ntrees_02b = len(gbdt_02b.get_dump())\n assert ntrees_02a == 10\n assert ntrees_02b == 10\n\n res1 = mean_squared_error(y_2class, gbdt_01.predict(dtrain_2class))\n res2 = mean_squared_error(y_2class, gbdt_02a.predict(dtrain_2class))\n assert res1 == res2\n\n res1 = mean_squared_error(y_2class, gbdt_01.predict(dtrain_2class))\n res2 = mean_squared_error(y_2class, gbdt_02b.predict(dtrain_2class))\n assert res1 == res2\n\n gbdt_03 = xgb.train(self.xgb_params_01, dtrain_2class, num_boost_round=3)\n gbdt_03.save_model('xgb_tc.model')\n\n gbdt_03a = xgb.train(self.xgb_params_01, dtrain_2class, num_boost_round=7, xgb_model=gbdt_03)\n gbdt_03b = xgb.train(self.xgb_params_01, dtrain_2class, num_boost_round=7, xgb_model=\"xgb_tc.model\")\n ntrees_03a = len(gbdt_03a.get_dump())\n ntrees_03b = len(gbdt_03b.get_dump())\n assert ntrees_03a == 10\n assert ntrees_03b == 10\n\n res1 = mean_squared_error(y_2class, gbdt_03a.predict(dtrain_2class))\n res2 = mean_squared_error(y_2class, gbdt_03b.predict(dtrain_2class))\n assert res1 == res2\n\n gbdt_04 = xgb.train(self.xgb_params_02, dtrain_2class, num_boost_round=3)\n assert gbdt_04.best_ntree_limit == (gbdt_04.best_iteration + 1) * self.num_parallel_tree\n\n res1 = mean_squared_error(y_2class, gbdt_04.predict(dtrain_2class))\n res2 = mean_squared_error(y_2class, gbdt_04.predict(dtrain_2class, ntree_limit=gbdt_04.best_ntree_limit))\n assert res1 == res2\n\n gbdt_04 = xgb.train(self.xgb_params_02, dtrain_2class, num_boost_round=7, xgb_model=gbdt_04)\n assert gbdt_04.best_ntree_limit == (gbdt_04.best_iteration + 1) * self.num_parallel_tree\n\n res1 = mean_squared_error(y_2class, gbdt_04.predict(dtrain_2class))\n res2 = mean_squared_error(y_2class, gbdt_04.predict(dtrain_2class, ntree_limit=gbdt_04.best_ntree_limit))\n assert res1 == res2\n\n gbdt_05 = xgb.train(self.xgb_params_03, dtrain_5class, num_boost_round=7)\n assert gbdt_05.best_ntree_limit == (gbdt_05.best_iteration + 1) * self.num_parallel_tree\n gbdt_05 = xgb.train(self.xgb_params_03, dtrain_5class, num_boost_round=3, xgb_model=gbdt_05)\n assert 
gbdt_05.best_ntree_limit == (gbdt_05.best_iteration + 1) * self.num_parallel_tree\n\n res1 = gbdt_05.predict(dtrain_5class)\n res2 = gbdt_05.predict(dtrain_5class, ntree_limit=gbdt_05.best_ntree_limit)\n np.testing.assert_almost_equal(res1, res2)\n"
] | [
[
"numpy.random.RandomState",
"sklearn.datasets.load_digits",
"numpy.testing.assert_almost_equal"
]
] |
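The core claim of the test above is that training can resume from a saved model file or an in-memory booster via the xgb_model argument, yielding the same tree count as uninterrupted training. A condensed sketch of that continuation pattern on synthetic data (the data here is illustrative):

import numpy as np
import xgboost as xgb

rng = np.random.RandomState(1337)
X = rng.rand(100, 5)
y = rng.randint(0, 2, size=100)
dtrain = xgb.DMatrix(X, label=y)
params = {'nthread': 1}

# Train 3 rounds, then continue for 7 more from the existing booster,
# mirroring the gbdt_03 / gbdt_03a case in the test.
booster = xgb.train(params, dtrain, num_boost_round=3)
booster = xgb.train(params, dtrain, num_boost_round=7, xgb_model=booster)
print(len(booster.get_dump()))  # 10 trees in total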
feldman4/NatureProtocols | [
"a0a6775b8edfc493ac6265b1844040c1ae29c33b"
] | [
"ops/ngs.py"
] | [
"import pandas as pd\nimport numpy as np\nfrom glob import glob\nfrom natsort import natsorted\n\n# TODO: from ops.constants import *\nfrom . import utils\n\ndef load_hist(filename, threshold):\n try:\n return (pd.read_csv(filename, sep='\\s+', header=None)\n .rename(columns={0: 'count', 1: 'seq'})\n .query('count > @threshold')\n .assign(fraction=lambda x: x['count']/x['count'].sum())\n .assign(log10_fraction=lambda x: np.log10(x['fraction']))\n .assign(file=filename)\n )\n except pd.errors.EmptyDataError:\n return None\n\n\ndef load_sgRNA_hists(histogram_files, threshold=3):\n pat = '(?P<plate>T.)_(?P<well>(?P<row>.)(?P<col>..))_S'\n cols = ['dataset', 'plate', 'well', 'row', 'col', \n 'count', 'log10_fraction', 'fraction', 'sgRNA']\n arr = []\n for dataset, search in histogram_files.items():\n files = natsorted(glob(search))\n (pd.concat([load_hist(f, threshold) for f in files])\n .rename(columns={'seq': 'sgRNA'})\n .pipe(lambda x: pd.concat([x['file'].str.extract(pat), x], \n axis=1))\n .pipe(utils.cast_cols, int_cols=['col'])\n .drop(['file'], axis=1)\n .assign(dataset=dataset)\n [cols]\n .pipe(arr.append)\n )\n\n return pd.concat(arr)\n\ndef calc_stats(df_hist, df_design, extra_cols=[]):\n sample_cols = ['dataset', 'plate', 'well', 'subpool'] + extra_cols\n sizes = df_design.groupby('subpool').size()\n fractions = (df_hist\n .groupby(sample_cols)\n ['fraction'].sum()\n .apply('{0:.1%}'.format)\n )\n\n cols = {'NGS_count': 'sgRNA_detected', \n 'NGS_missing': 'sgRNA_missing', \n 'NGS_designed': 'sgRNA_designed'}\n\n final_cols = ['NGS_fraction', 'NGS_Q10', 'NGS_Q50', 'NGS_Q90', 'NGS_Q90_10',\n 'NGS_mean', 'NGS_std', 'NGS_max', 'NGS_min', 'sgRNA_designed', \n 'sgRNA_detected', 'sgRNA_missing']\n\n return (df_hist\n .groupby(sample_cols)['count']\n .describe(percentiles=[0.1, 0.5, 0.9])\n .rename(columns={'10%': 'Q10', \n '50%': 'Q50', \n '90%': 'Q90'})\n .join(sizes.rename('designed'), on='subpool')\n .assign(Q90_10=lambda x: x.eval('Q90 / Q10'))\n .assign(missing=lambda x: x.eval('designed - count').astype(int))\n .pipe(utils.cast_cols, int_cols=['count', 'max', 'min'])\n .join(fractions)\n .rename(columns=lambda x: 'NGS_' + x)\n .rename(columns=cols)\n [final_cols]\n .sort_values(['dataset', 'plate', 'well', 'sgRNA_detected'],\n ascending=[True, True, True, False])\n ) \n\n\ndef identify_pool(df_hist, df_design):\n cols = ['subpool', 'spots_per_oligo']\n return (df_hist\n .join(df_design.set_index('sgRNA')[cols], on='sgRNA')\n .pipe(add_design_rank, df_design)\n .sort_values(['dataset', 'plate', 'well', 'sgRNA', 'design_rank'])\n .groupby(['dataset', 'plate', 'well', 'sgRNA']).head(1)\n .sort_values(['dataset', 'plate', 'well', 'fraction'], \n ascending=[True, True, True, False])\n .assign(mapped=lambda x: 1 - x['subpool'].isnull())\n .assign(mapped_fraction=lambda x: x.eval('fraction * mapped')) \n )\n\n\ndef add_design_rank(df_hist, df_design):\n \"\"\"For one file\n \"\"\"\n a = df_design.groupby('subpool').size()\n b = df_hist.groupby('subpool').size()\n ranked = (((b / a) * np.log10(a))\n .dropna().sort_values(ascending=False))\n designs = {k: v for v, k in enumerate(list(ranked.index))}\n get_design = lambda x: designs.get(x, 1e10)\n return (df_hist.assign(design_rank=lambda x: \n x['subpool'].apply(get_design)))"
] | [
[
"pandas.read_csv",
"numpy.log10",
"pandas.concat"
]
] |
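load_hist above builds a per-file table from whitespace-separated "count seq" histogram lines, thresholds it, and derives (log10) fractions. The same chain on an in-memory example (a sketch; io.StringIO stands in for a real histogram file, and the sequences are made up):

import io
import numpy as np
import pandas as pd

raw = io.StringIO('12 AAAA\n5 CCCC\n1 GGGG\n')
threshold = 3

df = (pd.read_csv(raw, sep=r'\s+', header=None)
      .rename(columns={0: 'count', 1: 'seq'})
      .query('count > @threshold')            # drop low-count reads
      .assign(fraction=lambda x: x['count'] / x['count'].sum())
      .assign(log10_fraction=lambda x: np.log10(x['fraction'])))
print(df)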
norfordb/groundmotion | [
"3f714894a34d9d37e1ac236f26b4366e25a05056"
] | [
"gmprocess/metrics/reduction/arias.py"
] | [
"# Third party imports\nimport numpy as np\nfrom scipy import integrate\n\n# Local imports\nfrom gmprocess.constants import GAL_TO_PCTG\nfrom gmprocess.metrics.reduction.reduction import Reduction\nfrom gmprocess.stationstream import StationStream\nfrom gmprocess.stationtrace import StationTrace\n\n\nclass Arias(Reduction):\n \"\"\"Class for calculation of arias intensity.\"\"\"\n def __init__(self, reduction_data, bandwidth=None, percentile=None,\n period=None, smoothing=None):\n \"\"\"\n Args:\n reduction_data (obspy.core.stream.Stream or numpy.ndarray): Intensity\n measurement component.\n percentile (float): Percentile for rotation calculations. Default\n is None.\n period (float): Period for smoothing (Fourier amplitude spectra)\n calculations. Default is None.\n smoothing (string): Smoothing type. Default is None.\n bandwidth (float): Bandwidth for the smoothing operation. Default\n is None.\n \"\"\"\n super().__init__(reduction_data, bandwidth=None, percentile=None,\n period=None, smoothing=None)\n self.arias_stream = None\n self.result = self.get_arias()\n\n\n def get_arias(self):\n \"\"\"\n Performs calculation of arias intensity.\n\n Returns:\n arias_intensities: Dictionary of arias intensity for each channel.\n \"\"\"\n arias_intensities = {}\n arias_stream = StationStream([])\n for trace in self.reduction_data:\n dt = trace.stats['delta']\n # convert from cm/s/s to m/s/s\n acc = trace.data * 0.01\n\n # Calculate Arias Intensity\n integrated_acc2 = integrate.cumtrapz(acc * acc, dx=dt)\n arias_intensity = integrated_acc2 * np.pi * GAL_TO_PCTG / 2\n channel = trace.stats.channel\n trace.stats.standard.units = 'veloc'\n trace.stats.npts = len(arias_intensity)\n arias_stream.append(StationTrace(arias_intensity, trace.stats))\n arias_intensities[channel] = np.abs(np.max(arias_intensity))\n self.arias_stream = arias_stream\n return arias_intensities\n"
] | [
[
"scipy.integrate.cumtrapz",
"numpy.max"
]
] |
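get_arias above integrates squared acceleration with SciPy's cumulative trapezoid rule and scales by pi * GAL_TO_PCTG / 2. A standalone sketch of the underlying formula Ia(t) = (pi / 2g) * integral of a(t)^2 dt on a synthetic record (cumtrapz matches the source's SciPy version; newer SciPy renames it cumulative_trapezoid, and the unit constants here are assumptions, not the module's GAL_TO_PCTG scaling):

import numpy as np
from scipy import integrate

dt = 0.01                                              # sample spacing, s
t = np.arange(0.0, 10.0, dt)
acc = 0.5 * np.sin(2 * np.pi * t) * np.exp(-0.3 * t)   # synthetic acceleration, m/s/s

g = 9.81
ia = integrate.cumtrapz(acc * acc, dx=dt) * np.pi / (2 * g)
print(ia[-1])  # total Arias intensity, m/s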