repo_name (string, lengths 6-130) | hexsha (sequence) | file_path (sequence) | code (sequence) | apis (sequence) | possible_versions (list) |
---|---|---|---|---|---|
alirkaya/card-fraud-detection | [
"90e4639c55a8502e03c35660a7b86a847c069cfb"
] | [
"Documentation/my_module.py"
] | [
"\"\"\"Prepared by [Ali Rifat Kaya](https://www.linkedin.com/in/alirifatkaya/)\n\"\"\"\n\n\ndef pr_auc_score(y_test, predicted_probabilities):\n \"\"\"Return AUCPR (Area Under Curve Precision-Recall) score\n\n Parameters\n ----------\n\n y_test : Test set target values\n Example:\n # df is a pandas dataframe with features and target variable\n # where 'Class' is the target variable\n >>> import pandas as pd\n >>> from sklearn.model_selection import train_test_split\n\n >>> X = df.drop('Class', axis=1).values # input matrix\n >>> y = df['Class'].values # target array\n >>> X_train, X_test, y_train, y_test = train_test_split(X,\n y,\n test_size=0.3,\n random_state=1)\n >>> # y_test is the target values for test set\n\n\n predicted_probabilities : Predicted probabilities for positive class\n Example:\n >>> from sklearn.linear_model import LogisticRegression\n >>> lr = LogisticRegression()\n >>> lr.fit(X_train, y_train)\n >>> predicted_probabilities = lr.predict_proba(X_test)[:, 1]\n\n\n Returns\n -------\n\n auc_score : The AUCPR score for the given target values and probabilities\n \"\"\"\n from sklearn.metrics import precision_recall_curve\n from sklearn.metrics import auc\n\n precision, recall, threshold = precision_recall_curve(y_test,\n predicted_probabilities)\n auc_score = auc(recall, precision)\n\n return auc_score\n\n\ndef scoring_functions():\n \"\"\"Returns a list of scoring functions as a list\n * Accuracy Score\n * Precision Score\n * Recall Score\n * Specificity Score\n * F1 Score\n * F2 Score\n * Matthews Correlation Coefficient\n * Geometric Mean Score\n * AUCPR Score\n * AUCROC Score\n\n Returns\n -------\n\n List of scoring fucntions\n Example:\n >>> list_of_scoring_functions = scores()\n >>> for scoring_function in list_of_scoring_functions:\n ... 
print(scoring_function)\n ### prints\n # accuracy_score\n # precision_score\n # recall_score\n # specificity_score\n # f1_score\n # fbeta_score\n # geometric_mean_score\n # matthews_corrcoef\n # roc_auc_score\n # pr_auc_score\n \"\"\"\n from sklearn.metrics import accuracy_score\n from sklearn.metrics import precision_score\n from sklearn.metrics import recall_score\n from imblearn.metrics import specificity_score\n from sklearn.metrics import f1_score\n from sklearn.metrics import fbeta_score\n from imblearn.metrics import geometric_mean_score\n from sklearn.metrics import matthews_corrcoef\n from sklearn.metrics import roc_auc_score\n\n list_of_scoring_functions = [\n accuracy_score,\n precision_score,\n recall_score,\n specificity_score,\n f1_score,\n fbeta_score,\n geometric_mean_score,\n matthews_corrcoef,\n roc_auc_score,\n pr_auc_score\n ]\n\n return list_of_scoring_functions\n\n\ndef do_cross_validation(X, y, estimators, cv=None, resample=None, scalers=[False], verbose=True, sleep_time=None):\n \"\"\" Return Cross-Validation score for each fold by fitting the model from\n scratch.\n\n Parameters\n ----------\n\n X: The input matrix\n Example:\n # df is a pandas dataframe with features and target variable\n # where 'Class' is the target variable\n >>> X = df.drop('Class', axis=1).values\n\n\n y: The target array\n Example:\n # df is a pandas dataframe with features and target variable\n # where the 'Class' is the target variable\n >>> y = df['Class'].values\n\n\n estimators: A list of tuple(s) where the tuple is ('estimator_name', estimator)\n Example:\n >>> from sklearn.linear_model import LogisticRegresion\n >>> lr = LogisticRegresion()\n >>> estimators = [('Logistic Regression', lr)]\n\n >>> from sklearn.linear_model import LogisticRegresion\n >>> from sklearn.ensemble import RandomForestClassifier\n >>> lr = LogisticRegresion()\n >>> rf = RandomForestClassifier()\n >>> estimators = [('Logistic Regression', lr),\n ... ('Random Forest Classifier', rf)]\n\n\n cv: Cross-Validation object. If no cross-validation object is passed to `cv`,\n then cv is `StratifiedKFold(n_splits=5, shuffle=True, random_state=1)`\n by default.\n Example:\n >>> from sklearn.model_selection import StratifiedKFold\n >>> cv = StratifiedKFold(n_splits=5, shuffle=True, random_state=1)\n\n\n resample: if True, resample the training data and fits the models using the\n resampled training data. 
Do NOT touch to validation data.\n Default value is `None`.\n Example:\n >>> from imblearn.over_sampling import SMOTE\n >>> from sklearn.linear_model import LogisticRegresion\n >>> smote = SMOTE()\n >>> resample = [('SMOTE', smote)]\n >>> lr = LogisticRegresion()\n >>> estimators = [('Logistic Regression', lr)]\n >>> do_cross_validation(X, y, estimators=estimators, cv, resample=resample, scaler=[True], verbose=False)\n\n\n scalers: An array of boolean values, each value is for the corresponding\n estimator.\n Default value is `[False]`\n Example:\n >>> from sklearn.linear_model import LogisticRegression\n >>> from sklearn.ensemble import RandomForestClassifier\n >>> lr = LogisticRegression()\n >>> rf = RandomForestClassifier()\n >>> models = [lr, rf]\n >>> scalers = [True, False]\n >>> cv_results = do_cross_validation(X, y, estimators=models,\n cv, scalers=scalers, verbose=False)\n\n\n print: if True, prints out information about each fold such as size of the\n training data and test data, AUCPR and AUCROC scores for each fold,\n and predicted labels.\n Default value is `True`.\n\n\n sleep_time: Sleeping time in seconds between each iteration\n Example:\n >>> sleep_time=1800 # 30 mins\n >>> cv_results = do_cross_validation(X, y, estimators=models,\n cv, scalers=scalers, verbose=False,\n sleep_time=sleep_time)\n\n\n\n Returns\n -------\n\n Nested dictionary of results with\n * precisions and recalls to plot precision-recall curve\n * fpr and tpr to plot roc curve\n Example:\n >>> {\n 'Logistic Regression' : {\n 'accuracy_score' : [], # cross validation accuracy scores as a list\n ...\n 'tprs' : [] # cross validation tpr for each fold\n }\n }\n\n\n Verbose\n ------\n\n For each fold of cross-validation, prints the followings:\n * The estimator\n * Training set and validation set sizes\n * AUCPR score for training and validation sets\n * AUCROC score for training and validation sets\n * Number of True Positives in the validation set\n * Number of False Positives in the validation set\n \"\"\"\n from sklearn.preprocessing import scale\n from sklearn.metrics import roc_auc_score\n from sklearn.metrics import fbeta_score\n from sklearn.metrics import confusion_matrix\n from sklearn.metrics import precision_recall_curve\n from sklearn.metrics import roc_curve\n from sklearn.base import clone\n from time import sleep\n\n scores = {}\n list_of_scoring_functions = scoring_functions()\n metrics = ['accuracy_score', 'precision_score', 'recall_score',\n 'specificity_score', 'f1_score', 'f2_score',\n 'geometric_mean_score', 'matthews_corrcoef', 'roc_auc_score',\n 'pr_auc_score']\n\n if not cv:\n cv = StratifiedKFold(n_splits=5, shuffle=True, random_state=1)\n\n i = 0 # tracks the folds\n for train_idx, validation_idx in cv.split(X, y):\n X_train, X_validation = X[train_idx], X[validation_idx]\n y_train, y_validation = y[train_idx], y[validation_idx]\n\n X_train_copy = X_train.copy()\n y_train_copy = y_train.copy()\n X_validation_copy = X_validation.copy()\n\n if resample:\n if verbose:\n print('Fold {}:'.format(i + 1))\n\n for name, method in resample:\n X_train_copy_resample, y_train_copy_resample = method.fit_resample(\n X_train_copy, y_train_copy)\n if verbose:\n print('\\n'+name)\n print('-' * 81)\n print('Number of transactions in the original training dataset:', X_train_copy.shape[0])\n print('Number of transactions in the resampled training dataset:',\n X_train_copy_resample.shape[0])\n print('-' * 81)\n print('Number of Fraudulent Transactions in the original training dataset:',\n 
y_train_copy.sum())\n print('Number of Fraudulent Transactions in the resampled training dataset',\n y_train_copy_resample.sum())\n print('=' * 81)\n\n for estimator, scaler in zip(estimators, scalers):\n ml_name, ml = estimator\n estimator_ = clone(ml)\n if scaler:\n X_train_copy_resample_scaled = scale(\n X_train_copy_resample)\n X_validation_scaled = scale(X_validation_copy)\n estimator_.fit(\n X_train_copy_resample_scaled, y_train_copy_resample)\n preds = estimator_.predict(X_validation_scaled)\n probas_training = estimator_.predict_proba(X_train_copy_resample_scaled)[\n :, 1]\n probas = estimator_.predict_proba(\n X_validation_scaled)[:, 1]\n else:\n estimator_.fit(X_train_copy_resample,\n y_train_copy_resample)\n preds = estimator_.predict(X_validation_copy)\n probas_training = estimator_.predict_proba(\n X_train_copy_resample)[:, 1]\n probas = estimator_.predict_proba(\n X_validation_copy)[:, 1]\n\n precision, recall, threshold = precision_recall_curve(\n y_validation, probas)\n fpr, tpr, threshold = roc_curve(y_validation, probas)\n tn, fp, fn, tp = confusion_matrix(y_validation, preds).ravel()\n\n if verbose:\n print('\\n' + ml_name + ' with ' + name)\n print('-' * 81)\n print('Training data AUCPR score: {}'.format(\n pr_auc_score(y_train_copy_resample, probas_training)))\n print('Validation data AUCPR score: {}'.format(\n pr_auc_score(y_validation, probas)))\n\n print('\\nTraining data AUCROC score: {}'.format(\n roc_auc_score(y_train_copy_resample, probas_training)))\n print('Validation data AUCROC score: {}'.format(\n roc_auc_score(y_validation, probas)))\n print('-' * 81)\n print('There are {} fraudulent transactions in the validation '\n 'set'.format(y_validation.sum()))\n print('{} out of {} predicted fraudulent transactions '\n 'are true fraudulent transactions'.format(\n tp, fp + tp))\n print()\n\n key_ = ml_name + '_' + name\n if key_ not in scores.keys():\n scores[key_] = {}\n\n plots = ['precisions', 'recalls', 'fprs', 'tprs']\n for key in plots:\n if key not in scores[key_]:\n scores[key_][key] = []\n\n scores[key_]['precisions'].append(precision)\n scores[key_]['recalls'].append(recall)\n scores[key_]['fprs'].append(fpr)\n scores[key_]['tprs'].append(tpr)\n\n for metric_name, metric in zip(metrics, list_of_scoring_functions):\n if metric_name not in scores[key_].keys():\n scores[key_][metric_name] = []\n if metric in [roc_auc_score, pr_auc_score]:\n scores[key_][metric_name].append(metric(y_validation,\n probas))\n elif metric == fbeta_score:\n scores[key_][metric_name].append(metric(y_validation,\n preds,\n beta=2))\n else:\n scores[key_][metric_name].append(metric(y_validation,\n preds))\n if sleep_time:\n print('sleeping... 
{} seconds'.format(sleep_time))\n sleep(sleep_time)\n\n if verbose:\n print()\n else:\n if verbose:\n print('Fold {}:'.format(i + 1))\n print('\\nNumber of Observations in the Training Data: {}'\n .format(X_train.shape[0]))\n print('Number of Observations in the Validation Data: {}:'\n .format(y_validation.shape[0]))\n print('=' * 81)\n for estimator, scaler in zip(estimators, scalers):\n ml_name, ml = estimator\n estimator_ = clone(ml)\n if scaler:\n X_train_scaled = scale(X_train_copy)\n X_validation_scaled = scale(X_validation_copy)\n estimator_.fit(X_train_scaled, y_train)\n preds = estimator_.predict(X_validation_scaled)\n probas_training = estimator_.predict_proba(X_train_scaled)[\n :, 1]\n probas = estimator_.predict_proba(X_validation_scaled)[:, 1]\n else:\n estimator_.fit(X_train, y_train)\n preds = estimator_.predict(X_validation)\n probas_training = estimator_.predict_proba(X_train)[:, 1]\n probas = estimator_.predict_proba(X_validation)[:, 1]\n\n precision, recall, threshold = precision_recall_curve(\n y_validation, probas)\n fpr, tpr, threshold = roc_curve(y_validation, probas)\n tn, fp, fn, tp = confusion_matrix(y_validation, preds).ravel()\n\n if verbose:\n print('\\n' + ml_name)\n print(('-' * 81))\n print(('Training data AUCPR score: {}'.format(\n pr_auc_score(y_train, probas_training))))\n print(('Validation data AUCPR score: {}'.format(\n pr_auc_score(y_validation, probas))))\n\n print(('\\nTraining data AUCROC score: {}'.format(\n roc_auc_score(y_train, probas_training))))\n print(('Validation data AUCROC score: {}'.format(\n roc_auc_score(y_validation, probas))))\n print(('-' * 81))\n print('There are {} fraudulent transactions in the validation '\n 'set'.format(y_validation.sum()))\n print('{} out of {} predicted fraudulent transactions '\n 'are true fraudulent transactions'.format(\n tp, fp + tp))\n print()\n\n if ml_name not in scores.keys():\n scores[ml_name] = {}\n\n plots = ['precisions', 'recalls', 'fprs', 'tprs']\n for key in plots:\n if key not in scores[ml_name]:\n scores[ml_name][key] = []\n\n scores[ml_name]['precisions'].append(precision)\n scores[ml_name]['recalls'].append(recall)\n scores[ml_name]['fprs'].append(fpr)\n scores[ml_name]['tprs'].append(tpr)\n\n for metric_name, metric in zip(metrics, list_of_scoring_functions):\n if metric_name not in scores[ml_name].keys():\n scores[ml_name][metric_name] = []\n if metric in [roc_auc_score, pr_auc_score]:\n scores[ml_name][metric_name].append(metric(y_validation,\n probas))\n elif metric == fbeta_score:\n scores[ml_name][metric_name].append(metric(y_validation,\n preds,\n beta=2))\n else:\n scores[ml_name][metric_name].append(metric(y_validation,\n preds))\n if sleep_time:\n print('sleeping... 
{} seconds'.format(sleep_time))\n sleep(sleep_time)\n if verbose:\n print()\n i += 1\n if verbose:\n print('=' * 81)\n print('=' * 81)\n\n return scores\n\n\ndef plot_confusion_matrix(y, predictions, title=None, ax=None, cmap='Purples', cbar=False):\n \"\"\"Plots Confusion Matrix\n\n Parameters\n ----------\n\n y: The target array\n Example:\n # df is a pandas dataframe with features and target variable\n # where the 'Class' is the target variable\n >>> y = df['Class'].values\n\n predictions: The predicted labels\n Example:\n # df is a pandas dataframe with features and target variable\n # where the 'Class' is the target variable\n >>> from sklearn.linear_model import LogisticRegresion\n >>> X = df.drop('Class', axis=1).values\n >>> y = df['Class'].values\n >>> lr = LogisticRegresion()\n >>> lr.fit(X_train, y_train)\n >>> predictions = lr.predict(X_test) # predicted labels\n\n title: Title of the plot\n Example:\n >>> title = 'Logistic Regression'\n >>> plot_confusion_matrix(y, predictions, title=title, ax, cmap, cbar)\n\n ax: An axis object\n Example:\n >>> fig, ax = plt.subplots()\n >>> plot_confusion_matrix(y, predictions, title, ax=ax)\n\n cmap: The color map for the confusion matrix\n Example:\n >>> import matplotlib.pyplot as plt\n >>> plt.colormaps() # prints all available color maps\n\n cbar: If True shows the color bar next to the confusion matrix\n Example\n >>> plot_confusion_matrix(y, predictions, title, ax=ax, cbar=True)\n\n Returns\n -------\n\n ax: Axes object\n \"\"\"\n\n from pandas import DataFrame\n import matplotlib.pyplot as plt\n from sklearn.metrics import confusion_matrix\n from seaborn import heatmap\n\n if ax is None:\n ax = plt.gca()\n\n cm_df = DataFrame(confusion_matrix(y, predictions))\n\n # Use a seaborn heatmap to plot confusion matrices\n # The dataframe is transposed to make Actual values on x-axis and\n # predicted values on y-axis\n # annot = True includes the numbers in each box\n # vmin and vmax just adjusts the color value\n heatmap(cm_df.T,\n annot=True,\n annot_kws={\"size\": 15},\n cmap=cmap,\n vmin=0,\n vmax=800,\n fmt='.0f',\n linewidths=1,\n linecolor=\"white\",\n cbar=cbar,\n xticklabels=[\"Genuine\", \"Fraud\"],\n yticklabels=[\"Genuine\", \"Fraud\"],\n ax=ax)\n\n # adjusts the heights of the top and bottom squares of the heatmap\n # matplotlib 3.1.1 has a bug that shows only the half of the top\n # and bottom rows of the heatmap\n # bottom, top = ax.get_ylim()\n # _ = ax.set_ylim(bottom + 0.5, top - 0.5)\n\n # ax.set_ylabel(\"Predicted\", fontweight='bold', fontsize=15)\n # ax.set_xlabel(\"Actual\", fontweight='bold', fontsize=15)\n ax.set_xticklabels([\"Genuine\", \"Fraud\"], fontsize=13)\n ax.set_yticklabels([\"Genuine\", \"Fraud\"], fontsize=13)\n ax.set_title(title, fontweight='bold', pad=5)\n\n return ax\n\n\ndef plot_precision_recall_curve(y_test, precisions, recalls, title, ax=None):\n \"\"\"Plots Precision-Recall Curve\n\n Parameters\n ----------\n\n y: The target array of the test set\n Example:\n # df is a pandas dataframe with features and target variable\n # where the 'Class' is the target variable\n >>> X = df.drop('Class', axis=1).values\n >>> y = df.Class.values\n >>> X_train, X_test, y_train, y_test = train_test_split(X,\n y,\n test_size=0.3,\n random_state=1)\n\n\n precisions: Precision score for each threshold\n Example:\n # df is a pandas dataframe with features and target variable\n # where 'Class' is the target variable\n >>> from sklearn.linear_model import LogisticRegresion\n >>> from sklearn.metrics import 
precision_recall_curve\n >>> X = df.drop('Class', axis=1).values\n >>> y = df.Class.values\n >>> X_train, X_test, y_train, y_test = train_test_split(X,\n y,\n test_size=0.3,\n random_state=1)\n >>> lr = LogisticRegresion()\n >>> lr.fit(X_train, y_train)\n >>> predicted_probabilities = lr.predict_proba(X_test)[:, 1]\n >>> precision, _, _ = precision_recall_curve(y, predicted_probabilities)\n\n\n recalls: Recall score for each threshold\n Example:\n # df is a pandas dataframe with features and target variable\n # where 'Class' is the target variable\n >>> from sklearn.linear_model import LogisticRegresion\n >>> from sklearn.metrics import precision_recall_curve\n >>> X = df.drop('Class', axis=1).values\n >>> y = df.Class.values\n >>> X_train, X_test, y_train, y_test = train_test_split(X,\n y,\n test_size=0.3,\n random_state=1)\n >>> lr = LogisticRegresion()\n >>> lr.fit(X_train, y_train)\n >>> predicted_probabilities = lr.predict_proba(X_test)[:, 1]\n >>> _, recall, _ = precision_recall_curve(y, predicted_probabilities)\n\n\n title: Title of the plot\n Example:\n >>> title = 'Logistic Regression'\n >>> plot_precision_recall_curve(precisions, recalls, title=title, ax)\n\n\n ax: An axis object\n Example:\n >>> fig, ax = plt.subplots()\n >>> plot_confusion_matrix(y, predictions, title, ax=ax)\n\n\n Returns\n -------\n\n ax: Axes object\n \"\"\"\n\n from numpy import linspace\n from numpy import interp\n from numpy import mean\n from numpy import std\n from numpy import minimum\n from numpy import maximum\n from sklearn.metrics import auc\n import matplotlib.pyplot as plt\n\n if ax is None:\n ax = plt.gca()\n\n # Metrics\n prs = []\n aucs = []\n mean_recall = linspace(0, 1, 100)\n\n # plots PR curve for each fold\n i = 0\n for precision, recall in zip(precisions, recalls):\n prs.append(interp(mean_recall, precision, recall))\n pr_auc = auc(recall, precision)\n aucs.append(pr_auc)\n ax.plot(recall,\n precision,\n lw=3,\n alpha=0.5,\n label='Fold %d (AUCPR = %0.2f)' % (i + 1, pr_auc))\n i += 1\n\n # plots the mean AUCPR curve\n ax.axhline(y_test.sum() / y_test.shape[0],\n linestyle='--',\n alpha=0.8,\n label='No Skill')\n mean_precision = mean(prs, axis=0)\n mean_auc = auc(mean_recall, mean_precision)\n std_auc = std(aucs)\n ax.plot(mean_precision,\n mean_recall,\n color='navy',\n label=r'Mean (AUCPR = %0.3f $\\pm$ %0.2f)' % (mean_auc, std_auc),\n lw=4)\n\n ax.set_title(title)\n ax.set_xlim([-0.05, 1.05])\n ax.set_ylim([-0.05, 1.05])\n ax.legend(fontsize='xx-small')\n\n return ax\n\n\ndef plot_roc_curve(fprs, tprs, title, ax=None):\n \"\"\"Plots ROC (Receiver Operating Curve)\n\n Parameters\n ----------\n\n fprs: False Positive Rate for each threshold\n Example:\n # df is a pandas dataframe with features and target variable\n # where 'Class' is the target variable\n >>> from sklearn.linear_model import LogisticRegresion\n >>> from sklearn.metrics import precision_recall_curve\n >>> X = df.drop('Class', axis=1).values\n >>> y = df.Class.values\n >>> X_train, X_test, y_train, y_test = train_test_split(X,\n y,\n test_size=0.3,\n random_state=1)\n >>> lr = LogisticRegresion()\n >>> lr.fit(X_train, y_train)\n >>> predicted_probabilities = lr.predict_proba(X_test)[:, 1]\n >>> fpr, _, _ = roc_curve(y, predicted_probabilities)\n\n\n tprs: True Positive Rate for each threshold\n Example:\n # df is a pandas dataframe with features and target variable\n # where 'Class' is the target variable\n >>> from sklearn.linear_model import LogisticRegresion\n >>> from sklearn.metrics import precision_recall_curve\n 
>>> X = df.drop('Class', axis=1).values\n >>> y = df.Class.values\n >>> X_train, X_test, y_train, y_test = train_test_split(X,\n y,\n test_size=0.3,\n random_state=1)\n >>> lr = LogisticRegresion()\n >>> lr.fit(X_train, y_train)\n >>> predicted_probabilities = lr.predict_proba(X_test)[:, 1]\n >>> _, tpr, _ = roc_curve(y, predicted_probabilities)\n\n\n title: Title of the plot\n Example:\n >>> title = 'Logistic Regression'\n >>> plot_precision_recall_curve(precisions, recalls, title=title, ax)\n\n\n ax: An axis object\n Example:\n >>> fig, ax = plt.subplots()\n >>> plot_confusion_matrix(y, predictions, title, ax=ax)\n\n\n Returns\n -------\n\n ax: Axes object\n \"\"\"\n\n from numpy import linspace\n from numpy import interp\n from numpy import mean\n from numpy import std\n from numpy import minimum\n from numpy import maximum\n from sklearn.metrics import auc\n import matplotlib.pyplot as plt\n\n if ax is None:\n ax = plt.gca()\n\n # Metrics\n tprs_ = []\n aucs = []\n mean_fpr = linspace(0, 1, 100)\n\n # plots ROC curves for each fold\n i = 0\n for fpr, tpr in zip(fprs, tprs):\n interp_tpr = interp(mean_fpr, fpr, tpr)\n interp_tpr[0] = 0.0\n tprs_.append(interp_tpr)\n roc_auc = auc(fpr, tpr)\n aucs.append(roc_auc)\n ax.plot(fpr,\n tpr,\n lw=3,\n alpha=0.5,\n label='ROC Fold %d (AUC = %0.2f)' % (i + 1, roc_auc))\n\n i += 1\n\n # Plot mean ROC Curve\n ax.plot([0, 1], [0, 1],\n linestyle='--',\n lw=3,\n color='k',\n label='No Skill',\n alpha=.8)\n mean_tpr = mean(tprs_, axis=0)\n mean_tpr[-1] = 1.0\n mean_auc = auc(mean_fpr, mean_tpr)\n std_auc = std(aucs)\n ax.plot(mean_fpr,\n mean_tpr,\n color='navy',\n label=r'Mean ROC (AUC = %0.3f $\\pm$ %0.2f)' % (mean_auc, std_auc),\n lw=4)\n\n # calculates the standard deviation and fills the +-1 standard deviation\n # of the mean ROC curve\n std_tpr = std(tprs_, axis=0)\n tprs_upper = minimum(mean_tpr + std_tpr, 1)\n tprs_lower = maximum(mean_tpr - std_tpr, 0)\n ax.fill_between(mean_fpr,\n tprs_lower,\n tprs_upper,\n color='grey',\n alpha=.2,\n label=r'$\\pm$ 1 Standard Deviation')\n\n ax.set_xlim([-0.05, 1.05])\n ax.set_ylim([-0.05, 1.05])\n ax.set_title(title)\n ax.legend(loc='lower right', fontsize='xx-small')\n\n return ax\n\n\ndef calculate_statistics(cv_scores):\n \"\"\"Returns mean and standard deviation of CV scores\n \"\"\"\n from numpy import array\n\n not_scores = ['precisions', 'recalls', 'fprs', 'tprs', 'predictions']\n mean_scores = {}\n std_dev = {}\n for k, v in cv_scores.items():\n mean_scores[k] = []\n std_dev[k] = []\n for key, value in v.items():\n if key not in not_scores:\n mean_scores[k].append(array(value).mean())\n std_dev[k].append(array(value).std())\n return mean_scores, std_dev\n\n\ndef make_df_statistics(cv_results):\n \"\"\"Return results from `calculate_statistics` into a DataFrame\"\"\"\n from pandas import DataFrame\n\n metrics = [\n 'accuracy_score', 'precision_score', 'recall_score', 'specificity_score',\n 'f1_score', 'f2_score', 'geometric_mean_score', 'matthews_corrcoef',\n 'roc_auc_score', 'pr_auc_score']\n new_metrics = metrics[-3:]\n df = DataFrame(cv_results)\n df['metrics'] = metrics\n df.set_index('metrics', inplace=True)\n df.index.name = None\n df = df.loc[new_metrics, :]\n df = df.T\n return df\n\n\ndef train_model(estimators, X, y, scalers=[False]):\n from sklearn.preprocessing import scale\n from sklearn.metrics import roc_auc_score\n from sklearn.metrics import fbeta_score\n from sklearn.metrics import precision_recall_curve\n from sklearn.metrics import roc_curve\n\n scores = {}\n 
list_of_scoring_functions = scoring_functions()\n metrics = ['accuracy_score', 'precision_score', 'recall_score',\n 'specificity_score', 'f1_score', 'f2_score',\n 'geometric_mean_score', 'matthews_corrcoef', 'roc_auc_score',\n 'pr_auc_score']\n\n for estimator, scaler in zip(estimators, scalers):\n ml_name, ml = estimator\n X_copy = X.copy()\n\n if scaler:\n X_scaled = scale(X_copy)\n ml.fit(X_scaled, y)\n preds = ml.predict(X_scaled)\n probas = ml.predict_proba(X_scaled)[:, 1]\n else:\n ml.fit(X, y)\n preds = ml.predict(X)\n probas = ml.predict_proba(X)[:, 1]\n\n if ml_name not in scores.keys():\n scores[ml_name] = {}\n\n for metric_name, metric in zip(metrics, list_of_scoring_functions):\n if metric_name not in scores[ml_name].keys():\n scores[ml_name][metric_name] = []\n if metric in [roc_auc_score, pr_auc_score]:\n scores[ml_name][metric_name].append(metric(y,\n probas))\n elif metric == fbeta_score:\n scores[ml_name][metric_name].append(metric(y,\n preds,\n beta=2))\n else:\n scores[ml_name][metric_name].append(metric(y,\n preds))\n return scores\n\n\ndef test_model(estimators, X, y, scalers=[False]):\n from sklearn.preprocessing import scale\n from sklearn.metrics import roc_auc_score\n from sklearn.metrics import fbeta_score\n from sklearn.metrics import precision_recall_curve\n from sklearn.metrics import roc_curve\n\n scores = {}\n list_of_scoring_functions = scoring_functions()\n metrics = ['accuracy_score', 'precision_score', 'recall_score',\n 'specificity_score', 'f1_score', 'f2_score',\n 'geometric_mean_score', 'matthews_corrcoef', 'roc_auc_score',\n 'pr_auc_score']\n\n for estimator, scaler in zip(estimators, scalers):\n ml_name, ml = estimator\n\n X_copy = X.copy()\n if scaler:\n X_scaled = scale(X_copy)\n preds = ml.predict(X_scaled)\n probas = ml.predict_proba(X_scaled)[:, 1]\n else:\n preds = ml.predict(X)\n probas = ml.predict_proba(X)[:, 1]\n\n precision, recall, threshold = precision_recall_curve(\n y, probas)\n fpr, tpr, threshold = roc_curve(y, probas)\n\n if ml_name not in scores.keys():\n scores[ml_name] = {}\n\n keys = ['precisions', 'recalls', 'fprs', 'tprs', 'predictions']\n values = [precision, recall, fpr, tpr, preds]\n for key, value in zip(keys, values):\n if key not in scores[ml_name].keys():\n scores[ml_name][key] = []\n scores[ml_name][key].append(value)\n\n for metric_name, metric in zip(metrics, list_of_scoring_functions):\n if metric_name not in scores[ml_name].keys():\n scores[ml_name][metric_name] = []\n if metric in [roc_auc_score, pr_auc_score]:\n scores[ml_name][metric_name].append(metric(y,\n probas))\n elif metric == fbeta_score:\n scores[ml_name][metric_name].append(metric(y,\n preds,\n beta=2))\n else:\n scores[ml_name][metric_name].append(metric(y,\n preds))\n return scores\n"
] | [
[
"matplotlib.pyplot.gca",
"sklearn.metrics.roc_auc_score",
"numpy.minimum",
"numpy.maximum",
"numpy.linspace",
"sklearn.metrics.confusion_matrix",
"sklearn.metrics.precision_recall_curve",
"pandas.DataFrame",
"sklearn.metrics.roc_curve",
"sklearn.base.clone",
"numpy.std",
"numpy.mean",
"numpy.interp",
"sklearn.metrics.auc",
"numpy.array",
"sklearn.preprocessing.scale"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
onnela-lab/mech-mle | [
"8a36c1a75dbbc665feb642f527aeb09e7462e90b",
"8a36c1a75dbbc665feb642f527aeb09e7462e90b"
] | [
"drosa.py",
"python45.py"
] | [
"# JP Onnela\r\n# April 20, 2021\r\n\r\n# Edited May 17, 2021 by Jonathan Larson\r\n\r\nimport networkx as nx\r\nimport random\r\nimport scipy.stats as ss\r\nimport time\r\n\r\ndef generate_DMC(q_mod, q_con, n):\r\n \"\"\"Generate DMC model realization given parameters.\"\"\"\r\n G = nx.Graph()\r\n G.add_edge(0,1)\r\n new_nodes = list(range(2,n))\r\n anchor_nodes = []\r\n for v in new_nodes:\r\n u = random.choice(list(G.nodes()))\r\n anchor_nodes.append(u)\r\n G.add_node(v)\r\n \r\n # duplication\r\n G.add_edges_from([(v,w) for w in G.neighbors(u)])\r\n \r\n # mutation\r\n for w in list(G.neighbors(u)):\r\n if ss.bernoulli.rvs(q_mod):\r\n edge = random.choice([(v,w), (u,w)])\r\n G.remove_edge(*edge)\r\n \r\n # complementation\r\n if ss.bernoulli.rvs(q_con):\r\n G.add_edge(u,v)\r\n return (G, new_nodes, anchor_nodes)\r\n\r\n\r\ndef deconstruct_DMC(G, alpha, beta):\r\n \"\"\"Deconstruct a DMC graph over a single step.\"\"\"\r\n # reverse complementation\r\n if G.has_edge(alpha, beta):\r\n G.remove_edge(alpha, beta)\r\n w = 1\r\n else:\r\n w = 0\r\n\r\n # reverse mutation\r\n alpha_neighbors = set(G.neighbors(alpha))\r\n beta_neighbors = set(G.neighbors(beta))\r\n x = len(alpha_neighbors & beta_neighbors)\r\n y = len(alpha_neighbors | beta_neighbors)\r\n for neighbor in alpha_neighbors:\r\n G.add_edge(beta, neighbor)\r\n\r\n # reverse duplication\r\n G.remove_node(alpha)\r\n return (w, x, y)\r\n\r\n\r\ndef find_min_uni_pair(G):\r\n \"\"\"Find pair of nodes that have minimal cardinality of the union of their neighbors.\"\"\"\r\n alpha = None\r\n beta = None\r\n union_size = G.number_of_nodes()\r\n nodes = list(G.nodes())\r\n random.shuffle(nodes)\r\n for u in nodes:\r\n for v in nodes:\r\n if u > v:\r\n u_neighbors = set(G.neighbors(u))\r\n v_neighbors = set(G.neighbors(v))\r\n y = len(u_neighbors | v_neighbors)\r\n if G.has_edge(u,v):\r\n y = y - 2\r\n if y < union_size:\r\n union_size = y\r\n alpha = u\r\n beta = v\r\n return (alpha, beta, union_size)\r\n\r\n\r\ndef deconstruct(G):\r\n \"\"\"Deconstruct the graph until.\"\"\"\r\n alphas = []\r\n betas = []\r\n W = 0\r\n X = 0\r\n Y = 0\r\n (alpha, beta, union_size) = find_min_uni_pair(G)\r\n while (not alpha is None and not beta is None):\r\n print(\"Number of nodes remaining:\", G.number_of_nodes())\r\n alphas.append(alpha)\r\n betas.append(beta)\r\n (w, x, y) = deconstruct_DMC(G, alpha, beta)\r\n W += w\r\n X += x\r\n Y += y\r\n (alpha, beta, union_size) = find_min_uni_pair(G)\r\n return (alphas, betas, W, X, Y)\r\n\r\n\r\ndef estimate_parms(W, X, Y, n):\r\n \"\"\"Compute estimates of q_mod and q_con parameters.\"\"\"\r\n q_mod_hat = 1 - X / Y\r\n q_con_hat = W / (n - 1)\r\n return (q_mod_hat, q_con_hat)\r\n\r\n\r\ndef read_edgelist(input_file):\r\n \"\"\"Read edgelist from input file\"\"\"\r\n G = nx.Graph()\r\n counter = 0\r\n for line in open(input_file):\r\n counter += 1\r\n line = line.rstrip().split(\"\\t\")\r\n node_i = line[0]\r\n node_j = line[1]\r\n G.add_edge(node_i, node_j)\r\n return (G, counter)\r\n\r\n\r\ndef print_stats(G, new_nodes, anchor_nodes):\r\n \"\"\"Print out some statistics.\"\"\"\r\n print(\"Nodes:\", G.nodes())\r\n print(\"Edges:\", G.edges())\r\n print(\"New nodes (alpha):\", new_nodes)\r\n print(\"Anchor nodes (beta):\", anchor_nodes)\r\n\r\ndef save_results(output_file):\r\n\tF = open(output_file, \"w\")\r\n\t# alphas\r\n\tfor alpha in alphas:\r\n\t\tF.write(str(alpha) + \" \")\r\n\tF.write(\"\\n\")\t\r\n\t# betas\r\n\tfor beta in betas:\r\n\t\tF.write(str(beta) + \" \")\r\n\tF.write(\"\\n\")\t\r\n\t# 
others\r\n\tF.write(str(W) + \" \" + str(X) + \" \" + str(Y) + \" \" + str(q_mod_hat) + \" \" + str(q_con_hat))\r\n\tF.close()\r\n\r\n\r\n# ----------------------------------------------------------------\r\n\r\n\r\n# input and output files\r\ninput_file = \"drosa.tsv\"\r\n\r\n# read data\r\n(G, counter) = read_edgelist(input_file)\r\nG.remove_edges_from(nx.selfloop_edges(G))\r\nprint(G.number_of_edges())\r\n\r\n# degenerate graph\r\nn = G.number_of_nodes()\r\nstart = time.time()\r\n(alphas, betas, W, X, Y) = deconstruct(G)\r\nend = time.time()\r\nprint(\"Time elapsed:\", end - start)\r\n(q_mod_hat, q_con_hat) = estimate_parms(W, X, Y, n)\r\nprint(\"Parameter estimates:\", q_mod_hat, q_con_hat)\r\n\r\n\r\n\r\n\r\n",
"# JP Onnela\r\n# April 20, 2021\r\n\r\n# Edited May 12, 2021 by Jonathan Larson\r\n\r\nimport networkx as nx\r\nimport random\r\nimport scipy.stats as ss\r\nimport time\r\n\r\ndef generate_DMC(q_mod, q_con, n):\r\n \"\"\"Generate DMC model realization given parameters.\"\"\"\r\n G = nx.Graph()\r\n G.add_edge(0,1)\r\n new_nodes = list(range(2,n))\r\n anchor_nodes = []\r\n for v in new_nodes:\r\n u = random.choice(list(G.nodes()))\r\n anchor_nodes.append(u)\r\n G.add_node(v)\r\n \r\n # duplication\r\n G.add_edges_from([(v,w) for w in G.neighbors(u)])\r\n \r\n # mutation\r\n for w in list(G.neighbors(u)):\r\n if ss.bernoulli.rvs(q_mod):\r\n edge = random.choice([(v,w), (u,w)])\r\n G.remove_edge(*edge)\r\n \r\n # complementation\r\n if ss.bernoulli.rvs(q_con):\r\n G.add_edge(u,v)\r\n return (G, new_nodes, anchor_nodes)\r\n\r\n\r\ndef deconstruct_DMC(G, alpha, beta):\r\n \"\"\"Deconstruct a DMC graph over a single step.\"\"\"\r\n # reverse complementation\r\n if G.has_edge(alpha, beta):\r\n G.remove_edge(alpha, beta)\r\n w = 1\r\n else:\r\n w = 0\r\n\r\n # reverse mutation\r\n alpha_neighbors = set(G.neighbors(alpha))\r\n beta_neighbors = set(G.neighbors(beta))\r\n x = len(alpha_neighbors & beta_neighbors)\r\n y = len(alpha_neighbors | beta_neighbors)\r\n for neighbor in alpha_neighbors:\r\n G.add_edge(beta, neighbor)\r\n\r\n # reverse duplication\r\n G.remove_node(alpha)\r\n return (w, x, y)\r\n\r\n\r\ndef find_min_uni_pair(G):\r\n \"\"\"Find pair of nodes that have minimal cardinality of the union of their neighbors.\"\"\"\r\n alpha = None\r\n beta = None\r\n union_size = G.number_of_nodes()\r\n nodes = list(G.nodes())\r\n random.shuffle(nodes)\r\n for u in nodes:\r\n for v in nodes:\r\n if u > v:\r\n u_neighbors = set(G.neighbors(u))\r\n v_neighbors = set(G.neighbors(v))\r\n y = len(u_neighbors | v_neighbors)\r\n if G.has_edge(u,v):\r\n y = y - 2\r\n if y < union_size:\r\n union_size = y\r\n alpha = u\r\n beta = v\r\n return (alpha, beta, union_size)\r\n\r\n\r\ndef deconstruct(G):\r\n \"\"\"Deconstruct the graph until.\"\"\"\r\n alphas = []\r\n betas = []\r\n W = 0\r\n X = 0\r\n Y = 0\r\n (alpha, beta, union_size) = find_min_uni_pair(G)\r\n while (not alpha is None and not beta is None):\r\n print(\"Number of nodes remaining:\", G.number_of_nodes())\r\n alphas.append(alpha)\r\n betas.append(beta)\r\n (w, x, y) = deconstruct_DMC(G, alpha, beta)\r\n W += w\r\n X += x\r\n Y += y\r\n (alpha, beta, union_size) = find_min_uni_pair(G)\r\n return (alphas, betas, W, X, Y)\r\n\r\n\r\ndef estimate_parms(W, X, Y, n):\r\n \"\"\"Compute estimates of q_mod and q_con parameters.\"\"\"\r\n q_mod_hat = 1 - X / Y\r\n q_con_hat = W / (n - 1)\r\n return (q_mod_hat, q_con_hat)\r\n\r\n\r\ndef read_edgelist(input_file):\r\n \"\"\"Read edgelist from input file\"\"\"\r\n G = nx.Graph()\r\n counter = 0\r\n for line in open(input_file):\r\n counter += 1\r\n line = line.rstrip().split(\"\\t\")\r\n node_i = line[0]\r\n node_j = line[1]\r\n G.add_edge(node_i, node_j)\r\n return (G, counter)\r\n\r\n\r\ndef print_stats(G, new_nodes, anchor_nodes):\r\n \"\"\"Print out some statistics.\"\"\"\r\n print(\"Nodes:\", G.nodes())\r\n print(\"Edges:\", G.edges())\r\n print(\"New nodes (alpha):\", new_nodes)\r\n print(\"Anchor nodes (beta):\", anchor_nodes)\r\n\r\ndef save_results(output_file):\r\n\tF = open(output_file, \"w\")\r\n\t# alphas\r\n\tfor alpha in alphas:\r\n\t\tF.write(str(alpha) + \" \")\r\n\tF.write(\"\\n\")\t\r\n\t# betas\r\n\tfor beta in betas:\r\n\t\tF.write(str(beta) + \" \")\r\n\tF.write(\"\\n\")\t\r\n\t# 
others\r\n\tF.write(str(W) + \" \" + str(X) + \" \" + str(Y) + \" \" + str(q_mod_hat) + \" \" + str(q_con_hat))\r\n\tF.close()\r\n\r\n\r\n# ----------------------------------------------------------------\r\n\r\n\r\n# input and output files\r\ninput_file = \"HuRI.tsv\"\r\n\r\n# read data\r\n(G, counter) = read_edgelist(input_file)\r\n\r\n# sample nodes\r\nsampled_nodes = random.sample(list(G.nodes()),round(0.45 * G.number_of_nodes()))\r\nG.remove_nodes_from([n for n in G if n not in set(sampled_nodes)])\r\nG.remove_edges_from(nx.selfloop_edges(G))\r\nprint(G.number_of_edges())\r\n\r\n# degenerate graph\r\nn = G.number_of_nodes()\r\nstart = time.time()\r\n(alphas, betas, W, X, Y) = deconstruct(G)\r\nend = time.time()\r\nprint(\"Time elapsed:\", end - start)\r\n(q_mod_hat, q_con_hat) = estimate_parms(W, X, Y, n)\r\nprint(\"Parameter estimates:\", q_mod_hat, q_con_hat)\r\n\r\n\r\n\r\n\r\n"
] | [
[
"scipy.stats.bernoulli.rvs"
],
[
"scipy.stats.bernoulli.rvs"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
jasonfan1997/threeML | [
"21b1c76ad3423f745b9f56413d93ee01d1d5855f",
"21b1c76ad3423f745b9f56413d93ee01d1d5855f",
"21b1c76ad3423f745b9f56413d93ee01d1d5855f",
"21b1c76ad3423f745b9f56413d93ee01d1d5855f",
"21b1c76ad3423f745b9f56413d93ee01d1d5855f",
"21b1c76ad3423f745b9f56413d93ee01d1d5855f"
] | [
"threeML/test/test_fits_file.py",
"threeML/utils/time_interval.py",
"threeML/io/calculate_flux.py",
"threeML/test/test_response.py",
"threeML/utils/step_parameter_generator.py",
"threeML/io/results_table.py"
] | [
"from threeML.io.fits_file import FITSExtension, FITSFile\nimport numpy as np\nimport astropy.io.fits as fits\n\nimport pytest\n\n\nclass DUMMYEXT(FITSExtension):\n def __init__(self, test_value):\n\n data_list = [(\"TEST_VALUE\", test_value)]\n\n super(DUMMYEXT, self).__init__(\n tuple(data_list), ((\"EXTNAME\", \"TEST\", \"Extension name\"),)\n )\n\n\nclass DUMMYFITS(FITSFile):\n def __init__(self, test_value):\n\n dummy_extension = DUMMYEXT(test_value)\n\n super(DUMMYFITS, self).__init__(fits_extensions=[dummy_extension])\n\n\ndef test_fits_file():\n\n dtypes = [\n np.int16,\n np.int32,\n np.int64,\n np.uint16,\n np.uint32,\n np.float32,\n np.float64,\n ]\n dtype_keys = [\"I\", \"J\", \"K\", \"I\", \"J\", \"E\", \"D\"]\n\n for i, dt in enumerate(dtypes):\n\n test_values = np.ones(10, dtype=dt)\n\n dummy_fits = DUMMYFITS(test_value=test_values)\n\n assert len(dummy_fits._hdu_list) == 2\n\n assert dummy_fits.index_of(\"TEST\") == 1\n\n assert dummy_fits[\"TEST\"].header[\"TFORM1\"] == dtype_keys[i]\n\n assert np.alltrue(dummy_fits[\"TEST\"].data[\"TEST_VALUE\"] == test_values)\n\n file_name = \"test_fits%d.fits\" % i\n\n dummy_fits.writeto(file_name, overwrite=True)\n\n with pytest.raises(IOError):\n\n dummy_fits.writeto(file_name, overwrite=False)\n\n read_dummy_fits = fits.open(file_name)\n\n assert len(read_dummy_fits) == 2\n\n assert read_dummy_fits.index_of(\"TEST\") == 1\n\n assert read_dummy_fits[\"TEST\"].header[\"TFORM1\"] == dtype_keys[i]\n\n assert np.alltrue(read_dummy_fits[\"TEST\"].data[\"TEST_VALUE\"] == test_values)\n",
"from threeML.utils.interval import Interval, IntervalSet\nfrom threeML.io.rich_display import display\n\nimport collections\nimport pandas as pd\n\n\nclass TimeInterval(Interval):\n def __add__(self, number):\n \"\"\"\n Return a new time interval equal to the original time interval shifted to the right by number\n\n :param number: a float\n :return: a new TimeInterval instance\n \"\"\"\n\n return self.new(self._start + number, self._stop + number)\n\n def __sub__(self, number):\n \"\"\"\n Return a new time interval equal to the original time interval shifted to the left by number\n\n :param number: a float\n :return: a new TimeInterval instance\n \"\"\"\n\n return self.new(self._start - number, self._stop - number)\n\n @property\n def duration(self):\n\n return super(TimeInterval, self)._get_width()\n\n @property\n def start_time(self):\n\n return self._start\n\n @property\n def stop_time(self):\n\n return self._stop\n\n @property\n def half_time(self):\n\n return self.mid_point\n\n def __repr__(self):\n\n return \"time interval %s - %s (duration: %s)\" % (\n self.start_time,\n self.stop_time,\n self.duration,\n )\n\n\nclass TimeIntervalSet(IntervalSet):\n \"\"\"\n A set of time intervals\n\n \"\"\"\n\n INTERVAL_TYPE = TimeInterval\n\n @property\n def start_times(self):\n \"\"\"\n Return the starts fo the set\n\n :return: list of start times\n \"\"\"\n\n return self.starts\n\n @property\n def stop_times(self):\n \"\"\"\n Return the stops of the set\n\n :return:\n \"\"\"\n\n return self.stops\n\n @property\n def absolute_start_time(self):\n \"\"\"\n the minimum of the start times\n :return:\n \"\"\"\n\n return self.absolute_start\n\n @property\n def absolute_stop_time(self):\n \"\"\"\n the maximum of the stop times\n :return:\n \"\"\"\n\n return self.absolute_stop\n\n @property\n def time_edges(self):\n \"\"\"\n return an array of time edges if contiguous\n :return:\n \"\"\"\n\n return self.edges\n\n def __add__(self, number):\n \"\"\"\n Shift all time intervals to the right by number\n\n :param number: a float\n :return: new TimeIntervalSet instance\n \"\"\"\n\n new_set = self.new()\n new_set.extend([time_interval + number for time_interval in self._intervals])\n\n return new_set\n\n def __sub__(self, number):\n \"\"\"\n Shift all time intervals to the left by number (in place)\n\n :param number: a float\n :return: new TimeIntervalSet instance\n \"\"\"\n\n new_set = self.new(\n [time_interval - number for time_interval in self._intervals]\n )\n\n return new_set\n\n def _create_pandas(self):\n\n time_interval_dict = collections.OrderedDict()\n\n time_interval_dict[\"Start\"] = []\n time_interval_dict[\"Stop\"] = []\n time_interval_dict[\"Duration\"] = []\n time_interval_dict[\"Midpoint\"] = []\n\n for i, interval in enumerate(self._intervals):\n\n time_interval_dict[\"Start\"].append(interval.start)\n time_interval_dict[\"Stop\"].append(interval.stop)\n time_interval_dict[\"Duration\"].append(interval.duration)\n time_interval_dict[\"Midpoint\"].append(interval.half_time)\n\n df = pd.DataFrame(data=time_interval_dict)\n\n return df\n\n def display(self):\n \"\"\"\n Display the time intervals\n\n :return: None\n \"\"\"\n\n display(self._create_pandas())\n\n def __repr__(self):\n\n return self._create_pandas().to_string()\n",
"from builtins import range\n\n__author__ = \"grburgess\"\n\nimport collections\n\nimport numpy as np\nimport pandas as pd\n\nfrom threeML.exceptions.custom_exceptions import custom_warnings\nfrom threeML.io.logging import setup_logger\n# from threeML.io.rich_display import display\nfrom threeML.utils.fitted_objects.fitted_point_sources import \\\n FittedPointSourceSpectralHandler\nfrom threeML.utils.progress_bar import tqdm\n\nlog =setup_logger(__name__)\n\ndef _setup_analysis_dictionaries(\n analysis_results,\n energy_range,\n energy_unit,\n flux_unit,\n use_components,\n components_to_use,\n confidence_level,\n equal_tailed,\n differential,\n sources_to_use,\n include_extended,\n):\n \"\"\"\n helper function to pull out analysis details that are common to flux and plotting functions\n\n\n :param analysis_results:\n :param energy_range:\n :param energy_unit:\n :param flux_unit:\n :param use_components:\n :param components_to_use:\n :param confidence_level:\n :param fraction_of_samples:\n :param differential:\n :param sources_to_use:\n :param include_extended:\n :return:\n \"\"\"\n\n bayesian_analyses = collections.OrderedDict()\n mle_analyses = collections.OrderedDict()\n\n # first we split up the bayesian and mle analysis\n\n mle_sources = collections.OrderedDict()\n bayes_sources = collections.OrderedDict()\n\n for analysis in analysis_results:\n\n items = (\n list(analysis.optimized_model.point_sources.items())\n if not include_extended\n else list(analysis.optimized_model.sources.items())\n )\n\n for source_name, source in items:\n\n if source_name in sources_to_use or not sources_to_use:\n\n if analysis.analysis_type == \"MLE\":\n\n # keep track of duplicate sources\n\n mle_sources.setdefault(source_name, []).append(1)\n\n if len(mle_sources[source_name]) > 1:\n name = \"%s_%d\" % (source_name, len(\n mle_sources[source_name]))\n\n else:\n\n name = source_name\n\n try:\n\n comps = [\n c.name for c in source.spectrum.main.composite.functions\n ]\n\n except:\n\n comps = []\n\n # duplicate components\n comps = [\n \"%s_n%i\" % (s, suffix) if num > 1 else s\n for s, num in list(collections.Counter(comps).items())\n for suffix in range(1, num + 1)\n ]\n\n mle_analyses[name] = {\n \"source\": source_name,\n \"analysis\": analysis,\n \"component_names\": comps,\n }\n\n else:\n\n bayes_sources.setdefault(source_name, []).append(1)\n\n # keep track of duplicate sources\n\n if len(bayes_sources[source_name]) > 1:\n name = \"%s_%d\" % (source_name, len(\n bayes_sources[source_name]))\n\n else:\n\n name = source_name\n\n try:\n\n comps = [\n c.name for c in source.spectrum.main.composite.functions\n ]\n\n except:\n\n comps = []\n\n # duplicate components\n comps = [\n \"%s_n%i\" % (s, suffix) if num > 1 else s\n for s, num in list(collections.Counter(comps).items())\n for suffix in range(1, num + 1)\n ]\n\n bayesian_analyses[name] = {\n \"source\": source_name,\n \"analysis\": analysis,\n \"component_names\": comps,\n }\n\n # keep track of the number of sources we will use\n\n num_sources_to_use = 0\n\n # go through the MLE analysis and build up some fitted sources\n\n if mle_analyses:\n \n for key in tqdm(list(mle_analyses.keys()), desc=\"processing MLE analyses\"):\n\n # if we want to use this source\n\n if (\n not use_components\n or (\"total\" in components_to_use)\n or (not mle_analyses[key][\"component_names\"])\n ):\n mle_analyses[key][\"fitted point source\"] = FittedPointSourceSpectralHandler(\n mle_analyses[key][\"analysis\"],\n mle_analyses[key][\"source\"],\n energy_range,\n 
energy_unit,\n flux_unit,\n confidence_level,\n equal_tailed=equal_tailed,\n is_differential_flux=differential,\n )\n\n num_sources_to_use += 1\n\n # see if there are any components to use\n\n if use_components:\n\n num_components_to_use = 0\n\n component_dict = {}\n\n for component in mle_analyses[key][\"component_names\"]:\n\n # if we want to plot all the components\n\n if not components_to_use:\n\n component_dict[component] = FittedPointSourceSpectralHandler(\n mle_analyses[key][\"analysis\"],\n mle_analyses[key][\"source\"],\n energy_range,\n energy_unit,\n flux_unit,\n confidence_level,\n equal_tailed,\n component=component,\n is_differential_flux=differential,\n )\n\n num_components_to_use += 1\n\n else:\n\n # otherwise pick off only the ones of interest\n\n if component in components_to_use:\n component_dict[component] = FittedPointSourceSpectralHandler(\n mle_analyses[key][\"analysis\"],\n mle_analyses[key][\"source\"],\n energy_range,\n energy_unit,\n flux_unit,\n confidence_level,\n equal_tailed,\n component=component,\n is_differential_flux=differential,\n )\n\n num_components_to_use += 1\n\n # save these to the dict\n\n mle_analyses[key][\"components\"] = component_dict\n\n # keep track of how many components we need to plot\n\n if use_components:\n\n num_sources_to_use += num_components_to_use\n\n if \"total\" in components_to_use:\n num_sources_to_use += 1\n\n # else:\n #\n # num_sources_to_use += 1\n\n # repeat for the bayes analyses\n\n if bayesian_analyses:\n \n for key in tqdm(list(bayesian_analyses.keys()), desc=\"processing Bayesian analyses\"):\n\n # if we have a source to use\n\n if (\n not use_components\n or (\"total\" in components_to_use)\n or (not bayesian_analyses[key][\"component_names\"])\n ):\n bayesian_analyses[key][\n \"fitted point source\"\n ] = FittedPointSourceSpectralHandler(\n bayesian_analyses[key][\"analysis\"],\n bayesian_analyses[key][\"source\"],\n energy_range,\n energy_unit,\n flux_unit,\n confidence_level,\n equal_tailed,\n is_differential_flux=differential,\n )\n\n num_sources_to_use += 1\n\n # if we want to use components\n\n if use_components:\n\n num_components_to_use = 0\n\n component_dict = {}\n\n for component in bayesian_analyses[key][\"component_names\"]:\n\n # extracting all components\n\n if not components_to_use:\n component_dict[component] = FittedPointSourceSpectralHandler(\n bayesian_analyses[key][\"analysis\"],\n bayesian_analyses[key][\"source\"],\n energy_range,\n energy_unit,\n flux_unit,\n confidence_level,\n equal_tailed,\n component=component,\n is_differential_flux=differential,\n )\n\n num_components_to_use += 1\n\n # or just some of them\n\n if component in components_to_use:\n component_dict[component] = FittedPointSourceSpectralHandler(\n bayesian_analyses[key][\"analysis\"],\n bayesian_analyses[key][\"source\"],\n energy_range,\n energy_unit,\n flux_unit,\n confidence_level,\n equal_tailed,\n component=component,\n is_differential_flux=differential,\n )\n\n num_components_to_use += 1\n\n bayesian_analyses[key][\"components\"] = component_dict\n\n # keep track of everything we added on\n\n if use_components and num_components_to_use > 0:\n\n num_sources_to_use += num_components_to_use\n\n if \"total\" in components_to_use:\n num_sources_to_use += 1\n #\n # else:\n #\n # num_sources_to_use += 1\n\n # we may have the same source in a bayesian and mle analysis.\n # we want to plot them, but make sure to label them differently.\n # so let's keep track of them\n\n duplicate_keys = []\n\n for key in 
list(mle_analyses.keys()):\n\n if key in list(bayesian_analyses.keys()):\n duplicate_keys.append(key)\n\n return mle_analyses, bayesian_analyses, num_sources_to_use, duplicate_keys\n\n\ndef _collect_sums_into_dictionaries(analyses, use_components, components_to_use):\n \"\"\"\n\n :param analyses:\n :param use_components:\n :param components_to_use:\n :return:\n \"\"\"\n\n total_analysis = []\n\n component_sum_dict = collections.OrderedDict()\n\n num_sources_to_use = 0\n\n for key in list(analyses.keys()):\n\n # we won't assume to plot the total until the end\n\n use_total = False\n\n if use_components:\n\n # append all the components we want to sum to their\n # own key\n\n if (not list(analyses[key][\"components\"].keys())) or (\n \"total\" in components_to_use\n ):\n use_total = True\n\n for component in list(analyses[key][\"components\"].keys()):\n component_sum_dict.setdefault(component, []).append(\n analyses[key][\"components\"][component]\n )\n\n else:\n\n use_total = True\n\n if use_total:\n # append the total spectrum\n\n total_analysis.append(analyses[key][\"fitted point source\"])\n\n if use_components:\n\n for key, values in list(component_sum_dict.items()):\n num_sources_to_use += len(values)\n\n num_sources_to_use += len(total_analysis)\n\n return total_analysis, component_sum_dict, num_sources_to_use\n\n\ndef _append_best_fit_and_errors(\n samples, _defaults, label, fluxes, p_errors, n_errors, labels\n):\n if _defaults[\"best_fit\"] == \"average\":\n\n best_fit = samples.average[0, 0]\n\n else:\n\n best_fit = samples.median[0, 0]\n\n positive_error = samples.upper_error[0, 0]\n\n negative_error = samples.lower_error[0, 0]\n\n fluxes.append(best_fit)\n p_errors.append(positive_error)\n n_errors.append(negative_error)\n labels.append(label)\n\n\ndef _compute_output(analyses, _defaults, out):\n fluxes = []\n p_errors = []\n n_errors = []\n labels = []\n\n # go thru the mle analysis and get the fluxes\n for key in list(analyses.keys()):\n\n # we won't assume to plot the total until the end\n\n get_total = False\n\n if _defaults[\"use_components\"]:\n\n # if this source has no components or none that we wish to plot\n # then we will get the total flux after this\n\n if (not list(analyses[key][\"components\"].keys())) or (\n \"total\" in _defaults[\"components_to_use\"]\n ):\n get_total = True\n\n for component in list(analyses[key][\"components\"].keys()):\n # extract the information and plot it\n\n samples = analyses[key][\"components\"][component]\n\n label = \"%s: %s\" % (key, component)\n\n _append_best_fit_and_errors(\n samples, _defaults, label, fluxes, p_errors, n_errors, labels\n )\n\n else:\n\n get_total = True\n\n if get_total:\n # it ends up that we need to plot the total spectrum\n # which is just a repeat of the process\n\n samples = analyses[key][\"fitted point source\"]\n\n label = \"%s: total\" % key\n\n _append_best_fit_and_errors(\n samples, _defaults, label, fluxes, p_errors, n_errors, labels\n )\n\n if fluxes:\n # now make a data frame\n\n mle_df = pd.DataFrame(\n {\"flux\": fluxes, \"low bound\": n_errors, \"hi bound\": p_errors}, index=labels\n )\n mle_df = mle_df[[\"flux\", \"low bound\", \"hi bound\"]]\n mle_df = mle_df[[\"flux\", \"low bound\", \"hi bound\"]]\n out.append(mle_df)\n\n # display(mle_df)\n\n else:\n\n out.append(None)\n\n\ndef _compute_output_with_components(_defaults, component_sum_dict, total_analysis, out):\n\n fluxes = []\n n_errors = []\n p_errors = []\n labels = []\n\n if _defaults[\"use_components\"] and 
list(component_sum_dict.keys()):\n\n # we have components to calculate\n\n for component, values in list(component_sum_dict.items()):\n\n summed_analysis = sum(values)\n\n if _defaults[\"best_fit\"] == \"average\":\n\n best_fit = summed_analysis.average[0, 0]\n\n else:\n\n best_fit = summed_analysis.median[0, 0]\n\n positive_error = summed_analysis.upper_error[0, 0]\n\n negative_error = summed_analysis.lower_error[0, 0]\n\n label = component\n\n fluxes.append(best_fit)\n p_errors.append(positive_error)\n n_errors.append(negative_error)\n labels.append(label)\n\n if total_analysis:\n\n summed_analysis = sum(total_analysis)\n\n if _defaults[\"best_fit\"] == \"average\":\n\n best_fit = summed_analysis.average[0, 0]\n\n else:\n\n best_fit = summed_analysis.median[0, 0]\n\n positive_error = summed_analysis.upper_error[0, 0]\n\n negative_error = summed_analysis.lower_error[0, 0]\n\n label = \"total\"\n\n fluxes.append(best_fit)\n p_errors.append(positive_error)\n n_errors.append(negative_error)\n labels.append(label)\n\n if fluxes:\n # now make a data frame\n\n df = pd.DataFrame(\n {\"flux\": fluxes, \"low bound\": n_errors, \"hi bound\": p_errors}, index=labels\n )\n df = df[[\"flux\", \"low bound\", \"hi bound\"]]\n out.append(df)\n\n # display(df)\n\n else:\n\n out.append(None)\n\n\ndef calculate_point_source_flux(*args, **kwargs):\n\n log.error(\n \"The use of calculate_point_source_flux is deprecated. Please use the .get_point_source_flux()\"\n \" method of the JointLikelihood.results or the BayesianAnalysis.results member. For example:\"\n \" jl.results.get_point_source_flux().\"\n )\n\n return _calculate_point_source_flux(*args, **kwargs)\n\n\ndef _calculate_point_source_flux(ene_min, ene_max, *analyses, **kwargs):\n \"\"\"\n\n :param ene_min: lower energy bound for the flux\n :param ene_max: upper energy bound for the flux\n :param analyses: fitted JointLikelihood or BayesianAnalysis objects\n :param sources_to_use: (optional) list of PointSource string names to plot from the analysis\n :param energy_unit: (optional) astropy energy unit in string form (can also be frequency)\n :param flux_unit: (optional) astropy flux unit in string form\n :param ene_min: (optional) minimum energy to plot\n :param ene_max: (optional) maximum energy to plot\n :param use_components: (optional) True or False to plot the spectral components\n :param components_to_use: (optional) list of string names of the components to plot: including 'total'\n will also plot the total spectrum\n :param include_extended: (optional) if True, plot extended source spectra (spatially integrated) as well.\n\n :return: mle_dataframe, bayes_dataframe\n \"\"\"\n\n _defaults = {\n \"confidence_level\": 0.68,\n \"equal_tailed\": True,\n \"best_fit\": \"median\",\n \"energy_unit\": \"keV\",\n \"flux_unit\": \"erg/(s cm2)\",\n \"ene_min\": ene_min,\n \"ene_max\": ene_max,\n \"use_components\": False,\n \"components_to_use\": [],\n \"sources_to_use\": [],\n \"sum_sources\": False,\n \"include_extended\": False,\n }\n\n for key, value in list(kwargs.items()):\n\n if key in _defaults:\n _defaults[key] = value\n\n # set up the integral limits\n\n energy_range = np.array([_defaults[\"ene_min\"], _defaults[\"ene_max\"]])\n\n mle_analyses, bayesian_analyses, _, _ = _setup_analysis_dictionaries(\n analyses,\n energy_range,\n _defaults[\"energy_unit\"],\n _defaults[\"flux_unit\"],\n _defaults[\"use_components\"],\n _defaults[\"components_to_use\"],\n _defaults[\"confidence_level\"],\n _defaults[\"equal_tailed\"],\n differential=False,\n 
sources_to_use=_defaults[\"sources_to_use\"],\n include_extended=_defaults[\"include_extended\"],\n )\n\n out = []\n\n if not _defaults[\"sum_sources\"]:\n\n # Process the MLE analyses\n\n _compute_output(mle_analyses, _defaults, out)\n\n # now do the bayesian side\n\n _compute_output(bayesian_analyses, _defaults, out)\n\n else:\n\n # instead we now sum the fluxes\n # we keep bayes and mle apart\n\n total_analysis_mle, component_sum_dict_mle, _ = _collect_sums_into_dictionaries(\n mle_analyses, _defaults[\"use_components\"], _defaults[\"components_to_use\"]\n )\n\n _compute_output_with_components(\n _defaults, component_sum_dict_mle, total_analysis_mle, out\n )\n\n # now do the bayesian side\n\n (\n total_analysis_bayes,\n component_sum_dict_bayes,\n _,\n ) = _collect_sums_into_dictionaries(\n bayesian_analyses,\n _defaults[\"use_components\"],\n _defaults[\"components_to_use\"],\n )\n\n _compute_output_with_components(\n _defaults, component_sum_dict_bayes, total_analysis_bayes, out\n )\n\n return out\n",
"import numpy as np\nimport os\nimport pytest\nimport warnings\n\nfrom threeML.io.package_data import get_path_of_data_file\nfrom threeML.utils.OGIP.response import (\n InstrumentResponseSet,\n InstrumentResponse,\n OGIPResponse,\n)\nfrom threeML.utils.time_interval import TimeInterval\n\n\ndef get_matrix_elements():\n\n # In[5]: np.diagflat([1, 2, 3, 4])[:3, :]\n\n matrix = np.diagflat([1.0, 2.0, 3.0, 4.0])[:3, :]\n\n # Now matrix is:\n # array([[1, 0, 0, 0],\n # [0, 2, 0, 0],\n # [0, 0, 3, 0]])\n\n mc_energies = [1.0, 2.0, 3.0, 4.0, 5.0]\n\n ebounds = [1.0, 2.5, 4.5, 5.0]\n\n return matrix, mc_energies, ebounds\n\n\ndef get_matrix_set_elements():\n\n matrix, mc_energies, ebounds = get_matrix_elements()\n\n rsp_a = InstrumentResponse(matrix, ebounds, mc_energies)\n\n # Make another matrix with the same matrix but divided by 2\n other_matrix = matrix / 2.0\n\n rsp_b = InstrumentResponse(other_matrix, ebounds, mc_energies)\n\n # Remember: the second matrix is like the first one divided by two, and it covers twice as much time.\n # They cover 0-10 s the first one, and 10-30 the second one.\n\n # Fake an exposure getter by using a fixed 10% deadtime\n livetime_fraction = 0.9\n exposure_getter = lambda t1, t2: livetime_fraction * (t2 - t1)\n\n # Fake a count getter\n law = lambda x: 1.23 * x\n # The counts getter is the integral of the law\n counts_getter = (lambda t1, t2: 1.23 * 0.5 *\n (t2**2.0 - t1**2.0) * livetime_fraction)\n\n return [rsp_a, rsp_b], exposure_getter, counts_getter\n\n\ndef get_matrix_set_elements_with_coverage(reference_time=0.0):\n\n [rsp_a, rsp_b], exposure_getter, counts_getter = get_matrix_set_elements()\n\n # By making the coverage interval twice for the second matrix we restore parity with the first one,\n # so that the weighting by exposure should simply return the first matrix\n\n rsp_a._coverage_interval = TimeInterval(0.0, 10.0) + reference_time\n rsp_b._coverage_interval = TimeInterval(10.0, 30.0) + reference_time\n\n return [rsp_a, rsp_b], exposure_getter, counts_getter\n\n\ndef test_instrument_response_constructor():\n\n # Make a fake test matrix\n\n matrix, mc_energies, ebounds = get_matrix_elements()\n\n rsp = InstrumentResponse(matrix, ebounds, mc_energies)\n\n assert np.all(rsp.matrix == matrix)\n assert np.all(rsp.ebounds == ebounds)\n assert np.all(rsp.monte_carlo_energies == mc_energies)\n\n # Now with coverage interval\n\n with pytest.raises(RuntimeError):\n\n _ = InstrumentResponse(matrix, ebounds, mc_energies, \"10-20\")\n\n rsp = InstrumentResponse(matrix, ebounds, mc_energies, TimeInterval(10.0, 20.0))\n\n assert rsp.rsp_filename is None\n assert rsp.arf_filename is None\n assert rsp.coverage_interval == TimeInterval(10.0, 20.0)\n\n # Check that we do not accept nans in the matrix\n matrix[2, 2] = np.nan\n\n with pytest.raises(RuntimeError):\n\n _ = InstrumentResponse(matrix, ebounds, mc_energies, \"10-20\")\n\n\ndef test_instrument_response_replace_matrix():\n\n matrix, mc_energies, ebounds = get_matrix_elements()\n\n rsp = InstrumentResponse(matrix, ebounds, mc_energies)\n\n new_matrix = matrix / 2.0\n\n rsp.replace_matrix(new_matrix)\n\n assert np.all(rsp.matrix == new_matrix)\n\n with pytest.raises(RuntimeError):\n\n rsp.replace_matrix(np.random.uniform(0, 1, 100).reshape(10, 10))\n\n\ndef test_instrument_response_set_function_and_convolve():\n\n # A very basic test. 
More tests will be made against XSpec later\n\n matrix, mc_energies, ebounds = get_matrix_elements()\n\n rsp = InstrumentResponse(matrix, ebounds, mc_energies)\n\n # Integral of a constant, so we know easily what the output should be\n\n #integral_function = lambda e1, e2: e2 - e1\n\n def integral_function():\n return np.array(mc_energies)[1:] - np.array(mc_energies)[:-1]\n\n \n rsp.set_function(integral_function)\n\n folded_counts = rsp.convolve()\n\n assert np.all(folded_counts == [1.0, 2.0, 3.0])\n\n\ndef test__instrument_response_energy_to_channel():\n\n matrix, mc_energies, ebounds = get_matrix_elements()\n\n rsp = InstrumentResponse(matrix, ebounds, mc_energies)\n\n assert rsp.energy_to_channel(1.5) == 0\n assert rsp.energy_to_channel(2.6) == 1\n assert rsp.energy_to_channel(4.75) == 2\n assert rsp.energy_to_channel(100.0) == 3\n\n\ndef test_instrument_response_plot_response():\n\n matrix, mc_energies, ebounds = get_matrix_elements()\n\n rsp = InstrumentResponse(matrix, ebounds, mc_energies)\n\n rsp.plot_matrix()\n\n\ndef test_OGIP_response_first_channel():\n\n # Get path of response file\n rsp_file = get_path_of_data_file(\"ogip_test_gbm_n6.rsp\")\n\n rsp = OGIPResponse(rsp_file)\n\n assert rsp.first_channel == 1\n\n\ndef test_OGIP_response_arf_rsp_accessors():\n\n # Then load rsp and arf in XSpec\n\n rsp_file = get_path_of_data_file(\"ogip_test_xmm_pn.rmf\")\n\n arf_file = get_path_of_data_file(\"ogip_test_xmm_pn.arf\")\n\n rsp = OGIPResponse(rsp_file, arf_file=arf_file)\n\n assert rsp.arf_filename == arf_file\n assert rsp.rsp_filename == rsp_file\n\n\ndef test_response_write_to_fits1():\n\n matrix, mc_energies, ebounds = get_matrix_elements()\n\n rsp = InstrumentResponse(matrix, ebounds, mc_energies)\n\n temp_file = \"__test.rsp\"\n\n rsp.to_fits(temp_file, \"TEST\", \"TEST\", overwrite=True)\n\n # Now check that reloading gives back the same matrix\n rsp_reloaded = OGIPResponse(temp_file)\n\n assert np.allclose(rsp_reloaded.matrix, rsp.matrix)\n assert np.allclose(rsp_reloaded.ebounds, rsp.ebounds)\n assert np.allclose(rsp_reloaded.monte_carlo_energies, rsp.monte_carlo_energies)\n\n os.remove(temp_file)\n\n\ndef test_response_write_to_fits2():\n\n # Now do the same for a response read from a file\n\n rsp_file = get_path_of_data_file(\"ogip_test_gbm_n6.rsp\")\n\n rsp = OGIPResponse(rsp_file)\n\n temp_file = \"__test.rsp\"\n\n rsp.to_fits(temp_file, \"TEST\", \"TEST\", overwrite=True)\n\n rsp_reloaded = OGIPResponse(temp_file)\n\n assert np.allclose(rsp_reloaded.matrix, rsp.matrix)\n assert np.allclose(rsp_reloaded.ebounds, rsp.ebounds)\n assert np.allclose(rsp_reloaded.monte_carlo_energies, rsp.monte_carlo_energies)\n\n os.remove(temp_file)\n\n\ndef test_response_write_to_fits3():\n\n # Now do the same for a file with a ARF\n\n rsp_file = get_path_of_data_file(\"ogip_test_xmm_pn.rmf\")\n\n arf_file = get_path_of_data_file(\"ogip_test_xmm_pn.arf\")\n\n rsp = OGIPResponse(rsp_file, arf_file=arf_file)\n\n temp_file = \"__test.rsp\"\n\n rsp.to_fits(temp_file, \"TEST\", \"TEST\", overwrite=True)\n\n rsp_reloaded = OGIPResponse(temp_file)\n\n assert np.allclose(rsp_reloaded.matrix, rsp.matrix)\n assert np.allclose(rsp_reloaded.ebounds, rsp.ebounds)\n assert np.allclose(rsp_reloaded.monte_carlo_energies, rsp.monte_carlo_energies)\n\n os.remove(temp_file)\n\n\ndef test_response_set_constructor():\n\n [rsp_aw, rsp_bw], exposure_getter, counts_getter = get_matrix_set_elements()\n\n with pytest.raises(RuntimeError):\n\n # This should raise because there is no time information for the 
matrices\n\n _ = InstrumentResponseSet([rsp_aw, rsp_bw], exposure_getter, counts_getter)\n\n # Add the time information\n\n (\n [rsp_a, rsp_b],\n exposure_getter,\n counts_getter,\n ) = get_matrix_set_elements_with_coverage()\n\n # This should work now\n rsp_set = InstrumentResponseSet([rsp_a, rsp_b], exposure_getter, counts_getter)\n\n assert rsp_set[0] == rsp_a\n assert rsp_set[1] == rsp_b\n\n # Check that the constructor order the matrices by time when needed\n # This should work now\n rsp_set = InstrumentResponseSet([rsp_b, rsp_a], exposure_getter, counts_getter)\n\n assert rsp_set[0] == rsp_a\n assert rsp_set[1] == rsp_b\n\n # Now test construction from the .from_rsp2 method\n rsp2_file = get_path_of_data_file(\"ogip_test_gbm_b0.rsp2\")\n\n with warnings.catch_warnings():\n\n warnings.simplefilter(\"error\", np.VisibleDeprecationWarning)\n\n rsp_set = InstrumentResponseSet.from_rsp2_file(\n rsp2_file, exposure_getter, counts_getter\n )\n\n assert len(rsp_set) == 3\n\n # Now test that we cannot initialize a response set with matrices which have non-contiguous coverage intervals\n matrix, mc_energies, ebounds = get_matrix_elements()\n\n rsp_c = InstrumentResponse(matrix, ebounds, mc_energies, TimeInterval(0.0, 10.0))\n rsp_d = InstrumentResponse(matrix, ebounds, mc_energies, TimeInterval(20.0, 30.0))\n\n with pytest.raises(RuntimeError):\n\n _ = InstrumentResponseSet([rsp_c, rsp_d], exposure_getter, counts_getter)\n\n\ndef test_response_set_weighting():\n\n (\n [rsp_a, rsp_b],\n exposure_getter,\n counts_getter,\n ) = get_matrix_set_elements_with_coverage()\n\n rsp_set = InstrumentResponseSet([rsp_a, rsp_b], exposure_getter, counts_getter)\n\n # here we are waiting by exposure. We have:\n\n # weight1 = (0.9 * 5.0) = 4.5\n # weight2 = (0.9 * 15.0) = 13.5\n # sum = weight1 + weight2 = 18.0\n # new_matrix = rsp_a * weight1/sum + rsp_b * weight2 / sum\n\n # but rsp_b = rsp_a / 2.0, so:\n\n # new_matrix = rsp_a * weight1 / sum + rsp_a / 2.0 * weight2 / sum = 1 / sum * rsp_a * (weight1 + weight2 / 2.0)\n\n # so in the end:\n\n # new_matrix = 0.625 * rsp_a\n\n weighted_matrix = rsp_set.weight_by_exposure(\"5.0 - 25.0\")\n\n assert np.allclose(weighted_matrix.matrix, 0.625 * rsp_a.matrix)\n\n # here we are waiting by exposure. 
We have:\n\n # weight1 = 55.35\n # weight2 = 442.8\n\n # so:\n\n # new_matrix = 1 / sum * rsp_a * (weight1 + weight2 / 2.0) = 0.5555555555555555 * rsp_a\n\n weighted_matrix = rsp_set.weight_by_counts(\"0.0 - 30.0\")\n\n assert np.allclose(weighted_matrix.matrix, 0.5555555555555555 * rsp_a.matrix)\n\n # Here we weight by counts in the interval 5.0 - 25.0\n # With the same math as before:\n\n weighted_matrix = rsp_set.weight_by_counts(\"5.0 - 25.0\")\n\n assert np.allclose(weighted_matrix.matrix, 0.5625000000000001 * rsp_a.matrix)\n\n\ndef test_response_set_weighting_with_reference_time():\n\n # Now repeat the same tests but using a reference time\n ref_time = 123.456\n\n (\n [rsp_a, rsp_b],\n exposure_getter,\n counts_getter,\n ) = get_matrix_set_elements_with_coverage(reference_time=ref_time)\n\n rsp_set = InstrumentResponseSet(\n [rsp_a, rsp_b], exposure_getter, counts_getter, reference_time=ref_time\n )\n\n assert rsp_set.reference_time == ref_time\n\n weighted_matrix = rsp_set.weight_by_exposure(\"5.0 - 25.0\")\n\n assert np.allclose(weighted_matrix.matrix, 0.625 * rsp_a.matrix)\n\n weighted_matrix = rsp_set.weight_by_counts(\"0.0 - 30.0\")\n\n assert np.allclose(weighted_matrix.matrix, 0.5555555555555555 * rsp_a.matrix)\n\n weighted_matrix = rsp_set.weight_by_counts(\"5.0 - 25.0\")\n\n assert np.allclose(weighted_matrix.matrix, 0.5625000000000001 * rsp_a.matrix)\n\n\ndef test_response_set_weighting_with_disjoint_intervals():\n\n ref_time = 123.456\n\n (\n [rsp_a, rsp_b],\n exposure_getter,\n counts_getter,\n ) = get_matrix_set_elements_with_coverage(reference_time=ref_time)\n\n rsp_set = InstrumentResponseSet(\n [rsp_a, rsp_b], exposure_getter, counts_getter, reference_time=ref_time\n )\n\n assert rsp_set.reference_time == ref_time\n\n weighted_matrix = rsp_set.weight_by_exposure(\"5.0 - 12.0\", \"25.0-28.0\")\n\n # weight1 = (0.9 * 5.0) = 4.5\n # weight2 = (0.9 * 2.0) = 1.8\n # weight3 = (0.9 * 3.0) = 2.7\n # sum = weight1 + weight2 + weight3 = 8.2\n # new_matrix = rsp_a * weight1/sum + rsp_b * weight2 / sum + rsp_b * weight3 / sum\n\n # but rsp_b = rsp_a / 2.0, so:\n\n # new_matrix = rsp_a * weight1 / sum + rsp_a / 2.0 * weight2 / sum + rsp_a / 2.0 * weight3 / sum\n\n # so in the end:\n\n # new_matrix = 1.0 / (w1 + w2 + w3) * (w1 + w2 / 2.0 + w3 / 2.0) * rsp_a = 0.75 * rsp_a\n\n assert np.allclose(weighted_matrix.matrix, 0.75 * rsp_a.matrix)\n\n # Now the same with counts\n\n weighted_matrix = rsp_set.weight_by_counts(\"5.0 - 12.0\", \"25.0-28.0\")\n\n w1 = counts_getter(5.0, 10.0)\n w2 = counts_getter(10.0, 12.0)\n w3 = counts_getter(25.0, 28.0)\n\n factor = 1.0 / (w1 + w2 + w3) * (w1 + w2 / 2.0 + w3 / 2.0)\n\n assert np.allclose(weighted_matrix.matrix, factor * rsp_a.matrix)\n",
"__author__ = \"grburgess <J. Michael Burgess>\"\n\nfrom astromodels import DiracDelta, StepFunctionUpper\nimport numpy as np\n\n\ndef step_generator(intervals, parameter):\n \"\"\"\n\n Generates sum of step or dirac delta functions for the given intervals\n and parameter. This can be used to link time-independent parameters\n of a model to time.\n\n If the intervals provided are 1-D, i.e, they are the means of time bins or\n the TOA of photons, then a sum of dirac deltas is returned with their centers\n at the times provided\n\n If the intervals are 2-D (start, stop), sum of step functions is created with\n the bounds at the start and stop times of the interval.\n\n The parameter is used to set the bounds and initial value, min, max of the\n non-zero points of the functions\n\n :param intervals: an array of the 1- or 2-D intervals to be used\n :param parameter: astromodels parameter\n \"\"\"\n\n intervals = np.atleast_2d(intervals)\n\n # need to make sure the shape is right\n # assert self._intervals.shape\n\n # Check if the interval is 2D or 1D\n if intervals.shape[0] > 1 and intervals.shape[1] == 2:\n\n n_intervals = intervals.shape[0]\n\n is_2d = True\n\n elif intervals.shape[0] == 1:\n\n n_intervals = intervals.shape[1]\n intervals = intervals[0]\n\n is_2d = False\n\n else:\n\n raise RuntimeError(\"These intervals are not yet supported\")\n\n # Copy the parameter values\n parameter_min = parameter.min_value\n parameter_max = parameter.max_value\n initial_value = parameter.value\n\n if is_2d:\n\n # For 2D intervals, we grab a step function\n\n func = StepFunctionUpper()\n\n # Sum up the functions\n\n for i in range(n_intervals - 1):\n\n func += StepFunctionUpper()\n\n # Go through and iterate over intervals to set the parameter values\n\n for i, interval in enumerate(intervals):\n\n i = i + 1\n\n func.free_parameters[\"value_%d\" % i].value = initial_value\n func.free_parameters[\"value_%d\" % i].min_value = parameter_min\n func.free_parameters[\"value_%d\" % i].max_value = parameter_max\n\n func.parameters[\"upper_bound_%d\" % i].value = interval[1]\n\n func.parameters[\"lower_bound_%d\" % i].value = interval[0]\n\n else:\n\n # For 1-D intervals, just create a sum of delta functions\n\n func = DiracDelta()\n\n for i in range(n_intervals - 1):\n\n func += DiracDelta()\n\n # Set up the values\n\n for i, interval in enumerate(intervals):\n\n i = i + 1\n\n func.free_parameters[\"value_%d\" % i].value = initial_value\n func.free_parameters[\"value_%d\" % i].min_value = parameter_min\n func.free_parameters[\"value_%d\" % i].max_value = parameter_max\n\n func.parameters[\"zero_point_%d\" % i].value = interval\n\n return func\n",
"from builtins import object\nimport pandas as pd\nimport numpy as np\nfrom threeML.io.table import long_path_formatter\nfrom threeML.io.rich_display import display\nfrom threeML.io.uncertainty_formatter import uncertainty_formatter\n\n\nclass ResultsTable(object):\n def __init__(\n self, parameter_paths, values, negative_errors, positive_errors, units\n ):\n\n values_s = pd.Series([], dtype=np.float64)\n negative_error_s = pd.Series([], dtype=np.float64)\n positive_error_s = pd.Series([], dtype=np.float64)\n units_s = pd.Series([], dtype=np.float64)\n\n for i, this_path in enumerate(parameter_paths):\n\n # Check if this parameter has a dex() unit, i.e., if it is in log10 scale\n # If it is, we display the transformed value, not the logarithm\n\n units_s[this_path] = units[i]\n\n if units_s[this_path].to_string().find(\"dex\") < 0:\n\n # A normal parameter\n values_s[this_path] = values[i]\n negative_error_s[this_path] = negative_errors[i]\n positive_error_s[this_path] = positive_errors[i]\n\n else:\n\n # A dex() parameter (logarithmic parameter)\n values_s[this_path] = 10 ** values[i]\n negative_error_s[this_path] = (\n 10 ** (values[i] + negative_errors[i]) - values_s[this_path]\n )\n positive_error_s[this_path] = (\n 10 ** (values[i] + positive_errors[i]) - values_s[this_path]\n )\n\n self._data_frame = pd.DataFrame()\n self._data_frame[\"value\"] = values_s\n self._data_frame[\"negative_error\"] = negative_error_s\n self._data_frame[\"positive_error\"] = positive_error_s\n self._data_frame[\"error\"] = (\n np.abs(negative_error_s.values) + positive_error_s.values\n ) / 2.0\n self._data_frame[\"unit\"] = units_s\n\n @property\n def frame(self):\n\n return self._data_frame\n\n def display(self, key_formatter=long_path_formatter):\n def row_formatter(row):\n\n value = row[\"value\"]\n lower_bound = value + row[\"negative_error\"]\n upper_bound = value + row[\"positive_error\"]\n\n pretty_string = uncertainty_formatter(value, lower_bound, upper_bound)\n\n return pretty_string\n\n # Make another data frame with the keys\n new_frame = self._data_frame.copy(deep=True) # type: pd.DataFrame\n\n # Add new column which will become the new index\n new_frame[\"parameter\"] = [key_formatter(x) for x in new_frame.index.values]\n\n # Set it as the index\n new_frame.set_index(\"parameter\", drop=True, inplace=True)\n\n # compute the display\n new_frame[\"result\"] = new_frame.apply(row_formatter, axis=1)\n\n # Display\n\n display(new_frame[[\"result\", \"unit\"]])\n"
] | [
[
"numpy.alltrue",
"numpy.ones"
],
[
"pandas.DataFrame"
],
[
"numpy.array",
"pandas.DataFrame"
],
[
"numpy.allclose",
"numpy.diagflat",
"numpy.all",
"numpy.random.uniform",
"numpy.array"
],
[
"numpy.atleast_2d"
],
[
"numpy.abs",
"pandas.Series",
"pandas.DataFrame"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"0.19",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
benneely/lungmap-pipeline | [
"a38a6d1331468834280ce1ac41f30c76ee553ed4",
"a38a6d1331468834280ce1ac41f30c76ee553ed4"
] | [
"examples/run_pipeline_60x.py",
"eval/evaluation.py"
] | [
"import os\nimport numpy as np\nfrom micap import pipeline\nfrom glob import glob\nfrom PIL import Image\nimport cv2_extras as cv2x\n\n# weird import style to un-confuse PyCharm\ntry:\n from cv2 import cv2\nexcept ImportError:\n import cv2\n\ncell_radius = 17 * 3\ncell_size = np.pi * (cell_radius ** 2)\n\nseg_config = [\n {\n 'type': 'color',\n 'args': {\n 'blur_kernel': (51, 51),\n 'min_size': 3 * cell_size,\n 'max_size': None,\n 'colors': ['green', 'cyan', 'red', 'violet', 'yellow']\n }\n },\n {\n 'type': 'saturation',\n 'args': {'blur_kernel': (71, 71), 'min_size': 12 * cell_size, 'max_size': None}\n },\n {\n 'type': 'saturation',\n 'args': {'blur_kernel': (53, 53), 'min_size': 3 * cell_size, 'max_size': 45 * cell_size}\n },\n {\n 'type': 'saturation',\n 'args': {'blur_kernel': (35, 35), 'min_size': 3 * cell_size, 'max_size': 45 * cell_size}\n },\n {\n 'type': 'saturation',\n 'args': {'blur_kernel': (17, 17), 'min_size': 3 * cell_size, 'max_size': 45 * cell_size}\n }\n]\n\nimage_set_dir = 'mm_e16.5_60x_sox9_sftpc_acta2'\n\n# make our 'tmp' directory for caching trained & tested pipeline instances\nif not os.path.isdir('tmp'):\n os.mkdir('tmp')\n\n\noutput_path = os.path.join(\n 'tmp',\n '_'.join([image_set_dir, 'pipeline'])\n)\nimage_set_path = os.path.join('data', image_set_dir)\n\nimage_paths = glob(os.path.join(image_set_path, '*.tif'))\n\ntmp_image = Image.open(image_paths[2])\ntmp_image = np.asarray(tmp_image)\ntmp_image = cv2.cvtColor(tmp_image, cv2.COLOR_RGB2HSV)\n\n# and pipeline test steps\ncandidate_contours = pipeline.generate_structure_candidates(\n tmp_image,\n seg_config,\n filter_min_size=3 * cell_size,\n plot=True\n)\ncv2x.plot_contours(tmp_image, candidate_contours)\n# test_data_processed = pipeline.process_test_data(test_img_hsv, candidate_contours)\n\n# plot functions\n# pipeline.plot_test_results(test_img_hsv, candidate_contours, pred_results, output_path)\n\n# optional cell segmentation\n# utils.process_structures_into_cells(\n# test_img_hsv,\n# os.path.join(output_path, 'regions'),\n# candidate_contours,\n# plot=False\n# )\n",
"import json\nfrom PIL import Image\nimport numpy as np\nimport pandas as pd\nfrom operator import itemgetter\nimport os\nfrom matplotlib import patches\nimport matplotlib.pyplot as plt\n\n# this is just to un-confuse pycharm\ntry:\n from cv2 import cv2\nexcept ImportError:\n import cv2\n\n\ndef get_training_data_for_image_set(image_set_dir):\n # Each image set directory will have a 'regions.json' file. This regions file\n # has keys of the image file names in the image set, and the value for each image\n # is a dict of class labels, and the value of those labels is a list of\n # segmented polygon regions.\n # First, we will read in this file and get the file names for our images\n regions_file = open(os.path.join(image_set_dir, 'regions.json'))\n regions_json = json.load(regions_file)\n regions_file.close()\n\n # output will be a dictionary of training data, were the polygon points dict\n # is a numpy array. The keys will still be the image names\n training_data = {}\n\n for image_name, regions_dict in regions_json.items():\n tmp_image = Image.open(os.path.join(image_set_dir, image_name))\n tmp_image = np.asarray(tmp_image)\n\n tmp_image = cv2.cvtColor(tmp_image, cv2.COLOR_RGB2HSV)\n\n training_data[image_name] = {\n 'hsv_img': tmp_image,\n 'regions': []\n }\n\n for label, regions in regions_dict.items():\n\n for region in regions:\n points = np.empty((0, 2), dtype='int')\n\n for point in region:\n points = np.append(points, [[point[0], point[1]]], axis=0)\n\n training_data[image_name]['regions'].append(\n {\n 'label': label,\n 'points': points\n }\n )\n\n return training_data\n\n\ndef compute_bbox(contour):\n x1, y1, w, h = cv2.boundingRect(contour)\n\n return [x1, y1, x1 + w, y1 + h]\n\n\ndef do_boxes_overlap(box1, box2):\n # if the maximum of both boxes left corner is greater than the\n # minimum of both boxes right corner, the boxes cannot overlap\n max_x_left = max([box1[0], box2[0]])\n min_x_right = min([box1[2], box2[2]])\n\n if min_x_right < max_x_left:\n return False\n\n # Likewise for the y-coordinates\n max_y_top = max([box1[1], box2[1]])\n min_y_bottom = min([box1[3], box2[3]])\n\n if min_y_bottom < max_y_top:\n return False\n\n return True\n\n\ndef make_boolean_mask(contour, img_dims):\n mask = np.zeros(img_dims, dtype=np.uint8)\n cv2.drawContours(\n mask,\n [contour],\n 0,\n 255,\n cv2.FILLED\n )\n\n # return boolean array\n return mask > 0\n\n\ndef make_binary_mask(contour, img_dims):\n mask = np.zeros(img_dims, dtype=np.uint8)\n cv2.drawContours(\n mask,\n [contour],\n 0,\n 1,\n cv2.FILLED\n )\n\n # return boolean array\n return mask\n\n\ndef find_overlapping_regions(true_regions, test_regions):\n true_boxes = []\n true_classes = []\n test_boxes = []\n test_classes = []\n test_scores = []\n\n img_dims = true_regions['hsv_img'].shape[:2]\n\n for r in true_regions['regions']:\n true_boxes.append(compute_bbox(r['points']))\n true_classes.append(r['label'])\n\n for r in test_regions:\n test_boxes.append(compute_bbox(r['contour']))\n\n max_prob = max(r['prob'].items(), key=itemgetter(1))\n\n test_classes.append(max_prob[0])\n test_scores.append(max_prob[1])\n\n # now we are ready to find the overlaps, we'll keep track of them with a dictionary\n # where the keys are the true region's index. 
The values will be a dictionary of\n # overlapping test regions, organized into 2 groups:\n # - matching overlaps: overlaps where the test & true region labels agree\n # - non-matching overlaps: overlaps where the test & true region labels differ\n #\n # Each one of those groups will be keys with a value of another list of dictionaries,\n # with the overlapping test region index along with the IoU value.\n # There are 2 other cases to cover:\n # - true regions with no matching overlaps (i.e. missed regions)\n # - test regions with no matching overlaps (i.e. false positives)\n overlaps = {}\n true_match_set = set()\n test_match_set = set()\n\n for i, r1 in enumerate(true_boxes):\n true_mask = None # reset to None, will compute as needed\n\n for j, r2 in enumerate(test_boxes):\n if not do_boxes_overlap(r1, r2):\n continue\n\n # So you're saying there's a chance?\n # If we get here, there is a chance for an overlap but it is not guaranteed,\n # we'll need to check the contours' pixels\n if true_mask is None:\n # we've never tested against this contour yet, so render it\n true_mask = make_boolean_mask(true_regions['regions'][i]['points'], img_dims)\n\n # and render the test contour\n test_mask = make_boolean_mask(test_regions[j]['contour'], img_dims)\n\n intersect_mask = np.bitwise_and(true_mask, test_mask)\n intersect_area = intersect_mask.sum()\n\n if not intersect_area > 0:\n # the bounding boxes overlapped, but the contours didn't, skip it\n continue\n\n union_mask = np.bitwise_or(true_mask, test_mask)\n true_match_set.add(i)\n test_match_set.add(j)\n\n if i not in overlaps:\n overlaps[i] = {\n 'true_label': true_classes[i],\n 'true': [],\n 'false': []\n }\n\n test_result = {\n 'test_index': j,\n 'iou': intersect_area / union_mask.sum()\n }\n\n if true_classes[i] == test_classes[j]:\n overlaps[i]['true'].append(test_result)\n else:\n overlaps[i]['false'].append(test_result)\n\n missed_regions = true_match_set.symmetric_difference((range(0, len(true_boxes))))\n false_positives = test_match_set.symmetric_difference((range(0, len(test_boxes))))\n\n return {\n 'overlaps': overlaps,\n 'missed_regions': missed_regions,\n 'false_positives': false_positives\n }\n\n\ndef calc_recall(tps, fns):\n eps = np.spacing(1)\n recall = tps / (tps + fns + eps)\n return recall\n\n\ndef calc_precision(tps, fps):\n eps = np.spacing(1)\n precision = tps / (tps + fps + eps)\n return precision\n\n\ndef generate_iou_pred_matrices(true_regions, test_regions):\n true_boxes = []\n test_boxes = []\n img_dims = true_regions['hsv_img'].shape[:2]\n\n for r in true_regions['regions']:\n true_boxes.append(compute_bbox(r['points']))\n\n for r in test_regions:\n test_boxes.append(compute_bbox(r['points']))\n\n iou_mat = np.zeros((len(test_boxes), len(true_boxes)))\n pred_mat = iou_mat.copy()\n\n for i, r1 in enumerate(true_boxes):\n true_mask = None # reset to None, will compute as needed\n\n for j, r2 in enumerate(test_boxes):\n if not do_boxes_overlap(r1, r2):\n continue\n\n # So you're saying there's a chance?\n # If we get here, there is a chance for an overlap but it is not guaranteed,\n # we'll need to check the contours' pixels\n if true_mask is None:\n # we've never tested against this contour yet, so render it\n true_mask = make_boolean_mask(true_regions['regions'][i]['points'], img_dims)\n\n # and render the test contour\n test_mask = make_boolean_mask(test_regions[j]['points'], img_dims)\n\n intersect_mask = np.bitwise_and(true_mask, test_mask)\n intersect_area = intersect_mask.sum()\n\n if not intersect_area > 0:\n 
# the bounding boxes overlapped, but the contours didn't, skip it\n continue\n\n union_mask = np.bitwise_or(true_mask, test_mask)\n iou = intersect_area / union_mask.sum()\n iou_mat[j, i] = iou\n types, value = max(test_regions[j]['prob'].items(), key=itemgetter(1))\n if types == true_regions['regions'][i]['label']:\n pred_mat[j, i] = value\n return iou_mat, pred_mat\n\n\ndef generate_tp_fn_fp(iou_mat, pred_mat, iou_thresh=0.5, pred_thresh=0.25):\n tp = {}\n for i in reversed(list(np.argsort(pred_mat, axis=None))):\n predind, gtind = np.unravel_index(i, pred_mat.shape)\n if iou_mat[predind, gtind] > iou_thresh:\n # TODO optionally only add if the prediction isn't already in tp.values()?\n if pred_mat[predind, gtind] > pred_thresh:\n tp[gtind] = predind\n fn = set(range(iou_mat.shape[1])) - set(tp.keys())\n fp = set(range(iou_mat.shape[0])) - set(tp.values())\n return tp, fn, fp\n\n\ndef generate_dataframe_aggregation_tp_fn_fp(\n true_regions,\n test_regions,\n iou_mat,\n pred_mat,\n tp,\n fn,\n fp\n):\n class_names = set()\n for x in true_regions['regions']:\n class_names.add(x['label'])\n for x in test_regions:\n c, value = max(x['label']['prob'].items(), key=itemgetter(1))\n class_names.add(c)\n results = {k: {'tp': [], 'fp': [], 'fn': []} for k in class_names}\n df = pd.DataFrame({'category': list(class_names)})\n df['TP'] = [0 for _ in list(class_names)]\n df['FP'] = [0 for _ in list(class_names)]\n df['FN'] = [0 for _ in list(class_names)]\n df['GTc'] = [0 for _ in list(class_names)]\n for x in true_regions['regions']:\n c = x['label']\n mask = (df['category'] == c)\n df.loc[mask, 'GTc'] = df.loc[mask, 'GTc'] + 1\n for x in tp.items():\n save = {\n 'iou': iou_mat[x[1], x[0]],\n 'prob': pred_mat[x[1], x[0]],\n 'test_ind': x[1]\n }\n c = true_regions['regions'][x[0]]['label']\n results[c]['tp'].append(save)\n results[c]['tp'].append({'gt_ind': x[0]})\n mask = (df['category'] == c)\n df.loc[mask, 'TP'] = df.loc[mask, 'TP'] + 1\n for x in fn:\n c = true_regions['regions'][x]['label']\n results[c]['fn'].append({'gt_ind': x})\n mask = (df['category'] == c)\n df.loc[mask, 'FN'] = df.loc[mask, 'FN'] + 1\n for x in fp:\n c, value = max(test_regions[x]['label']['prob'].items(), key=itemgetter(1))\n save = {\n 'iou': np.max(iou_mat[x, :]),\n 'test_ind': x\n }\n results[c]['fp'].append(save)\n mask = (df['category'] == c)\n df.loc[mask, 'FP'] = df.loc[mask, 'FP'] + 1\n df['precision'] = df.apply(lambda row: calc_precision(row['TP'], row['FP']), axis=1)\n df['recall'] = df.apply(lambda row: calc_recall(row['TP'], row['FN']), axis=1)\n return df, results\n\n\ndef apply_mask(image, mask, color, alpha=0.5):\n \"\"\"Apply the given mask to the image.\n \"\"\"\n for c in range(3):\n image[:, :, c] = np.where(mask == 1,\n image[:, :, c] *\n (1 - alpha) + alpha * color[c] * 255,\n image[:, :, c])\n return image\n\n\ndef display_class_prediction_overlaps(\n image,\n segments,\n true_regions,\n test_regions,\n figsize=(16, 16),\n show_mask=True,\n show_bbox=True\n):\n for key, x in segments.items():\n # If no axis is passed, create one and automatically call show()\n ax = None\n if not ax:\n _, ax = plt.subplots(1, figsize=figsize)\n auto_show = True\n\n # Generate random colors\n # Number of color segments (choosing three to match tp, fp, fn\n # colors = colors or random_colors(3)\n colors = [(0.0, 1.0, 0.40000000000000036),\n (1.0, 0.0, 1.0),\n (1.0, 1.0, 0.0)]\n color_labels = ['tp', 'fn', 'fp']\n # Show area outside image boundaries.\n height, width = image.shape[:2]\n ax.set_ylim(height + 10, -10)\n 
ax.set_xlim(-10, width + 10)\n ax.axis('off')\n ax.set_title(key)\n masked_image = image.astype(np.uint32).copy()\n for typekey, t in x.items():\n color = colors[color_labels.index(typekey)]\n for seg in t:\n if 'gt_ind' in list(seg.keys()):\n contour = true_regions['regions'][seg['gt_ind']]['points']\n seglabel = 'gt'\n elif 'test_ind' in list(seg.keys()):\n contour = test_regions[seg['test_ind']]['points']\n if 'prob' in list(seg.keys()):\n seglabel = 'IOU: {0:.2}, PROB: {0:.2%}'.format(seg['iou'], seg['prob'])\n else:\n seglabel = 'IOU: {0:.2}'.format(seg['iou'])\n\n x1, y1, x2, y2 = compute_bbox(contour)\n if show_bbox:\n p = patches.Rectangle(\n (x1, y1),\n x2 - x1,\n y2 - y1,\n linewidth=2,\n alpha=0.7,\n linestyle=\"dashed\",\n edgecolor=color,\n facecolor='none'\n )\n ax.add_patch(p)\n ax.text(\n x1,\n y1 + 8,\n seglabel,\n color='w',\n size=15,\n backgroundcolor=\"none\"\n )\n # Mask\n mask = make_binary_mask(contour, (masked_image.shape[0], masked_image.shape[1]))\n if show_mask:\n masked_image = apply_mask(masked_image, mask, color)\n\n ax.imshow(masked_image.astype(np.uint8))\n if auto_show:\n plt.show()\n\n\ndef plot_test_results(trained_pipeline, report):\n hsv_img = trained_pipeline.training_data[trained_pipeline.test_img_name]['hsv_img'].copy()\n ground_truth = trained_pipeline.training_data[trained_pipeline.test_img_name]['regions']\n test_results = trained_pipeline.test_results\n\n # first, map ground truth indices by label\n gt_by_label_map = {}\n for i, gt in enumerate(ground_truth):\n if gt['label'] not in gt_by_label_map:\n gt_by_label_map[gt['label']] = []\n\n gt_by_label_map[gt['label']].append(gt['points'])\n\n tp_by_label_map = {}\n fp_by_label_map = {}\n fn_by_label_map = {}\n for k, v in report.items():\n if k not in tp_by_label_map:\n tp_by_label_map[k] = []\n fp_by_label_map[k] = []\n fn_by_label_map[k] = []\n\n for tp in v['tp']:\n if 'test_ind' in tp:\n tp_by_label_map[k].append(\n test_results[tp['test_ind']]['contour']\n )\n\n for fp in v['fp']:\n if 'test_ind' in fp:\n fp_by_label_map[k].append(\n test_results[fp['test_ind']]['contour']\n )\n\n for fn in v['fn']:\n if 'gt_ind' in fn:\n fn_by_label_map[k].append(\n ground_truth[fn['gt_ind']]['points']\n )\n\n # create separate set of images for each class label\n for class_label in sorted(report.keys()):\n # ground truth\n if class_label != 'background':\n new_img = cv2.cvtColor(hsv_img.copy(), cv2.COLOR_HSV2RGB)\n cv2.drawContours(new_img, gt_by_label_map[class_label], -1, (0, 255, 0), 5)\n plt.figure(figsize=(8, 8))\n plt.imshow(new_img)\n plt.title(\"%s - %s\" % (class_label, 'Ground Truth'))\n plt.show()\n\n # true positive\n new_img = cv2.cvtColor(hsv_img.copy(), cv2.COLOR_HSV2RGB)\n cv2.drawContours(new_img, tp_by_label_map[class_label], -1, (0, 255, 0), 5)\n plt.figure(figsize=(8, 8))\n plt.imshow(new_img)\n plt.title(\"%s - %s\" % (class_label, 'True Positive'))\n plt.show()\n\n # false negative\n new_img = cv2.cvtColor(hsv_img.copy(), cv2.COLOR_HSV2RGB)\n cv2.drawContours(new_img, fn_by_label_map[class_label], -1, (0, 255, 0), 5)\n plt.figure(figsize=(8, 8))\n plt.imshow(new_img)\n plt.title(\"%s - %s\" % (class_label, 'False Negative'))\n plt.show()\n\n # false positive\n new_img = cv2.cvtColor(hsv_img.copy(), cv2.COLOR_HSV2RGB)\n cv2.drawContours(new_img, fp_by_label_map[class_label], -1, (0, 255, 0), 5)\n plt.figure(figsize=(8, 8))\n plt.imshow(new_img)\n plt.title(\"%s - %s\" % (class_label, 'False Positive'))\n plt.show()\n"
] | [
[
"numpy.asarray"
],
[
"matplotlib.pyplot.imshow",
"numpy.spacing",
"matplotlib.pyplot.title",
"numpy.asarray",
"matplotlib.patches.Rectangle",
"matplotlib.pyplot.subplots",
"numpy.empty",
"numpy.bitwise_and",
"numpy.max",
"numpy.append",
"numpy.bitwise_or",
"numpy.argsort",
"matplotlib.pyplot.show",
"numpy.unravel_index",
"numpy.where",
"numpy.zeros",
"matplotlib.pyplot.figure"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
salvacarrion/autonlp | [
"5cc462901e451b9259219f44225034fc8eedf6d3",
"5cc462901e451b9259219f44225034fc8eedf6d3",
"5cc462901e451b9259219f44225034fc8eedf6d3"
] | [
"examples/3_plot_results.py",
"autonmt/toolkits/base.py",
"autonmt/modules/datasets/seq2seq_dataset.py"
] | [
"import pandas as pd\nfrom tokenizers import normalizers\nfrom tokenizers.normalizers import NFKC, Strip, Lowercase\n\nfrom autonmt.bundle import utils\nfrom autonmt.bundle.report import generate_multivariable_report\nfrom autonmt.preprocessing import DatasetBuilder\n\n\ndef main():\n\n # Create preprocessing for training\n builder = DatasetBuilder(\n base_path=\"/home/scarrion/datasets/nn/translation\",\n datasets=[\n {\"name\": \"europarl\", \"languages\": [\"de-en\"], \"sizes\": [(\"100k\", 100000)]},\n ],\n encoding=[\n {\"subword_models\": [\"unigram+bytes\"], \"vocab_sizes\": [x+256 for x in [100, 200, 400, 1000, 2000, 4000, 8000, 16000]]},\n ],\n normalizer=lambda x: normalizers.Sequence([NFKC(), Strip(), Lowercase()]).normalize_str(x),\n merge_vocabs=False,\n eval_mode=\"compatible\",\n ).build(make_plots=False, force_overwrite=False)\n\n # Create preprocessing for training and testing\n tr_datasets = builder.get_train_ds()\n ts_datasets = builder.get_test_ds()\n\n # Train & Score a model for each dataset\n stats = []\n for ds in tr_datasets:\n # Get ds stats\n ds_stats = utils.load_json(ds.get_stats_path(\"stats.json\"))\n\n # Add stats\n ds_stats[\"scores\"] = {}\n row = {\n \"subword_model\": ds.subword_model,\n \"vocab_size\": ds.vocab_size,\n \"unknown_avg_tokens\": ds_stats[\"val.en\"][\"unknown_avg_tokens\"],\n }\n stats.append(row)\n\n # Create dataframes\n # assert len(ts_datasets) == 1\n df_report = pd.DataFrame(stats)\n df_report[\"dataset\"] = [f\"{ds.dataset_name}-{ds.dataset_size_name}\".replace(\"_lc\", \"\").title() for ds in tr_datasets]\n df_report[\"vocab_size\"] = df_report[\"vocab_size\"].astype(int)\n\n # Make report and print it\n output_path = f\".outputs/myplots\"\n prefix = \"unknowns_\"\n generate_multivariable_report(data=df_report,\n x=\"vocab_size\",\n y_left=(\"unknown_avg_tokens\", \"subword_model\"), y_right=None,\n output_path=output_path, prefix=prefix,\n save_figures=True, show_figures=False, save_csv=True)\n print(\"Summary:\")\n print(df_report.to_string(index=False))\n\n\nif __name__ == \"__main__\":\n main()\n",
"import os.path\nimport shutil\nfrom abc import ABC, abstractmethod\nfrom typing import List, Set\n\nfrom autonmt.bundle.metrics import *\nfrom autonmt.bundle.utils import *\nfrom autonmt.preprocessing.dataset import Dataset\nfrom autonmt.preprocessing.processors import normalize_file, pretokenize_file, encode_file, decode_file\n\n\ndef _check_datasets(train_ds: Dataset = None, eval_ds: Dataset = None):\n # Check that train_ds is a Dataset\n if train_ds and not isinstance(train_ds, Dataset):\n raise TypeError(\"'train_ds' must be an instance of 'Dataset' so that we can know the layout of the trained \"\n \"model (e.g. checkpoints available, subword model, vocabularies, etc\")\n\n # Check that train_ds is a Dataset\n if eval_ds and not isinstance(eval_ds, Dataset):\n raise TypeError(\"'eval_ds' must be an instance of 'Dataset' so that we can know the layout of the dataset \"\n \"and get the corresponding data (e.g. splits, pretokenized, encoded, stc)\")\n\n # Check that the preprocessing are compatible\n if train_ds and eval_ds and ((train_ds.src_lang != eval_ds.src_lang) or (train_ds.trg_lang != eval_ds.trg_lang)):\n raise ValueError(f\"The languages from the train and test datasets are not compatible:\\n\"\n f\"\\t- train_lang_pair=({train_ds.dataset_lang_pair})\\n\"\n f\"\\t- test_lang_pair=({eval_ds.dataset_lang_pair})\\n\")\n\n\ndef _check_supported_metrics(metrics, metrics_supported):\n # Check\n metrics = set(metrics)\n metrics_supported = set(metrics_supported)\n\n # Get valid metrics\n metrics_valid = list(metrics.intersection(metrics_supported))\n metrics_valid += [x for x in metrics if x.startswith(\"hg_\")] # Ignore huggingface metrics\n metrics_valid = set(metrics_valid)\n metrics_non_valid = metrics.difference(metrics_valid)\n\n if metrics_non_valid:\n print(f\"=> [WARNING] These metrics are not supported: {str(metrics_non_valid)}\")\n if metrics == metrics_non_valid:\n print(\"\\t- [Score]: Skipped. 
No valid metrics were found.\")\n\n return metrics_valid\n\n\nclass BaseTranslator(ABC):\n\n # Global variables\n total_runs = 0\n TOOL_PARSERS = {\"sacrebleu\": {\"filename\": \"sacrebleu_scores\", \"py\": (parse_sacrebleu_json, \"json\")},\n \"bertscore\": {\"filename\": \"bertscore_scores\", \"py\": (parse_bertscore_json, \"json\")},\n \"comet\": {\"filename\": \"comet_scores\", \"py\": (parse_comet_json, \"json\")},\n \"beer\": {\"filename\": \"beer_scores\", \"py\": (parse_beer_json, \"json\")},\n \"huggingface\": {\"filename\": \"huggingface_scores\", \"py\": (parse_huggingface_json, \"json\")},\n \"fairseq\": {\"filename\": \"fairseq_scores\", \"py\": (parse_fairseq_txt, \"txt\")},\n }\n TOOL2METRICS = {\"sacrebleu\": {\"bleu\", \"chrf\", \"ter\"},\n \"bertscore\": {\"bertscore\"},\n \"comet\": {\"comet\"},\n \"beer\": {\"beer\"},\n \"fairseq\": {\"fairseq\"},\n # \"huggingface\": \"huggingface\",\n }\n METRICS2TOOL = {m: tool for tool, metrics in TOOL2METRICS.items() for m in metrics}\n\n def __init__(self, engine, run_prefix=\"model\", model_ds=None, src_vocab=None, trg_vocab=None,\n safe_seconds=3, **kwargs):\n # Store vars\n self.engine = engine\n self.run_prefix = run_prefix\n self.model_ds = model_ds\n self.config = {}\n self.model_ds = None\n self.safe_seconds = safe_seconds\n\n # Set vocab (optional)\n self.src_vocab = src_vocab\n self.trg_vocab = trg_vocab\n\n # Check dataset\n _check_datasets(train_ds=self.model_ds) if self.model_ds else None\n\n def _get_metrics_tool(self, metrics):\n tools = set()\n for m in metrics:\n if m.startswith(\"hg_\"):\n m_tool = \"huggingface\"\n else:\n m_tool = self.METRICS2TOOL.get(m)\n\n # Add tools\n if m_tool:\n tools.add(m_tool)\n return tools\n\n def _add_config(self, key: str, values: dict, reset=False):\n def is_valid(k, v):\n primitive_types = (str, bool, int, float, dict, set, list) # Problems with list of objects\n return not(k.startswith(\"_\") or k in {\"kwargs\"}) and (isinstance(v, primitive_types) or v is None)\n\n def parse_value(x):\n if isinstance(x, (list, set)):\n return [str(_x) for _x in x]\n return str(x)\n\n # Reset value (if needed)\n if reset or key not in self.config:\n self.config[key] = {}\n\n # Update values\n self.config[key].update({k: parse_value(v) for k, v in values.items() if is_valid(k, v)})\n\n def fit(self, train_ds, max_tokens=None, batch_size=128, max_epochs=1,\n learning_rate=0.001, optimizer=\"adam\", weight_decay=0, gradient_clip_val=0.0, accumulate_grad_batches=1,\n criterion=\"cross_entropy\", patience=None, seed=None, devices=\"auto\", accelerator=\"auto\", num_workers=0,\n monitor=\"loss\", resume_training=False, force_overwrite=False, **kwargs):\n print(\"=> [Fit]: Started.\")\n\n # Set model\n self.model_ds = train_ds\n\n # Store config (and save file)\n self._add_config(key=\"fit\", values=locals(), reset=False)\n self._add_config(key=\"fit\", values=kwargs, reset=False)\n logs_path = train_ds.get_model_logs_path(toolkit=self.engine, run_name=train_ds.get_run_name(self.run_prefix))\n make_dir(logs_path)\n save_json(self.config, savepath=os.path.join(logs_path, \"config_train.json\"))\n\n # Train and preprocess\n self.preprocess(train_ds, force_overwrite=force_overwrite, **kwargs)\n self.train(train_ds, max_tokens=max_tokens, batch_size=batch_size, max_epochs=max_epochs,\n learning_rate=learning_rate, optimizer=optimizer, weight_decay=weight_decay,\n gradient_clip_val=gradient_clip_val, accumulate_grad_batches=accumulate_grad_batches,\n criterion=criterion, patience=patience, seed=seed, 
devices=devices, accelerator=accelerator,\n num_workers=num_workers, monitor=monitor, resume_training=resume_training,\n force_overwrite=force_overwrite, **kwargs)\n\n def predict(self, eval_datasets: List[Dataset], beams: List[int] = None,\n metrics: Set[str] = None, batch_size=64, max_tokens=None, max_len_a=1.2, max_len_b=50, truncate_at=None,\n devices=\"auto\", accelerator=\"auto\", num_workers=0, load_best_checkpoint=False,\n model_ds=None, force_overwrite=False, **kwargs):\n print(\"=> [Predict]: Started.\")\n\n # Set default values\n if beams is None:\n beams = [5]\n else:\n beams = list(set(beams))\n beams.sort(reverse=True)\n\n # Default metrics\n if metrics is None:\n metrics = {\"bleu\"}\n else:\n metrics = set(metrics)\n\n # Get model dataset\n if model_ds:\n self.model_ds = model_ds\n elif self.model_ds:\n pass\n else:\n raise ValueError(f\"Missing 'model_ds'. It's needed to get the model's path (training and eval).\")\n\n # Store config\n self._add_config(key=\"predict\", values=locals(), reset=False)\n self._add_config(key=\"predict\", values=kwargs, reset=False)\n logs_path = self.model_ds.get_model_logs_path(toolkit=self.engine, run_name=self.model_ds.get_run_name(self.run_prefix))\n make_dir(logs_path)\n save_json(self.config, savepath=os.path.join(logs_path, \"config_predict.json\"))\n\n # Translate and score\n eval_scores = []\n eval_datasets = self.model_ds.get_eval_datasets(eval_datasets)\n for eval_ds in eval_datasets:\n self.translate(model_ds=self.model_ds, eval_ds=eval_ds, beams=beams, max_len_a=max_len_a, max_len_b=max_len_b,\n truncate_at=truncate_at, batch_size=batch_size, max_tokens=max_tokens,\n devices=devices, accelerator=accelerator, num_workers=num_workers,\n load_best_checkpoint=load_best_checkpoint, force_overwrite=force_overwrite, **kwargs)\n self.score(model_ds=self.model_ds, eval_ds=eval_ds, beams=beams, metrics=metrics,\n force_overwrite=force_overwrite, **kwargs)\n model_scores = self.parse_metrics(model_ds=self.model_ds, eval_ds=eval_ds, beams=beams, metrics=metrics,\n engine=self.engine, force_overwrite=force_overwrite, **kwargs)\n eval_scores.append(model_scores)\n return eval_scores\n\n @abstractmethod\n def _preprocess(self, *args, **kwargs):\n pass\n\n def preprocess(self, ds: Dataset, force_overwrite, **kwargs):\n print(f\"=> [Preprocess]: Started. ({ds.id2(as_path=True)})\")\n\n # Set vars\n src_lang = ds.src_lang\n trg_lang = ds.trg_lang\n train_path = ds.get_encoded_path(fname=ds.train_name)\n val_path = ds.get_encoded_path(fname=ds.val_name)\n test_path = ds.get_encoded_path(fname=ds.test_name)\n model_src_vocab_path = ds.get_vocab_file(lang=src_lang)\n model_trg_vocab_path = ds.get_vocab_file(lang=trg_lang)\n model_data_bin_path = ds.get_model_data_bin(toolkit=self.engine)\n\n # Create dirs\n make_dir([model_data_bin_path])\n\n start_time = time.time()\n self._preprocess(ds=ds, src_lang=src_lang, trg_lang=trg_lang, output_path=model_data_bin_path,\n train_path=train_path, val_path=val_path, test_path=test_path,\n src_vocab_path=model_src_vocab_path, trg_vocab_path=model_trg_vocab_path,\n force_overwrite=force_overwrite, **kwargs)\n print(f\"\\t- [INFO]: Preprocess time: {str(datetime.timedelta(seconds=time.time()-start_time))}\")\n\n @abstractmethod\n def _train(self, *args, **kwargs):\n pass\n\n def train(self, train_ds: Dataset, resume_training, force_overwrite, **kwargs):\n print(f\"=> [Train]: Started. 
({train_ds.id2(as_path=True)})\")\n\n # Check preprocessing\n _check_datasets(train_ds=train_ds)\n\n # Check debug\n if is_debug_enabled():\n print(\"\\t=> [WARNING]: Debug is enabled. This could lead to critical problems when using a data parallel strategy.\")\n\n # Set run name\n run_name = train_ds.get_run_name(self.run_prefix)\n\n # Set paths\n data_bin_path = train_ds.get_model_data_bin(toolkit=self.engine)\n checkpoints_dir = train_ds.get_model_checkpoints_path(toolkit=self.engine, run_name=run_name)\n logs_path = train_ds.get_model_logs_path(toolkit=self.engine, run_name=run_name)\n\n # Create dirs\n make_dir([data_bin_path, checkpoints_dir, logs_path])\n\n # Set seed\n self.manual_seed(seed=kwargs.get(\"seed\"))\n\n start_time = time.time()\n self._train(data_bin_path=data_bin_path, checkpoints_dir=checkpoints_dir, logs_path=logs_path,\n run_name=run_name, ds_alias='_'.join(train_ds.id()),\n resume_training=resume_training, force_overwrite=force_overwrite, **kwargs)\n print(f\"\\t- [INFO]: Training time: {str(datetime.timedelta(seconds=time.time()-start_time))}\")\n\n @abstractmethod\n def _translate(self, *args, **kwargs):\n pass\n\n def translate(self, model_ds: Dataset, eval_ds: Dataset, beams: List[int], max_len_a, max_len_b, truncate_at,\n batch_size, max_tokens, num_workers, force_overwrite, **kwargs):\n print(f\"=> [Translate]: Started. ({model_ds.id2(as_path=True)})\")\n\n # Check preprocessing\n _check_datasets(train_ds=model_ds, eval_ds=eval_ds)\n assert model_ds.dataset_lang_pair == eval_ds.dataset_lang_pair\n\n # Set run names\n run_name = model_ds.get_run_name(self.run_prefix)\n eval_name = '_'.join(eval_ds.id()) # Subword model and vocab size don't characterize the dataset!\n\n # Checkpoints dir\n checkpoints_dir = model_ds.get_model_checkpoints_path(self.engine, run_name)\n\n # [Trained model]: Create eval folder\n model_src_vocab_path = model_ds.get_vocab_file(lang=model_ds.src_lang) # Needed to preprocess\n model_trg_vocab_path = model_ds.get_vocab_file(lang=model_ds.trg_lang) # Needed to preprocess\n model_eval_data_path = model_ds.get_model_eval_data_path(toolkit=self.engine, run_name=run_name, eval_name=eval_name)\n model_eval_data_bin_path = model_ds.get_model_eval_data_bin_path(toolkit=self.engine, run_name=run_name, eval_name=eval_name)\n\n # Create dirs\n make_dir([model_eval_data_path, model_eval_data_bin_path])\n\n # [Encode extern data]: Encode test data using the subword model of the trained model\n for ts_fname in [fname for fname in eval_ds.split_names_lang if eval_ds.test_name in fname]:\n lang = ts_fname.split('.')[-1]\n input_file = eval_ds.get_split_path(ts_fname) # as raw as possible\n output_file = model_ds.get_model_eval_data_path(toolkit=self.engine, run_name=run_name,\n eval_name=eval_name)\n\n # Create directories\n make_dir([\n os.path.join(output_file, \"raw\"),\n os.path.join(output_file, \"normalized\"),\n os.path.join(output_file, \"tokenized\"),\n os.path.join(output_file, \"encoded\"),\n ])\n\n # Copy raw\n raw_file = os.path.join(output_file, \"raw\", ts_fname)\n shutil.copyfile(input_file, raw_file)\n input_file = raw_file\n\n # Normalize data\n norm_file = os.path.join(output_file, \"normalized\", ts_fname)\n normalize_file(input_file=input_file, output_file=norm_file,\n normalizer=model_ds.normalizer, force_overwrite=force_overwrite)\n input_file = norm_file\n\n # Pretokenize data (if needed)\n if model_ds.pretok_flag:\n pretok_file = os.path.join(output_file, \"tokenized\", ts_fname)\n pretokenize_file(input_file=input_file, 
output_file=pretok_file, lang=lang,\n force_overwrite=force_overwrite)\n input_file = pretok_file\n\n # Encode file\n enc_file = os.path.join(output_file, \"encoded\", ts_fname)\n encode_file(ds=model_ds, input_file=input_file, output_file=enc_file,\n lang=lang, merge_vocabs=model_ds.merge_vocabs, truncate_at=truncate_at,\n force_overwrite=force_overwrite)\n\n # Preprocess external data\n test_path = os.path.join(model_eval_data_path, \"encoded\", eval_ds.test_name)\n self._preprocess(ds=model_ds, src_lang=model_ds.src_lang, trg_lang=model_ds.trg_lang,\n output_path=model_eval_data_bin_path,\n train_path=None, val_path=None, test_path=test_path,\n src_vocab_path=model_src_vocab_path, trg_vocab_path=model_trg_vocab_path,\n subword_model=model_ds.subword_model, pretok_flag=model_ds.pretok_flag,\n external_data=True, force_overwrite=force_overwrite,\n **kwargs)\n\n # Iterate over beams\n for beam in beams:\n start_time = time.time()\n # Create output path (if needed)\n output_path = model_ds.get_model_beam_path(toolkit=self.engine, run_name=run_name, eval_name=eval_name, beam=beam)\n make_dir(output_path)\n\n # Translate\n tok_flag = [os.path.exists(os.path.join(output_path, f)) for f in [\"hyp.tok\"]]\n if force_overwrite or not all(tok_flag):\n self._translate(\n src_lang=model_ds.src_lang, trg_lang=model_ds.trg_lang,\n beam_width=beam, max_len_a=max_len_a, max_len_b=max_len_b, batch_size=batch_size, max_tokens=max_tokens,\n data_bin_path=model_eval_data_bin_path, output_path=output_path, checkpoints_dir=checkpoints_dir,\n model_src_vocab_path=model_src_vocab_path, model_trg_vocab_path=model_trg_vocab_path,\n num_workers=num_workers, model_ds=model_ds, force_overwrite=force_overwrite, **kwargs)\n\n # Copy src/ref raw\n for fname, lang in [(\"src\", model_ds.src_lang), (\"ref\", model_ds.trg_lang)]:\n raw_file = model_ds.get_model_eval_data_path(toolkit=self.engine, run_name=run_name,\n eval_name=eval_name,\n fname=f\"normalized/test.{lang}\")\n output_file = os.path.join(output_path, f\"{fname}.txt\")\n shutil.copyfile(raw_file, output_file)\n\n # Postprocess tokenized files\n for fname, lang in [(\"hyp\", model_ds.trg_lang)]:\n input_file = os.path.join(output_path, f\"{fname}.tok\")\n output_file = os.path.join(output_path, f\"{fname}.txt\")\n model_vocab_path = model_src_vocab_path if lang == model_ds.src_lang else model_trg_vocab_path\n\n # Post-process files\n decode_file(input_file=input_file, output_file=output_file, lang=lang,\n subword_model=model_ds.subword_model, pretok_flag=model_ds.pretok_flag,\n model_vocab_path=model_vocab_path, remove_unk_hyphen=True,\n force_overwrite=force_overwrite)\n\n # Check amount of lines\n ref_lines = len(open(os.path.join(output_path, \"ref.txt\"), 'r').readlines())\n hyp_lines = len(open(os.path.join(output_path, \"hyp.txt\"), 'r').readlines())\n if ref_lines != hyp_lines:\n raise ValueError(f\"The number of lines in 'ref.txt' ({ref_lines}) and 'hyp.txt' ({hyp_lines}) \"\n f\"does not match. If you see a 'CUDA out of memory' message, try again with \"\n f\"smaller batch.\")\n\n print(f\"\\t- [INFO]: Translating time (beam={str(beam)}): {str(datetime.timedelta(seconds=time.time() - start_time))}\")\n\n\n def score(self, model_ds: Dataset, eval_ds: Dataset, beams: List[int], metrics: Set[str], force_overwrite, **kwargs):\n print(f\"=> [Score]: Started. 
({model_ds.id2(as_path=True)})\")\n\n # Check preprocessing\n _check_datasets(train_ds=model_ds, eval_ds=eval_ds)\n assert model_ds.dataset_lang_pair == eval_ds.dataset_lang_pair\n\n # Check supported metrics\n metrics_valid = _check_supported_metrics(metrics, self.METRICS2TOOL.keys())\n if not metrics_valid:\n return\n\n # Set run names\n run_name = model_ds.get_run_name(self.run_prefix)\n eval_name = '_'.join(eval_ds.id()) # Subword model and vocab size don't characterize the dataset!\n\n # Iterate over beams\n for beam in beams:\n start_time = time.time()\n\n # Paths\n beam_path = model_ds.get_model_beam_path(toolkit=self.engine, run_name=run_name, eval_name=eval_name, beam=beam)\n scores_path = model_ds.get_model_scores_path(toolkit=self.engine, run_name=run_name, eval_name=eval_name, beam=beam)\n\n # Create dirs\n make_dir([scores_path])\n\n # Set input files (results)\n src_file_path = os.path.join(beam_path, \"src.txt\")\n ref_file_path = os.path.join(beam_path, \"ref.txt\")\n hyp_file_path = os.path.join(beam_path, \"hyp.txt\")\n\n # Check that the paths exists\n if not all([os.path.exists(p) for p in [src_file_path, ref_file_path, hyp_file_path]]):\n raise IOError(\"Missing files to compute scores\")\n\n # Score: bleu, chrf and ter\n if self.TOOL2METRICS[\"sacrebleu\"].intersection(metrics):\n output_file = os.path.join(scores_path, f\"sacrebleu_scores.json\")\n if force_overwrite or not os.path.exists(output_file):\n compute_sacrebleu(ref_file=ref_file_path, hyp_file=hyp_file_path, output_file=output_file, metrics=metrics)\n\n # Score: bertscore\n if self.TOOL2METRICS[\"bertscore\"].intersection(metrics):\n output_file = os.path.join(scores_path, f\"bertscore_scores.json\")\n if force_overwrite or not os.path.exists(output_file):\n compute_bertscore(ref_file=ref_file_path, hyp_file=hyp_file_path, output_file=output_file, trg_lang=model_ds.trg_lang)\n\n # Score: comet\n if self.TOOL2METRICS[\"comet\"].intersection(metrics):\n output_file = os.path.join(scores_path, f\"comet_scores.json\")\n if force_overwrite or not os.path.exists(output_file):\n compute_comet(src_file=src_file_path, ref_file=ref_file_path, hyp_file=hyp_file_path, output_file=output_file)\n\n # Score: fairseq\n if self.TOOL2METRICS[\"fairseq\"].intersection(metrics):\n output_file = os.path.join(scores_path, f\"fairseq_scores.txt\")\n if force_overwrite or not os.path.exists(output_file):\n compute_fairseq(ref_file=ref_file_path, hyp_file=hyp_file_path, output_file=output_file)\n\n # Huggingface metrics\n hg_metrics = {x[3:] for x in metrics if x.startswith(\"hg_\")}\n if hg_metrics:\n output_file = os.path.join(scores_path, f\"huggingface_scores.json\")\n if force_overwrite or not os.path.exists(output_file):\n compute_huggingface(src_file=src_file_path, hyp_file=hyp_file_path, ref_file=ref_file_path,\n output_file=output_file, metrics=hg_metrics, trg_lang=model_ds.trg_lang)\n\n print(f\"\\t- [INFO]: Scoring time (beam={str(beam)}): {str(datetime.timedelta(seconds=time.time() - start_time))}\")\n\n\n def parse_metrics(self, model_ds, eval_ds, beams: List[int], metrics: Set[str], force_overwrite, **kwargs):\n print(f\"=> [Parsing]: Started. 
({model_ds.id2(as_path=True)})\")\n\n # Check preprocessing\n _check_datasets(train_ds=model_ds, eval_ds=eval_ds)\n assert model_ds.dataset_lang_pair == eval_ds.dataset_lang_pair\n\n # Check supported metrics\n metrics_valid = _check_supported_metrics(metrics, self.METRICS2TOOL.keys())\n if not metrics_valid:\n return\n\n # Metrics to retrieve\n metric_tools = self._get_metrics_tool(metrics)\n\n # Set run names\n run_name = model_ds.get_run_name(self.run_prefix)\n eval_name = '_'.join(eval_ds.id()) # Subword model and vocab size don't characterize the dataset!\n\n # Walk through beams\n scores = {\n \"engine\": kwargs.get(\"engine\"),\n \"lang_pair\": model_ds.dataset_lang_pair,\n \"train_dataset\": model_ds.dataset_name,\n \"eval_dataset\": eval_ds.dataset_name,\n \"subword_model\": str(model_ds.subword_model).lower(),\n \"vocab_size\": str(model_ds.vocab_size).lower(),\n \"run_name\": run_name,\n \"train_max_lines\": model_ds.dataset_lines,\n \"beams\": {},\n \"config\": self.config,\n }\n\n # Iterate over beams\n for beam in beams:\n # Paths\n scores_path = model_ds.get_model_scores_path(toolkit=self.engine, run_name=run_name, eval_name=eval_name,\n beam=beam)\n\n # Walk through metric files\n beam_scores = {}\n for m_tool in metric_tools:\n values = self.TOOL_PARSERS[m_tool]\n m_parser, ext = values[\"py\"]\n m_fname = f\"{values['filename']}.{ext}\"\n\n # Read file\n filename = os.path.join(scores_path, m_fname)\n if os.path.exists(filename):\n try:\n with open(filename, 'r') as f:\n m_scores = m_parser(text=f.readlines())\n for m_name, m_values in m_scores.items(): # [bleu_score, chrf_score, ter_score], [bertscore_precision]\n for score_name, score_value in m_values.items():\n m_name_full = f\"{m_tool}_{m_name}_{score_name}\".lower().strip()\n beam_scores[m_name_full] = score_value\n except Exception as e:\n print(f\"\\t- [PARSING ERROR]: ({m_fname}) {str(e)}\")\n else:\n print(f\"\\t- [WARNING]: There are no metrics from '{m_tool}'\")\n\n # Add beam scores\n scores[\"beams\"].update({f\"beam{str(beam)}\": beam_scores})\n return scores\n\n @staticmethod\n def manual_seed(seed, use_deterministic_algorithms=False):\n import torch\n import random\n import numpy as np\n from pytorch_lightning.utilities.seed import seed_everything\n\n # Define seed\n seed = seed if seed is not None else int(time.time()) % 2**32\n\n # Set seeds\n random.seed(seed)\n np.random.seed(seed)\n torch.manual_seed(seed)\n seed_everything(seed)\n\n # Tricky: https://pytorch.org/docs/stable/generated/torch.use_deterministic_algorithms.html\n torch.use_deterministic_algorithms(use_deterministic_algorithms)\n\n # Test randomness\n print(f\"\\t- [INFO]: Testing random seed ({seed}):\")\n print(f\"\\t\\t- random: {random.random()}\")\n print(f\"\\t\\t- numpy: {np.random.rand(1)}\")\n print(f\"\\t\\t- torch: {torch.rand(1)}\")\n\n return seed\n",
"import numpy as np\nimport torch\nfrom torch.nn.utils.rnn import pad_sequence\nfrom torch.utils.data import Dataset\nfrom itertools import compress\n\nfrom autonmt.bundle.utils import read_file_lines\n\n\n\nclass Seq2SeqDataset(Dataset):\n def __init__(self, file_prefix, src_lang, trg_lang, src_vocab=None, trg_vocab=None, limit=None,\n filter_langs=None, filter_fn=None, **kwargs):\n # Set vocabs\n self.src_vocab = src_vocab\n self.trg_vocab = trg_vocab\n\n # Get src/trg file paths\n src_file_path = file_prefix.strip() + f\".{src_lang}\"\n trg_file_path = file_prefix.strip() + f\".{trg_lang}\"\n\n # Read files\n self.src_lines = read_file_lines(filename=src_file_path, autoclean=True)\n self.trg_lines = read_file_lines(filename=trg_file_path, autoclean=True)\n\n # Filter langs\n if filter_fn:\n self.src_lines, self.trg_lines = filter_fn(self.src_lines, self.trg_lines, filter_langs)\n\n # Limit lines\n self.src_lines = self.src_lines[:limit] if limit else self.src_lines\n self.trg_lines = self.trg_lines[:limit] if limit else self.trg_lines\n\n assert len(self.src_lines) == len(self.trg_lines)\n\n def __len__(self):\n return len(self.src_lines)\n\n def __getitem__(self, idx):\n src_line, trg_line = self.src_lines[idx], self.trg_lines[idx]\n return src_line, trg_line\n\n def collate_fn(self, batch, max_tokens=None, **kwargs):\n x_encoded, y_encoded = [], []\n x_max_len = y_max_len = 0\n\n # Add elements to batch\n for i, (x, y) in enumerate(batch):\n # Encode tokens\n _x = self.src_vocab.encode(x)\n _y = self.trg_vocab.encode(y)\n\n # Control tokens in batch\n x_max_len = max(x_max_len, len(_x))\n y_max_len = max(y_max_len, len(_y))\n\n # Add elements\n if max_tokens is None or (i+1)*(x_max_len+y_max_len) <= max_tokens: # sample*size\n x_encoded.append(torch.tensor(_x, dtype=torch.long))\n y_encoded.append(torch.tensor(_y, dtype=torch.long))\n else:\n msg = \"[WARNING] Dropping {:.2f}% of the batch because the maximum number of tokens ({}) was exceeded\"\n drop_ratio = 1 - ((i+1)/len(batch))\n print(msg.format(drop_ratio, max_tokens))\n break\n\n # Pad sequence\n x_padded = pad_sequence(x_encoded, batch_first=False, padding_value=self.src_vocab.pad_id).T\n y_padded = pad_sequence(y_encoded, batch_first=False, padding_value=self.trg_vocab.pad_id).T\n\n # Check stuff\n assert x_padded.shape[0] == y_padded.shape[0] == len(x_encoded) # Control samples\n assert max_tokens is None or (x_padded.numel() + y_padded.numel()) <= max_tokens # Control max tokens\n return x_padded, y_padded\n"
] | [
[
"pandas.DataFrame"
],
[
"numpy.random.seed",
"torch.manual_seed",
"torch.use_deterministic_algorithms",
"numpy.random.rand",
"torch.rand"
],
[
"torch.nn.utils.rnn.pad_sequence",
"torch.tensor"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
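The code column of the record that closes just above includes a `Seq2SeqDataset` class (an `autonmt` module, per its imports) whose `collate_fn` encodes source/target lines, enforces a token budget, and pads variable-length sequences with `torch.nn.utils.rnn.pad_sequence` and `torch.tensor` — the same calls listed in that record's `apis` column. Below is a minimal, self-contained sketch of that padding pattern only; the toy vocabulary, `encode` helper, and `PAD_ID = 0` are illustrative assumptions, not values taken from the record.

import torch
from torch.nn.utils.rnn import pad_sequence
from torch.utils.data import DataLoader, Dataset

torch.manual_seed(0)  # deterministic toy run, mirroring the seeding APIs listed in the record

# Hypothetical toy vocabulary and pad id, assumed for this sketch only.
PAD_ID = 0
VOCAB = {"<pad>": PAD_ID, "hello": 1, "world": 2, "bonjour": 3, "monde": 4}

def encode(line):
    # Map whitespace-split tokens to ids; unknown tokens fall back to PAD_ID for brevity.
    return [VOCAB.get(tok, PAD_ID) for tok in line.split()]

class ToyPairs(Dataset):
    """A tiny in-memory dataset of (source, target) sentence pairs."""
    def __init__(self, pairs):
        self.pairs = pairs
    def __len__(self):
        return len(self.pairs)
    def __getitem__(self, idx):
        return self.pairs[idx]

def collate(batch):
    # Encode each (src, trg) pair and pad every sequence to the longest one in the batch.
    xs = [torch.tensor(encode(src), dtype=torch.long) for src, _ in batch]
    ys = [torch.tensor(encode(trg), dtype=torch.long) for _, trg in batch]
    x_padded = pad_sequence(xs, batch_first=True, padding_value=PAD_ID)
    y_padded = pad_sequence(ys, batch_first=True, padding_value=PAD_ID)
    return x_padded, y_padded

pairs = [("hello world", "bonjour monde"), ("hello", "bonjour")]
loader = DataLoader(ToyPairs(pairs), batch_size=2, collate_fn=collate)
for x, y in loader:
    print(x.shape, y.shape)  # torch.Size([2, 2]) torch.Size([2, 2])

The dumped `collate_fn` differs in two ways: it pads with `batch_first=False` and then transposes (equivalent to `batch_first=True` for these 2-D batches), and it drops samples once an optional `max_tokens` budget is exceeded, which this sketch omits.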
hbaspecto-com/tmip-emat | [
"e1c936e88f36f9b3e4379d814ecb7a3c255e16b1"
] | [
"emat/model/core_model.py"
] | [
"# -*- coding: utf-8 -*-\n\"\"\" core_model.py - define coure model API\"\"\"\nimport os\nimport abc\nimport yaml\nimport pandas as pd\nimport numpy as np\nimport logging\nimport subprocess\nimport warnings\nfrom contextlib import contextmanager\nfrom typing import Union, Mapping\nfrom ..workbench.em_framework.model import AbstractModel as AbstractWorkbenchModel\nfrom ..workbench.em_framework.evaluators import BaseEvaluator\n\nfrom typing import Collection\nfrom typing import Iterable\n\nfrom ..database.database import Database\nfrom ..scope.scope import Scope\nfrom ..optimization.optimization_result import OptimizationResult\nfrom ..optimization import EpsilonProgress, ConvergenceMetrics, SolutionCount\nfrom ..util.evaluators import prepare_evaluator\nfrom ..exceptions import MissingArchivePathError, ReadOnlyDatabaseError, MissingIdWarning\n\nfrom .._pkg_constants import *\n\nfrom ..util.loggers import get_module_logger\n_logger = get_module_logger(__name__)\n\nclass AbstractCoreModel(abc.ABC, AbstractWorkbenchModel):\n \"\"\"\n An interface for using a model with EMAT.\n\n Individual models should be instantiated using derived\n subclasses of this abstract base class, and not using\n this class directly.\n\n Args:\n configuration (str or Mapping or None):\n The configuration for this core model. This can be given\n explicitly as a `dict`, or as a `str` which gives the\n filename of a YAML file that will be loaded. If there is\n no configuration, giving `None` is also acceptable.\n scope (Scope or str):\n The exploration scope, as a `Scope` object or as\n a `str` which gives the filename of a YAML file that will be\n loaded.\n safe (bool):\n Load the configuration YAML file in 'safe' mode.\n This can be disabled if the configuration requires\n custom Python types or is otherwise not compatible with\n safe mode. 
Loading configuration files with safe mode\n off is not secure and should not be done with files from\n untrusted sources.\n db (Database, optional):\n An optional Database to store experiments and results.\n name (str, default \"EMAT\"):\n A name for this model, given as an alphanumeric string.\n The name is required by workbench operations.\n metamodel_id (int, optional):\n An identifier for this model, if it is a meta-model.\n Defaults to 0 (i.e., not a meta-model).\n \"\"\"\n\n def __init__(self,\n configuration:Union[str,Mapping,None],\n scope,\n safe=True,\n db=None,\n name='EMAT',\n metamodel_id=0,\n ):\n if isinstance(configuration, str):\n with open(configuration, 'r') as stream:\n if safe:\n configuration = yaml.safe_load(stream)\n else:\n configuration = yaml.load(stream, Loader=yaml.FullLoader)\n if configuration is None:\n configuration = {}\n\n self.config = configuration if configuration is not None else {}\n self.db = db\n if isinstance(scope, Scope):\n self.scope = scope\n else:\n self.scope = Scope(scope)\n\n AbstractWorkbenchModel.__init__(self, name=name.replace('_','').replace(' ',''))\n self.uncertainties = self.scope._x_list\n self.levers = self.scope._l_list\n self.constants = self.scope._c_list\n self.outcomes = self.scope._m_list\n\n self.metamodel_id = metamodel_id\n\n def __getstate__(self):\n # don't pickle the db connection\n return dict((k, v) for (k, v) in self.__dict__.items() if (k != 'db'))\n\n @abc.abstractmethod\n def setup(self, params):\n \"\"\"\n Configure the core model with the experiment variable values.\n\n This method is the place where the core model set up takes place,\n including creating or modifying files as necessary to prepare\n for a core model run. When running experiments, this method\n is called once for each core model experiment, where each experiment\n is defined by a set of particular values for both the exogenous\n uncertainties and the policy levers. These values are passed to\n the experiment only here, and not in the `run` method itself.\n This facilitates debugging, as the `setup` method can potentially\n be used without the `run` method, allowing the user to manually\n inspect the prepared files and ensure they are correct before\n actually running a potentially expensive model.\n\n Each input exogenous uncertainty or policy lever can potentially\n be used to manipulate multiple different aspects of the underlying\n core model. For example, a policy lever that includes a number of\n discrete future network \"build\" options might trigger the replacement\n of multiple related network definition files. Or, a single uncertainty\n relating to the cost of fuel might scale both a parameter linked to\n the modeled per-mile cost of operating an automobile, as well as the\n modeled total cost of fuel used by transit services.\n\n At the end of the `setup` method, a core model experiment should be\n ready to run using the `run` method.\n\n Args:\n params (dict):\n experiment variables including both exogenous\n uncertainty and policy levers\n \n Raises:\n KeyError:\n if a defined experiment variable is not supported\n by the core model \n \"\"\" \n \n @abc.abstractmethod\n def get_experiment_archive_path(\n self,\n experiment_id=None,\n makedirs=False,\n parameters=None,\n run_id=None,\n ):\n \"\"\"\n Returns a file system location to store model run outputs.\n\n For core models with long model run times, it is recommended\n to store the complete model run results in an archive. 
This\n will facilitate adding additional performance measures to the\n scope at a later time.\n\n Both the scope name and experiment id can be used to create the \n folder path. \n \n Args:\n experiment_id (int):\n The experiment id, which is also the row id of the\n experiment in the database. If this is omitted, an\n experiment id is read or created using the parameters.\n makedirs (bool, default False):\n If this archive directory does not yet exist, create it.\n parameters (dict, optional):\n The parameters for this experiment, used to create or\n lookup an experiment id. The parameters are ignored\n if `experiment_id` is given.\n run_id (UUID, optional):\n The run_id of this model run. If not given but a\n run_id attribute is stored in this FilesCoreModel\n instance, that value is used.\n\n Returns:\n str: Experiment archive path (no trailing backslashes).\n \"\"\" \n \n @abc.abstractmethod\n def run(self):\n \"\"\"\n Run the core model.\n\n This method is the place where the core model run takes place.\n Note that this method takes no arguments; all the input\n exogenous uncertainties and policy levers are delivered to the\n core model in the `setup` method, which will be executed prior\n to calling this method. This facilitates debugging, as the `setup`\n method can potentially be used without the `run` method, allowing\n the user to manually inspect the prepared files and ensure they\n are correct before actually running a potentially expensive model.\n When running experiments, this method is called once for each core\n model experiment, after the `setup` method completes.\n\n If the core model requires some post-processing by `post_process`\n method defined in this API, then when this function terminates\n the model directory should be in a state that is ready to run the\n `post_process` command next.\n\n Raises:\n UserWarning: If model is not properly setup\n \"\"\" \n \n def post_process(self, params, measure_names, output_path=None):\n \"\"\"\n Runs post processors associated with particular performance measures.\n\n This method is the place to conduct automatic post-processing\n of core model run results, in particular any post-processing that\n is expensive or that will write new output files into the core model's\n output directory. The core model run should already have\n been completed using `setup` and `run`. If the relevant performance\n measures do not require any post-processing to create (i.e. they\n can all be read directly from output files created during the core\n model run itself) then this method does not need to be overloaded\n for a particular core model implementation.\n\n Args:\n params (dict):\n Dictionary of experiment variables, with keys as variable names\n and values as the experiment settings. Most post-processing\n scripts will not need to know the particular values of the\n inputs (exogenous uncertainties and policy levers), but this\n method receives the experiment input parameters as an argument\n in case one or more of these parameter values needs to be known\n in order to complete the post-processing.\n measure_names (List[str]):\n List of measures to be processed. Normally for the first pass\n of core model run experiments, post-processing will be completed\n for all performance measures. However, it is possible to use\n this argument to give only a subset of performance measures to\n post-process, which may be desirable if the post-processing\n of some performance measures is expensive. 
Additionally, this\n method may also be called on archived model results, allowing\n it to run to generate only a subset of (probably new) performance\n measures based on these archived runs.\n output_path (str, optional):\n Path to model outputs. If this is not given (typical for the\n initial run of core model experiments) then the local/default\n model directory is used. This argument is provided primarily\n to facilitate post-processing archived model runs to make new\n performance measures (i.e. measures that were not in-scope when\n the core model was actually run).\n\n Raises:\n KeyError:\n If post process is not available for specified measure\n \"\"\"\n \n @abc.abstractmethod\n def load_measures(\n self,\n measure_names: Collection[str]=None,\n *,\n rel_output_path=None,\n abs_output_path=None,\n ) -> dict:\n \"\"\"\n Import selected measures from the core model.\n \n This method is the place to put code that can actually reach into\n files in the core model's run results and extract performance\n measures. It is expected that it should not do any post-processing\n of results (i.e. it should read from but not write to the model\n outputs directory).\n\n Imports measures from active scenario\n \n Args:\n measure_names (Collection[str]):\n Collection of measures to be loaded.\n rel_output_path, abs_output_path (str, optional):\n Path to model output locations, either relative\n to the `model_path` directory (when a subclass\n is a type that has a model path) or as an absolute\n directory. If neither is given, the default\n value is equivalent to setting `rel_output_path` to\n 'Outputs'.\n\n Returns:\n dict of measure name and values from active scenario\n \n Raises:\n KeyError: If load_measures is not available for specified\n measure\n \"\"\" \n \n\n @abc.abstractmethod\n def archive(self, params, model_results_path, experiment_id:int=0):\n \"\"\"\n Copies model outputs to archive location.\n \n Args:\n params (dict): Dictionary of experiment variables\n model_results_path (str): archive path\n experiment_id (int, optional): The id number for this experiment.\n \n \"\"\"\n\n @property\n def allow_short_circuit(self):\n \"\"\"\n Bool: Allow model runs to be skipped if measures already appear in the database.\n \"\"\"\n return self.config.get('allow_short_circuit', True)\n\n @allow_short_circuit.setter\n def allow_short_circuit(self, value):\n self.config['allow_short_circuit'] = bool(value)\n\n @property\n def ignore_crash(self):\n \"\"\"\n Bool: Allow model runs to `post_process` and `archive` even after an apparent crash in `run`.\n \"\"\"\n return self.config.get('ignore_crash', False)\n\n @ignore_crash.setter\n def ignore_crash(self, value):\n self.config['ignore_crash'] = bool(value)\n\n @property\n def success_indicator(self):\n \"\"\"\n str: The name of a file that indicates the model has run successfully.\n\n The flag is the mere existance of a file with this name, not any particular\n file content. This file is deleted automatically when the model `run` is\n initiated, so that it can be recreated to indicate a success.\n \"\"\"\n return self.config.get('success_indicator', None)\n\n @success_indicator.setter\n def success_indicator(self, value):\n self.config['success_indicator'] = value\n\n @property\n def killed_indicator(self):\n \"\"\"\n str: The name of a file that indicates the model was killed due to an unrecoverable error.\n\n The flag is the mere existance of a file with this name, not any particular\n file content. 
This file is deleted automatically when the model `run` is\n initiated, so that it can be recreated to indicate an unrecoverable error.\n \"\"\"\n return self.config.get('killed_indicator', None)\n\n @killed_indicator.setter\n def killed_indicator(self, value):\n self.config['killed_indicator'] = value\n\n @property\n def local_directory(self):\n \"\"\"Path: The current local working directory for this model.\"\"\"\n return self.config.get(\"local_directory\", os.getcwd())\n\n @local_directory.setter\n def local_directory(self, value):\n self.config[\"local_directory\"] = value\n\n @property\n def resolved_model_path(self):\n \"\"\"\n Path: The resolved model path.\n\n For core models that don't rely on the file system, this\n is set to the current working directory and is generally\n irrelevant. Overload this property for models that do\n rely on the file system.\n \"\"\"\n return self.local_directory\n\n @property\n def is_db_locked(self):\n if self.db:\n return self.db.is_locked\n return False\n\n @contextmanager\n def lock_db(self, x=True):\n if x and self.db:\n with self.db.lock:\n yield\n else:\n yield\n\n def enter_run_model(self):\n \"\"\"A hook for actions at the very beginning of the run_model step.\"\"\"\n\n def exit_run_model(self):\n \"\"\"A hook for actions at the very end of the run_model step.\"\"\"\n\n def run_model(self, scenario, policy):\n \"\"\"\n Runs an experiment through core model.\n\n This method overloads the `run_model` method given in\n the EMA Workbench, and provides the correct execution\n of a core model within the workbench framework. This\n function assembles and executes the steps laid out in\n other methods of this class, adding some useful logic\n to optimize the process (e.g. optionally short-\n circuiting runs that already have results stored\n in the database).\n\n For each experiment, the core model is called to:\n\n 1. `setup` experiment variables, copy files\n as needed, and otherwise prepare to run the\n core model for a particular experiment,\n 2. `run` the experiment,\n 3. `post_process` the result if needed to\n produce all relevant performance measures,\n 4. `archive` model outputs from this experiment\n (optional), and\n 5. `load_measures` from the experiment and\n store those measures in the associated database.\n\n Note that this method does *not* return any outcomes.\n Outcomes are instead written into self.outcomes_output,\n and can be retrieved from there, or from the database at\n a later time.\n\n In general, it should not be necessary to overload this\n method in derived classes built for particular core models.\n Instead, write overloaded methods for `setup`, `run`,\n `post_process` , `archive`, and `load_measures`. 
Moreover,\n in typical usage a modeler will generally not want to rely\n on this method directly, but instead use `run_experiments`\n to automatically run multiple experiments with one command.\n\n Args:\n scenario (Scenario): A dict-like object that\n has key-value pairs for each uncertainty.\n policy (Policy): A dict-like object that\n has key-value pairs for each lever.\n\n Raises:\n UserWarning: If there are no experiments associated with\n this type.\n\n \"\"\"\n self.enter_run_model()\n try:\n self.comment_on_run = None\n\n _logger.debug(\"run_core_model read_experiment_parameters\")\n\n experiment_id = policy.get(\"_experiment_id_\", None)\n if experiment_id is None:\n experiment_id = scenario.get(\"_experiment_id_\", None)\n\n if not hasattr(self, 'db') and hasattr(self, '_db'):\n self.db = self._db\n\n # If running a core files model using the DistributedEvaluator,\n # the workers won't have access to the DB directly, so we'll only\n # run the short-circuit test and the ad-hoc write-to-database\n # section of this code if the `db` attribute is available.\n if hasattr(self, 'db') and self.db is not None:\n\n assert isinstance(self.db, Database)\n\n if experiment_id is None:\n with warnings.catch_warnings():\n if self.is_db_locked:\n warnings.simplefilter(\"ignore\", category=MissingIdWarning)\n experiment_id = self.db.read_experiment_id(self.scope.name, scenario, policy)\n\n if experiment_id and self.allow_short_circuit:\n # opportunity to short-circuit run by loading pre-computed values.\n precomputed = self.db.read_experiment_measures(\n self.scope,\n design_name=None,\n experiment_id=experiment_id,\n )\n if not precomputed.empty:\n self.outcomes_output = dict(precomputed.iloc[0])\n self.log(f\"short circuit experiment_id {experiment_id} / {getattr(self, 'uid', 'no uid')}\")\n return\n\n if experiment_id is None and not self.is_db_locked:\n experiment_id = self.db.write_experiment_parameters_1(\n self.scope.name, 'ad hoc', scenario, policy\n )\n self.log(f\"YES DATABASE experiment_id {experiment_id}\", level=logging.DEBUG)\n\n else:\n _logger.debug(f\"NO DATABASE experiment_id {experiment_id}\")\n\n xl = {}\n xl.update(scenario)\n xl.update(policy)\n\n m_names = self.scope.get_measure_names()\n\n _logger.debug(f\"run_core_model setup {experiment_id}\")\n self.setup(xl)\n\n if self.success_indicator is not None:\n success_indicator = os.path.join(self.resolved_model_path, self.success_indicator)\n if os.path.exists(success_indicator):\n os.remove(success_indicator)\n else:\n success_indicator = None\n\n if self.killed_indicator is not None:\n killed_indicator = os.path.join(self.resolved_model_path, self.killed_indicator)\n if os.path.exists(killed_indicator):\n os.remove(killed_indicator)\n else:\n killed_indicator = None\n\n _logger.debug(f\"run_core_model run {experiment_id}\")\n try:\n self.run()\n except subprocess.CalledProcessError as err:\n _logger.error(f\"ERROR in run_core_model run {experiment_id}: {str(err)}\")\n try:\n ex_archive_path = self.get_experiment_archive_path(experiment_id, makedirs=True)\n except MissingArchivePathError:\n pass\n else:\n if isinstance(err, subprocess.CalledProcessError):\n if err.stdout:\n with open(os.path.join(ex_archive_path, 'error.stdout.log'), 'ab') as stdout:\n stdout.write(err.stdout)\n if err.stderr:\n with open(os.path.join(ex_archive_path, 'error.stderr.log'), 'ab') as stderr:\n stderr.write(err.stderr)\n with open(os.path.join(ex_archive_path, 'error.log'), 'a') as errlog:\n errlog.write(str(err))\n measures_dictionary = {name: 
np.nan for name in m_names}\n # Assign to outcomes_output, for ema_workbench compatibility\n self.outcomes_output = measures_dictionary\n\n if not self.ignore_crash:\n # If 'ignore_crash' is False (the default), then abort now and skip\n # any post-processing and other archiving steps, which will\n # probably fail anyway.\n self.log(f\"run_core_model ABORT {experiment_id}\", level=logging.ERROR)\n self.comment_on_run = f\"FAILED EXPERIMENT {experiment_id}: {str(err)}\"\n return\n else:\n _logger.error(f\"run_core_model CONTINUE AFTER ERROR {experiment_id}\")\n\n try:\n if success_indicator and not os.path.exists(success_indicator):\n # The absence of the `success_indicator` file means that the model\n # did not actually terminate correctly, so we do not want to\n # post-process or store these results in the database.\n self.comment_on_run = f\"NON-SUCCESSFUL EXPERIMENT {experiment_id}: success_indicator missing\"\n raise ValueError(f\"success_indicator missing: {success_indicator}\")\n\n if killed_indicator and os.path.exists(killed_indicator):\n self.comment_on_run = f\"KILLED EXPERIMENT {experiment_id}: killed_indicator present\"\n raise ValueError(f\"killed_indicator present: {killed_indicator}\")\n\n _logger.debug(f\"run_core_model post_process {experiment_id}\")\n self.post_process(xl, m_names)\n\n _logger.debug(f\"run_core_model wrap up {experiment_id}\")\n measures_dictionary = self.load_measures(m_names)\n m_df = pd.DataFrame(measures_dictionary, index=[experiment_id])\n\n except KeyboardInterrupt:\n _logger.exception(\n f\"KeyboardInterrupt in post_process, load_measures or outcome processing {experiment_id}\")\n raise\n except Exception as err:\n _logger.exception(f\"error in post_process, load_measures or outcome processing {experiment_id}\")\n _logger.error(f\"proceeding directly to archive attempt {experiment_id}\")\n if not self.comment_on_run:\n self.comment_on_run = f\"PROBLEM IN EXPERIMENT {experiment_id}: {str(err)}\"\n else:\n # only write to database if there was no error in post_process, load_measures or outcome processing\n if experiment_id and hasattr(self, 'db') and self.db is not None and not self.db.readonly:\n _logger.debug(f\"run_core_model write db {experiment_id}\")\n run_id = getattr(self, 'run_id', None)\n if run_id is None:\n run_id, _ = self.db.new_run_id(\n scope_name=self.scope.name,\n experiment_id=experiment_id,\n source=self.metamodel_id or 0,\n )\n try:\n self.db.write_experiment_measures(self.scope.name, self.metamodel_id, m_df, [run_id])\n except ReadOnlyDatabaseError:\n warnings.warn(\"database is read-only, not storing model outcomes\")\n except Exception as err:\n _logger.exception(f\"error in writing results to database: {str(err)}\")\n else:\n _logger.debug(f\"run_core_model OK write db {experiment_id} {self.metamodel_id} {run_id}\\n{m_df}\")\n else:\n _logger.debug(f\"run_core_model no db to write to {experiment_id}\")\n\n if experiment_id:\n try:\n ex_archive_path = self.get_experiment_archive_path(experiment_id)\n except MissingArchivePathError:\n pass\n else:\n _logger.debug(f\"run_core_model archive {experiment_id}\")\n self.archive(xl, ex_archive_path, experiment_id)\n else:\n _logger.debug(f\"run_core_model no archive because no experiment_id\")\n finally:\n self.exit_run_model()\n\n def read_experiments(\n self,\n design_name,\n db=None,\n only_pending=False,\n only_complete=False,\n only_with_measures=False,\n ):\n \"\"\"\n Reads results from a design of experiments from the database.\n\n Args:\n design_name (str): The name of the 
design to load.\n db (Database, optional): The Database from which to read experiments.\n If no db is given, the default `db` for this model is used.\n only_pending (bool, default False): If True, only pending\n experiments (which have no performance measure results\n stored in the database) are returned.\n only_complete (bool, default False): If True, only complete\n experiments (which have no performance measure\n results missing in the database) are returned.\n only_with_measures (bool, default False): If True, only\n experiments with at least one stored performance measure\n are returned.\n\n Returns:\n pandas.DataFrame:\n A DataFrame that contains all uncertainties, levers, and measures\n for the experiments.\n\n Raises:\n ValueError:\n If there is no Database connection `db` set.\n \"\"\"\n db = db if db is not None else self.db\n if db is None:\n raise ValueError('no database to read from')\n\n return self.ensure_dtypes(\n db.read_experiment_all(\n self.scope.name,\n design_name,\n only_pending=only_pending,\n only_complete=only_complete,\n only_with_measures=only_with_measures,\n )\n )\n\n def read_experiment_parameters(\n self,\n design_name=None,\n db=None,\n only_pending=False,\n *,\n experiment_ids=None,\n ):\n \"\"\"\n Reads uncertainties and levers from a design of experiments from the database.\n\n Args:\n design_name (str, optional): If given, only experiments\n associated with both the scope and the named design\n are returned, otherwise all experiments associated\n with the scope are returned.\n db (Database, optional): The Database from which to read experiments.\n If no db is given, the default `db` for this model is used.\n only_pending (bool, default False): If True, only pending\n experiments (which have no performance measure results\n stored in the database) are returned.\n experiment_ids (Collection, optional):\n A collection of experiment id's to load. 
If given,\n both `design_name` and `only_pending` are ignored.\n\n Returns:\n pandas.DataFrame:\n A DataFrame that contains all uncertainties, levers, and measures\n for the experiments.\n\n Raises:\n ValueError:\n If `db` is not given and there is no default\n Database connection set.\n \"\"\"\n db = db if db is not None else self.db\n\n if db is None:\n raise ValueError('no database to read from')\n\n return self.ensure_dtypes(\n db.read_experiment_parameters(\n self.scope.name,\n design_name,\n only_pending=only_pending,\n experiment_ids=experiment_ids,\n )\n )\n\n def read_experiment_measures(\n self,\n design_name,\n experiment_id=None,\n db=None,\n ):\n \"\"\"\n Reads performance measures from a design of experiments from the database.\n\n Args:\n design_name (str): The name of the design to load.\n experiment_id (int, optional): The id of the experiment to load.\n db (Database, optional): The Database from which to read experiment(s).\n If no db is given, the default `db` for this model is used.\n\n Returns:\n pandas.DataFrame:\n A DataFrame that contains all uncertainties, levers, and measures\n for the experiments.\n\n Raises:\n ValueError:\n If `db` is not given and there is no default\n Database connection set.\n \"\"\"\n db = db if db is not None else self.db\n\n if db is None:\n raise ValueError('no database to read from')\n\n measures = self.ensure_dtypes(\n db.read_experiment_measures(\n self.scope.name,\n design_name,\n experiment_id,\n source=self.metamodel_id,\n )\n )\n \n # only return measures within scope\n measures = measures[[i for i in self.scope.get_measure_names()\n if i in measures.columns]]\n \n return measures\n \n\n def ensure_dtypes(self, df:pd.DataFrame):\n \"\"\"\n Convert columns of dataframe to correct dtype as needed.\n\n Args:\n df (pandas.DataFrame): A dataframe with column names\n that are uncertainties, levers, or measures.\n\n Returns:\n pandas.DataFrame:\n The same data as input, but with dtypes as appropriate.\n \"\"\"\n return self.scope.ensure_dtypes(df)\n\n def design_experiments(self, *args, **kwargs):\n \"\"\"\n Create a design of experiments based on this model.\n\n Args:\n n_samples_per_factor (int, default 10): The number of samples in the\n design per random factor.\n n_samples (int or tuple, optional): The total number of samples in the\n design. If `jointly` is False, this is the number of samples in each\n of the uncertainties and the levers, the total number of samples will\n be the square of this value. Give a 2-tuple to set values for\n uncertainties and levers respectively, to set them independently.\n If this argument is given, it overrides `n_samples_per_factor`.\n random_seed (int or None, default 1234): A random seed for reproducibility.\n db (Database, optional): If provided, this design will be stored in the\n database indicated. If not provided, the `db` for this model will\n be used, if one is set.\n design_name (str, optional): A name for this design, to identify it in the\n database. If not given, a unique name will be generated based on the\n selected sampler.\n sampler (str or AbstractSampler, default 'lhs'): The sampler to use for this\n design. 
Available pre-defined samplers include:\n - 'lhs': Latin Hypercube sampling\n - 'ulhs': Uniform Latin Hypercube sampling, which ignores defined\n distribution shapes from the scope and samples everything\n as if it was from a uniform distribution\n - 'mc': Monte carlo sampling\n - 'uni': Univariate sensitivity testing, whereby experiments are\n generated setting each parameter individually to minimum and\n maximum values (for numeric dtypes) or all possible values\n (for boolean and categorical dtypes). Note that designs for\n univariate sensitivity testing are deterministic and the number\n of samples given is ignored.\n sample_from ('all', 'uncertainties', or 'levers'): Which scope components\n from which to sample. Components not sampled are set at their default\n values in the design.\n jointly (bool, default True): Whether to sample jointly all uncertainties\n and levers in a single design, or, if False, to generate separate samples\n for levers and uncertainties, and then combine the two in a full-factorial\n manner. This argument has no effect unless `sample_from` is 'all'.\n Note that setting `jointly` to False may produce a very large design,\n as the total number of experiments will be the product of the number of\n experiments for the levers and the number of experiments for the\n uncertainties, which are set separately (i.e. if `n_samples` is given,\n the total number of experiments is the square of that value).\n\n Returns:\n pandas.DataFrame: The resulting design.\n \"\"\"\n if 'scope' in kwargs:\n kwargs.pop('scope')\n\n if 'db' not in kwargs:\n kwargs['db'] = self.db\n\n from ..experiment import experimental_design\n return experimental_design.design_experiments(self.scope, *args, **kwargs)\n\n def async_experiments(\n self,\n design:pd.DataFrame=None,\n db=None,\n *,\n design_name=None,\n evaluator=None,\n max_n_workers=None,\n stagger_start=None,\n batch_size=None,\n ):\n \"\"\"\n Asynchronously runs a design of combined experiments using this model.\n\n A combined experiment includes a complete set of input values for\n all exogenous uncertainties (a Scenario) and all policy levers\n (a Policy). Unlike the perform_experiments function in the EMA Workbench,\n this method pairs each Scenario and Policy in sequence, instead\n of running all possible combinations of Scenario and Policy.\n This change ensures compatibility with the EMAT database modules, which\n preserve the complete set of input information (both uncertainties\n and levers) for each experiment. To conduct a full cross-factorial set\n of experiments similar to the default settings for EMA Workbench,\n use a factorial design, by setting the `jointly` argument for the\n `design_experiments` to False, or by designing experiments outside\n of EMAT with your own approach.\n\n Args:\n design (pandas.DataFrame, optional): experiment definitions\n given as a DataFrame, where each exogenous uncertainties and\n policy levers is given as a column, and each row is an experiment.\n db (Database, required): The database to use for loading and saving experiments.\n If none is given, the default database for this model is used.\n If there is no default db, and none is given here,\n these experiments will be aborted.\n design_name (str, optional): The name of a design of experiments to\n load from the database. This design is only used if\n `design` is None.\n evaluator (emat.workbench.Evaluator, optional): Optionally give an\n evaluator instance. If not given, a default DistributedEvaluator\n will be instantiated. 
Passing any other kind of evaluator will\n currently cause an error, although in the future other async\n compatible evaluators may be provided.\n max_n_workers (int, optional):\n The maximum number of workers that will be created for a default\n dask.distributed LocalCluster. If the number of cores available is\n smaller than this number, fewer workers will be spawned. This value\n is only used if a default LocalCluster has not yet been created.\n stagger_start (int, optional):\n If provided, wait this number of seconds between initial dispatch\n of experiments to the evaluator. For models that do a lot of\n file copying up front, this can prevent over-saturating the file\n storage system.\n batch_size (int, optional):\n For fast-running core models, the overhead from multi-processing\n can represent a big chunk of overall runtime. Grouping experiments\n into batches that are sent to workers as a group can mitigate this.\n Setting batch_size to 1 will process every experiment separately.\n If no batch size is given, a guess is made as to an efficient\n batch_size based on the number of experiments and the number of\n workers.\n\n Raises:\n ValueError:\n If there are no experiments defined. This includes\n the situation where `design` is given but no database is\n available.\n\n \"\"\"\n # catch user gives only a design, not experiment_parameters\n if isinstance(design, str) and design_name is None:\n design_name, design = design, None\n\n if design_name is None and design is None:\n raise ValueError(f\"must give design_name or design\")\n\n if db is None:\n db = self.db\n\n if design_name is not None and design is None:\n if not db:\n raise ValueError(f'cannot load design \"{design_name}\", there is no db')\n design = db.read_experiment_parameters(self.scope.name, design_name)\n\n if design.empty:\n raise ValueError(f\"no experiments available\")\n\n from .asynchronous import asynchronous_experiments\n\n if self.db is None:\n if db is not None:\n self.db = db\n else:\n raise ValueError(\"cannot run async_experiments without a `db` defined\")\n\n return asynchronous_experiments(\n self,\n design,\n evaluator=evaluator,\n max_n_workers=max_n_workers,\n stagger_start=stagger_start,\n batch_size=batch_size,\n )\n\n\n def run_experiments(\n self,\n design=None,\n evaluator=None,\n *,\n design_name=None,\n db=None,\n allow_short_circuit=None,\n ):\n \"\"\"\n Runs a design of combined experiments using this model.\n\n A combined experiment includes a complete set of input values for\n all exogenous uncertainties (a Scenario) and all policy levers\n (a Policy). Unlike the perform_experiments function in the EMA Workbench,\n this method pairs each Scenario and Policy in sequence, instead\n of running all possible combinations of Scenario and Policy.\n This change ensures compatibility with the EMAT database modules, which\n preserve the complete set of input information (both uncertainties\n and levers) for each experiment. To conduct a full cross-factorial set\n of experiments similar to the default settings for EMA Workbench,\n use a factorial design, by setting the `jointly` argument for the\n `design_experiments` to False, or by designing experiments outside\n of EMAT with your own approach.\n\n Args:\n design (pandas.DataFrame, optional): experiment definitions\n given as a DataFrame, where each exogenous uncertainty and\n policy levers is given as a column, and each row is an experiment.\n evaluator (emat.workbench.Evaluator, optional): Optionally give an\n evaluator instance. 
If not given, a default SequentialEvaluator\n will be instantiated.\n design_name (str, optional): The name of a design of experiments to\n load from the database. This design is only used if\n `design` is None.\n db (Database, optional): The database to use for loading and saving experiments.\n If none is given, the default database for this model is used.\n If there is no default db, and none is given here,\n the results are not stored in a database. Set to False to explicitly\n not use the default database, even if it exists.\n\n Returns:\n pandas.DataFrame:\n A DataFrame that contains all uncertainties, levers, and measures\n for the experiments.\n\n Raises:\n ValueError:\n If there are no experiments defined. This includes\n the situation where `design` is given but no database is\n available.\n\n \"\"\"\n\n from ..workbench import Scenario, Policy, perform_experiments\n\n # catch user gives only a design, not experiment_parameters\n if isinstance(design, str) and design_name is None:\n design_name, design = design, None\n\n if design_name is None and design is None:\n raise ValueError(f\"must give design_name or design\")\n\n if db is None:\n db = self.db\n\n if design_name is not None and design is None:\n if not db:\n raise ValueError(f'cannot load design \"{design_name}\", there is no db')\n design = db.read_experiment_parameters(self.scope.name, design_name)\n\n if design.empty:\n raise ValueError(f\"no experiments available\")\n\n # catch metamodels here and run them as a batch, which is much faster\n function = getattr(self, 'function', None)\n from .meta_model import MetaModel\n if isinstance(function, MetaModel):\n outcomes = function.predict(design)\n result = self.ensure_dtypes(pd.concat([\n design,\n outcomes\n ], axis=1, sort=False))\n from ..experiment.experimental_design import ExperimentalDesign\n result = ExperimentalDesign(result)\n result.scope = self.scope\n result.design_name = getattr(design, 'design_name', None)\n result.sampler_name = getattr(design, 'sampler_name', None)\n if db:\n metamodel_id = self.metamodel_id\n if metamodel_id is None:\n metamodel_id = db.get_new_metamodel_id(self.scope.name)\n db.write_experiment_measures(self.scope.name, metamodel_id, outcomes)\n return result\n\n scenarios = []\n scenario_cols = self.scope._get_uncertainty_and_constant_names()\n design_scenarios = design[scenario_cols]\n for rownum in range(len(design)):\n if design.index.name == 'experiment':\n s = Scenario(\n _experiment_id_=design.index[rownum],\n **design_scenarios.iloc[rownum],\n )\n else:\n s = Scenario(\n _experiment_id_=False,\n **design_scenarios.iloc[rownum],\n )\n scenarios.append(s)\n\n lever_names = self.scope.get_lever_names()\n policies = [\n Policy(f\"Incognito{n}\", **dict(zip(lever_names, i)))\n for n,i in enumerate(design[lever_names].itertuples(index=False, name='ExperimentL'))\n ]\n\n evaluator = prepare_evaluator(evaluator, self)\n\n if getattr(evaluator, 'asynchronous', False):\n # When the evaluator is in asynchronous mode, the core model runs will be\n # dispatched here but the function will not block waiting on the result, and\n # instead depend on the model execution process to write the results into\n # the database when complete.\n with evaluator:\n if allow_short_circuit is not None:\n _stored_allow_short_circuit = self.allow_short_circuit\n self.allow_short_circuit = allow_short_circuit\n else:\n _stored_allow_short_circuit = None\n try:\n perform_experiments(\n self,\n scenarios=scenarios,\n policies=policies,\n zip_over={'scenarios', 
'policies'},\n evaluator=evaluator,\n )\n finally:\n if _stored_allow_short_circuit is not None:\n self.allow_short_circuit = _stored_allow_short_circuit\n return\n\n else:\n with evaluator:\n if db is False:\n _stored_db = self.db\n self.db = None\n else:\n _stored_db = None\n if allow_short_circuit is not None:\n _stored_allow_short_circuit = self.allow_short_circuit\n self.allow_short_circuit = allow_short_circuit\n else:\n _stored_allow_short_circuit = None\n try:\n experiments, outcomes = perform_experiments(\n self,\n scenarios=scenarios,\n policies=policies,\n zip_over={'scenarios', 'policies'},\n evaluator=evaluator,\n )\n finally:\n if _stored_db:\n self.db = _stored_db\n if _stored_allow_short_circuit is not None:\n self.allow_short_circuit = _stored_allow_short_circuit\n experiments.index = design.index\n\n outcomes = pd.DataFrame.from_dict(outcomes)\n outcomes.index = design.index\n\n # if db:\n # metamodel_id = self.metamodel_id\n # if metamodel_id is None:\n # metamodel_id = 0\n # db.write_experiment_measures(self.scope.name, metamodel_id, outcomes)\n\n # Put constants back into experiments\n experiments_ = experiments.drop(\n columns=['scenario', 'policy', 'model', '_experiment_id_'],\n errors='ignore',\n )\n for i in self.scope.get_constants():\n experiments_[i.name] = i.value\n\n result = self.ensure_dtypes(pd.concat([\n experiments_,\n outcomes\n ], axis=1, sort=False))\n from ..experiment.experimental_design import ExperimentalDesign\n result = ExperimentalDesign(result)\n result.scope = self.scope\n result.design_name = getattr(design, 'design_name', None)\n result.sampler_name = getattr(design, 'sampler_name', None)\n return result\n\n def run_reference_experiment(\n self,\n evaluator=None,\n *,\n db=None,\n ):\n \"\"\"\n Runs a reference experiment using this model.\n\n This single experiment includes a complete set of input values for\n all exogenous uncertainties (a Scenario) and all policy levers\n (a Policy). Each is set to the default value indicated by the scope.\n\n Args:\n evaluator (emat.workbench.Evaluator, optional): Optionally give an\n evaluator instance. If not given, a default SequentialEvaluator\n will be instantiated.\n db (Database, optional): The database to use for loading and saving experiments.\n If none is given, the default database for this model is used.\n If there is no default db, and none is given here,\n the results are not stored in a database. 
Set to False to explicitly\n not use the default database, even if it exists.\n\n Returns:\n pandas.DataFrame:\n A DataFrame that contains all uncertainties, levers, and measures\n for the experiments.\n\n \"\"\"\n if db is None:\n db = self.db\n ref = self.design_experiments(sampler='ref', db=db)\n return self.run_experiments(ref, evaluator=evaluator, db=db)\n\n def create_metamodel_from_data(\n self,\n experiment_inputs:pd.DataFrame,\n experiment_outputs:pd.DataFrame,\n output_transforms: dict = None,\n metamodel_id:int=None,\n include_measures=None,\n exclude_measures=None,\n db = None,\n random_state=None,\n experiment_stratification=None,\n suppress_converge_warnings=False,\n regressor = None,\n find_best_metamodeltype=False,\n ):\n \"\"\"\n Create a MetaModel from a set of input and output observations.\n\n Args:\n experiment_inputs (pandas.DataFrame): This dataframe\n should contain all of the experimental inputs, including\n values for each uncertainty, level, and constant.\n experiment_outputs (pandas.DataFrame): This dataframe\n should contain all of the experimental outputs, including\n a column for each performance measure. The index\n for the outputs should match the index for the\n `experiment_inputs`, so that the I-O matches row-by-row.\n output_transforms (dict): Deprecated. Specify the\n output transforms directly in the scope instead.\n metamodel_id (int, optional): An identifier for this meta-model.\n If not given, a unique id number will be created randomly.\n include_measures (Collection[str], optional): If provided, only\n output performance measures with names in this set will be included.\n exclude_measures (Collection[str], optional): If provided, only\n output performance measures with names not in this set will be included.\n db (Database, optional): The database to use for loading and saving metamodels.\n If none is given, the default database for this model is used.\n If there is no default db, and none is given here,\n the metamodel is not stored in a database.\n random_state (int, optional): A random state to use in the metamodel\n regression fitting.\n experiment_stratification (pandas.Series, optional):\n A stratification of experiments, used in cross-validation.\n suppress_converge_warnings (bool, default False):\n Suppress convergence warnings during metamodel fitting.\n regressor (Estimator, optional): A scikit-learn estimator implementing a\n multi-target regression. If not given, a detrended simple Gaussian\n process regression is used.\n find_best_metamodeltype (int, default 0):\n Run a search to find the best metamodeltype for each\n performance measure, repeating each cross-validation\n step this many times. For more stable results, choose\n 3 or more, although larger numbers will be slow. If\n domain knowledge about the normal expected range and\n behavior of each performance measure is available,\n it is better to give the metamodeltype explicitly in\n the Scope.\n\n Returns:\n MetaModel:\n a callable object that, when called as if a\n function, accepts keyword arguments as inputs and\n returns a dictionary of (measure name: value) pairs.\n \"\"\"\n from .meta_model import create_metamodel\n\n # The outputs index typically has a 2-level multi-index,\n # giving both experiment_id and run_id. 
But for this\n # analysis, we will strip out the run_id.\n if experiment_outputs.index.nlevels == 2:\n experiment_outputs.index = experiment_outputs.index.get_level_values(0)\n\n return create_metamodel(\n scope=self.scope,\n experiments=pd.concat([experiment_inputs, experiment_outputs], axis=1),\n metamodel_id=metamodel_id,\n db=db,\n include_measures=include_measures,\n exclude_measures=exclude_measures,\n random_state=random_state,\n experiment_stratification=experiment_stratification,\n suppress_converge_warnings=suppress_converge_warnings,\n regressor=regressor,\n name=None,\n find_best_metamodeltype=find_best_metamodeltype,\n )\n\n def create_metamodel_from_design(\n self,\n design_name:str,\n metamodel_id:int = None,\n include_measures=None,\n exclude_measures=None,\n db=None,\n random_state=None,\n suppress_converge_warnings=False,\n regressor=None,\n find_best_metamodeltype=False,\n ):\n \"\"\"\n Create a MetaModel from a set of input and output observations.\n\n Args:\n design_name (str): The name of the design to use.\n metamodel_id (int, optional): An identifier for this meta-model.\n If not given, a unique id number will be created randomly.\n include_measures (Collection[str], optional): If provided, only\n output performance measures with names in this set will be included.\n exclude_measures (Collection[str], optional): If provided, only\n output performance measures with names not in this set will be included.\n random_state (int, optional): A random state to use in the metamodel\n regression fitting.\n suppress_converge_warnings (bool, default False):\n Suppress convergence warnings during metamodel fitting.\n regressor (Estimator, optional): A scikit-learn estimator implementing a\n multi-target regression. If not given, a detrended simple Gaussian\n process regression is used.\n find_best_metamodeltype (int, default 0):\n Run a search to find the best metamodeltype for each\n performance measure, repeating each cross-validation\n step this many times. For more stable results, choose\n 3 or more, although larger numbers will be slow. 
If\n domain knowledge about the normal expected range and\n behavior of each performance measure is available,\n it is better to give the metamodeltype explicitly in\n the Scope.\n\n Returns:\n MetaModel:\n a callable object that, when called as if a\n function, accepts keyword arguments as inputs and\n returns a dictionary of (measure name: value) pairs.\n\n Raises:\n ValueError: If the named design still has pending experiments.\n \"\"\"\n db = db if db is not None else self.db\n\n if db is None:\n raise ValueError(\"db is None\")\n\n check_df = db.read_experiment_parameters(self.scope.name, design_name, only_pending=True)\n if not check_df.empty:\n from ..exceptions import PendingExperimentsError\n raise PendingExperimentsError(f'design \"{design_name}\" has pending experiments')\n\n experiment_inputs = db.read_experiment_parameters(self.scope.name, design_name)\n experiment_outputs = db.read_experiment_measures(self.scope.name, design_name)\n\n transforms = {\n i.name: i.metamodeltype\n for i in self.scope.get_measures()\n }\n\n return self.create_metamodel_from_data(\n experiment_inputs,\n experiment_outputs,\n transforms,\n metamodel_id=metamodel_id,\n include_measures=include_measures,\n exclude_measures=exclude_measures,\n db=db,\n random_state=random_state,\n suppress_converge_warnings=suppress_converge_warnings,\n regressor=regressor,\n find_best_metamodeltype=find_best_metamodeltype,\n )\n\n def create_metamodel_from_designs(\n self,\n design_names:str,\n metamodel_id:int = None,\n include_measures=None,\n exclude_measures=None,\n db=None,\n random_state=None,\n suppress_converge_warnings=False,\n ):\n \"\"\"\n Create a MetaModel from multiple sets of input and output observations.\n\n Args:\n design_names (Collection[str]): The names of the designs to use.\n metamodel_id (int, optional): An identifier for this meta-model.\n If not given, a unique id number will be created randomly.\n include_measures (Collection[str], optional): If provided, only\n output performance measures with names in this set will be included.\n exclude_measures (Collection[str], optional): If provided, only\n output performance measures with names not in this set will be included.\n random_state (int, optional): A random state to use in the metamodel\n regression fitting.\n suppress_converge_warnings (bool, default False):\n Suppress convergence warnings during metamodel fitting.\n\n Returns:\n MetaModel:\n a callable object that, when called as if a\n function, accepts keyword arguments as inputs and\n returns a dictionary of (measure name: value) pairs.\n\n Raises:\n ValueError: If the named design still has pending experiments.\n \"\"\"\n db = db if db is not None else self.db\n\n if db is not None:\n for design_name in design_names:\n check_df = db.read_experiment_parameters(self.scope.name, design_name, only_pending=True)\n if not check_df.empty:\n from ..exceptions import PendingExperimentsError\n raise PendingExperimentsError(f'design \"{design_name}\" has pending experiments')\n\n experiment_inputs = []\n for design_name in design_names:\n f = db.read_experiment_parameters(self.scope.name, design_name)\n f['_design_'] = design_name\n experiment_inputs.append(f)\n experiment_inputs = pd.concat(experiment_inputs)\n\n experiment_outputs = []\n for design_name in design_names:\n f = db.read_experiment_measures(self.scope.name, design_name)\n # f['_design_'] = design_name\n experiment_outputs.append(f)\n experiment_outputs = pd.concat(experiment_outputs)\n\n transforms = {\n i.name: i.metamodeltype\n 
for i in self.scope.get_measures()\n }\n\n return self.create_metamodel_from_data(\n experiment_inputs.drop('_design_', axis=1),\n experiment_outputs,\n transforms,\n metamodel_id=metamodel_id,\n include_measures=include_measures,\n exclude_measures=exclude_measures,\n db=db,\n random_state=random_state,\n experiment_stratification=experiment_inputs['_design_'],\n suppress_converge_warnings=suppress_converge_warnings,\n )\n\n\n def feature_scores(\n self,\n design,\n return_type='styled',\n random_state=None,\n cmap='viridis',\n measures=None,\n shortnames=None,\n ):\n \"\"\"\n Calculate feature scores based on a design of experiments.\n\n This method is provided as a convenient pass-through to the\n `feature_scores` function in the `analysis` sub-package, using\n the scope and database attached to this model.\n\n Args:\n design (str or pandas.DataFrame): The name of the design\n of experiments to use for feature scoring, or a single\n pandas.DataFrame containing the experimental design and\n results.\n return_type ({'styled', 'figure', 'dataframe'}):\n The format to return, either a heatmap figure as an SVG\n render in and xmle.Elem, or a plain pandas.DataFrame,\n or a styled dataframe.\n random_state (int or numpy.RandomState, optional):\n Random state to use.\n cmap (string or colormap, default 'viridis'): matplotlib\n colormap to use for rendering.\n measures (Collection, optional): The performance measures\n on which feature scores are to be generated. By default,\n all measures are included.\n\n Returns:\n xmle.Elem or pandas.DataFrame:\n Returns a rendered SVG as xml, or a DataFrame,\n depending on the `return_type` argument.\n\n This function internally uses feature_scoring from the EMA Workbench, which in turn\n scores features using the \"extra trees\" regression approach.\n \"\"\"\n from ..analysis.feature_scoring import feature_scores\n if shortnames is True:\n shortnames = self.scope\n return feature_scores(\n self.scope,\n design=design,\n return_type=return_type,\n db=self.db,\n random_state=random_state,\n cmap=cmap,\n measures=measures,\n shortnames=shortnames,\n )\n\n def get_feature_scores(self, *args, **kwargs):\n \"\"\"\n Deprecated, use `Model.feature_scores`.\n \"\"\"\n # for compatability with prior versions of TMIP-EMAT\n return self.feature_scores(*args, **kwargs)\n\n def _common_optimization_setup(\n self,\n epsilons=0.1,\n convergence='default',\n display_convergence=True,\n evaluator=None,\n ):\n import numbers\n if isinstance(epsilons, numbers.Number):\n epsilons = [epsilons]*len(self.outcomes)\n\n if convergence == 'default':\n convergence = ConvergenceMetrics(\n EpsilonProgress(),\n SolutionCount(),\n )\n\n if display_convergence and isinstance(convergence, ConvergenceMetrics):\n from IPython.display import display\n display(convergence)\n\n evaluator = prepare_evaluator(evaluator, self)\n\n return epsilons, convergence, display_convergence, evaluator\n\n def optimize(\n self,\n searchover='levers',\n evaluator=None,\n nfe=10000,\n convergence='default',\n display_convergence=True,\n convergence_freq=100,\n constraints=None,\n reference=None,\n reverse_targets=False,\n algorithm=None,\n epsilons='auto',\n min_epsilon=0.1,\n cache_dir=None,\n cache_file=None,\n check_extremes=False,\n **kwargs,\n ):\n \"\"\"\n Perform multi-objective optimization over levers or uncertainties.\n\n The targets for the multi-objective optimization (i.e. 
whether each\n individual performance measures is to be maximized or minimized) are\n read from the model's scope.\n\n Args:\n searchover ({'levers', 'uncertainties'}):\n Which group of inputs to search over. The other group\n will be set at their default values, unless other values\n are provided in the `reference` argument.\n evaluator (Evaluator, optional): The evaluator to use to\n run the model. If not given, a SequentialEvaluator will\n be created.\n nfe (int, default 10_000): Number of function evaluations.\n This generally needs to be fairly large to achieve stable\n results in all but the most trivial applications.\n convergence ('default', None, or emat.optimization.ConvergenceMetrics):\n A convergence display during optimization. The default\n value is to report the epsilon-progress (the number of\n solutions that ever enter the candidate pool of non-dominated\n solutions) and the number of solutions remaining in that candidate\n pool. Pass `None` explicitly to disable convergence tracking.\n display_convergence (bool, default True): Whether to automatically\n display figures that dynamically track convergence. Set to\n `False` if you are not using this method within a Jupyter\n interactive environment.\n convergence_freq (int, default 100): How frequently to update the\n convergence measures. There is some computational overhead to\n these convergence updates, so setting a value too small may\n noticeably slow down the process.\n constraints (Collection[Constraint], optional):\n Solutions will be constrained to only include values that\n satisfy these constraints. The constraints can be based on\n the search parameters (levers or uncertainties, depending on the\n value given for `searchover`), or performance measures, or\n some combination thereof.\n reference (Mapping): A set of values for the non-active inputs,\n i.e. the uncertainties if `searchover` is 'levers', or the\n levers if `searchover` is 'uncertainties'. Any values not\n set here revert to the default values identified in the scope.\n reverse_targets (bool, default False): Whether to reverse the\n optimization targets given in the scope (i.e., changing\n minimize to maximize, or vice versa). This will result in\n the optimization searching for the worst outcomes, instead of\n the best outcomes.\n algorithm (platypus.Algorithm, optional): Select an\n algorithm for multi-objective optimization. The default\n algorithm is EpsNSGAII. See `platypus` documentation for details.\n epsilons (float or array-like): Used to limit the number of\n distinct solutions generated. Set to a larger value to get\n fewer distinct solutions.\n cache_dir (path-like, optional): A directory in which to\n cache results. Most of the arguments will be hashed\n to develop a unique filename for these results, making this\n generally safer than `cache_file`.\n cache_file (path-like, optional): A file into which to\n cache results. 
If this file exists, the contents of the\n file will be loaded and all other arguments are ignored.\n Use with great caution.\n kwargs: Any additional arguments will be passed on to the\n platypus algorithm.\n\n Returns:\n emat.OptimizationResult:\n The set of non-dominated solutions found.\n When `convergence` is given, the convergence measures are\n included, as a pandas.DataFrame in the `convergence` attribute.\n \"\"\"\n from ..util.disk_cache import load_cache_if_available, save_cache\n if isinstance(algorithm, str) or algorithm is None:\n alg = algorithm\n else:\n alg = algorithm.__name__\n\n if reference is not None:\n from ..workbench import Policy, Scenario\n if searchover == 'levers' and not isinstance(reference, Scenario):\n reference = Scenario(\"ReferenceScenario\", **reference)\n elif searchover == 'uncertainties' and not isinstance(reference, Policy):\n reference = Policy(\"ReferencePolicy\", **reference)\n else:\n if searchover == 'levers':\n reference = self.scope.default_scenario()\n elif searchover == 'uncertainties':\n reference = self.scope.default_policy()\n\n x, cache_file = load_cache_if_available(\n cache_file=cache_file,\n cache_dir=cache_dir,\n searchover=searchover,\n nfe=nfe,\n convergence=convergence,\n convergence_freq=convergence_freq,\n constraints=constraints,\n reference=reference,\n reverse_targets=reverse_targets,\n algorithm=alg,\n epsilons=epsilons,\n )\n\n if x is None:\n epsilons, convergence, display_convergence, evaluator = self._common_optimization_setup(\n epsilons, convergence, display_convergence, evaluator\n )\n\n if reverse_targets:\n for k in self.scope.get_measures():\n k.kind_original = k.kind\n k.kind = k.kind * -1\n\n _db_pause = self.db\n\n try:\n self.db = None\n with evaluator:\n\n if epsilons == 'auto':\n from ..workbench import perform_experiments\n if searchover == 'levers':\n _, trial_outcomes = perform_experiments(\n self,\n scenarios=reference,\n policies=30,\n evaluator=evaluator,\n )\n else:\n _, trial_outcomes = perform_experiments(\n self,\n scenarios=30,\n policies=reference,\n evaluator=evaluator,\n )\n epsilons = [max(min_epsilon, np.std(trial_outcomes[mn]) / 20) for mn in self.scope.get_measure_names()]\n\n results = evaluator.optimize(\n searchover=searchover,\n reference=reference,\n nfe=nfe,\n constraints=constraints,\n convergence=convergence,\n convergence_freq=convergence_freq,\n epsilons=epsilons,\n **kwargs,\n )\n\n if isinstance(results, tuple) and len(results) == 2:\n results, result_convergence = results\n else:\n result_convergence = None\n\n # Put constants back in to results\n for i in self.scope.get_constants():\n results[i.name] = i.value\n\n results = self.ensure_dtypes(results)\n x = OptimizationResult(results, result_convergence, scope=self.scope)\n\n if searchover == 'levers':\n x.scenarios = reference\n elif searchover == 'uncertainties':\n x.policies = reference\n\n if check_extremes:\n x.check_extremes(\n self,\n 1 if check_extremes is True else check_extremes,\n evaluator=evaluator,\n searchover=searchover,\n robust=False,\n )\n\n finally:\n if reverse_targets:\n for k in self.scope.get_measures():\n k.kind = k.kind_original\n del k.kind_original\n self.db = _db_pause\n\n elif display_convergence:\n _, convergence, display_convergence, _ = self._common_optimization_setup(\n None, convergence, display_convergence, False\n )\n for c in convergence:\n try:\n c.rebuild(x.convergence)\n except KeyboardInterrupt:\n raise\n except:\n pass\n\n x.cache_file = cache_file\n save_cache(x, cache_file)\n 
return x\n\n def robust_optimize(\n self,\n robustness_functions,\n scenarios,\n evaluator=None,\n nfe=10000,\n convergence='default',\n display_convergence=True,\n convergence_freq=100,\n constraints=None,\n epsilons=0.1,\n cache_dir=None,\n cache_file=None,\n algorithm=None,\n check_extremes=False,\n **kwargs,\n ):\n \"\"\"\n Perform robust optimization.\n\n The robust optimization generally a multi-objective optimization task.\n It is undertaken using statistical measures of outcomes evaluated across\n a number of scenarios, instead of using the individual outcomes themselves.\n For each candidate policy, the model is evaluated against all of the considered\n scenarios, and then the robustness measures are evaluated using the\n set of outcomes from the original runs. The robustness measures\n are aggregate measures that are computed from a set of outcomes.\n For example, this may be expected value, median, n-th percentile,\n minimum, or maximum value of any individual outcome. It is also\n possible to have joint measures, e.g. expected value of the larger\n of outcome 1 or outcome 2.\n\n Each robustness function is indicated as a maximization or minimization\n target, where higher or lower values are better, respectively.\n The optimization process then tries to identify one or more\n non-dominated solutions for the possible policy levers.\n\n Args:\n robustness_functions (Collection[Measure]): A collection of\n aggregate statistical performance measures.\n scenarios (int or Collection): A collection of scenarios to\n use in the evaluation(s), or give an integer to generate\n that number of random scenarios.\n evaluator (Evaluator, optional): The evaluator to use to\n run the model. If not given, a SequentialEvaluator will\n be created.\n nfe (int, default 10_000): Number of function evaluations.\n This generally needs to be fairly large to achieve stable\n results in all but the most trivial applications.\n convergence ('default', None, or emat.optimization.ConvergenceMetrics):\n A convergence display during optimization.\n display_convergence (bool, default True): Automatically display\n the convergence metric figures when optimizing.\n convergence_freq (int, default 100): The frequency at which\n convergence metric figures are updated.\n constraints (Collection[Constraint], optional)\n Solutions will be constrained to only include values that\n satisfy these constraints. The constraints can be based on\n the policy levers, or on the computed values of the robustness\n functions, or some combination thereof.\n epsilons (float or array-like): Used to limit the number of\n distinct solutions generated. Set to a larger value to get\n fewer distinct solutions.\n cache_dir (path-like, optional): A directory in which to\n cache results. Most of the arguments will be hashed\n to develop a unique filename for these results, making this\n generally safer than `cache_file`.\n cache_file (path-like, optional): A file into which to\n cache results. If this file exists, the contents of the\n file will be loaded and all other arguments are ignored.\n Use with great caution.\n algorithm (platypus.Algorithm or str, optional): Select an\n algorithm for multi-objective optimization. The algorithm can\n be given directly, or named in a string. 
See `platypus`\n documentation for details.\n check_extremes (bool or int, default False): Conduct additional\n evaluations, setting individual policy levers to their\n extreme values, for each candidate Pareto optimal solution.\n kwargs: any additional arguments will be passed on to the\n platypus algorithm.\n\n Returns:\n emat.OptimizationResult:\n The set of non-dominated solutions found.\n When `convergence` is given, the convergence measures are\n included, as a pandas.DataFrame in the `convergence` attribute.\n \"\"\"\n from ..optimization.optimize import robust_optimize\n\n from ..util.disk_cache import load_cache_if_available, save_cache\n if isinstance(algorithm, str) or algorithm is None:\n alg = algorithm\n else:\n alg = algorithm.__name__\n result, cache_file = load_cache_if_available(\n cache_file=cache_file,\n cache_dir=cache_dir,\n scenarios=scenarios,\n convergence=convergence,\n convergence_freq=convergence_freq,\n constraints=constraints,\n epsilons=epsilons,\n nfe=nfe,\n robustness_functions=robustness_functions,\n alg=alg,\n check_extremes=check_extremes,\n )\n\n if result is None:\n _db_pause = self.db\n try:\n self.db = None\n result = robust_optimize(\n self,\n robustness_functions,\n scenarios,\n evaluator=evaluator,\n nfe=nfe,\n convergence=convergence,\n display_convergence=display_convergence,\n convergence_freq=convergence_freq,\n constraints=constraints,\n epsilons=epsilons,\n check_extremes=check_extremes,\n **kwargs,\n )\n finally:\n self.db = _db_pause\n elif display_convergence:\n _, convergence, display_convergence, _ = self._common_optimization_setup(\n None, convergence, display_convergence, False\n )\n for c in convergence:\n try:\n c.rebuild(result.convergence)\n except KeyboardInterrupt:\n raise\n except:\n pass\n\n result.cache_file = cache_file\n save_cache(result, cache_file)\n return result\n\n def robust_evaluate(\n self,\n robustness_functions,\n scenarios,\n policies,\n evaluator=None,\n cache_dir=None,\n suspend_db=True,\n ):\n \"\"\"\n Perform robust evaluation(s).\n\n The robust evaluation is used to generate statistical measures\n of outcomes, instead of generating the individual outcomes themselves.\n For each policy, the model is evaluated against all of the considered\n scenarios, and then the robustness measures are evaluated using the\n set of outcomes from the original runs. The robustness measures\n are aggregate measures that are computed from a set of outcomes.\n For example, this may be expected value, median, n-th percentile,\n minimum, or maximum value of any individual outcome. It is also\n possible to have joint measures, e.g. expected value of the larger\n of outcome 1 or outcome 2.\n\n Args:\n robustness_functions (Collection[Measure]): A collection of\n aggregate statistical performance measures.\n scenarios (int or Collection): A collection of scenarios to\n use in the evaluation(s), or give an integer to generate\n that number of random scenarios.\n policies (int, or collection): A collection of policies to\n use in the evaluation(s), or give an integer to generate\n that number of random policies.\n evaluator (Evaluator, optional): The evaluator to use to\n run the model. If not given, a SequentialEvaluator will\n be created.\n cache_dir (path-like, optional): A directory in which to\n cache results.\n suspend_db (bool, default True):\n Suspend writing the results of individual model runs to\n the database. 
Robust evaluation potentially generates a\n large number of model executions, and storing all these\n individual results may not be useful.\n\n Returns:\n pandas.DataFrame: The computed value of each item\n in `robustness_functions`, for each policy in `policies`.\n \"\"\"\n robust_results = None\n cache_file = None\n if cache_dir is not None:\n try:\n from ..util.hasher import hash_it\n hh = hash_it(\n scenarios,\n policies,\n robustness_functions,\n )\n os.makedirs(os.path.join(cache_dir,hh[2:4],hh[4:6]), exist_ok=True)\n cache_file = os.path.join(cache_dir,hh[2:4],hh[4:6],hh[6:]+\".gz\")\n if os.path.exists(cache_file):\n _logger.debug(f\"loading from cache_file={cache_file}\")\n from ..util.filez import load\n robust_results = load(cache_file)\n cache_file = None\n except KeyboardInterrupt:\n raise\n except:\n import traceback\n warnings.warn('unable to manage cache')\n traceback.print_exc()\n\n if robust_results is None:\n with self.lock_db(suspend_db):\n if evaluator is None:\n from ..workbench.em_framework import SequentialEvaluator\n evaluator = SequentialEvaluator(self)\n\n if not isinstance(evaluator, BaseEvaluator):\n from dask.distributed import Client\n if isinstance(evaluator, Client):\n from ..workbench.em_framework.ema_distributed import DistributedEvaluator\n evaluator = DistributedEvaluator(self, client=evaluator)\n\n from ..workbench.em_framework.samplers import sample_uncertainties, sample_levers\n\n if isinstance(scenarios, int):\n n_scenarios = scenarios\n scenarios = sample_uncertainties(self, n_scenarios)\n\n with evaluator:\n robust_results = evaluator.robust_evaluate(\n robustness_functions,\n scenarios,\n policies,\n )\n\n robust_results = self.ensure_dtypes(robust_results)\n\n if cache_file is not None:\n from ..util.filez import save\n save(robust_results, cache_file, overwrite=True)\n with open(cache_file.replace('.gz','.info.txt'), 'wt') as notes:\n print(\"scenarios=\", scenarios, file=notes)\n print(\"robustness_functions=\", robustness_functions, file=notes)\n print(\"policies=\", policies, file=notes)\n\n return robust_results\n\n def io_experiment(self, params):\n \"\"\"\n Run an experiment, and return a dictionary of inputs and outputs together.\n\n Args:\n params: dict\n\n Returns:\n dict\n \"\"\"\n out = self.run_experiment(params).copy()\n out.update(params)\n return out\n\n def log(self, message, level=logging.INFO):\n \"\"\"\n Log a message.\n\n This facility will attempt to send log messages to\n the attached database, falling back to the regular\n module logger in case that fails.\n\n Args:\n message (str): Message to send to log.\n level (int, default logging.INFO): Log level.\n\n Returns:\n\n \"\"\"\n db = getattr(self, 'db', None)\n try:\n db.log(message, level=level)\n except:\n _logger.log(level, message)\n"
] | [
[
"numpy.std",
"pandas.concat",
"pandas.DataFrame",
"pandas.DataFrame.from_dict"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"0.19",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
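The `optimize` method in the code field above derives per-measure epsilons from a short batch of trial runs when `epsilons='auto'`, taking one twentieth of each measure's standard deviation but never less than `min_epsilon`. A minimal sketch of that heuristic in isolation; the measure names and trial data below are hypothetical placeholders, not part of the dataset row.

```python
import numpy as np
import pandas as pd

# Hypothetical trial outcomes standing in for the 30 exploratory runs the
# optimize() method performs when epsilons='auto'; real values come from
# perform_experiments on the attached core model.
trial_outcomes = pd.DataFrame({
    "net_benefit": np.random.normal(100.0, 15.0, size=30),
    "total_cost": np.random.normal(50.0, 5.0, size=30),
})

min_epsilon = 0.1  # same floor as the min_epsilon default in the source
epsilons = [
    max(min_epsilon, np.std(trial_outcomes[name]) / 20)
    for name in trial_outcomes.columns
]
print(epsilons)  # one epsilon per performance measure
```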
YoungSkKim/CenterTrack-RideFlux | [
"36c0e4ddc608bddd203c12feb8a5f562c990eacb"
] | [
"src/lib/dataset/datasets/mot.py"
] | [
"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport pycocotools.coco as coco\nfrom pycocotools.cocoeval import COCOeval\nimport numpy as np\nimport json\nimport os\nfrom collections import defaultdict\nfrom ..generic_dataset import GenericDataset\n\nclass MOT(GenericDataset):\n num_categories = 1\n default_resolution = [544, 960]\n class_name = ['']\n max_objs = 256\n cat_ids = {1: 1, -1: -1}\n def __init__(self, opt, split):\n self.dataset_version = opt.dataset_version\n self.year = int(self.dataset_version[:2])\n print('Using MOT {} {}'.format(self.year, self.dataset_version))\n data_dir = os.path.join(opt.data_dir, 'mot{}'.format(self.year))\n\n if opt.dataset_version in ['17trainval', '17test']:\n ann_file = '{}.json'.format('train' if split == 'train' else \\\n 'test')\n elif opt.dataset_version == '17halftrain':\n ann_file = '{}.json'.format('train_half')\n elif opt.dataset_version == '17halfval':\n ann_file = '{}.json'.format('val_half')\n img_dir = os.path.join(data_dir, '{}'.format(\n 'test' if 'test' in self.dataset_version else 'train'))\n\n print('ann_file', ann_file)\n ann_path = os.path.join(data_dir, 'annotations', ann_file)\n\n self.images = None\n # load image list and coco\n super(MOT, self).__init__(opt, split, ann_path, img_dir)\n\n self.num_samples = len(self.images)\n print('Loaded MOT {} {} {} samples'.format(\n self.dataset_version, split, self.num_samples))\n\n def _to_float(self, x):\n return float(\"{:.2f}\".format(x))\n\n def __len__(self):\n return self.num_samples\n\n def save_results(self, results, save_dir):\n results_dir = os.path.join(save_dir, 'results_mot{}'.format(self.dataset_version))\n if not os.path.exists(results_dir):\n os.mkdir(results_dir)\n for video in self.coco.dataset['videos']:\n video_id = video['id']\n file_name = video['file_name']\n out_path = os.path.join(results_dir, '{}.txt'.format(file_name))\n f = open(out_path, 'w')\n images = self.video_to_images[video_id]\n tracks = defaultdict(list)\n for image_info in images:\n if not (image_info['id'] in results):\n continue\n result = results[image_info['id']]\n frame_id = image_info['frame_id']\n for item in result:\n if not ('tracking_id' in item):\n item['tracking_id'] = np.random.randint(100000)\n if item['active'] == 0:\n continue\n tracking_id = item['tracking_id']\n bbox = item['bbox']\n bbox = [bbox[0], bbox[1], bbox[2], bbox[3]]\n tracks[tracking_id].append([frame_id] + bbox)\n rename_track_id = 0\n for track_id in sorted(tracks):\n rename_track_id += 1\n for t in tracks[track_id]:\n f.write('{},{},{:.2f},{:.2f},{:.2f},{:.2f},-1,-1,-1,-1\\n'.format(\n t[0], rename_track_id, t[1], t[2], t[3]-t[1], t[4]-t[2]))\n f.close()\n\n def run_eval(self, results, save_dir):\n self.save_results(results, save_dir)\n gt_type_str = '{}'.format(\n '_train_half' if '17halftrain' in self.opt.dataset_version \\\n else '_val_half' if '17halfval' in self.opt.dataset_version \\\n else '')\n gt_type_str = '_val_half' if self.year in [16, 19] else gt_type_str\n gt_type_str = '--gt_type {}'.format(gt_type_str) if gt_type_str != '' else \\\n ''\n os.system('python tools/eval_motchallenge.py ' + \\\n '../data/mot{}/{}/ '.format(self.year, 'train') + \\\n '{}/results_mot{}/ '.format(save_dir, self.dataset_version) + \\\n gt_type_str + ' --eval_official')\n"
] | [
[
"numpy.random.randint"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
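The `save_results` method in the `mot.py` source above assigns a random `tracking_id` via `np.random.randint(100000)` whenever a detection lacks one, skips inactive detections, and writes each track as a MOTChallenge-style CSV line. A self-contained sketch of that loop with a made-up single-frame result dict:

```python
import numpy as np
from collections import defaultdict

# Hypothetical per-frame detections; real ones come from the tracker.
results = {1: [{"bbox": [10.0, 20.0, 50.0, 80.0], "active": 1}]}

tracks = defaultdict(list)
for frame_id, detections in results.items():
    for item in detections:
        if "tracking_id" not in item:             # same fallback as save_results
            item["tracking_id"] = np.random.randint(100000)
        if item["active"] == 0:
            continue
        x1, y1, x2, y2 = item["bbox"]
        tracks[item["tracking_id"]].append([frame_id, x1, y1, x2, y2])

for rename_id, track_id in enumerate(sorted(tracks), start=1):
    for t in tracks[track_id]:
        # MOTChallenge line: frame, id, x, y, w, h, conf, -1, -1, -1
        print("{},{},{:.2f},{:.2f},{:.2f},{:.2f},-1,-1,-1,-1".format(
            t[0], rename_id, t[1], t[2], t[3] - t[1], t[4] - t[2]))
```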
snji-khjuria/RelationClassificationFewShotModels | [
"a5047f44a57a81ab3281bf1290fa149a4c456486"
] | [
"models/metagan_queryattentive.py"
] | [
"#creating the metagan model\nimport torch\nimport torch.nn as nn\n\nimport sys\n\nsys.path.append('..')\nimport fewshot_re_kit\nimport torch\nfrom torch import autograd, optim, nn\nfrom torch.autograd import Variable\nfrom torch.nn import functional as F\n\n\n\n\n\n\n\nclass MetaGenerator(fewshot_re_kit.adversarial_framework.FewShotAdversarialREModel):\n def __init__(self, input_size, K, D=230):\n fewshot_re_kit.adversarial_framework.FewShotAdversarialREModel.__init__(self)\n self.generator_model = nn.Sequential(\n nn.Linear(input_size, 4096),\n nn.ReLU(),\n nn.Linear(4096, 2048),\n nn.ReLU(),\n nn.Linear(2048, 1024),\n nn.ReLU(),\n nn.Linear(1024, K*D),\n nn.ReLU()\n )\n\n def forward(self, x):\n x = self.generator_model(x)\n return x\n\n\n\nclass MetaDisc(fewshot_re_kit.adversarial_framework.FewShotAdversarialREModel):\n\n\n def __init__(self, hidden_size=230, relnet_features=230*2):\n fewshot_re_kit.adversarial_framework.FewShotAdversarialREModel.__init__(self)\n self.hidden_size = hidden_size\n self.drop=nn.Dropout()\n self.fc = nn.Sequential(\n nn.Linear(hidden_size, hidden_size, bias=True)\n )\n self.relation_network = nn.Sequential(\n #nn.Dropout(),\n nn.Linear(relnet_features , 64),\n nn.BatchNorm1d(64),\n nn.ReLU(inplace=True),\n nn.Dropout(),\n nn.Linear(64, 1),\n #TODO: Add the sigmoid layer if you want to\n )\n\n\n\n def __dist__(self, x, y, dim):\n return (torch.pow(x - y, 2)).sum(dim)\n\n def compute_distance(self, prototypes, query):\n return self.__dist__(prototypes, query.unsqueeze(2), 3)\n\n def euclidean_similarity(self, S, Q):\n distance = self.__dist__(S.unsqueeze(1), Q.unsqueeze(2), 3)\n return distance\n #return torch.div(1, 1+distance)\n def relation_score(self, support, query):\n return self.euclidean_similarity(support, query)\n #return self.__batch_dist__(support, query)\n #print(\"support is \", support.size())\n #print(\"q query is \", query.size())\n _, nq, _ = query.size()\n B, nc, D = support.size()\n s_s = support.unsqueeze(1).expand(-1, nq, -1, -1)\n q_q = query.unsqueeze(2).expand(-1, -1, nc, -1)\n #cos = nn.CosineSimilarity(dim=3, eps=1e-6)\n #return cos(s_s, q_q)\n\n nn_input = torch.cat([s_s, q_q], 3)\n nn_input = nn_input.view(B*nq*nc, -1)\n nn_out = self.relation_network(nn_input)\n nn_out = nn_out.view(B, nq, nc, 1).squeeze(3)\n return nn_out\n\n\n def forward(self, support, query, N, K, NQ, is_train=False):\n '''\n support: Inputs of the support set.\n query: Inputs of the query set.\n N: Num of classes\n K: Num of instances for each class in the support set\n Q: Num of instances for each class in the query set\n '''\n support = self.drop(support)\n query = self.drop(query)\n support = support.view(-1, N, K, self.hidden_size) # (B, N, K, D)\n query = query.view(-1, NQ, self.hidden_size) # (B, N * Q, D)\n B = support.size(0) # Batch size\n NQ = query.size(1) # Num of instances for each batch in the query set\n support = support.unsqueeze(1).expand(-1, NQ, -1, -1, -1)\n support_for_att = self.fc(support)#(B, NQ, N, D)\n query_for_att = self.fc(query.unsqueeze(2).unsqueeze(3).expand(-1, -1, N, K, -1))\n ins_att_score = F.softmax(torch.tanh(support_for_att * query_for_att).sum(-1), dim=-1)\n support_proto = (support * ins_att_score.unsqueeze(4).expand(-1, -1, -1, -1, self.hidden_size))\n support_proto = support_proto.sum(3)\n prototypes = support_proto\n #prototypes = self.generate_query_attentive_prototype(support, query)\n logits = -self.compute_distance(prototypes, query)\n\n _, pred = torch.max(logits.view(-1, N), 1)\n return logits, pred"
] | [
[
"torch.nn.BatchNorm1d",
"torch.nn.Dropout",
"torch.cat",
"torch.tanh",
"torch.nn.Linear",
"torch.nn.ReLU",
"torch.pow"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
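The `MetaDisc` network above scores queries against attention-weighted prototypes with a broadcasted squared Euclidean distance built from `torch.pow(x - y, 2).sum(dim)`. A toy-shape sketch of that distance; the batch, query, class and feature sizes below are illustrative only.

```python
import torch

def squared_dist(x, y, dim):
    # same element-wise form as __dist__ in the source above
    return torch.pow(x - y, 2).sum(dim)

B, NQ, N, D = 2, 3, 5, 230                 # batch, queries, classes, hidden size
prototypes = torch.randn(B, NQ, N, D)      # one prototype per class, per query
query = torch.randn(B, NQ, D)

# Broadcast each query against every class prototype, as compute_distance does,
# then negate so that higher logits mean closer prototypes.
logits = -squared_dist(prototypes, query.unsqueeze(2), 3)
print(logits.shape)                        # torch.Size([2, 3, 5])
pred = logits.view(-1, N).argmax(dim=1)    # predicted class per query
```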
samysweb/dnnv | [
"58fb95b7300914d9da28eed86c39eca473b1aaef",
"58fb95b7300914d9da28eed86c39eca473b1aaef",
"58fb95b7300914d9da28eed86c39eca473b1aaef",
"58fb95b7300914d9da28eed86c39eca473b1aaef"
] | [
"dnnv/verifiers/marabou/__init__.py",
"tests/unit_tests/test_properties/test_expressions/test_Call.py",
"tests/unit_tests/test_properties/test_visitors/test_DetailsInference/test_Add.py",
"tests/unit_tests/test_nn/test_operations/test_patterns/test_Parallel.py"
] | [
"import numpy as np\nimport tempfile\n\nfrom dnnv.verifiers.common.base import Parameter, Verifier\nfrom dnnv.verifiers.common.reductions import IOPolytopeReduction, HalfspacePolytope\nfrom dnnv.verifiers.common.results import SAT, UNSAT, UNKNOWN\nfrom functools import partial\n\nfrom .errors import MarabouError, MarabouTranslatorError\n\n\nclass Marabou(Verifier):\n reduction = partial(IOPolytopeReduction, HalfspacePolytope, HalfspacePolytope)\n translator_error = MarabouTranslatorError\n verifier_error = MarabouError\n parameters = {\n \"num_workers\": Parameter(int, help=\"Maximum number of workers to use.\"),\n }\n\n def build_inputs(self, prop):\n if prop.input_constraint.num_variables > 1:\n raise self.translator_error(\n \"Unsupported network: More than 1 input variable\"\n )\n\n with tempfile.NamedTemporaryFile(\n mode=\"w+\", suffix=\".onnx\", delete=False\n ) as onnx_model_file:\n prop.op_graph.simplify().export_onnx(onnx_model_file.name)\n\n lb, ub = prop.input_constraint.as_bounds()\n A_in, b_in = prop.input_constraint.as_matrix_inequality()\n A_out, b_out = prop.output_constraint.as_matrix_inequality(include_bounds=True)\n\n with tempfile.NamedTemporaryFile(\n mode=\"w+\", suffix=\".npy\", delete=False\n ) as constraint_file:\n np.save(constraint_file.name, ((lb, ub), (A_in, b_in), (A_out, b_out)))\n\n with tempfile.NamedTemporaryFile(\n mode=\"w+\", suffix=\".npy\", delete=False\n ) as output_file:\n self._tmp_output_file = output_file\n args = (\n \"marabou\",\n onnx_model_file.name,\n constraint_file.name,\n \"-o\",\n self._tmp_output_file.name,\n ) + tuple(f\"--{k}={v}\" for k, v in self.parameters.items() if v is not None)\n return args\n\n def parse_results(self, prop, results):\n result, cinput = np.load(self._tmp_output_file.name, allow_pickle=True)\n if result == False:\n return UNSAT, None\n elif result == True:\n input_shape, input_dtype = prop.op_graph.input_details[0]\n cex = cinput.reshape(input_shape).astype(input_dtype)\n return SAT, cex\n raise self.translator_error(f\"Unknown verification result: {result}\")\n",
"import numpy as np\nimport pytest\n\nimport dnnv.nn.operations as operations\n\nfrom dnnv.nn.graph import OperationGraph\nfrom dnnv.properties.expressions import *\n\n\ndef test_value():\n func_call = Call(Constant(np.sign), (Constant(5),), {})\n assert func_call.value == 1\n\n func_call = Call(\n Constant(np.ones), (Constant((1, 5)),), {\"dtype\": Constant(np.float32)}\n )\n assert func_call.value.dtype == np.float32\n assert np.allclose(func_call.value, np.ones((1, 5), dtype=np.float32))\n\n func_call = Call(Constant(np.argmax), (Symbol(\"y\"),), {})\n with pytest.raises(ValueError, match=\"Cannot get value of non-concrete expression\"):\n _ = func_call.value\n\n input_op = operations.Input((-1, 5), np.dtype(np.float32))\n mul_op = operations.Mul(input_op, 2.0)\n add_op = operations.Add(mul_op, -1.0)\n relu_op = operations.Relu(add_op)\n op_graph_1 = OperationGraph([relu_op])\n Network(\"N1\").concretize(op_graph_1)\n input_op = operations.Input((-1, 5), np.dtype(np.float32))\n mul_op = operations.Mul(input_op, -2.0)\n add_op = operations.Add(mul_op, 10.0)\n relu_op = operations.Relu(add_op)\n op_graph_2 = OperationGraph([relu_op])\n Network(\"N2\").concretize(op_graph_2)\n func_call = Call(\n Network(\"N2\").compose,\n (Network(\"N1\"),),\n {},\n )\n N12 = func_call.value\n assert isinstance(N12, OperationGraph)\n\n\ndef test_repr():\n func_call = Call(Constant(np.sign), (Constant(5),), {})\n assert repr(func_call) == \"numpy.sign(5)\"\n\n func_call = Call(\n Constant(np.ones), (Constant((1, 5)),), {\"dtype\": Constant(np.float32)}\n )\n assert repr(func_call) == \"numpy.ones((1, 5), dtype=numpy.float32)\"\n\n func_call = Call(Network(\"N\"), (Constant(5),), {})\n assert repr(func_call) == \"Network('N')(5)\"\n\n func_call = Call(Constant(dict), (), {})\n assert repr(func_call) == \"builtins.dict()\"\n\n func_call = Call(Constant(int), (Constant(\"11\"),), {\"base\": Constant(2)})\n assert repr(func_call) == \"builtins.int('11', base=2)\"\n\n func_call = Call(Constant(str), (), {\"encoding\": Constant(\"utf8\")})\n assert repr(func_call) == \"builtins.str(encoding='utf8')\"\n\n\ndef test_str():\n func_call = Call(Constant(np.sign), (Constant(5),), {})\n assert str(func_call) == \"numpy.sign(5)\"\n\n func_call = Call(\n Constant(np.ones), (Constant((1, 5)),), {\"dtype\": Constant(np.float32)}\n )\n assert str(func_call) == \"numpy.ones((1, 5), dtype=numpy.float32)\"\n\n func_call = Call(Network(\"N\"), (Constant(5),), {})\n assert str(func_call) == \"N(5)\"\n\n func_call = Call(Constant(dict), (), {})\n assert str(func_call) == \"builtins.dict()\"\n\n func_call = Call(Constant(int), (Constant(\"11\"),), {\"base\": Constant(2)})\n assert str(func_call) == \"builtins.int('11', base=2)\"\n\n func_call = Call(Constant(str), (), {\"encoding\": Constant(\"utf8\")})\n assert str(func_call) == \"builtins.str(encoding='utf8')\"\n\n\ndef test_is_equivalent():\n expr1 = Call(Constant(abs), (Constant(-4),), {})\n expr2 = Call(Constant(abs), (Constant(-4),), {})\n expr3 = Call(Constant(abs), (Constant(-2),), {})\n expr4 = Call(Constant(hex), (Constant(-4),), {})\n\n assert expr1.is_equivalent(expr1)\n assert expr1.is_equivalent(expr2)\n assert expr2.is_equivalent(expr1)\n assert not expr1.is_equivalent(expr3)\n assert not expr3.is_equivalent(expr1)\n assert not expr1.is_equivalent(expr4)\n assert not expr4.is_equivalent(expr1)\n",
"import numpy as np\nimport pytest\n\nfrom dnnv.properties.expressions import *\nfrom dnnv.properties.visitors import DetailsInference, DNNVShapeError\n\n\ndef test_Add_symbols():\n inference = DetailsInference()\n\n a, b = Symbol(\"a\"), Symbol(\"b\")\n expr = Add(a, b)\n inference.visit(expr)\n\n assert not inference.shapes[a].is_concrete\n assert not inference.shapes[b].is_concrete\n assert not inference.shapes[expr].is_concrete\n\n assert not inference.types[a].is_concrete\n assert not inference.types[b].is_concrete\n assert not inference.types[expr].is_concrete\n\n\ndef test_Add_constants():\n inference = DetailsInference()\n\n a, b = Constant(3), Constant(11)\n expr = Add(a, b)\n inference.visit(expr)\n\n assert inference.shapes[a].is_concrete\n assert inference.shapes[b].is_concrete\n assert inference.shapes[expr].is_concrete\n\n assert inference.shapes[a].value == ()\n assert inference.shapes[b].value == ()\n assert inference.shapes[expr].value == ()\n\n assert inference.types[a].is_concrete\n assert inference.types[b].is_concrete\n assert inference.types[expr].is_concrete\n\n assert inference.types[a].value == np.min_scalar_type(3)\n assert inference.types[b].value == np.min_scalar_type(11)\n assert inference.types[expr].value == np.result_type(\n np.min_scalar_type(3), np.min_scalar_type(11)\n )\n\n\ndef test_Add_arrays():\n inference = DetailsInference()\n\n a, b = Constant(np.array([[1, 2, 3]])), Constant(np.random.rand(1, 1, 1, 3))\n expr = Add(a, b)\n inference.visit(expr)\n\n assert inference.shapes[a].is_concrete\n assert inference.shapes[b].is_concrete\n assert inference.shapes[expr].is_concrete\n\n assert inference.shapes[a].value == (1, 3)\n assert inference.shapes[b].value == (1, 1, 1, 3)\n assert inference.shapes[expr].value == (1, 1, 1, 3)\n\n assert inference.types[a].is_concrete\n assert inference.types[b].is_concrete\n assert inference.types[expr].is_concrete\n\n assert inference.types[a].value == a.value.dtype\n assert inference.types[b].value == b.value.dtype\n assert inference.types[expr].value == (a.value + b.value).dtype\n\n\ndef test_Add_incompatible_shapes():\n inference = DetailsInference()\n\n a, b = Constant(np.random.rand(3, 4)), Constant(np.random.rand(2))\n expr = Add(a, b)\n with pytest.raises(DNNVShapeError):\n inference.visit(expr)\n",
"import numpy as np\nimport pytest\n\nfrom dnnv.nn.operations import *\nfrom dnnv.nn.operations.patterns import *\n\n\ndef test_init():\n pattern = Parallel(Operation, Input)\n assert isinstance(pattern, Parallel)\n assert len(pattern.patterns) == 2\n assert pattern.patterns[0] == Operation\n assert pattern.patterns[1] == Input\n\n pattern = Parallel(Operation, Operation)\n assert isinstance(pattern, Parallel)\n assert len(pattern.patterns) == 2\n assert pattern.patterns[0] == Operation\n assert pattern.patterns[1] == Operation\n\n\ndef test_str():\n pattern = Parallel(Operation, Input)\n assert str(pattern) == \"(Operation & Input)\"\n\n pattern = Parallel(Operation, Operation)\n assert str(pattern) == \"(Operation & Operation)\"\n\n pattern = Parallel(Add, Sub, Mul)\n assert str(pattern) == \"(Add & Sub & Mul)\"\n\n pattern = Parallel(Operation, None)\n assert str(pattern) == \"(Operation & None)\"\n\n\ndef test_and_error():\n pattern = Parallel(Operation, Input)\n with pytest.raises(TypeError) as excinfo:\n _ = pattern & 2\n assert str(excinfo.value).startswith(\n \"unsupported operand type(s) for &: 'Parallel' and \"\n )\n\n\ndef test_and():\n parallel_pattern = Parallel(Operation, Input)\n\n pattern = parallel_pattern & None\n assert isinstance(pattern, Parallel)\n assert len(pattern.patterns) == 3\n assert pattern.patterns[0] == Operation\n assert pattern.patterns[1] == Input\n assert pattern.patterns[2] == None\n\n pattern = parallel_pattern & Parallel(Add, Mul)\n assert isinstance(pattern, Parallel)\n assert len(pattern.patterns) == 4\n assert pattern.patterns[0] == Operation\n assert pattern.patterns[1] == Input\n assert pattern.patterns[2] == Add\n assert pattern.patterns[3] == Mul\n\n or_pattern = Or(Add, Mul)\n pattern = parallel_pattern & or_pattern\n assert isinstance(pattern, Parallel)\n assert len(pattern.patterns) == 3\n assert pattern.patterns[0] == Operation\n assert pattern.patterns[1] == Input\n assert pattern.patterns[2] == or_pattern\n\n sequential_pattern = Sequential(Add, Mul)\n pattern = parallel_pattern & sequential_pattern\n assert isinstance(pattern, Parallel)\n assert len(pattern.patterns) == 3\n assert pattern.patterns[0] == Operation\n assert pattern.patterns[1] == Input\n assert pattern.patterns[2] == sequential_pattern\n\n\ndef test_rand_error():\n pattern = Parallel(Operation, Input)\n with pytest.raises(TypeError) as excinfo:\n _ = 2 & pattern\n assert str(excinfo.value).startswith(\n \"unsupported operand type(s) for &: 'int' and 'Parallel'\"\n )\n\n\ndef test_ror():\n parallel_pattern = Parallel(Operation, Input)\n\n pattern = None & parallel_pattern\n assert isinstance(pattern, Parallel)\n assert len(pattern.patterns) == 3\n assert pattern.patterns[0] == None\n assert pattern.patterns[1] == Operation\n assert pattern.patterns[2] == Input\n\n\ndef test_match_false():\n parallel_pattern_empty = Parallel()\n matches = list(parallel_pattern_empty.match([Operation()]))\n assert len(matches) == 0\n\n par_pattern = Parallel(Add, Sub)\n matches = list(par_pattern.match([]))\n assert len(matches) == 0\n matches = list(par_pattern.match([Input(None, None)]))\n assert len(matches) == 0\n matches = list(par_pattern.match([Mul(None, None)]))\n assert len(matches) == 0\n input_op = Input((), np.dtype(np.float32))\n matches = list(\n par_pattern.match(\n [\n Mul(input_op, 2.0),\n Div(input_op, 2.0),\n ]\n )\n )\n assert len(matches) == 0\n\n\ndef test_match_true():\n input_op = Input((), np.dtype(np.float32))\n\n par_pattern = Parallel(Input)\n matches = 
list(par_pattern.match([input_op]))\n assert len(matches) == 1\n assert len(matches[0]) == 0\n\n par_pattern = Parallel(Add, Sub)\n matches = list(\n par_pattern.match(\n [\n Add(input_op, 2.0),\n Sub(input_op, 2.0),\n ]\n )\n )\n assert len(matches) == 1\n assert len(matches[0]) == 1\n assert matches[0][0] == input_op\n\n par_pattern = Parallel(Add, Sub)\n _input_op = Input((), np.dtype(np.float32))\n matches = list(\n par_pattern.match(\n [\n Add(input_op, 2.0),\n Sub(_input_op, 2.0),\n ]\n )\n )\n assert len(matches) == 1\n assert len(matches[0]) == 2\n assert matches[0][0] == input_op\n assert matches[0][1] == _input_op\n\n\ndef test_match_optional():\n input_op = Input((), np.dtype(np.float32))\n add_op = Add(input_op, 2.0)\n mul_op = Mul(input_op, 2.0)\n sub_op = Sub(input_op, 2.0)\n\n par_pattern = Parallel(None)\n matches = list(par_pattern.match([add_op]))\n assert len(matches) == 1\n assert matches[0][0] == add_op\n matches = list(par_pattern.match([mul_op]))\n assert len(matches) == 1\n assert matches[0][0] == mul_op\n\n par_pattern = Parallel(Sub, None)\n matches = list(par_pattern.match([sub_op, add_op]))\n assert len(matches) == 1\n assert len(matches[0]) == 2\n assert matches[0][0] == input_op\n assert matches[0][1] == add_op\n matches = list(par_pattern.match([sub_op, mul_op]))\n assert len(matches) == 1\n assert len(matches[0]) == 2\n assert matches[0][0] == input_op\n assert matches[0][1] == mul_op\n matches = list(par_pattern.match([sub_op, input_op]))\n assert len(matches) == 1\n assert len(matches[0]) == 1\n assert matches[0][0] == input_op\n"
] | [
[
"numpy.load",
"numpy.save"
],
[
"numpy.dtype",
"numpy.ones"
],
[
"numpy.min_scalar_type",
"numpy.array",
"numpy.random.rand"
],
[
"numpy.dtype"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
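The Marabou bridge above hands its bound and halfspace constraints to the solver by saving them into a `.npy` file and reading them back with `allow_pickle=True`. A minimal sketch of that round trip; the arrays are made up, and the object array is built explicitly here rather than from the nested tuple the source passes to `np.save`.

```python
import tempfile
import numpy as np

lb, ub = np.zeros(2), np.ones(2)           # input box bounds
A_in, b_in = np.eye(2), np.ones(2)         # input halfspaces  A_in @ x <= b_in
A_out, b_out = -np.eye(2), np.zeros(2)     # output halfspaces A_out @ y <= b_out

payload = np.empty(3, dtype=object)        # ragged shapes -> object array
payload[0] = (lb, ub)
payload[1] = (A_in, b_in)
payload[2] = (A_out, b_out)

with tempfile.NamedTemporaryFile(suffix=".npy", delete=False) as f:
    np.save(f.name, payload)
    path = f.name

(lb2, ub2), (A_in2, b_in2), (A_out2, b_out2) = np.load(path, allow_pickle=True)
print(np.allclose(ub2, ub), np.allclose(A_out2, A_out))  # True True
```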
gstoica27/ENAS-pytorch | [
"f8b9acbd101ab15c158066d2e4e9012ad11061a7"
] | [
"main.py"
] | [
"\"\"\"Entry point.\"\"\"\nimport os\n\nimport torch\n\nimport data\nimport config\nimport utils\nimport trainer\nimport re_trainer\n\nfrom data.loader import DataLoader\nfrom tacred_utils import scorer, constant, helper\nfrom tacred_utils.vocab import Vocab\nimport numpy as np\n\nlogger = utils.get_logger()\n\n\ndef main(args): # pylint:disable=redefined-outer-name\n \"\"\"main: Entry point.\"\"\"\n utils.prepare_dirs(args)\n\n torch.manual_seed(args.random_seed)\n\n if args.num_gpu > 0:\n torch.cuda.manual_seed(args.random_seed)\n\n if args.network_type == 'rnn':\n if args.dataset != 'tacred':\n dataset = data.text.Corpus(args.data_path)\n # loading tacred data\n else:\n opt = vars(args)\n opt['num_classes'] = len(constant.LABEL_TO_ID)\n\n # load vocab\n #vocab_file = \"/Volumes/External HDD/dataset/tacred/data/vocab/vocab.pkl\"\n #emb_file = '/Volumes/External HDD/dataset/tacred/data/vocab/embedding.npy'\n #opt['data_dir'] = '/Volumes/External HDD/dataset/tacred/data/json'\n\n emb_file = '/home/scratch/gis/datasets/vocab/embedding.npy'\n vocab_file = '/home/scratch/gis/datasets/vocab/vocab.pkl'\n opt['data_dir'] = '/home/scratch/gis/datasets/tacred/data/json'\n\n vocab = Vocab(vocab_file, load=True)\n opt['vocab_size'] = vocab.size\n emb_matrix = np.load(emb_file)\n assert emb_matrix.shape[0] == vocab.size\n assert emb_matrix.shape[1] == args.emb_dim\n\n train_batch = DataLoader(opt['data_dir'] + '/train.json', opt['batch_size'], opt, vocab, evaluation=False)\n score_dev_batch = DataLoader(opt['data_dir'] + '/dev.json', opt['batch_size'], opt, vocab, evaluation=False)\n eval_dev_batch = DataLoader(opt['data_dir'] + '/dev.json', opt['batch_size'], opt, vocab, evaluation=True)\n test_batch = DataLoader(opt['data_dir'] + '/test.json', opt['batch_size'], opt, vocab, evaluation=True)\n\n dataset = {'train_batch': train_batch,\n 'score_dev_batch': score_dev_batch,\n 'eval_dev_batch': eval_dev_batch,\n 'test_batch': test_batch,\n 'emb_matrix': emb_matrix}\n args.num_classes = opt['num_classes']\n args.emb_matrix = emb_matrix\n args.vocab_size = opt['vocab_size']\n\n elif args.dataset == 'cifar':\n dataset = data.image.Image(args.data_path)\n else:\n raise NotImplementedError(f\"{args.dataset} is not supported\")\n if args.dataset != 'tacred':\n trnr = trainer.Trainer(args, dataset)\n else:\n trnr = re_trainer.Trainer(args, dataset)\n\n if args.mode == 'train':\n utils.save_args(args)\n trnr.train()\n elif args.mode == 'derive':\n assert args.load_path != \"\", (\"`--load_path` should be given in \"\n \"`derive` mode\")\n trnr.derive()\n elif args.mode == 'test':\n if not args.load_path:\n raise Exception(\"[!] You should specify `load_path` to load a \"\n \"pretrained model\")\n trnr.test()\n elif args.mode == 'single':\n if not args.dag_path:\n raise Exception(\"[!] You should specify `dag_path` to load a dag\")\n utils.save_args(args)\n trnr.train(single=True)\n else:\n raise Exception(f\"[!] Mode not found: {args.mode}\")\n\nif __name__ == \"__main__\":\n args, unparsed = config.get_args()\n print(args)\n main(args)\n"
] | [
[
"torch.manual_seed",
"numpy.load",
"torch.cuda.manual_seed"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
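The `main()` entry point above seeds PyTorch (and CUDA when GPUs are requested) before loading the pretrained embedding matrix with `np.load` and checking its shape against the vocabulary. A compact, self-contained sketch of that setup; the embedding file and its dimensions are placeholders, not the paths hard-coded in the source.

```python
import numpy as np
import torch

random_seed = 1234
torch.manual_seed(random_seed)
if torch.cuda.is_available():              # mirrors the args.num_gpu > 0 guard
    torch.cuda.manual_seed(random_seed)

emb_file = "embedding.npy"                 # hypothetical path
np.save(emb_file, np.random.rand(10, 300)) # stand-in for a real GloVe matrix
emb_matrix = np.load(emb_file)

vocab_size, emb_dim = 10, 300
assert emb_matrix.shape[0] == vocab_size   # same sanity checks as main()
assert emb_matrix.shape[1] == emb_dim
```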
bsavelev/medipy | [
"f0da3750a6979750d5f4c96aedc89ad5ae74545f"
] | [
"lib/medipy/gui/image/spectro_dialog.py"
] | [
"##########################################################################\n# MediPy - Copyright (C) Universite de Strasbourg\n# Distributed under the terms of the CeCILL-B license, as published by\n# the CEA-CNRS-INRIA. Refer to the LICENSE file or to\n# http://www.cecill.info/licences/Licence_CeCILL-B_V1-en.html\n# for details.\n##########################################################################\n\nimport os\nimport xml.dom.minidom as md\n\nimport numpy\nimport wx\nimport wx.xrc\n\nfrom medipy.base import find_resource, ImageAnnotation, ObservableList\nimport medipy.io\nimport medipy.io.rbnmr as rbnmr\nimport medipy.gui.xrc_wrapper\n\nclass SpectroDialog(medipy.gui.xrc_wrapper.Dialog):\n \"\"\" Dialog allowing the user to choose a spectroscopy image\n within a directory.\n Il also gives the choice either to open a 1D spectrum or compute\n the projection histogram.\n \"\"\"\n def __init__(self, parent=None, *args, **kwargs):\n \n resource = wx.xrc.EmptyXmlResource()\n resource.InsertHandler(medipy.gui.xrc_wrapper.DirectoryXMLHandler())\n resource.InsertHandler(medipy.gui.xrc_wrapper.FileXMLHandler())\n \n file = open(find_resource(\"resources/gui/spectro_dialog.xrc\"))\n resource.LoadFromString(file.read())\n \n dialog = resource.LoadDialog(parent, \"main_dialog\")\n medipy.gui.xrc_wrapper.Dialog.__init__(self, dialog, *args, **kwargs)\n \n controls = [\"dir_dialog\", \"dir_listbox\", \"image_listbox\",\n \"reference_listbox\", \"annotations_listbox\", \"annotations_checkbox\",\n \"file_dialog\", \"open_button\", \"cancel_button\"]\n \n for control in controls : \n setattr(self, \"_\"+control, wx.xrc.XRCCTRL(self, control))\n \n self.SetTitle(\"Load spectroscopy image\") \n self._open_button.Disable()\n \n # Attributes initialization \n self._patient_dirname = None\n self._image_dict = {}\n self._rect_dict = {}\n self._image_path = None\n self._reference_path = None\n self._annotations_path = None\n self._file_dialog._wildcard = \"Annotation file|*.xml\"\n self._file_dialog._button.Disable()\n \n # Events\n self.Bind(wx.EVT_CLOSE, self.OnClose)\n self._dir_listbox.Bind(wx.EVT_LISTBOX, self.OnDirChosen)\n self._image_listbox.Bind(wx.EVT_LISTBOX, self.OnImageChosen)\n self._reference_listbox.Bind(wx.EVT_LISTBOX, self.OnReferenceChosen)\n self._annotations_listbox.Bind(wx.EVT_LISTBOX, self.OnAnnotationChosen)\n self._annotations_checkbox.Bind(wx.EVT_CHECKBOX, self.OnCustomChecked)\n self._open_button.Bind(wx.EVT_BUTTON, self.OnOpenClicked)\n self._cancel_button.Bind(wx.EVT_BUTTON, self.OnCancelClicked)\n\n self._dir_dialog.add_observer(\"value\", self.OnPathChanged)\n self._file_dialog.add_observer(\"value\", self.OnCustomPathChanged)\n \n def update_information(self):\n \"\"\" Enable the open button if all the necessary pieces of information\n have been gathered. 
Disable it if not.\n \"\"\"\n if self._image_path is not None:\n self._open_button.Enable() \n else:\n self._open_button.Disable() \n \n self.Fit()\n self.GetSizer().SetSizeHints(self)\n \n ##########\n # Events #\n ########## \n \n def OnClose(self, event):\n \"\"\" Shut the window\n \"\"\"\n self.Destroy()\n \n def OnPathChanged(self, event):\n \"\"\" Set up the directories and reference spectra listboxes\n \"\"\"\n # Clean all the listboxes\n self._dir_listbox.Clear()\n self._image_listbox.Clear()\n self._reference_listbox.Clear()\n self._annotations_listbox.Clear()\n \n # Set up the directories listbox \n self._patient_dirname = self._dir_dialog._text.GetValue() \n dir_list = []\n for dirpath, dirnames, filenames in os.walk(self._patient_dirname):\n dir_list.append((dirpath, dirnames, filenames))\n dir_list[0][1].sort()\n self._dir_listbox.InsertItems(dir_list[0][1], 0)\n \n # Define the path splitter\n if '/' in self._patient_dirname:\n splitter = '/'\n else:\n splitter = '\\\\'\n \n # Set up the reference spectra and annotations listboxes\n self._ref_dict ={}\n self._annotations_dict ={}\n\n for i in dir_list:\n for filename in i[2]:\n # A reference spectrum has been found\n if filename in ['1r', '1i']:\n self._ref_dict[i[0].split(splitter)[-3] +'->'+ filename] = i[0]\n \n # An annotation file has been found\n if filename == \"peaklist.xml\":\n self._annotations_dict[i[0].split(splitter)[-3] +'->'+ filename] = os.path.join(i[0], \"peaklist.xml\")\n \n sorted_spectra = self._ref_dict.keys()\n sorted_spectra.sort()\n self._reference_listbox.InsertItems(sorted_spectra, 0)\n self._reference_listbox.Insert(\"None\", 0)\n \n if self._annotations_dict.keys() != []:\n sorted_annotations = self._annotations_dict.keys()\n sorted_annotations.sort()\n self._annotations_listbox.InsertItems(sorted_annotations, 0)\n self._annotations_listbox.Insert(\"None\", 0)\n \n self.update_information()\n \n def OnCustomPathChanged(self, event):\n \"\"\" Set up the path to the custom annotation file\n \"\"\"\n self._annotations_path = self._file_dialog.value\n \n def OnDirChosen(self, event):\n \"\"\" Display the available images within the selected directory\n \"\"\"\n self._image_listbox.Clear()\n image_list = []\n for dirpath, dirnames, filenames in os.walk(os.path.join(self._patient_dirname, self._dir_listbox.GetStringSelection())):\n image_list.append((dirpath, dirnames, filenames))\n \n # Dictionary associating a filename with its parent directory \n self._image_dict = {}\n for i in image_list:\n for filename in i[2]:\n if filename in ['2rr','2ri','2ir','2ii']:\n self._image_dict[filename]=i[0]\n \n sorted_images = self._image_dict.keys()\n sorted_images.sort()\n sorted_images.reverse()\n self._image_listbox.InsertItems(sorted_images, 0)\n \n self.update_information()\n \n def OnImageChosen(self, event):\n \"\"\" Set the full path to the selected image\n \"\"\"\n if self._image_listbox.GetStringSelection() != '':\n self._image_path = os.path.join(self._image_dict[self._image_listbox.GetStringSelection()],self._image_listbox.GetStringSelection())\n \n self.update_information()\n \n def OnReferenceChosen(self, event):\n \"\"\"Set the full path to the selected reference spectrum\n\n \"\"\"\n if (self._reference_listbox.GetStringSelection() != \"None\") and (self._reference_listbox.GetStringSelection() != ''):\n self._reference_path = os.path.join(self._ref_dict[self._reference_listbox.GetStringSelection()], self._reference_listbox.GetStringSelection()[-2:])\n else:\n self._reference_path = None\n \n 
self.update_information()\n \n def OnAnnotationChosen(self, event):\n \"\"\" Set the full path to the selected annotation file\n \"\"\"\n if (self._annotations_listbox.GetStringSelection() != \"None\") and (self._annotations_listbox.GetStringSelection() != ''):\n self._annotations_path = self._annotations_dict[self._annotations_listbox.GetStringSelection()]\n \n def OnCustomChecked(self, event): \n \"\"\" Allow the user to load an annotation file from another patient directory\n \"\"\"\n if self._annotations_checkbox.IsChecked():\n self._annotations_listbox.Disable()\n self._file_dialog._button.Enable()\n if self._file_dialog.validate():\n self._annotations_path = self._file_dialog.value\n else:\n self._annotations_path = None\n else:\n self._annotations_listbox.Enable()\n self._file_dialog._button.Disable()\n if self._annotations_listbox.IsEmpty():\n self._annotations_path = None\n elif self._annotations_listbox.GetStringSelection() != '':\n self._annotations_path = self._annotations_dict[self._annotations_listbox.GetStringSelection()]\n \n def OnOpenClicked(self, event):\n \"\"\" Load the spectrum with either a reference spectrum or a computed histogram\n \"\"\"\n \n # Create the image\n image = medipy.io.load(self._image_path) #, 0, loader_class= nmr2D.Nmr2D)\n \n # Insert a reference spectrum into the image if one has been specified\n if self._reference_path is not None:\n spectrum = numpy.fromfile(self._reference_path, numpy.int32)\n image.metadata[\"header\"][\"proton_spectrum\"] = spectrum\n \n # Load a list of annotations if an annotation file has been specified\n if self._annotations_path is not None:\n image.metadata[\"Data\"] = image.data\n dom = md.parse(self._annotations_path)\n peaks = dom.getElementsByTagName(\"Peak2D\")\n image.annotations = ObservableList()\n for peak in peaks:\n annotation = ImageAnnotation()\n ppm = (float(peak.getAttribute(\"F1\")),float(peak.getAttribute(\"F2\")))\n point = rbnmr.ppm_to_point(ppm, \n image.metadata[\"Procs\"],\n image.metadata[\"Proc2s\"])\n annotation.position = [0, point[-2], point[-1]]\n annotation.label = peak.getAttribute(\"annotation\")\n annotation.shape = ImageAnnotation.Shape.cross\n annotation.size = 10\n annotation.color = [0, 1., 0.]\n annotation.filled = False\n annotation.depth = 10\n image.annotations.append(annotation)\n \n self.GetParent().append_image([{\"image\":image}])\n \n # Close the window\n self.Destroy()\n \n def OnCancelClicked(self, event):\n \"\"\" Abort\n \"\"\"\n self.OnClose(event)\n \nif __name__ == \"__main__\" :\n app = wx.App()\n \n dlg = SpectroDialog()\n dlg.ShowModal()\n dlg.GetSizer().SetSizeHints(dlg)\n \n app.MainLoop()\n"
] | [
[
"numpy.fromfile"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
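The spectroscopy dialog above attaches a 1D reference spectrum by reading the raw Bruker `1r` file as 32-bit integers with `numpy.fromfile`. A minimal sketch of that read, writing a fake binary file first so the snippet runs on its own (real Bruker files have their own byte layout, which is not modelled here):

```python
import os
import tempfile
import numpy as np

# Fake "1r" file: raw int32 samples in native byte order.
fake_spectrum = np.arange(16, dtype=np.int32)
path = os.path.join(tempfile.gettempdir(), "1r")
fake_spectrum.tofile(path)

# Same call the dialog uses before storing the result in
# image.metadata["header"]["proton_spectrum"].
spectrum = np.fromfile(path, np.int32)
print(spectrum[:4])   # [0 1 2 3]
```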
Xingbaji/pytorch-template | [
"2246bf32a0605f2e8527f296274550a88b1e8fc9"
] | [
"model/attentionPDE_model.py"
] | [
"import torch.nn as nn\nimport torch.nn.functional as F\nfrom base import BaseModel\nimport pdb\nimport torch\nimport math\n\nclass AttentionPDEModel(nn.Module):\n def __init__(self,ndim,T,device_num = 0):\n super(AttentionPDEModel, self).__init__()\n # W_x = torch.randn(ndim, ndim, requires_grad=True)\n # torch.set_default_tensor_type('torch.cuda.DoubleTensor')\n #默认全部都使用cuda\n self.device_num = device_num\n self.ndim = ndim\n self.ndim = ndim\n self.T = T\n self.dt = 0.015\n self.dx = 0.1257\n self.dy = 0.1257\n self.build()\n # xy_path = \"/home1/shenxing/Attention_PDE/data/xy.pt\"\n # self.xy = torch.load(xy_path)\n # xy = xy.double()\n\n\n # Input = xy[:, :, :, 0]\n # Input = Input.repeat(20,1,1,1)\n # Input = Input.permute(1,2,3,0)\n # u_t = self.get_u_t(Input)\n\n #\n # Input = xy[:, :, :, 0]\n # Input = Input.repeat(20,1,1,1)\n # Input = Input.permute(1,2,3,0)\n #\n # u_x = self.get_u_x(Input)\n # u_xx = self.get_u_xx(Input)\n #\n # Input = xy[:, :, :, 1]\n # Input = Input.repeat(20, 1, 1, 1)\n # Input = Input.permute(1, 2, 3, 0)\n #\n # u_y = self.get_u_y(Input)\n # u_yy = self.get_u_yy(Input)\n\n def build(self):\n \"\"\"变量转到gpu device之后运行,构建W\"\"\"\n self.build_W_t()\n self.build_W_x()\n self.build_W_y()\n self.build_W_xx()\n self.build_W_yy()\n\n def build_W_t(self):\n #计算u_t这里没有可训练参数\n dt = 0.015\n #暂时用固定值,之后添加其他\n W_t_diag1 = torch.ones(self.T-1)*(1 / 2)\n W_t_diag1[0] = 1\n #上对角\n W_t_diag2 = torch.ones(self.T-1) * (- 1 / 2)\n W_t_diag2[-1] = -1\n # 下对角\n\n W_t = torch.diag(W_t_diag1,1) + torch.diag(W_t_diag2,-1)\n W_t[0,0] = -1\n W_t[-1,-1] = 1\n self.W_t = W_t*(1/dt)\n # self.W_t = torch.nn.Parameter(W_t, requires_grad=False)\n #shape [20,20]\n\n def build_W_x(self):\n # self.W_x_2D = torch.nn.Parameter(torch.DoubleTensor(self.ndim, self.ndim))\n # torch.nn.init.xavier_normal(self.W_x_2D)\n\n #用准确解来验证模型是否正确\n K = torch.ones(49,49)\n K = K*(2*self.dx)\n K[:,0] = K[:,0] *(1/2)\n K[:, -1] = K[:, -1] * (1 / 2)\n r_ux = self.get_real_coefficient()[0]\n r_W_x_2d = r_ux / K\n # self.W_x_2D = torch.nn.init.constant(self.W_x_2D,r_W_x_2d)\n self.W_x_2D = r_W_x_2d\n self.W_x_2D.requires_grad = True\n\n # self.W_x_2D = torch.arange(1, 49 * 49+1).view(49, 49)\n #shape 49*49\n W_x_tmp = self.W_x_2D[:,:-1]\n #0 to n-1\n W_x_tmp2 = self.W_x_2D[:,1:]\n #1 to end\n #shape 49*48\n W_x_diag1 = torch.diag_embed(W_x_tmp, offset = 1,dim1=0, dim2=1)\n W_x_diag2 = -1 *torch.diag_embed(W_x_tmp2, offset = -1,dim1=0, dim2=1)\n self.W_x_3D = W_x_diag1 + W_x_diag2\n self.W_x_3D[0,0,:] = - self.W_x_2D[:,0]\n self.W_x_3D[-1,-1,:] = self.W_x_2D[:,-1]\n # self.W_x_3D = torch.nn.Parameter(self.W_x_3D,requires_grad=True)\n #变为parameter后这里就不能往后求导了\n self.W_x_3D = self.W_x_3D.double()\n #shape [49,49,49]\n\n\n def build_W_xx(self):\n \"\"\"\n 因为方程u_xx,u_yy前都是固定的参数,所以只有一个需要更新的参数\n :return:\n \"\"\"\n # self.W_xx_k = torch.nn.Parameter(torch.randn(1))\n c = 0.2/0.1257\n self.W_xx_k = torch.tensor([c],requires_grad = False)\n # self.W_xx_k = torch.ones(1)\n W_xx_diag1 = torch.ones(self.ndim)*(-2)\n W_xx_diag1[0] = W_xx_diag1[-1] = 1\n #中心对角\n W_xx_diag2 = torch.ones(self.ndim-1)\n W_xx_diag2[0] = -2\n #上1对角\n W_xx_diag3 = torch.ones(self.ndim-1)\n W_xx_diag3[-1] = -2\n #下1对角\n W_xx = torch.diag(W_xx_diag1) + torch.diag(W_xx_diag2,1) + torch.diag(W_xx_diag3,-1)\n W_xx[0,2] = 1\n W_xx[-1,-3] = 1\n # W_xx = torch.nn.Parameter(W_xx, requires_grad=True)\n self.W_xx = (self.W_xx_k * W_xx).double()\n # self.W_xx = torch.nn.Parameter(self.W_xx)\n\n def build_W_y(self):\n # self.W_y_2D = 
torch.nn.Parameter(torch.DoubleTensor(self.ndim, self.ndim))\n # torch.nn.init.xavier_normal(self.W_y_2D)\n\n #用准确解来验证模型是否正确\n K = torch.ones(49,49)\n K = K * (2 * self.dy)\n K[:, 0] = K[:, 0] * (1 / 2)\n K[:, -1] = K[:, -1] * (1 / 2)\n r_uy = self.get_real_coefficient()[1]\n r_W_y_2d = r_uy / K\n self.W_y_2D = r_W_y_2d\n self.W_y_2D.requires_grad = True\n #shape 49*49\n W_y_tmp = self.W_y_2D[:,:-1]\n #0 to n-1\n W_y_tmp2 = self.W_y_2D[:,1:]\n #1 to end\n #shape 49*48\n W_y_diag1 = torch.diag_embed(W_y_tmp, offset = 1,dim1=0, dim2=1)\n W_y_diag2 = -1 *torch.diag_embed(W_y_tmp2, offset = -1,dim1=0, dim2=1)\n self.W_y_3D = W_y_diag1 + W_y_diag2\n self.W_y_3D[0,0,:] = - self.W_y_2D[:,0]\n self.W_y_3D[-1,-1,:] = self.W_y_2D[:,-1]\n # self.W_y_3D = torch.nn.Parameter(self.W_y_3D,requires_grad=True)\n self.W_y_3D = self.W_y_3D.double()\n\n def build_W_yy(self):\n # self.W_yy_k = torch.nn.Parameter(torch.randn(1))\n d = 0.3 / 0.1257\n self.W_yy_k = torch.tensor([d], requires_grad=False)\n W_yy_diag1 = torch.ones(self.ndim)*(-2)\n W_yy_diag1[0] = W_yy_diag1[-1] = 1\n #中心对角\n W_yy_diag2 = torch.ones(self.ndim-1)\n W_yy_diag2[0] = -2\n #上1对角\n W_yy_diag3 = torch.ones(self.ndim-1)\n W_yy_diag3[-1] = -2\n #下1对角\n W_yy = torch.diag(W_yy_diag1) + torch.diag(W_yy_diag2,1) + torch.diag(W_yy_diag3,-1)\n W_yy[0,2] = 1\n W_yy[-1,-3] = 1\n # W_yy = torch.nn.Parameter(W_yy,requires_grad=True)\n self.W_yy = (self.W_yy_k * W_yy).double()\n # self.W_yy = torch.nn.Parameter(self.W_yy)\n\n def get_u_t(self,Input):\n \"\"\"\n 计算偏导u_t,在所有空间上\n :param Input: shape[batch_size,49,49,20]\n :return: u_t: shape [batch_size,49,49,20]\n \"\"\"\n batch_size = Input.shape[0]\n T = Input.shape[-1]\n size = Input.shape[1]\n Input = Input.view(batch_size,size*size,T)\n #shape [28,2401,20]\n W_t = torch.transpose(self.W_t,0,1)\n #shape [20,20]\n u_t = torch.matmul(Input,W_t)\n #shape [28,2401,20]\n u_t = u_t.view(batch_size,size,size,T)\n return u_t\n\n def get_u_x(self,Input):\n \"\"\"\n 计算偏导u_x,在所有时间空间\n :param Input: shape[batch_size,49,49,20]\n :return: u_x: shape [batch_size,49,49,20]\n \"\"\"\n batch_size = Input.shape[0]\n T = Input.shape[-1]\n size = Input.shape[1]\n Input = Input.permute(0,3,1,2)\n #shape[batch_size,20, 49, 49]\n Input = Input.contiguous().view(-1,self.ndim,self.ndim)\n # shape[batch_size*20, 49, 49]\n Input = Input.permute(1,0,2)\n # shape[49,batch_size*20, 49]\n W_x_tmp = self.W_x_3D.permute(2, 0, 1)\n # shape[49, 49, 49]\n W_x_tmp = torch.transpose(W_x_tmp, 1, 2)\n u_x = torch.bmm(Input, W_x_tmp)\n #batch matmul 每个49*49的网格点都计算一次u_x,因为alpha(x,y)与t无关,所以用同一个W_X_3D\n #shape [49,batch_size*20,49]\n u_x = u_x.permute(1,0,2)\n u_x = u_x.view(batch_size,T,size,size)\n u_x = u_x.permute(0,2,3,1)\n return u_x\n\n def get_u_xx(self,Input):\n \"\"\"\n 计算偏导u_xx,在所有时间空间\n :param Input: shape[batch_size,49,49,20]\n :return: u_xx: shape [batch_size,49,49,20]\n \"\"\"\n batch_size = Input.shape[0]\n T = Input.shape[-1]\n size = Input.shape[1]\n Input = Input.permute(0,3,1,2)\n #shape[batch_size,20, 49, 49]\n Input = Input.contiguous().view(-1,self.ndim,self.ndim)\n # shape[batch_size*20, 49, 49]\n W_xx_tmp = torch.transpose(self.W_xx, 0, 1)\n # shape [49,49]\n u_xx = torch.matmul(Input, W_xx_tmp)\n #shape [batch_size*20,49,49]\n u_xx = u_xx.view(batch_size,T,size,size)\n u_xx = u_xx.permute(0,2,3,1)\n return u_xx\n\n\n def get_u_y(self, Input):\n \"\"\"\n 计算偏导u_y,在所有时间空间\n :param Input: shape[batch_size,49,49,20]\n :return: u_y: shape [batch_size,49,49,20]\n \"\"\"\n batch_size = Input.shape[0]\n T = Input.shape[-1]\n 
size = Input.shape[1]\n Input = Input.permute(0, 3, 1, 2)\n # shape[batch_size,20, 49, 49]\n Input = Input.contiguous().view(-1, self.ndim, self.ndim)\n # shape[batch_size*20, 49, 49]\n Input = Input.permute(2, 1, 0)\n # shape[49, 49, batch_size*20]\n W_y_tmp = self.W_y_3D.permute(2, 0, 1)\n # shape[49, 49, 49]\n u_y = torch.bmm(W_y_tmp,Input)\n # batch matmul 每个49*49的网格点都计算一次u_x,因为alpha(x,y)与t无关,所以用同一个W_X_3D\n # shape [49,49,batch_size*20]\n u_y = u_y.permute(2, 1, 0)\n u_y = u_y.view(batch_size, T, size, size)\n u_y = u_y.permute(0, 2, 3, 1)\n return u_y\n\n def get_u_yy(self,Input):\n \"\"\"\n 计算偏导u_yy,在所有时间空间\n :param Input: shape[batch_size,49,49,20]\n :return: u_yy: shape [batch_size,49,49,20]\n \"\"\"\n batch_size = Input.shape[0]\n T = Input.shape[-1]\n size = Input.shape[1]\n Input = Input.permute(0,3,1,2)\n #shape[batch_size,20, 49, 49]\n Input = Input.contiguous().view(-1,self.ndim,self.ndim)\n Input = Input.permute(1,2,0)\n # shape[49, 49,batch_size*20]\n u_yy = torch.matmul(self.W_yy,Input)\n #shape [49,49,batch_size*20]\n u_yy = u_yy.permute(2,0,1)\n u_yy = u_yy.view(batch_size,T,size,size)\n u_yy = u_yy.permute(0,2,3,1)\n return u_yy\n\n def cal_from_u0(self,Input):\n \"\"\"\n 从u0计算u1,u2,...uT\n :param Input: u0 shape [batch_size,size,size,1]\n :return: u_all shape [batch_size,size,size,21]\n \"\"\"\n batch_size = Input.shape[0]\n T = Input.shape[-1]\n size = Input.shape[1]\n u0 = Input[:,:,:,0]\n u0 = u0.view(batch_size,size,size,1)\n u_tmp = u0\n u_all = u_tmp\n for i in range(20):\n u_xi, u_yi, u_xxi, u_yyi = self.get_u_x(u_tmp), self.get_u_y(u_tmp), self.get_u_xx(u_tmp), self.get_u_yy(u_tmp)\n G = u_xi + u_yi + u_xxi + u_yyi\n u_tmp = self.dt * G + u0\n u_all = torch.cat((u_all,u_tmp),3)\n return u_all\n\n def coefficient_ux(self):\n \"\"\"\n 从self.W_x_2D算出方程u_x对应的参数\n :return:\n \"\"\"\n K = torch.ones_like(self.W_x_2D)\n K = K*(2*self.dx)\n K[:,0] = K[:,0] *(1/2)\n self.p_ux = K*self.W_x_2D\n return self.p_ux\n\n def coefficient_uy(self):\n \"\"\"\n 从self.W_y_2D算出方程u_y对应的参数\n :return:\n \"\"\"\n K = torch.ones_like(self.W_y_2D)\n K = K*(2*self.dy)\n K[:,0] = K[:,0] *(1/2)\n self.p_uy = K*self.W_y_2D\n return self.p_uy\n\n def coefficient_uxx(self):\n \"\"\"\n 从self.W_xx_k算出方程u_xx对应的参数\n :return:\n \"\"\"\n self.p_uxx = (self.dx)*self.W_xx_k\n return self.p_uxx\n\n def coefficient_uyy(self):\n \"\"\"\n 从self.W_yy_k算出方程u_yy对应的参数\n :return:\n \"\"\"\n self.p_uyy = (self.dy)*self.W_yy_k\n return self.p_uyy\n\n\n def get_coefficient(self):\n \"\"\"\n :return:算出的方程参数\n \"\"\"\n return [self.coefficient_ux(),self.coefficient_uy(),self.coefficient_uxx(),self.coefficient_uyy()]\n\n def get_real_coefficient(self,xy_batch = None):\n \"\"\"\n :return:方程参数的真实解\n \"\"\"\n if xy_batch == None:\n xy_path = \"/home1/shenxing/Attention_PDE/data/xy.pt\"\n xy_batch = torch.load(xy_path)\n x = xy_batch[0,:,:,0]\n y = xy_batch[0,:,:,1]\n r_ux = 0.5 * torch.cos(y) + 0.5 * x * (2 * math.pi - x) * torch.sin(x) + 0.6\n r_uy = 2 * (torch.cos(y) + torch.sin(x)) +0.8\n r_uxx = torch.tensor([0.2])\n r_uyy = torch.tensor([0.3])\n return [r_ux,r_uy,r_uxx,r_uyy]\n\n\n def forward(self, Input):\n \"\"\"\n 输入:当前u0生成u在t在0.015到0.3的所有的u(x,y)\n input:uT_batch shape:[batch_size,49,49,20]\"\"\"\n return self.get_u_t(Input),self.get_u_x(Input), self.get_u_y(Input), self.get_u_xx(Input), self.get_u_yy(Input)\n\n # def build_W_x(self,device = None):\n # # self.W_x_2D = torch.arange(1, 49 * 49+1).view(49, 49)\n # self.W_x_2D = torch.nn.Parameter(torch.randn(self.ndim, self.ndim))\n # if device != None:\n # 
self.W_x_2D = self.W_x_2D.to(device)\n # #shape 49*49\n # W_x_tmp = self.W_x_2D[:,:-1]\n # #0 to n-1\n # W_x_tmp2 = self.W_x_2D[:,1:]\n # #1 to end\n # #shape 49*48\n # W_x_diag1 = torch.diag_embed(W_x_tmp, offset = 1,dim1=0, dim2=1)\n # W_x_diag2 = -1 *torch.diag_embed(W_x_tmp2, offset = -1,dim1=0, dim2=1)\n # self.W_x_3D_1 = W_x_diag1 + W_x_diag2\n # self.W_x_3D_2 = self.W_x_3D_1\n # self.W_x_3D_2[0,0,:] = - self.W_x_2D[:,0]\n # self.W_x_3D_2[-1,-1,:] = self.W_x_2D[:,-1]\n # # self.W_x_3D_3 = torch.nn.Parameter(self.W_x_3D_2)\n # #?测试这里能否顺利求导\n # self.W_x_3D = self.W_x_3D_2.double()\n # shape [49,49,49]\n\n\n\n # # W_y = torch.randn(ndim, ndim, requires_grad=True)\n # self.W_y = torch.nn.Parameter(torch.randn(ndim, ndim))\n # ones_tmp = torch.ones(ndim)\n # mask = torch.diag(ones_tmp, 0) + torch.diag(ones_tmp[1:], 1) + torch.diag(ones_tmp[1:], -1)\n # mask = torch.nn.Parameter(mask,requires_grad=False)\n # W_x_mask = torch.mul(mask, self.W_x)\n # self.W_x_mask = torch.nn.Parameter(W_x_mask)\n # #shape 49,49\n # # 三对角阵.点乘\n # W_y_mask = torch.mul(mask,self.W_y)\n # self.W_y_mask = torch.nn.Parameter(W_y_mask)\n\n # def get_u_x(self,Input):\n # \"\"\"\n # 计算偏导u_x,在所有时间空间\n # :param Input: shape[batch_size,49,49,20]\n # :return: u_x: shape [batch_size,49,49,20]\n # \"\"\"\n # Input = Input.permute(0,3,1,2)\n # # shape [batch_size,20,49,49]\n # u_x = torch.matmul(Input,self.W_x_mask)\n # # u_x: shape[batch_size, 20, 49, 49]\n # u_x = u_x.permute(0,2,3,1)\n # #matmul可以broadcast\n # return u_x\n #\n # def get_u_y(self,Input):\n # \"\"\"\n # 计算偏导u_y,在所有时间空间\n # :param Input: shape[batch_size,49,49,20]\n # :return: u_y: shape [batch_size,49,49,20]\n # \"\"\"\n # Input = Input.permute(1, 2, 3, 0)\n # # shape [49,49,20,batch_size]\n # Input = Input.contiguous().view(self.ndim,self.ndim,-1)\n # # shape [49,49,20*batch_size]\n # u_y = torch.matmul(self.W_y_mask,Input)\n # # shape [49,49,20*batch_size]\n # u_y = u_y.view(self.ndim,self.ndim,20,-1)\n # # shape [49,49,20,batch_size]\n # u_y = u_y.permute(3,0,1,2)\n # return u_y\n # Input_x = Input.view(49, 1, 49)\n #\n # W_x_tmp = self.W_x_3D.permute(2,0,1)\n # W_x_tmp = torch.transpose(W_x_tmp,1,2)\n # u_x = torch.bmm(Input_x,W_x_tmp)\n # u_x = torch.squeeze(u_x)\n #\n # self.W_y_2D = torch.nn.Parameter(torch.randn(ndim, ndim))\n # # shape 49*49\n # W_y_tmp = self.W_y_2D[:, :-1]\n # # 0 to n-1\n # W_y_tmp2 = self.W_y_2D[:, 1:]\n # # 1 to end\n # # shape 49*48\n # W_x_diag1 = torch.diag_embed(W_y_tmp, offset=1)\n # W_y_diag2 = -1 * torch.diag_embed(W_y_tmp2, offset=-1)\n # self.W_y_3D = W_x_diag1 + W_x_diag2\n # self.W_y_3D[0, 0, :] = self.W_y_2D[0, 0]\n # self.W_y_3D[-1, -1, :] = self.W_y_2D[-1, -1]\n # self.W_y_3D = torch.nn.Parameter(self.W_y_3D)\n # print(self.W_y_3D)\n"
] | [
[
"torch.transpose",
"torch.ones",
"torch.cat",
"torch.load",
"torch.sin",
"torch.diag_embed",
"torch.tensor",
"torch.matmul",
"torch.bmm",
"torch.diag",
"torch.ones_like",
"torch.cos"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
tianhm/rqalpha | [
"a2df4cb85fc86a20429c66a5a6d4f1a48520f173"
] | [
"rqalpha/data/data_source.py"
] | [
"import pytz\nimport six\n\nimport pandas as pd\nfrom ..instruments import Instrument\n\n\nclass LocalDataSource:\n DAILY = 'daily.bcolz'\n INSTRUMENTS = 'instruments.pk'\n DIVIDEND = 'dividend.bcolz'\n TRADING_DATES = 'trading_dates.bcolz'\n YIELD_CURVE = 'yield_curve.bcolz'\n\n YIELD_CURVE_TENORS = {\n 0: 'S0',\n 30: 'M1',\n 60: 'M2',\n 90: 'M3',\n 180: 'M6',\n 270: 'M9',\n 365: 'Y1',\n 365 * 2: 'Y2',\n 365 * 3: 'Y3',\n 365 * 4: 'Y4',\n 365 * 5: 'Y5',\n 365 * 6: 'Y6',\n 365 * 7: 'Y7',\n 365 * 8: 'Y8',\n 365 * 9: 'Y9',\n 365 * 10: 'Y10',\n 365 * 15: 'Y15',\n 365 * 20: 'Y20',\n 365 * 30: 'Y30',\n 365 * 40: 'Y40',\n 365 * 50: 'Y50',\n }\n\n YIELD_CURVE_DURATION = sorted(YIELD_CURVE_TENORS.keys())\n\n PRICE_SCALE = 1000.\n\n def __init__(self, root_dir):\n self._root_dir = root_dir\n import bcolz\n import os\n import pickle\n self._daily_table = bcolz.open(os.path.join(root_dir, LocalDataSource.DAILY))\n self._instruments = {d['order_book_id']: Instrument(d)\n for d in pickle.load(open(os.path.join(root_dir, LocalDataSource.INSTRUMENTS), 'rb'))}\n self._dividend = bcolz.open(os.path.join(root_dir, LocalDataSource.DIVIDEND))\n self._yield_curve = bcolz.open(os.path.join(root_dir, LocalDataSource.YIELD_CURVE))\n self._trading_dates = pd.Index(pd.Timestamp(str(d)) for d in\n bcolz.open(os.path.join(root_dir, LocalDataSource.TRADING_DATES)))\n\n def instruments(self, order_book_ids):\n if isinstance(order_book_ids, six.string_types):\n try:\n return self._instruments[order_book_ids]\n except KeyError:\n print('ERROR: order_book_id {} not exists!'.format(order_book_ids))\n return None\n\n return [self._instruments[ob] for ob in order_book_ids\n if ob in self._instruments]\n\n def all_instruments(self, itype='CS'):\n if itype is None:\n return pd.DataFrame([[v.order_book_id, v.symbol, v.abbrev_symbol, v.type]\n for v in self._instruments.values()],\n columns=['order_book_id', 'symbol', 'abbrev_symbol', 'type'])\n\n if itype not in ['CS', 'ETF', 'LOF', 'FenjiA', 'FenjiB', 'FenjiMu', 'INDX', 'Future']:\n raise ValueError('Unknown type {}'.format(itype))\n\n return pd.DataFrame([v.__dict__ for v in self._instruments.values() if v.type == itype])\n\n def sector(self, code):\n return [v.order_book_id for v in self._instruments.values()\n if v.type == 'CS' and v.sector_code == code]\n\n def industry(self, code):\n return [v.order_book_id for v in self._instruments.values()\n if v.type == 'CS' and v.industry_code == code]\n\n def concept(self, *concepts):\n return [v.order_book_id for v in self._instruments.values()\n if v.type == 'CS' and any(c in v.concept_names.split('|') for c in concepts)]\n\n def get_trading_dates(self, start_date, end_date):\n left = self._trading_dates.searchsorted(start_date)\n right = self._trading_dates.searchsorted(end_date, side='right')\n return self._trading_dates[left:right]\n\n def get_yield_curve(self, start_date, end_date):\n duration = (end_date - start_date).days\n tenor = 0\n for t in LocalDataSource.YIELD_CURVE_DURATION:\n if duration >= t:\n tenor = t\n else:\n break\n\n d = start_date.year * 10000 + start_date.month * 100 + start_date.day\n return self._yield_curve.fetchwhere('date<={}'.format(d)).cols[self.YIELD_CURVE_TENORS[tenor]][-1] / 10000.0\n\n def get_dividends(self, order_book_id):\n try:\n sid = self._dividend.attrs['stock_id'][order_book_id]\n except KeyError:\n return pd.DataFrame()\n\n dividends = self._dividend.fetchwhere('id=={}'.format(sid))\n return pd.DataFrame({\n 'book_closure_date': pd.Index(pd.Timestamp(str(d)) for d in 
dividends.cols['closure_date']),\n 'ex_dividend_date': pd.Index(pd.Timestamp(str(d)) for d in dividends.cols['ex_date']),\n 'payable_date': pd.Index(pd.Timestamp(str(d)) for d in dividends.cols['payable_date']),\n 'dividend_cash_before_tax': dividends.cols['cash_before_tax'][:] / 10000.0,\n 'round_lot': dividends.cols['round_lot']\n }, index=pd.Index(pd.Timestamp(str(d)) for d in dividends.cols['announcement_date']))\n\n def get_all_bars(self, order_book_id):\n try:\n sid = self._daily_table.attrs['id_map'][order_book_id]\n except KeyError:\n raise RuntimeError('No data for {}'.format(order_book_id))\n\n bars = self._daily_table.fetchwhere('id=={}'.format(sid))\n return pd.DataFrame({\n 'open': (bars.cols['open'][:] / self.PRICE_SCALE).round(2),\n 'close': (bars.cols['close'][:] / self.PRICE_SCALE).round(2),\n 'high': (bars.cols['high'][:] / self.PRICE_SCALE).round(2),\n 'low': (bars.cols['low'][:] / self.PRICE_SCALE).round(2),\n 'volume': bars.cols['volume'],\n }, index=pd.Index(pd.Timestamp(str(d)) for d in bars.cols['date']))\n"
] | [
[
"pandas.DataFrame"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
Giselle-Liu/PyTorch-Tutorial | [
"52b7a8c8fc8fa23b2fafb6d539b3b8aff13af45c"
] | [
"tutorial-contents/502_GPU.py"
] | [
"\"\"\"\nView more, visit my tutorial page: https://mofanpy.com/tutorials/\nMy Youtube Channel: https://www.youtube.com/user/MorvanZhou\n\nDependencies:\ntorch: 0.4\ntorchvision\n\"\"\"\nimport torch\nimport torch.nn as nn\nimport torch.utils.data as Data\nimport torchvision\n\n# torch.manual_seed(1)\n\nEPOCH = 1\nBATCH_SIZE = 50\nLR = 0.001\nDOWNLOAD_MNIST = False\n\ntrain_data = torchvision.datasets.MNIST(\n root='./mnist/',\n train=True,\n transform=torchvision.transforms.ToTensor(),\n download=DOWNLOAD_MNIST,\n)\ntrain_loader = Data.DataLoader(dataset=train_data,\n batch_size=BATCH_SIZE,\n shuffle=True)\n\ntest_data = torchvision.datasets.MNIST(root='./mnist/', train=False)\n\n# !!!!!!!! Change in here !!!!!!!!! #\ntest_x = torch.unsqueeze(test_data.test_data, dim=1).type(\n torch.FloatTensor)[:2000].cuda() / 255. # Tensor on GPU\ntest_y = test_data.test_labels[:2000].cuda()\n\n\nclass CNN(nn.Module):\n\n def __init__(self):\n super(CNN, self).__init__()\n self.conv1 = nn.Sequential(\n nn.Conv2d(\n in_channels=1,\n out_channels=16,\n kernel_size=5,\n stride=1,\n padding=2,\n ),\n nn.ReLU(),\n nn.MaxPool2d(kernel_size=2),\n )\n self.conv2 = nn.Sequential(\n nn.Conv2d(16, 32, 5, 1, 2),\n nn.ReLU(),\n nn.MaxPool2d(2),\n )\n self.out = nn.Linear(32 * 7 * 7, 10)\n\n def forward(self, x):\n x = self.conv1(x)\n x = self.conv2(x)\n x = x.view(x.size(0), -1)\n output = self.out(x)\n return output\n\n\ncnn = CNN()\n\n# !!!!!!!! Change in here !!!!!!!!! #\ncnn.cuda() # Moves all model parameters and buffers to the GPU.\n\noptimizer = torch.optim.Adam(cnn.parameters(), lr=LR)\nloss_func = nn.CrossEntropyLoss()\n\nfor epoch in range(EPOCH):\n for step, (x, y) in enumerate(train_loader):\n\n # !!!!!!!! Change in here !!!!!!!!! #\n b_x = x.cuda() # Tensor on GPU\n b_y = y.cuda() # Tensor on GPU\n\n output = cnn(b_x)\n loss = loss_func(output, b_y)\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n if step % 50 == 0:\n test_output = cnn(test_x)\n\n # !!!!!!!! Change in here !!!!!!!!! #\n pred_y = torch.max(test_output,\n 1)[1].cuda().data # move the computation in GPU\n\n accuracy = torch.sum(pred_y == test_y).type(\n torch.FloatTensor) / test_y.size(0)\n print('Epoch: ', epoch, '| train loss: %.4f' % loss.data.cpu().numpy(),\n '| test accuracy: %.2f' % accuracy)\n\ntest_output = cnn(test_x[:10])\n\n# !!!!!!!! Change in here !!!!!!!!! #\npred_y = torch.max(test_output, 1)[1].cuda().data # move the computation in GPU\n\nprint(pred_y, 'prediction number')\nprint(test_y[:10], 'real number')\n"
] | [
[
"torch.nn.CrossEntropyLoss",
"torch.max",
"torch.nn.Conv2d",
"torch.utils.data.DataLoader",
"torch.sum",
"torch.unsqueeze",
"torch.nn.Linear",
"torch.nn.MaxPool2d",
"torch.nn.ReLU"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
Crazy-Jack/SpatialExpGeneCluster | [
"9e57c308d1c577a936a2358d0641c65b8130034f"
] | [
"src/trainDEC.py"
] | [
"import argparse\nimport os, sys\n\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nimport torch.backends.cudnn as cudnn\nimport torchvision\nfrom torchvision import transforms\nfrom PIL import Image\nimport numpy as np\nfrom tqdm import tqdm\nfrom sklearn.cluster import KMeans\nfrom scipy.stats import ortho_group\n\nfrom utlis import set_args, set_optimizer\nfrom utlis import save_model\nfrom utlis import AverageMeter\nfrom utlis import txt_logger\nfrom network.DECnetwork import DECNetwork\nfrom DEC_loss import DECLoss\nfrom data_utlis import SpatialDataset\n\n\ndef costomize_args(args):\n return args\n\n\ndef set_dataloader(args):\n \"\"\"use args.dataset decide which dataset to use and return dataloader\"\"\"\n if args.dataset == 'mnist':\n transform = transforms.Compose([\n transforms.Resize((32, 32)),\n transforms.ToTensor(),\n ])\n train_dataset = torchvision.datasets.MNIST(root=args.loading_path, train=True, download=True, \n transform=transform)\n test_dataset = torchvision.datasets.MNIST(root=args.loading_path, train=False, download=True, \n transform=transform)\n elif args.dataset == 'spatial':\n transform = transforms.Compose([\n transforms.Resize((32, 32)),\n transforms.ToTensor(),\n ])\n train_dataset = SpatialDataset(args.data_root, args.data_file_name)\n test_dataset = SpatialDataset(args.data_root, args.data_file_name)\n\n else:\n raise NotImplemented(\"dataset {} is not implemented.\".format(args.dataset))\n # train loader\n train_dataloader = torch.utils.data.DataLoader(\n train_dataset, batch_size=args.batch_size, shuffle=True,\n num_workers=args.num_workers, pin_memory=True)\n # test loader\n test_dataloader = torch.utils.data.DataLoader(\n test_dataset, batch_size=args.batch_size, shuffle=True,\n num_workers=args.num_workers, pin_memory=True)\n\n return train_dataloader, test_dataloader\n\n\ndef get_model(args, logger):\n model = DECNetwork(args.input_channel, args.feature_dim, args.latent_class_num,\n alpha=1.0, decode_constraint=False)\n latent_class_criterion = DECLoss()\n\n rec_criterion = torch.nn.MSELoss()\n\n if torch.cuda.is_available():\n if torch.cuda.device_count() > 1:\n print(\"Used devices: {}\".format(torch.cuda.device_count()))\n model.encoder = torch.nn.DataParallel(model.encoder)\n model = model.cuda()\n latent_class_criterion = latent_class_criterion.cuda()\n rec_criterion = rec_criterion.cuda()\n cudnn.benchmark = True\n\n if args.resume_model_path:\n # get pre ssl epoch\n ckpt = torch.load(args.resume_model_path, map_location='cpu')\n state_dict = ckpt['model']\n new_state_dict = {}\n for k, v in state_dict.items():\n if torch.cuda.device_count() > 1:\n print(k)\n #if k.split(\".\")[0] != 'head':\n # k = \".\".join([k.split(\".\")[0], \"module\"] + k.split(\".\")[1:])\n else:\n k = k.replace(\"module.\", \"\")\n new_state_dict[k] = v\n state_dict = new_state_dict\n model.load_state_dict(state_dict)\n\n logger.logger.info(\"Model loaded! 
Pretrained from epoch {}\".format(opt.pre_ssl_epoch))\n\n return model, latent_class_criterion, rec_criterion\n\n\n\ndef pre_train(train_loader, model, epoch, args, optimizer, scheduler, pretrain_criterion):\n model.train()\n model.setPretrain(True)\n\n losses = AverageMeter()\n\n for idx, (img, labels) in tqdm(enumerate(train_loader), total=len(train_loader)):\n img = img.cuda()\n bsz = img.shape[0]\n\n # compute probrability\n feature, rec_img = model(img)\n\n # compute loss\n loss = pretrain_criterion(rec_img, img)\n\n # update metric\n losses.update(loss.item(), bsz)\n\n # SGD\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n if args.use_scheduler_pretrain:\n scheduler.step(loss)\n\n return losses.avg\n\n\ndef train(train_loader, model, optimizer, epoch, args, scheduler, UnSup_criterion, rec_criterion=None):\n \"\"\"one epoch training\"\"\"\n # TODO: rewrite this and fill all empty lines!\n model.train()\n model.setPretrain(False)\n\n batch_time = AverageMeter()\n data_time = AverageMeter()\n losses = AverageMeter()\n\n Y_assignment = []\n T_assignment = []\n for idx, (img, labels) in tqdm(enumerate(train_loader), total=len(train_loader)):\n \"\"\"params:\n img: [bz, C, H, W]\n labels: [bz,]\n \"\"\"\n img = img.cuda()\n # labels = labels.cuda()\n bsz = img.shape[0]\n\n # compute probrability - p(y|a) dim: [bsz, |Y|]\n features, prob = model(img)\n prob = prob.float()\n # get Y and T assignment\n Y_assignment.extend(prob.argmax(dim=1).cpu().numpy())\n\n # compute loss\n # DEC loss\n loss = UnSup_criterion(prob)\n \n # update metric\n losses.update(loss.item(), bsz)\n\n # SGD\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n if args.lr_scheduling == 'reduce': # reduce on pleatau\n scheduler.step(loss)\n\n # # compute H(Y|T) and I(Y;T)\n # # TODO: LOW PRIORITY!! 
use pytorch to implement H and MI calucalation\n # Y_assignment = np.array(Y_assignment)\n # # print(\"Y_assign\", Y_assignment[:200])\n # T_assignment = np.array(T_assignment)\n # H_Y_T = conditional_entropy(Y_assignment, T_assignment)\n # MI = mutual_information(Y_assignment, T_assignment)\n\n return losses.avg, Y_assignment\n\n\n\ndef main():\n args = set_args()\n args = costomize_args(args)\n\n train_loader, test_loader = set_dataloader(args)\n\n scalar_logger = txt_logger(args.saving_path, args, 'python ' + ' '.join(sys.argv))\n model, UnSup_criterion, pretrain_criterion = get_model(args, scalar_logger)\n optimizer, scheduler = set_optimizer(args, model)\n\n # training routine\n # resume model path\n if args.resume_model_path:\n start = opt.pre_ssl_epoch\n else:\n start = 0\n\n if args.pretrain_mode == 'autoencoder':\n # pre_train\n pre_train_optimizer = optim.Adam(model.parameters(), weight_decay=5e-4)\n pre_train_scheduler = optim.lr_scheduler.ReduceLROnPlateau(pre_train_optimizer, mode='min', factor=0.5, patience=20, verbose=True)\n for epoch in range(start + 1, args.pre_train_epochs + 1):\n pre_train_loss = pre_train(train_loader, model, epoch, args, pre_train_optimizer, pre_train_scheduler, pretrain_criterion)\n\n scalar_logger.log_value(epoch, ('pre_train loss', pre_train_loss))\n\n # k-means clustering for initialization\n print(\"Initialization (K-means) ---------\")\n features = []\n model.eval()\n for idx, (img, _) in tqdm(enumerate(train_loader), total=len(train_loader)):\n with torch.no_grad():\n img = img.cuda()\n feature, rec_img = model(img)\n features.extend(feature.cpu().numpy())\n\n features = np.array(features)\n features = features.reshape(features.shape[0], features.shape[1])\n print(features.shape)\n k_means = KMeans(n_clusters=args.latent_class_num, n_init=20)\n k_means.fit(features)\n model.clusterCenterInitialization(k_means.cluster_centers_)\n\n elif args.pretrain_mode == 'None':\n # random othogonal init\n if args.latent_class_num < args.feature_dim:\n mu_init = ortho_group.rvs(dim=args.feature_dim)[:args.latent_class_num]\n else:\n mu_init = np.random.rand(args.latent_class_num, args.feature_dim)\n model.clusterCenterInitialization(mu_init)\n else:\n raise NotImplementedError(\"pretrain mode {} has not been implemented.\".format(args.pretrain_mode))\n\n # train\n print(\"Begin Training -------------------------\")\n for epoch in range(start + 1, args.epochs + 1):\n # train for one epoch\n loss, Y_assignment = train(train_loader, model, optimizer, epoch, args, scheduler, UnSup_criterion)\n # latent_class statistics\n unique_latent_class = set(Y_assignment)\n\n # file logger\n scalar_logger.log_value(epoch, ('loss', loss),\n ('learning_rate', optimizer.param_groups[0]['lr']),\n ('lc_len', len(unique_latent_class)),\n )\n\n\n if epoch % args.save_freq == 0:\n save_file = os.path.join(\n args.saving_path, 'ckpt_epoch_{epoch}.pth'.format(epoch=epoch))\n save_model(model, optimizer, args, epoch, save_file)\n\n # TODO: save latent class assignment\n save_file_lat_class_assign = os.path.join(args.saving_path, 'latent_class.npy')\n np.save(save_file_lat_class_assign, Y_assignment)\n # log latent class statistics\n latent_class_stats = {}\n for i in unique_latent_class:\n latent_class_stats[i] = np.where(Y_assignment == i)[0].shape[0]\n scalar_logger.log_value(epoch, ('final_lc_assign', latent_class_stats))\n\n # save the last model\n save_file = os.path.join(\n args.saving_path, 'last.pth')\n save_model(model, optimizer, args, args.epochs, save_file)\n\n return \n\n 
\nif __name__ == '__main__':\n main()\n\n\n\n\n\n "
] | [
[
"torch.optim.lr_scheduler.ReduceLROnPlateau",
"sklearn.cluster.KMeans",
"torch.load",
"torch.utils.data.DataLoader",
"numpy.save",
"torch.nn.DataParallel",
"torch.no_grad",
"numpy.random.rand",
"torch.cuda.is_available",
"scipy.stats.ortho_group.rvs",
"torch.cuda.device_count",
"numpy.array",
"numpy.where",
"torch.nn.MSELoss"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
BeyondTheProof/metrics | [
"8af688daff819a95f4cb3d757ffc919c86072ee9",
"8af688daff819a95f4cb3d757ffc919c86072ee9",
"8af688daff819a95f4cb3d757ffc919c86072ee9",
"8af688daff819a95f4cb3d757ffc919c86072ee9"
] | [
"tests/retrieval/inputs.py",
"torchmetrics/functional/retrieval/precision.py",
"tests/retrieval/test_precision.py",
"torchmetrics/regression/mean_squared_log_error.py"
] | [
"# Copyright The PyTorch Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom collections import namedtuple\n\nimport torch\n\nfrom tests.helpers.testers import BATCH_SIZE, EXTRA_DIM, NUM_BATCHES\n\nInput = namedtuple('InputMultiple', [\"indexes\", \"preds\", \"target\"])\n\n# correct\n_input_retrieval_scores = Input(\n indexes=torch.randint(high=10, size=(NUM_BATCHES, BATCH_SIZE)),\n preds=torch.rand(NUM_BATCHES, BATCH_SIZE),\n target=torch.randint(high=2, size=(NUM_BATCHES, BATCH_SIZE)),\n)\n\n_input_retrieval_scores_extra = Input(\n indexes=torch.randint(high=10, size=(NUM_BATCHES, BATCH_SIZE, EXTRA_DIM)),\n preds=torch.rand(NUM_BATCHES, BATCH_SIZE, EXTRA_DIM),\n target=torch.randint(high=2, size=(NUM_BATCHES, BATCH_SIZE, EXTRA_DIM)),\n)\n\n_input_retrieval_scores_non_binary_target = Input(\n indexes=torch.randint(high=10, size=(NUM_BATCHES, BATCH_SIZE)),\n preds=torch.rand(NUM_BATCHES, BATCH_SIZE),\n target=torch.randint(high=4, size=(NUM_BATCHES, BATCH_SIZE)),\n)\n\n# with errors\n_input_retrieval_scores_no_target = Input(\n indexes=torch.randint(high=10, size=(NUM_BATCHES, BATCH_SIZE)),\n preds=torch.rand(NUM_BATCHES, BATCH_SIZE),\n target=torch.randint(high=1, size=(NUM_BATCHES, BATCH_SIZE)),\n)\n\n_input_retrieval_scores_all_target = Input(\n indexes=torch.randint(high=10, size=(NUM_BATCHES, BATCH_SIZE)),\n preds=torch.rand(NUM_BATCHES, BATCH_SIZE),\n target=torch.randint(low=1, high=2, size=(NUM_BATCHES, BATCH_SIZE)),\n)\n\n_input_retrieval_scores_empty = Input(\n indexes=torch.randint(high=10, size=[0]),\n preds=torch.rand(0),\n target=torch.randint(high=2, size=[0]),\n)\n\n_input_retrieval_scores_mismatching_sizes = Input(\n indexes=torch.randint(high=10, size=(NUM_BATCHES, BATCH_SIZE - 2)),\n preds=torch.rand(NUM_BATCHES, BATCH_SIZE),\n target=torch.randint(high=2, size=(NUM_BATCHES, BATCH_SIZE)),\n)\n\n_input_retrieval_scores_mismatching_sizes_func = Input(\n indexes=torch.randint(high=10, size=(NUM_BATCHES, BATCH_SIZE)),\n preds=torch.rand(NUM_BATCHES, BATCH_SIZE - 2),\n target=torch.randint(high=2, size=(NUM_BATCHES, BATCH_SIZE)),\n)\n\n_input_retrieval_scores_wrong_targets = Input(\n indexes=torch.randint(high=10, size=(NUM_BATCHES, BATCH_SIZE)),\n preds=torch.rand(NUM_BATCHES, BATCH_SIZE),\n target=torch.randint(low=-2**31, high=2**31, size=(NUM_BATCHES, BATCH_SIZE)),\n)\n",
"# Copyright The PyTorch Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom typing import Optional\n\nimport torch\nfrom torch import Tensor, tensor\n\nfrom torchmetrics.utilities.checks import _check_retrieval_functional_inputs\n\n\ndef retrieval_precision(preds: Tensor, target: Tensor, k: Optional[int] = None) -> Tensor:\n \"\"\"\n Computes the precision metric (for information retrieval),\n as explained `here <https://en.wikipedia.org/wiki/Precision_and_recall#Precision>`__.\n Precision is the fraction of relevant documents among all the retrieved documents.\n\n ``preds`` and ``target`` should be of the same shape and live on the same device. If no ``target`` is ``True``,\n ``0`` is returned. ``target`` must be either `bool` or `integers` and ``preds`` must be `float`,\n otherwise an error is raised. If you want to measure Precision@K, ``k`` must be a positive integer.\n\n Args:\n preds: estimated probabilities of each document to be relevant.\n target: ground truth about each document being relevant or not.\n k: consider only the top k elements (default: None)\n\n Returns:\n a single-value tensor with the precision (at ``k``) of the predictions ``preds`` w.r.t. the labels ``target``.\n\n Example:\n >>> preds = tensor([0.2, 0.3, 0.5])\n >>> target = tensor([True, False, True])\n >>> retrieval_precision(preds, target, k=2)\n tensor(0.5000)\n \"\"\"\n preds, target = _check_retrieval_functional_inputs(preds, target)\n\n if k is None:\n k = preds.shape[-1]\n\n if not (isinstance(k, int) and k > 0):\n raise ValueError(\"`k` has to be a positive integer or None\")\n\n if not target.sum():\n return tensor(0.0, device=preds.device)\n\n relevant = target[torch.argsort(preds, dim=-1, descending=True)][:k].sum().float()\n return relevant / k\n",
"# Copyright The PyTorch Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport numpy as np\nimport pytest\nfrom torch import Tensor\n\nfrom tests.helpers import seed_all\nfrom tests.retrieval.helpers import (\n RetrievalMetricTester,\n _concat_tests,\n _default_metric_class_input_arguments,\n _default_metric_functional_input_arguments,\n _errors_test_class_metric_parameters_default,\n _errors_test_class_metric_parameters_k,\n _errors_test_class_metric_parameters_no_pos_target,\n _errors_test_functional_metric_parameters_default,\n _errors_test_functional_metric_parameters_k,\n)\nfrom torchmetrics.functional.retrieval.precision import retrieval_precision\nfrom torchmetrics.retrieval.retrieval_precision import RetrievalPrecision\n\nseed_all(42)\n\n\ndef _precision_at_k(target: np.ndarray, preds: np.ndarray, k: int = None):\n \"\"\"\n Didn't find a reliable implementation of Precision in Information Retrieval, so,\n reimplementing here. A good explanation can be found\n `here <https://web.stanford.edu/class/cs276/handouts/EvaluationNew-handout-1-per.pdf>_`.\n \"\"\"\n assert target.shape == preds.shape\n assert len(target.shape) == 1 # works only with single dimension inputs\n\n if k is None:\n k = len(preds)\n\n if target.sum() > 0:\n order_indexes = np.argsort(preds, axis=0)[::-1]\n relevant = np.sum(target[order_indexes][:k])\n return relevant * 1.0 / k\n return np.NaN\n\n\nclass TestPrecision(RetrievalMetricTester):\n\n @pytest.mark.parametrize(\"ddp\", [True, False])\n @pytest.mark.parametrize(\"dist_sync_on_step\", [True, False])\n @pytest.mark.parametrize(\"empty_target_action\", ['skip', 'neg', 'pos'])\n @pytest.mark.parametrize(\"k\", [None, 1, 4, 10])\n @pytest.mark.parametrize(**_default_metric_class_input_arguments)\n def test_class_metric(\n self,\n ddp: bool,\n indexes: Tensor,\n preds: Tensor,\n target: Tensor,\n dist_sync_on_step: bool,\n empty_target_action: str,\n k: int,\n ):\n metric_args = {'empty_target_action': empty_target_action, 'k': k}\n\n self.run_class_metric_test(\n ddp=ddp,\n indexes=indexes,\n preds=preds,\n target=target,\n metric_class=RetrievalPrecision,\n sk_metric=_precision_at_k,\n dist_sync_on_step=dist_sync_on_step,\n metric_args=metric_args,\n )\n\n @pytest.mark.parametrize(**_default_metric_functional_input_arguments)\n @pytest.mark.parametrize(\"k\", [None, 1, 4, 10])\n def test_functional_metric(self, preds: Tensor, target: Tensor, k: int):\n self.run_functional_metric_test(\n preds=preds,\n target=target,\n metric_functional=retrieval_precision,\n sk_metric=_precision_at_k,\n metric_args={},\n k=k,\n )\n\n @pytest.mark.parametrize(**_default_metric_class_input_arguments)\n def test_precision_cpu(self, indexes: Tensor, preds: Tensor, target: Tensor):\n self.run_precision_test_cpu(\n indexes=indexes,\n preds=preds,\n target=target,\n metric_module=RetrievalPrecision,\n metric_functional=retrieval_precision,\n )\n\n @pytest.mark.parametrize(**_default_metric_class_input_arguments)\n def test_precision_gpu(self, indexes: Tensor, 
preds: Tensor, target: Tensor):\n self.run_precision_test_gpu(\n indexes=indexes,\n preds=preds,\n target=target,\n metric_module=RetrievalPrecision,\n metric_functional=retrieval_precision,\n )\n\n @pytest.mark.parametrize(\n **_concat_tests(\n _errors_test_class_metric_parameters_default,\n _errors_test_class_metric_parameters_no_pos_target,\n _errors_test_class_metric_parameters_k,\n )\n )\n def test_arguments_class_metric(\n self, indexes: Tensor, preds: Tensor, target: Tensor, message: str, metric_args: dict\n ):\n self.run_metric_class_arguments_test(\n indexes=indexes,\n preds=preds,\n target=target,\n metric_class=RetrievalPrecision,\n message=message,\n metric_args=metric_args,\n exception_type=ValueError,\n kwargs_update={},\n )\n\n @pytest.mark.parametrize(\n **_concat_tests(\n _errors_test_functional_metric_parameters_default,\n _errors_test_functional_metric_parameters_k,\n )\n )\n def test_arguments_functional_metric(self, preds: Tensor, target: Tensor, message: str, metric_args: dict):\n self.run_functional_metric_arguments_test(\n preds=preds,\n target=target,\n metric_functional=retrieval_precision,\n message=message,\n exception_type=ValueError,\n kwargs_update=metric_args,\n )\n",
"# Copyright The PyTorch Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom typing import Any, Callable, Optional\n\nimport torch\nfrom torch import Tensor, tensor\n\nfrom torchmetrics.functional.regression.mean_squared_log_error import (\n _mean_squared_log_error_compute,\n _mean_squared_log_error_update,\n)\nfrom torchmetrics.metric import Metric\n\n\nclass MeanSquaredLogError(Metric):\n r\"\"\"\n Computes `mean squared logarithmic error\n <https://scikit-learn.org/stable/modules/model_evaluation.html#mean-squared-log-error>`_\n (MSLE):\n\n .. math:: \\text{MSLE} = \\frac{1}{N}\\sum_i^N (\\log_e(1 + y_i) - \\log_e(1 + \\hat{y_i}))^2\n\n Where :math:`y` is a tensor of target values, and :math:`\\hat{y}` is a tensor of predictions.\n\n Args:\n compute_on_step:\n Forward only calls ``update()`` and return None if this is set to False. default: True\n dist_sync_on_step:\n Synchronize metric state across processes at each ``forward()``\n before returning the value at the step. default: False\n process_group:\n Specify the process group on which synchronization is called. default: None (which selects the entire world)\n\n Example:\n >>> from torchmetrics import MeanSquaredLogError\n >>> target = torch.tensor([2.5, 5, 4, 8])\n >>> preds = torch.tensor([3, 5, 2.5, 7])\n >>> mean_squared_log_error = MeanSquaredLogError()\n >>> mean_squared_log_error(preds, target)\n tensor(0.0397)\n\n .. note::\n Half precision is only support on GPU for this metric\n\n \"\"\"\n sum_squared_log_error: Tensor\n total: Tensor\n\n def __init__(\n self,\n compute_on_step: bool = True,\n dist_sync_on_step: bool = False,\n process_group: Optional[Any] = None,\n dist_sync_fn: Callable = None,\n ) -> None:\n super().__init__(\n compute_on_step=compute_on_step,\n dist_sync_on_step=dist_sync_on_step,\n process_group=process_group,\n dist_sync_fn=dist_sync_fn,\n )\n\n self.add_state(\"sum_squared_log_error\", default=tensor(0.0), dist_reduce_fx=\"sum\")\n self.add_state(\"total\", default=tensor(0), dist_reduce_fx=\"sum\")\n\n def update(self, preds: Tensor, target: Tensor) -> None: # type: ignore\n \"\"\"\n Update state with predictions and targets.\n\n Args:\n preds: Predictions from model\n target: Ground truth values\n \"\"\"\n sum_squared_log_error, n_obs = _mean_squared_log_error_update(preds, target)\n\n self.sum_squared_log_error += sum_squared_log_error\n self.total += n_obs\n\n def compute(self) -> Tensor:\n \"\"\"\n Compute mean squared logarithmic error over state.\n \"\"\"\n return _mean_squared_log_error_compute(self.sum_squared_log_error, self.total)\n\n @property\n def is_differentiable(self) -> bool:\n return True\n"
] | [
[
"torch.randint",
"torch.rand"
],
[
"torch.argsort",
"torch.tensor"
],
[
"numpy.argsort",
"numpy.sum"
],
[
"torch.tensor"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
mkolod/Vitis-Tutorials | [
"33d6cf9686398ef1179778dc0da163291c68b465",
"33d6cf9686398ef1179778dc0da163291c68b465",
"33d6cf9686398ef1179778dc0da163291c68b465"
] | [
"Machine_Learning/Design_Tutorials/03-using_densenetx/files/datadownload.py",
"Machine_Learning/Feature_Tutorials/02-profiling-example/files/alexnet_zcu102/common/dputils.py",
"Machine_Learning/Design_Tutorials/01-caffe_cats_vs_dogs/files/caffe/code/6_make_predictions.py"
] | [
"'''\n Copyright 2020 Xilinx Inc.\n\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n'''\n\nimport numpy as np\nimport os\n\n# Silence TensorFlow messages\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'\n\nimport tensorflow as tf\n\n\n\ndef datadownload():\n \n # CIFAR10 dataset has 60k images. Training set is 50k, test set is 10k.\n # Each image is 32x32x8bits\n (x_train, y_train), (x_test, y_test) = tf.keras.datasets.cifar10.load_data()\n \n # Scale image data from range 0:255 to range 0:1.0\n # Also converts train & test data to float from uint8\n x_train = (x_train/255.0).astype(np.float32)\n x_test = (x_test/255.0).astype(np.float32)\n\n # one-hot encode the labels\n y_train = tf.keras.utils.to_categorical(y_train, num_classes=10)\n y_test = tf.keras.utils.to_categorical(y_test, num_classes=10)\n \n \n return (x_train,y_train), (x_test,y_test)\n",
"'''\nCopyright 2019 Xilinx Inc.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n'''\n\nfrom ctypes import *\nimport cv2\nimport numpy as np\nfrom dnndk import n2cube\n\ntry:\n pyc_libdputils = cdll.LoadLibrary(\"libn2cube.so\")\nexcept Exception:\n print('Load libn2cube.so failed\\nPlease install DNNDK first!')\n\n\ndef dpuSetInputImageWithScale(task, nodeName, image, mean, scale, idx=0):\n \"\"\"Set image into DPU Task's input Tensor with a specified scale parameter\"\"\"\n height = n2cube.dpuGetInputTensorHeight(task, nodeName, idx)\n width = n2cube.dpuGetInputTensorWidth(task, nodeName, idx)\n channel = n2cube.dpuGetInputTensorChannel(task, nodeName, idx)\n (imageHeight, imageWidth, imageChannel) = image.shape\n inputMean = (c_float * channel)()\n for i in range(0, channel):\n inputMean[i] = mean[i]\n\n if height == imageHeight and width == imageWidth:\n newImage = image\n else:\n newImage = cv2.resize(image, (width, height), 0, 0, cv2.INTER_LINEAR)\n\n inputImage = np.asarray(newImage, dtype=np.byte)\n inputImage2 = inputImage.ctypes.data_as(c_char_p)\n return pyc_libdputils.pyc_dpuSetInputData(task,\n c_char_p(nodeName.encode(\"utf-8\")), inputImage2,\n c_int(height),\n c_int(width),\n c_int(imageChannel), inputMean,\n c_float(scale), c_int(idx))\n\n\ndef dpuSetInputImage(task, nodeName, image, mean, idx=0):\n \"\"\"\n Set image into DPU Task's input Tensor\n task: DPU Task\n nodeName: The pointer to DPU Node name.\n image: Input image in OpenCV Mat format. Single channel and 3-channel input image are supported.\n mean: Mean value array which contains 1 member for single channel input image\n or 3 members for 3-channel input image\n Note: You can get the mean values from the input Caffe prototxt.\n At present, the format of mean value file is not yet supported\n idx: The index of a single input tensor for the Node, with default value as 0\n \"\"\"\n return dpuSetInputImageWithScale(task, nodeName, image, mean, 1.0, idx)\n\n\ndef dpuSetInputImage2(task, nodeName, image, idx=0):\n \"\"\"\n Set image into DPU Task's input Tensor (mean values automatically processed by N2Cube)\n nodeName: The pointer to DPU Node name.\n image: Input image in OpenCV Mat format. Single channel and 3-channel input image are supported.\n idx: The index of a single input tensor for the Node, with default value as 0\n \"\"\"\n channel = n2cube.dpuGetInputTensorChannel(task, nodeName, idx)\n output = (c_float * channel)()\n outputMean = POINTER(c_float)(output)\n pyc_libdputils.loadMean(task, outputMean, channel)\n for i in range(channel):\n outputMean[i] = float(outputMean[i])\n return dpuSetInputImageWithScale(task, nodeName, image, outputMean, 1.0,\n idx)\n",
"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n'''\nCOPYRIGHT\n\nAll new contributions:\nCopyright [2019] [Xilinx Inc.]\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\nAll other contributions:\nCopyright (c) 2017, Seokju Lee\nAll rights reserved.\n\nAll new contributions compared to the original branch:\nCopyright (c) 2017 Shifeng Zhang (CBSR, NLPR, CASIA), Longyin Wen (GE),\nXiao Bian (GE), Zhen Lei (CBSR, NLPR, CASIA), Stan Z. Li (CBSR, NLPR, CASIA).\nAll rights reserved.\n\nAll new contributions compared to the original branch:\nCopyright (c) 2015, 2016 Wei Liu (UNC Chapel Hill), Dragomir Anguelov (Zoox),\nDumitru Erhan (Google), Christian Szegedy (Google), Scott Reed (UMich Ann Arbor),\nCheng-Yang Fu (UNC Chapel Hill), Alexander C. Berg (UNC Chapel Hill).\nAll rights reserved.\n\n## original copyright\n\nAll contributions by the University of California:\nCopyright (c) 2014, 2015, The Regents of the University of California (Regents)\nAll rights reserved.\n\nAll other contributions:\nCopyright (c) 2014, 2015, the respective contributors\nAll rights reserved.\n\nCaffe uses a shared copyright model: each contributor holds copyright over\ntheir contributions to Caffe. The project versioning records all such\ncontribution and copyright details. If a contributor wants to further mark\ntheir specific copyright on a particular contribution, they should indicate\ntheir copyright solely in the commit message of the change when it is\ncommitted.\n\nLICENSE\n\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the following conditions are met: \n\n1. Redistributions of source code must retain the above copyright notice, this\n list of conditions and the following disclaimer. \n2. Redistributions in binary form must reproduce the above copyright notice,\n this list of conditions and the following disclaimer in the documentation\n and/or other materials provided with the distribution. \n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND\nANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\nWARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\nDISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR\nANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\nLOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\nON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\nSOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\nCONTRIBUTION AGREEMENT\n\nBy contributing to the BVLC/caffe repository through pull-request, comment,\nor otherwise, the contributor releases their content to the\nlicense and copyright terms herein.\n'''\n\n'''\nbased on the jupyter notebook code of \"Xilinx_Caffe/examples/00-classification.ipynb\n\nmodified by: [email protected]\ndate 11 March 2020\n'''\n\n# ##################################################################################################\n# USAGE+\n# python code/6_make_predictions.py -d ./models/alexnetBNnoLRN/m1/deploy_1_alexnetBNnoLRN.prototxt -w ./models/alexnetBNnoLRN/m1/snapshot_1_alexnetBNnoLRN__iter_12703.caffemodel\n\n# it computes the prediction accuracy for the CNN trainined on CATS cvs DOGS by using 1000 JPEG 227x227x3 images in\n# the test directory (not belonging to the trainining or validation LMDB datasets)\n\n# ##################################################################################################\n\nimport os\nimport glob\nimport sys\nimport cv2\nimport caffe\nimport warnings\nwarnings.filterwarnings(\"ignore\", message=\"numpy.dtype size changed\")\nwarnings.filterwarnings(\"ignore\", message=\"numpy.ufunc size changed\")\nimport numpy as np\n\nfrom config import cats_vs_dogs_config as config\n\nimport argparse\n\n# construct the argument parse and parse the arguments\nap = argparse.ArgumentParser()\nap.add_argument(\"-d\", \"--description\", required=True, help=\"description model\")\nap.add_argument(\"-w\", \"--weights\", required=True, help=\"weights caffemodel\")\nargs = vars(ap.parse_args())\n\nfrom caffe.proto import caffe_pb2\n\n#caffe.set_mode_cpu() \ncaffe.set_mode_gpu()\n\n\n# ##################################################################################################\n#Size of images\nIMAGE_WIDTH = 227\nIMAGE_HEIGHT = 227\n# ##################################################################################################\n\n# mean values of training dataset in BGR format\nMEAN_VALUES= (106.4051, 116.038956, 124.462036)\n\nmean_array=np.ones((3,IMAGE_WIDTH,IMAGE_HEIGHT), dtype=np.float32)\nmean_array[0,:,:]=mean_array[0,:,:]*MEAN_VALUES[0]\nmean_array[1,:,:]=mean_array[1,:,:]*MEAN_VALUES[1]\nmean_array[2,:,:]=mean_array[2,:,:]*MEAN_VALUES[2]\n\n\n# ##################################################################################################\n#Read caffemodel architecture and trained model's weights\ncaffe_description = args[\"description\"]\ncaffe_model = args[\"weights\"]\n\nnet = caffe.Net(caffe_description, caffe_model, caffe.TEST)\n\n# ##################################################################################################\n# Setup Input preprocessing\ntransformer = caffe.io.Transformer({\"data\": net.blobs[\"data\"].data.shape}) #\"data\" is the name of the input blob == net.inputs[0]\n\ntransformer.set_mean(\"data\", mean_array)\ntransformer.set_transpose(\"data\", (2,0,1)) # move image channels to outmost dimension\n#transformer.set_raw_scale(\"data\", 255) # rescslr from [0.1] to [0,255], 
use only with caffe.io.load_image()\n#transformer.set_channel_swap(\"data\", (2,1,0)) # swap from RGB to BGR: you do not need it if you use OpenCV\n\n# reshape the blobs so that they match the image shape.\n#net.blobs[\"data\"].reshape(1,3,227,227)\n\n# ##################################################################################################\n# Making predictions\n\n#Reading image paths\ntest_img_paths = [img_path for img_path in glob.glob(config.TEST_DIR+ \"/*.jpg\")]\n\nNUMEL = len(test_img_paths)\n\ntest_ids = np.zeros(([NUMEL,1]))\npreds = np.zeros(([NUMEL, 2]))\nidx = 0\n\ntot_true = 0\ntot_false = 0\ntop2_true = 0\ntop2_false= 0\n\nfor img_path in test_img_paths:\n\n img = cv2.imread(img_path, cv2.IMREAD_COLOR)\n print(img_path)\n #img2 = cv2.resize(img, (128, 128), interpolation = cv2.INTER_CUBIC)\n #cv2.imshow(img_path, img2)\n #cv2.waitKey(0)\n #img = caffe.io.load_image(img_path) # alternative way\n\n preprocessed_image=transformer.preprocess('data', img)\n net.blobs[\"data\"].data[...] = preprocessed_image #copy the image in the memory allocated for the net\n out = net.forward() # perform classification\n #best_n = net.blobs[\"prob\"].data[0].flatten().argsort()[-1: -6:-1]\n #print(\"DBG INFO: \", best_n)\n pred_probas = out[\"prob\"] # returns the probabilities of the 10 classes\n\n # compute top-2: take the last 2 elements [-2:] and reverse them [::-1]\n top2 = pred_probas.argsort()[-2:][::-1]\n\n if \"cat\" in img_path:\n label = 0\n elif \"dog\" in img_path:\n label = 1\n else:\n label = -1 # non existing\n\n if label in top2 :\n top2_true = top2_true + 1\n else :\n top2_false = top2_false + 1\n #print(\"DBG INFO \", label, top2)\n\n test_ids[idx] = label\n preds[idx] = pred_probas\n #print(\"DBG INFO \", pred_probas)\n\n print(\"IMAGE: \" + img_path)\n print(\"PREDICTED: %d\" % preds[idx].argmax())\n print(\"EXPECTED : %d\" % test_ids[idx])\n print(\"-------\")\n\n idx = idx+1\n\n\n# ##################################################################################################\n# SKLEARN REPORT\n'''\nprecision = tp / (tp+fp) = ability of the classifier to not label as positive a sample that is negative\nrecall = tp / (tp+fn) = ability of the classifier to find all positive samples\nF1-score = weighter harmonic mean of precision and recall. Best value approaches 1 and worst 0\nsupport = number of occurrences\n'''\n\n\nfrom sklearn.preprocessing import LabelBinarizer\nfrom sklearn.metrics import classification_report\nlb = LabelBinarizer()\ntestY = lb.fit_transform(test_ids)\nlabelNames = [\"cat\", \"dog\"]\n\n#report=classification_report(testY.argmax(axis=1), preds.argmax(axis=1), target_names=labelNames)\nreport=classification_report(testY, preds.argmax(axis=1), target_names=labelNames)\nprint(report)\n\nfrom sklearn.metrics import accuracy_score\n#print(\"SKLEARN Accuracy = %.2f\" % accuracy_score(testY.argmax(axis=1), preds.argmax(axis=1)) )\nprint(\"SKLEARN Accuracy = %.2f\" % accuracy_score(testY, preds.argmax(axis=1)) )\n"
] | [
[
"tensorflow.keras.utils.to_categorical",
"tensorflow.keras.datasets.cifar10.load_data"
],
[
"numpy.asarray"
],
[
"numpy.zeros",
"sklearn.preprocessing.LabelBinarizer",
"numpy.ones"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"2.7",
"2.6",
"2.4",
"2.3",
"2.5",
"2.2"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
srkc95/kaggle-landmark-2021-1st-place | [
"034a7d8665bb4696981698348c9370f2d4e61e35"
] | [
"configs/cfg_ch_hybrid_swin224_b5_s3x.py"
] | [
"from default_config import basic_cfg\nimport albumentations as A\nimport os\nimport pandas as pd\nimport cv2\n\ncfg = basic_cfg\ncfg.debug = True\n\n# paths\ncfg.name = os.path.basename(__file__).split(\".\")[0]\ncfg.data_dir = \"/raid/landmark-recognition-2019/\"\n\ncfg.train\ncfg.data_folder = cfg.data_dir + \"train/\"\ncfg.train_df = \"/mount/glr2021/data/2021/train_gldv2x.csv\"\n\ncfg.val_df = '/raid/landmark-recognition-2019/' + \"recognition_solution_v2.1.csv\"\ncfg.output_dir = f\"/mount/glr2021/models/{os.path.basename(__file__).split('.')[0]}\"\ncfg.val_data_folder = \"/raid/landmark-recognition-2019/\" + \"test/\"\n\ncfg.test = False\ncfg.test_data_folder = cfg.data_dir + \"test/\"\n# cfg.test_df = cfg.data_dir + \"sample_submission_v1.csv\"\n\ncfg.eval_retrieval = True\ncfg.query_data_folder = \"/raid/landmark-recognition-2019/\" + \"test/\"\ncfg.index_data_folder = \"/raid/landmark-recognition-2019/\" + \"index/\"\ncfg.query_df = '/mount/glr2021/data/2019/query_v2.csv'\ncfg.index_df = '/mount/glr2021/data/2019/index_v2.csv'\n\n#logging\ncfg.neptune_project = \"christofhenkel/glr2021\"\ncfg.neptune_connection_mode = \"async\"\ncfg.tags = \"debug\"\n\n\n\n\n\n# MODEL\ncfg.model = \"ch_mdl_hybrid_transformer_2x\"\ncfg.stride = (1,1)\ncfg.embedder = \"tf_efficientnet_b5_ns\"\ncfg.backbone = \"swin_base_patch4_window7_224\"\ncfg.freeze_backbone_head = False\ncfg.find_unused_parameters = True\ncfg.neck = \"option-D\"\ncfg.embedding_size = 512\ncfg.pool = \"gem\"\ncfg.gem_p_trainable = True\ncfg.pretrained_weights ='/mount/glr2021/models/cfg_ch_hybrid_swin224_2x_b5_cutout_s2x/fold0/checkpoint_last_seed248126.pth'\ncfg.pretrained_weights_strict = False\ncfg.pretrained=True\ncfg.pop_weights = ['patch_embed.proj.weight']\n# DATASET\ncfg.dataset = \"ch_ds_1\"\ncfg.normalization = 'imagenet'\ncfg.landmark_id2class_id = pd.read_csv('./assets/landmark_id2class.csv')\ncfg.num_workers = 8\n# cfg.data_sample = 100000\ncfg.loss = 'adaptive_arcface'\ncfg.arcface_s = 45\ncfg.arcface_m = 0.3\n\n\n# OPTIMIZATION & SCHEDULE\n\n# cfg.fold = 0\ncfg.lr = 0.00005\n# cfg.optimizer = \"adam\"\n# cfg.weight_decay = 1e-4\ncfg.warmup = 1\ncfg.epochs = 40\ncfg.stop_at = 16\ncfg.batch_size = 8\ncfg.mixed_precision = True\ncfg.pin_memory = False\ncfg.grad_accumulation = 1.\n\n#inference\ncfg.train = True\ncfg.val = True\ncfg.test = False\ncfg.save_val_data = True\ncfg.train_val = False\ncfg.save_only_last_ckpt = False\ncfg.eval_ddp =True\ncfg.save_headless = False\n# AUGS\n\ncfg.img_size = (448,448)\n# AUGS\n\nimage_size = cfg.img_size[0]\n\ncfg.train_aug = A.Compose([\n A.HorizontalFlip(p=0.5),\n A.ImageCompression(quality_lower=99, quality_upper=100),\n A.ShiftScaleRotate(shift_limit=0.2, scale_limit=0.2, rotate_limit=10, border_mode=0, p=0.7),\n A.Resize(image_size, image_size),\n A.Cutout(max_h_size=int(image_size * 0.4), max_w_size=int(image_size * 0.4), num_holes=1, p=0.5),\n ])\n\ncfg.val_aug = A.Compose([\n A.Resize(image_size, image_size),\n ])\n\n"
] | [
[
"pandas.read_csv"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
Artur-UF/MetCompA | [
"1198f861f4e5190f7435314bf476c594471e79fa",
"1198f861f4e5190f7435314bf476c594471e79fa"
] | [
"Ark.MetCompA/Aula-py14/atv_ajuste.py",
"Ark.MetCompA/Aula-py4/testeatv4.1.py"
] | [
"# Ajuste de funções\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n\ndef aj_lin(xi, yi):\n '''\n Realiza o ajuste linear de pontos em uma reta de ajuste no formato \"y = ax + b\"\n :param xi: coordenadas x dos pontos\n :param yi: coordenadas y dos pontos\n :return: coeficiente angular \"a\" e coeficiente linear \"b\" da reta de ajuste\n '''\n n = len(xi)\n mxy = sum(xi*yi)/n\n mx = sum(xi)/n\n my = sum(yi)/n\n mqx = sum(xi**2)/n\n a = (mxy - (mx*my))/(mqx - (mx**2))\n b = ((mqx*my) - (mx*mxy))/(mqx - (mx**2))\n return a, b\n\n\nx, y = np.loadtxt('dados.dat', unpack=True)\n\nxi = np.linspace(0, 9.50)\npars = aj_lin(x, y)\nyi = lambda p: pars[0]*p + pars[1]\n\nplt.scatter(x, y, s=30, c='k', marker='.', label='Pontos')\nplt.plot(xi, yi(xi), 'g', label='Reta de ajuste')\nplt.xlim(0, 9.5)\nplt.legend()\nplt.xlabel('x')\nplt.ylabel('y')\nplt.grid()\nplt.show()\n\n",
"import numpy as np\ncol1, col2, col3, col4 = np.loadtxt('valoresnp.txt', unpack=True, skiprows=1)\nprint(col1)\nprint(col2)\nprint(col3)\nprint(col4)\n"
] | [
[
"matplotlib.pyplot.legend",
"matplotlib.pyplot.scatter",
"numpy.linspace",
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.grid",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.show",
"numpy.loadtxt",
"matplotlib.pyplot.ylabel"
],
[
"numpy.loadtxt"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
vophihungvn/h1st | [
"d421995bb0b8de6a5a76788261efef5b26bc7c12"
] | [
"examples/AutoCyber/streamlit_app.py"
] | [
"import streamlit as st\nimport time\nimport numpy as np\nimport pandas as pd\n\nfrom aegis_datagen import build_constant_val_msg_stream\n\[email protected]\ndef get_data():\n AWS_BUCKET_URL = \"s3://h1st-tutorial-autocyber/attack-samples\"\n df = pd.read_parquet(AWS_BUCKET_URL + \"/20181114_Driver2_Trip1-0.parquet\")\n return df\n\ndef do_injection(df, sensor_name, values, period):\n df = df.copy()\n \n dt = df[\"Timestamp\"].max() - df[\"Timestamp\"].min()\n\n start = df[\"Timestamp\"].min()\n end = df[\"Timestamp\"].max()\n \n value_start = df[df[\"Timestamp\"] < start][sensor_name].fillna(method=\"ffill\").fillna(method=\"bfill\").values\n value_start = value_start[-1]\n \n value = 0.0\n rows = build_constant_val_msg_stream(value, start, end, period=period, value_jitter=0./20.)\n dfinj = pd.DataFrame.from_records(rows, columns =['Timestamp', sensor_name])\n\n dfinj[\"Label\"] = \"Attack\"\n dfinj[\"AttackSensor\"] = sensor_name\n dfinj[\"AttackMethod\"] = method\n dfinj[\"AttackParams\"] = scale\n\n # # # double check time diff / msg freq of injected values\n # actual_period = (dfinj[\"Timestamp\"] - dfinj[\"Timestamp\"].shift(1)).mean() * 1000\n # assert np.abs(period - actual_period) / period < 0.05, \"unexpected injection msg freq, actual_period = %s\" % actual_period\n \n df2 = pd.concat([df, dfinj]).sort_values(\"Timestamp\")\n\n # these values always go together \n if sensor_name in (\"YawRate\", \"Gx\", \"Gy\"):\n df2_filled = df2.fillna(method=\"ffill\")\n df2.loc[df2.Label == \"Attack\", [\"YawRate\", \"Gx\", \"Gy\"]] = df2_filled.loc[df2_filled.Label == \"Attack\", [\"YawRate\", \"Gx\", \"Gy\"]]\n\n if DEBUG: print(\"injected %s rows, before = %s, after = %s\" % (len(dfinj), len(df), len(df2)))\n # print(df2)\n return df2, start, end\n\ntry:\n df = get_data()\n print(df.head())\nexcept Exception as e:\n st.error(\n \"\"\"\n **This demo requires internet access.**\n\n Connection error: %s\n \"\"\"\n % e\n )\n\nattk_events = df.AttackEventIndex.dropna().unique()\nprint(\"unique attk_events = %s\" % attk_events)\n\nimport random\neid = st.selectbox(\"Select an sample index\", attk_events)\ndf = df[df.AttackEventIndex == eid]\n\nSENSORS = [\"SteeringAngle\", \"CarSpeed\", \"YawRate\", \"Gx\", \"Gy\"]\nattack_sensor = st.selectbox(\"Select a sensor to attack\", SENSORS)\n\nimport matplotlib.pyplot as plt\nz = df.dropna(subset=[attack_sensor])\nnormal = z[z[\"Label\"] == \"Normal\"]\nfig = plt.figure(figsize=(9, 3))\nplt.plot(normal.Timestamp, normal[attack_sensor], label=\"normal %s\" % attack_sensor)\nplt.legend()\n# plt.savefig(\"out.png\")\n\nst.write(fig)\n\n\nimport streamlit as st\nfrom PIL import Image\nfrom streamlit_drawable_canvas import st_canvas\n\nattack_msg_freq = st.sidebar.slider(\"Attack msg period (ms)\", 12, 96, 24, step=12)\nattack_msg_timing = st.sidebar.slider(\"Attack msg time jitter (ns)\", 500, 5000, 1000, step=500)\n\ndrawing_mode = st.sidebar.selectbox(\n \"Drawing tool:\", (\"freedraw\", \"line\")\n)\n\ncanvas_result = st_canvas(\n # fill_color=\"rgba(255, 165, 0, 0.3)\", # Fixed fill color with some opacity\n stroke_width=2,\n #stroke_color=stroke_color,\n background_color=\"transparent\",\n #background_image=Image.open(\"out.png\"),\n update_streamlit=True,\n height=240,\n width=600,\n drawing_mode=drawing_mode,\n key=\"canvas\",\n)\n\nif canvas_result.image_data is not None:\n print(\"canvas_result.image_data\")\n print(type(canvas_result.image_data))\n print(canvas_result.image_data.shape) # shape (240, 600, 4)\n x = 
canvas_result.image_data[:,:,3]\n print(x.shape)\n print(x)\n values = np.argmax(x, axis=0)\n print(\"Raw values\")\n print(values)\n values = (255 - values)/255.0\n values = pd.Series(values)\n values = values.replace(1.0, float(\"NaN\"))\n print(\"pd.Series values\")\n print(values)\n zmax, zmin = z[attack_sensor].max(), z[attack_sensor].min()\n print((zmax, zmin))\n values = values * (zmax - zmin) + zmin\n st.write(\"Scaled values\")\n st.write(values)\n\n\nimport matplotlib.pyplot as plt\nz = df.dropna(subset=[attack_sensor])\nnormal = z[z[\"Label\"] == \"Normal\"]\nfig = plt.figure(figsize=(9, 3))\nplt.plot(normal.Timestamp, normal[attack_sensor], label=\"normal %s\" % attack_sensor)\nplt.legend()\n# plt.savefig(\"out.png\")\n\nst.write(fig)\n\n\n# Streamlit widgets automatically run the script from top to bottom. Since\n# this button is not connected to any other logic, it just causes a plain\n# rerun.\nst.button(\"Re-run\")"
] | [
[
"matplotlib.pyplot.legend",
"pandas.concat",
"pandas.Series",
"matplotlib.pyplot.plot",
"pandas.read_parquet",
"numpy.argmax",
"pandas.DataFrame.from_records",
"matplotlib.pyplot.figure"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"0.24",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
srijan-deepsource/dask | [
"0673d9084e02f985f3fdf5ba6ede80e8de5ac15c"
] | [
"dask/array/core.py"
] | [
"import math\nimport operator\nimport os\nimport pickle\nimport re\nimport sys\nimport traceback\nimport uuid\nimport warnings\nfrom bisect import bisect\nfrom collections.abc import Iterable, Iterator, Mapping\nfrom functools import partial, wraps, reduce\nfrom itertools import product, zip_longest\nfrom numbers import Number, Integral\nfrom operator import add, getitem, mul\nfrom threading import Lock\n\nfrom tlz import partition, concat, first, groupby, accumulate, frequencies\nfrom tlz.curried import pluck\nimport numpy as np\n\nfrom . import chunk\nfrom .. import config, compute\nfrom ..base import (\n DaskMethodsMixin,\n tokenize,\n dont_optimize,\n compute_as_if_collection,\n persist,\n is_dask_collection,\n)\nfrom ..blockwise import broadcast_dimensions\nfrom ..context import globalmethod\nfrom ..utils import (\n ndeepmap,\n ignoring,\n concrete,\n derived_from,\n is_integer,\n IndexCallable,\n funcname,\n SerializableLock,\n Dispatch,\n factors,\n parse_bytes,\n has_keyword,\n M,\n ndimlist,\n format_bytes,\n typename,\n)\nfrom ..core import quote\nfrom ..delayed import delayed, Delayed\nfrom .. import threaded, core\nfrom ..sizeof import sizeof\nfrom ..highlevelgraph import HighLevelGraph\nfrom .numpy_compat import _Recurser, _make_sliced_dtype\nfrom .slicing import slice_array, replace_ellipsis, cached_cumsum\nfrom .blockwise import blockwise\n\nconfig.update_defaults({\"array\": {\"chunk-size\": \"128MiB\", \"rechunk-threshold\": 4}})\n\n\nconcatenate_lookup = Dispatch(\"concatenate\")\ntensordot_lookup = Dispatch(\"tensordot\")\neinsum_lookup = Dispatch(\"einsum\")\nconcatenate_lookup.register((object, np.ndarray), np.concatenate)\ntensordot_lookup.register((object, np.ndarray), np.tensordot)\neinsum_lookup.register((object, np.ndarray), np.einsum)\n\nunknown_chunk_message = (\n \"\\n\\n\"\n \"A possible solution: \"\n \"https://docs.dask.org/en/latest/array-chunks.html#unknown-chunks\\n\"\n \"Summary: to compute chunks sizes, use\\n\\n\"\n \" x.compute_chunk_sizes() # for Dask Array `x`\\n\"\n \" ddf.to_dask_array(lengths=True) # for Dask DataFrame `ddf`\"\n)\n\n\nclass PerformanceWarning(Warning):\n \"\"\" A warning given when bad chunking may cause poor performance \"\"\"\n\n\ndef getter(a, b, asarray=True, lock=None):\n if isinstance(b, tuple) and any(x is None for x in b):\n b2 = tuple(x for x in b if x is not None)\n b3 = tuple(\n None if x is None else slice(None, None)\n for x in b\n if not isinstance(x, Integral)\n )\n return getter(a, b2, asarray=asarray, lock=lock)[b3]\n\n if lock:\n lock.acquire()\n try:\n c = a[b]\n if asarray:\n c = np.asarray(c)\n finally:\n if lock:\n lock.release()\n return c\n\n\ndef getter_nofancy(a, b, asarray=True, lock=None):\n \"\"\" A simple wrapper around ``getter``.\n\n Used to indicate to the optimization passes that the backend doesn't\n support fancy indexing.\n \"\"\"\n return getter(a, b, asarray=asarray, lock=lock)\n\n\ndef getter_inline(a, b, asarray=True, lock=None):\n \"\"\" A getter function that optimizations feel comfortable inlining\n\n Slicing operations with this function may be inlined into a graph, such as\n in the following rewrite\n\n **Before**\n\n >>> a = x[:10] # doctest: +SKIP\n >>> b = a + 1 # doctest: +SKIP\n >>> c = a * 2 # doctest: +SKIP\n\n **After**\n\n >>> b = x[:10] + 1 # doctest: +SKIP\n >>> c = x[:10] * 2 # doctest: +SKIP\n\n This inlining can be relevant to operations when running off of disk.\n \"\"\"\n return getter(a, b, asarray=asarray, lock=lock)\n\n\nfrom .optimization import optimize, 
fuse_slice\n\n\n# __array_function__ dict for mapping aliases and mismatching names\n_HANDLED_FUNCTIONS = {}\n\n\ndef implements(*numpy_functions):\n \"\"\"Register an __array_function__ implementation for dask.array.Array\n\n Register that a function implements the API of a NumPy function (or several\n NumPy functions in case of aliases) which is handled with\n ``__array_function__``.\n\n Parameters\n ----------\n \\\\*numpy_functions : callables\n One or more NumPy functions that are handled by ``__array_function__``\n and will be mapped by `implements` to a `dask.array` function.\n \"\"\"\n\n def decorator(dask_func):\n for numpy_function in numpy_functions:\n _HANDLED_FUNCTIONS[numpy_function] = dask_func\n\n return dask_func\n\n return decorator\n\n\ndef slices_from_chunks(chunks):\n \"\"\" Translate chunks tuple to a set of slices in product order\n\n >>> slices_from_chunks(((2, 2), (3, 3, 3))) # doctest: +NORMALIZE_WHITESPACE\n [(slice(0, 2, None), slice(0, 3, None)),\n (slice(0, 2, None), slice(3, 6, None)),\n (slice(0, 2, None), slice(6, 9, None)),\n (slice(2, 4, None), slice(0, 3, None)),\n (slice(2, 4, None), slice(3, 6, None)),\n (slice(2, 4, None), slice(6, 9, None))]\n \"\"\"\n cumdims = [cached_cumsum(bds, initial_zero=True) for bds in chunks]\n slices = [\n [slice(s, s + dim) for s, dim in zip(starts, shapes)]\n for starts, shapes in zip(cumdims, chunks)\n ]\n return list(product(*slices))\n\n\ndef getem(\n arr,\n chunks,\n getitem=getter,\n shape=None,\n out_name=None,\n lock=False,\n asarray=True,\n dtype=None,\n):\n \"\"\" Dask getting various chunks from an array-like\n\n >>> getem('X', chunks=(2, 3), shape=(4, 6)) # doctest: +SKIP\n {('X', 0, 0): (getter, 'X', (slice(0, 2), slice(0, 3))),\n ('X', 1, 0): (getter, 'X', (slice(2, 4), slice(0, 3))),\n ('X', 1, 1): (getter, 'X', (slice(2, 4), slice(3, 6))),\n ('X', 0, 1): (getter, 'X', (slice(0, 2), slice(3, 6)))}\n\n >>> getem('X', chunks=((2, 2), (3, 3))) # doctest: +SKIP\n {('X', 0, 0): (getter, 'X', (slice(0, 2), slice(0, 3))),\n ('X', 1, 0): (getter, 'X', (slice(2, 4), slice(0, 3))),\n ('X', 1, 1): (getter, 'X', (slice(2, 4), slice(3, 6))),\n ('X', 0, 1): (getter, 'X', (slice(0, 2), slice(3, 6)))}\n \"\"\"\n out_name = out_name or arr\n chunks = normalize_chunks(chunks, shape, dtype=dtype)\n keys = product([out_name], *(range(len(bds)) for bds in chunks))\n slices = slices_from_chunks(chunks)\n\n if (\n has_keyword(getitem, \"asarray\")\n and has_keyword(getitem, \"lock\")\n and (not asarray or lock)\n ):\n values = [(getitem, arr, x, asarray, lock) for x in slices]\n else:\n # Common case, drop extra parameters\n values = [(getitem, arr, x) for x in slices]\n\n return dict(zip(keys, values))\n\n\ndef dotmany(A, B, leftfunc=None, rightfunc=None, **kwargs):\n \"\"\" Dot product of many aligned chunks\n\n >>> x = np.array([[1, 2], [1, 2]])\n >>> y = np.array([[10, 20], [10, 20]])\n >>> dotmany([x, x, x], [y, y, y])\n array([[ 90, 180],\n [ 90, 180]])\n\n Optionally pass in functions to apply to the left and right chunks\n\n >>> dotmany([x, x, x], [y, y, y], rightfunc=np.transpose)\n array([[150, 150],\n [150, 150]])\n \"\"\"\n if leftfunc:\n A = map(leftfunc, A)\n if rightfunc:\n B = map(rightfunc, B)\n return sum(map(partial(np.dot, **kwargs), A, B))\n\n\ndef _concatenate2(arrays, axes=[]):\n \"\"\" Recursively Concatenate nested lists of arrays along axes\n\n Each entry in axes corresponds to each level of the nested list. 
The\n length of axes should correspond to the level of nesting of arrays.\n If axes is an empty list or tuple, return arrays, or arrays[0] if\n arrays is a list.\n\n >>> x = np.array([[1, 2], [3, 4]])\n >>> _concatenate2([x, x], axes=[0])\n array([[1, 2],\n [3, 4],\n [1, 2],\n [3, 4]])\n\n >>> _concatenate2([x, x], axes=[1])\n array([[1, 2, 1, 2],\n [3, 4, 3, 4]])\n\n >>> _concatenate2([[x, x], [x, x]], axes=[0, 1])\n array([[1, 2, 1, 2],\n [3, 4, 3, 4],\n [1, 2, 1, 2],\n [3, 4, 3, 4]])\n\n Supports Iterators\n >>> _concatenate2(iter([x, x]), axes=[1])\n array([[1, 2, 1, 2],\n [3, 4, 3, 4]])\n\n Special Case\n >>> _concatenate2([x, x], axes=())\n array([[1, 2],\n [3, 4]])\n \"\"\"\n if axes == ():\n if isinstance(arrays, list):\n return arrays[0]\n else:\n return arrays\n\n if isinstance(arrays, Iterator):\n arrays = list(arrays)\n if not isinstance(arrays, (list, tuple)):\n return arrays\n if len(axes) > 1:\n arrays = [_concatenate2(a, axes=axes[1:]) for a in arrays]\n concatenate = concatenate_lookup.dispatch(\n type(max(arrays, key=lambda x: getattr(x, \"__array_priority__\", 0)))\n )\n return concatenate(arrays, axis=axes[0])\n\n\ndef apply_infer_dtype(func, args, kwargs, funcname, suggest_dtype=\"dtype\", nout=None):\n \"\"\"\n Tries to infer output dtype of ``func`` for a small set of input arguments.\n\n Parameters\n ----------\n func: Callable\n Function for which output dtype is to be determined\n\n args: List of array like\n Arguments to the function, which would usually be used. Only attributes\n ``ndim`` and ``dtype`` are used.\n\n kwargs: dict\n Additional ``kwargs`` to the ``func``\n\n funcname: String\n Name of calling function to improve potential error messages\n\n suggest_dtype: None/False or String\n If not ``None`` adds suggestion to potential error message to specify a dtype\n via the specified kwarg. Defaults to ``'dtype'``.\n\n nout: None or Int\n ``None`` if function returns single output, integer if many.\n Deafults to ``None``.\n\n Returns\n -------\n : dtype or List of dtype\n One or many dtypes (depending on ``nout``)\n \"\"\"\n args = [\n np.ones((1,) * x.ndim, dtype=x.dtype) if isinstance(x, Array) else x\n for x in args\n ]\n try:\n with np.errstate(all=\"ignore\"):\n o = func(*args, **kwargs)\n except Exception as e:\n exc_type, exc_value, exc_traceback = sys.exc_info()\n tb = \"\".join(traceback.format_tb(exc_traceback))\n suggest = (\n (\n \"Please specify the dtype explicitly using the \"\n \"`{dtype}` kwarg.\\n\\n\".format(dtype=suggest_dtype)\n )\n if suggest_dtype\n else \"\"\n )\n msg = (\n \"`dtype` inference failed in `{0}`.\\n\\n\"\n \"{1}\"\n \"Original error is below:\\n\"\n \"------------------------\\n\"\n \"{2}\\n\\n\"\n \"Traceback:\\n\"\n \"---------\\n\"\n \"{3}\"\n ).format(funcname, suggest, repr(e), tb)\n else:\n msg = None\n if msg is not None:\n raise ValueError(msg)\n return o.dtype if nout is None else tuple(e.dtype for e in o)\n\n\ndef normalize_arg(x):\n \"\"\" Normalize user provided arguments to blockwise or map_blocks\n\n We do a few things:\n\n 1. If they are string literals that might collide with blockwise_token then we\n quote them\n 2. 
IF they are large (as defined by sizeof) then we put them into the\n graph on their own by using dask.delayed\n \"\"\"\n if is_dask_collection(x):\n return x\n elif isinstance(x, str) and re.match(r\"_\\d+\", x):\n return delayed(x)\n elif isinstance(x, list) and len(x) >= 10:\n return delayed(x)\n elif sizeof(x) > 1e6:\n return delayed(x)\n else:\n return x\n\n\ndef _pass_extra_kwargs(func, keys, *args, **kwargs):\n \"\"\" Helper for :func:`map_blocks` to pass `block_info` or `block_id`.\n\n For each element of `keys`, a corresponding element of args is changed\n to a keyword argument with that key, before all arguments re passed on\n to `func`.\n \"\"\"\n kwargs.update(zip(keys, args))\n return func(*args[len(keys) :], **kwargs)\n\n\ndef map_blocks(\n func,\n *args,\n name=None,\n token=None,\n dtype=None,\n chunks=None,\n drop_axis=[],\n new_axis=None,\n meta=None,\n **kwargs,\n):\n \"\"\" Map a function across all blocks of a dask array.\n\n Parameters\n ----------\n func : callable\n Function to apply to every block in the array.\n args : dask arrays or other objects\n dtype : np.dtype, optional\n The ``dtype`` of the output array. It is recommended to provide this.\n If not provided, will be inferred by applying the function to a small\n set of fake data.\n chunks : tuple, optional\n Chunk shape of resulting blocks if the function does not preserve\n shape. If not provided, the resulting array is assumed to have the same\n block structure as the first input array.\n drop_axis : number or iterable, optional\n Dimensions lost by the function.\n new_axis : number or iterable, optional\n New dimensions created by the function. Note that these are applied\n after ``drop_axis`` (if present).\n token : string, optional\n The key prefix to use for the output array. If not provided, will be\n determined from the function name.\n name : string, optional\n The key name to use for the output array. Note that this fully\n specifies the output key name, and must be unique. If not provided,\n will be determined by a hash of the arguments.\n **kwargs :\n Other keyword arguments to pass to function. Values must be constants\n (not dask.arrays)\n\n See Also\n --------\n dask.array.blockwise : Generalized operation with control over block alignment.\n\n Examples\n --------\n >>> import dask.array as da\n >>> x = da.arange(6, chunks=3)\n\n >>> x.map_blocks(lambda x: x * 2).compute()\n array([ 0, 2, 4, 6, 8, 10])\n\n The ``da.map_blocks`` function can also accept multiple arrays.\n\n >>> d = da.arange(5, chunks=2)\n >>> e = da.arange(5, chunks=2)\n\n >>> f = map_blocks(lambda a, b: a + b**2, d, e)\n >>> f.compute()\n array([ 0, 2, 6, 12, 20])\n\n If the function changes shape of the blocks then you must provide chunks\n explicitly.\n\n >>> y = x.map_blocks(lambda x: x[::2], chunks=((2, 2),))\n\n You have a bit of freedom in specifying chunks. If all of the output chunk\n sizes are the same, you can provide just that chunk size as a single tuple.\n\n >>> a = da.arange(18, chunks=(6,))\n >>> b = a.map_blocks(lambda x: x[:3], chunks=(3,))\n\n If the function changes the dimension of the blocks you must specify the\n created or destroyed dimensions.\n\n >>> b = a.map_blocks(lambda x: x[None, :, None], chunks=(1, 6, 1),\n ... new_axis=[0, 2])\n\n If ``chunks`` is specified but ``new_axis`` is not, then it is inferred to\n add the necessary number of axes on the left.\n\n Map_blocks aligns blocks by block positions without regard to shape. 
In the\n following example we have two arrays with the same number of blocks but\n with different shape and chunk sizes.\n\n >>> x = da.arange(1000, chunks=(100,))\n >>> y = da.arange(100, chunks=(10,))\n\n The relevant attribute to match is numblocks.\n\n >>> x.numblocks\n (10,)\n >>> y.numblocks\n (10,)\n\n If these match (up to broadcasting rules) then we can map arbitrary\n functions across blocks\n\n >>> def func(a, b):\n ... return np.array([a.max(), b.max()])\n\n >>> da.map_blocks(func, x, y, chunks=(2,), dtype='i8')\n dask.array<func, shape=(20,), dtype=int64, chunksize=(2,), chunktype=numpy.ndarray>\n\n >>> _.compute()\n array([ 99, 9, 199, 19, 299, 29, 399, 39, 499, 49, 599, 59, 699,\n 69, 799, 79, 899, 89, 999, 99])\n\n Your block function get information about where it is in the array by\n accepting a special ``block_info`` keyword argument.\n\n >>> def func(block, block_info=None):\n ... pass\n\n This will receive the following information:\n\n >>> block_info # doctest: +SKIP\n {0: {'shape': (1000,),\n 'num-chunks': (10,),\n 'chunk-location': (4,),\n 'array-location': [(400, 500)]},\n None: {'shape': (1000,),\n 'num-chunks': (10,),\n 'chunk-location': (4,),\n 'array-location': [(400, 500)],\n 'chunk-shape': (100,),\n 'dtype': dtype('float64')}}\n\n For each argument and keyword arguments that are dask arrays (the positions\n of which are the first index), you will receive the shape of the full\n array, the number of chunks of the full array in each dimension, the chunk\n location (for example the fourth chunk over in the first dimension), and\n the array location (for example the slice corresponding to ``40:50``). The\n same information is provided for the output, with the key ``None``, plus\n the shape and dtype that should be returned.\n\n These features can be combined to synthesize an array from scratch, for\n example:\n\n >>> def func(block_info=None):\n ... loc = block_info[None]['array-location'][0]\n ... 
return np.arange(loc[0], loc[1])\n\n >>> da.map_blocks(func, chunks=((4, 4),), dtype=np.float_)\n dask.array<func, shape=(8,), dtype=float64, chunksize=(4,), chunktype=numpy.ndarray>\n\n >>> _.compute()\n array([0, 1, 2, 3, 4, 5, 6, 7])\n\n You may specify the key name prefix of the resulting task in the graph with\n the optional ``token`` keyword argument.\n\n >>> x.map_blocks(lambda x: x + 1, name='increment') # doctest: +SKIP\n dask.array<increment, shape=(100,), dtype=int64, chunksize=(10,), chunktype=numpy.ndarray>\n \"\"\"\n if not callable(func):\n msg = (\n \"First argument must be callable function, not %s\\n\"\n \"Usage: da.map_blocks(function, x)\\n\"\n \" or: da.map_blocks(function, x, y, z)\"\n )\n raise TypeError(msg % type(func).__name__)\n if token:\n warnings.warn(\"The token= keyword to map_blocks has been moved to name=\")\n name = token\n\n name = \"%s-%s\" % (name or funcname(func), tokenize(func, *args, **kwargs))\n new_axes = {}\n\n if isinstance(drop_axis, Number):\n drop_axis = [drop_axis]\n if isinstance(new_axis, Number):\n new_axis = [new_axis] # TODO: handle new_axis\n\n arrs = [a for a in args if isinstance(a, Array)]\n\n argpairs = [\n (a, tuple(range(a.ndim))[::-1]) if isinstance(a, Array) else (a, None)\n for a in args\n ]\n if arrs:\n out_ind = tuple(range(max(a.ndim for a in arrs)))[::-1]\n else:\n out_ind = ()\n\n original_kwargs = kwargs\n\n if dtype is None and meta is None:\n dtype = apply_infer_dtype(func, args, original_kwargs, \"map_blocks\")\n\n if drop_axis:\n out_ind = tuple(x for i, x in enumerate(out_ind) if i not in drop_axis)\n if new_axis is None and chunks is not None and len(out_ind) < len(chunks):\n new_axis = range(len(chunks) - len(out_ind))\n if new_axis:\n # new_axis = [x + len(drop_axis) for x in new_axis]\n out_ind = list(out_ind)\n for ax in sorted(new_axis):\n n = len(out_ind) + len(drop_axis)\n out_ind.insert(ax, n)\n if chunks is not None:\n new_axes[n] = chunks[ax]\n else:\n new_axes[n] = 1\n out_ind = tuple(out_ind)\n if max(new_axis) > max(out_ind):\n raise ValueError(\"New_axis values do not fill in all dimensions\")\n\n if chunks is not None:\n if len(chunks) != len(out_ind):\n raise ValueError(\n \"Provided chunks have {0} dims, expected {1} \"\n \"dims.\".format(len(chunks), len(out_ind))\n )\n adjust_chunks = dict(zip(out_ind, chunks))\n else:\n adjust_chunks = None\n\n out = blockwise(\n func,\n out_ind,\n *concat(argpairs),\n name=name,\n new_axes=new_axes,\n dtype=dtype,\n concatenate=True,\n align_arrays=False,\n adjust_chunks=adjust_chunks,\n meta=meta,\n **kwargs,\n )\n\n extra_argpairs = []\n extra_names = []\n # If func has block_id as an argument, construct an array of block IDs and\n # prepare to inject it.\n if has_keyword(func, \"block_id\"):\n block_id_name = \"block-id-\" + out.name\n block_id_dsk = {\n (block_id_name,) + block_id: block_id\n for block_id in product(*(range(len(c)) for c in out.chunks))\n }\n block_id_array = Array(\n block_id_dsk,\n block_id_name,\n chunks=tuple((1,) * len(c) for c in out.chunks),\n dtype=np.object_,\n )\n extra_argpairs.append((block_id_array, out_ind))\n extra_names.append(\"block_id\")\n\n # If func has block_info as an argument, construct an array of block info\n # objects and prepare to inject it.\n if has_keyword(func, \"block_info\"):\n starts = {}\n num_chunks = {}\n shapes = {}\n\n for i, (arg, in_ind) in enumerate(argpairs):\n if in_ind is not None:\n shapes[i] = arg.shape\n if drop_axis:\n # We concatenate along dropped axes, so we need to treat them\n # as if 
there is only a single chunk.\n starts[i] = [\n (\n cached_cumsum(arg.chunks[j], initial_zero=True)\n if ind in out_ind\n else [0, arg.shape[j]]\n )\n for j, ind in enumerate(in_ind)\n ]\n num_chunks[i] = tuple(len(s) - 1 for s in starts[i])\n else:\n starts[i] = [\n cached_cumsum(c, initial_zero=True) for c in arg.chunks\n ]\n num_chunks[i] = arg.numblocks\n out_starts = [cached_cumsum(c, initial_zero=True) for c in out.chunks]\n\n block_info_name = \"block-info-\" + out.name\n block_info_dsk = {}\n for block_id in product(*(range(len(c)) for c in out.chunks)):\n # Get position of chunk, indexed by axis labels\n location = {out_ind[i]: loc for i, loc in enumerate(block_id)}\n info = {}\n for i, shape in shapes.items():\n # Compute chunk key in the array, taking broadcasting into\n # account. We don't directly know which dimensions are\n # broadcast, but any dimension with only one chunk can be\n # treated as broadcast.\n arr_k = tuple(\n location.get(ind, 0) if num_chunks[i][j] > 1 else 0\n for j, ind in enumerate(argpairs[i][1])\n )\n info[i] = {\n \"shape\": shape,\n \"num-chunks\": num_chunks[i],\n \"array-location\": [\n (starts[i][ij][j], starts[i][ij][j + 1])\n for ij, j in enumerate(arr_k)\n ],\n \"chunk-location\": arr_k,\n }\n\n info[None] = {\n \"shape\": out.shape,\n \"num-chunks\": out.numblocks,\n \"array-location\": [\n (out_starts[ij][j], out_starts[ij][j + 1])\n for ij, j in enumerate(block_id)\n ],\n \"chunk-location\": block_id,\n \"chunk-shape\": tuple(\n out.chunks[ij][j] for ij, j in enumerate(block_id)\n ),\n \"dtype\": dtype,\n }\n block_info_dsk[(block_info_name,) + block_id] = info\n\n block_info = Array(\n block_info_dsk,\n block_info_name,\n chunks=tuple((1,) * len(c) for c in out.chunks),\n dtype=np.object_,\n )\n extra_argpairs.append((block_info, out_ind))\n extra_names.append(\"block_info\")\n\n if extra_argpairs:\n # Rewrite the Blockwise layer. It would be nice to find a way to\n # avoid doing it twice, but it's currently needed to determine\n # out.chunks from the first pass. 
Since it constructs a Blockwise\n # rather than an expanded graph, it shouldn't be too expensive.\n out = blockwise(\n _pass_extra_kwargs,\n out_ind,\n func,\n None,\n tuple(extra_names),\n None,\n *concat(extra_argpairs),\n *concat(argpairs),\n name=out.name,\n dtype=out.dtype,\n concatenate=True,\n align_arrays=False,\n adjust_chunks=dict(zip(out_ind, out.chunks)),\n meta=meta,\n **kwargs,\n )\n\n return out\n\n\ndef broadcast_chunks(*chunkss):\n \"\"\" Construct a chunks tuple that broadcasts many chunks tuples\n\n >>> a = ((5, 5),)\n >>> b = ((5, 5),)\n >>> broadcast_chunks(a, b)\n ((5, 5),)\n\n >>> a = ((10, 10, 10), (5, 5),)\n >>> b = ((5, 5),)\n >>> broadcast_chunks(a, b)\n ((10, 10, 10), (5, 5))\n\n >>> a = ((10, 10, 10), (5, 5),)\n >>> b = ((1,), (5, 5),)\n >>> broadcast_chunks(a, b)\n ((10, 10, 10), (5, 5))\n\n >>> a = ((10, 10, 10), (5, 5),)\n >>> b = ((3, 3,), (5, 5),)\n >>> broadcast_chunks(a, b)\n Traceback (most recent call last):\n ...\n ValueError: Chunks do not align: [(10, 10, 10), (3, 3)]\n \"\"\"\n if not chunkss:\n return ()\n elif len(chunkss) == 1:\n return chunkss[0]\n n = max(map(len, chunkss))\n chunkss2 = [((1,),) * (n - len(c)) + c for c in chunkss]\n result = []\n for i in range(n):\n step1 = [c[i] for c in chunkss2]\n if all(c == (1,) for c in step1):\n step2 = step1\n else:\n step2 = [c for c in step1 if c != (1,)]\n if len(set(step2)) != 1:\n raise ValueError(\"Chunks do not align: %s\" % str(step2))\n result.append(step2[0])\n return tuple(result)\n\n\ndef store(\n sources,\n targets,\n lock=True,\n regions=None,\n compute=True,\n return_stored=False,\n **kwargs,\n):\n \"\"\" Store dask arrays in array-like objects, overwrite data in target\n\n This stores dask arrays into object that supports numpy-style setitem\n indexing. It stores values chunk by chunk so that it does not have to\n fill up memory. For best performance you can align the block size of\n the storage target with the block size of your array.\n\n If your data fits in memory then you may prefer calling\n ``np.array(myarray)`` instead.\n\n Parameters\n ----------\n\n sources: Array or iterable of Arrays\n targets: array-like or Delayed or iterable of array-likes and/or Delayeds\n These should support setitem syntax ``target[10:20] = ...``\n lock: boolean or threading.Lock, optional\n Whether or not to lock the data stores while storing.\n Pass True (lock each file individually), False (don't lock) or a\n particular ``threading.Lock`` object to be shared among all writes.\n regions: tuple of slices or list of tuples of slices\n Each ``region`` tuple in ``regions`` should be such that\n ``target[region].shape = source.shape``\n for the corresponding source and target in sources and targets,\n respectively. If this is a tuple, the contents will be assumed to be\n slices, so do not provide a tuple of tuples.\n compute: boolean, optional\n If true compute immediately, return ``dask.delayed.Delayed`` otherwise\n return_stored: boolean, optional\n Optionally return the stored result (default False).\n\n Examples\n --------\n >>> x = ... # doctest: +SKIP\n\n >>> import h5py # doctest: +SKIP\n >>> f = h5py.File('myfile.hdf5', mode='a') # doctest: +SKIP\n >>> dset = f.create_dataset('/data', shape=x.shape,\n ... chunks=x.chunks,\n ... 
dtype='f8') # doctest: +SKIP\n\n >>> store(x, dset) # doctest: +SKIP\n\n Alternatively store many arrays at the same time\n\n >>> store([x, y, z], [dset1, dset2, dset3]) # doctest: +SKIP\n \"\"\"\n\n if isinstance(sources, Array):\n sources = [sources]\n targets = [targets]\n\n if any(not isinstance(s, Array) for s in sources):\n raise ValueError(\"All sources must be dask array objects\")\n\n if len(sources) != len(targets):\n raise ValueError(\n \"Different number of sources [%d] and targets [%d]\"\n % (len(sources), len(targets))\n )\n\n if isinstance(regions, tuple) or regions is None:\n regions = [regions]\n\n if len(sources) > 1 and len(regions) == 1:\n regions *= len(sources)\n\n if len(sources) != len(regions):\n raise ValueError(\n \"Different number of sources [%d] and targets [%d] than regions [%d]\"\n % (len(sources), len(targets), len(regions))\n )\n\n # Optimize all sources together\n sources_dsk = HighLevelGraph.merge(*[e.__dask_graph__() for e in sources])\n sources_dsk = Array.__dask_optimize__(\n sources_dsk, list(core.flatten([e.__dask_keys__() for e in sources]))\n )\n sources2 = [Array(sources_dsk, e.name, e.chunks, meta=e) for e in sources]\n\n # Optimize all targets together\n targets2 = []\n targets_keys = []\n targets_dsk = []\n for e in targets:\n if isinstance(e, Delayed):\n targets2.append(e.key)\n targets_keys.extend(e.__dask_keys__())\n targets_dsk.append(e.__dask_graph__())\n elif is_dask_collection(e):\n raise TypeError(\"Targets must be either Delayed objects or array-likes\")\n else:\n targets2.append(e)\n\n targets_dsk = HighLevelGraph.merge(*targets_dsk)\n targets_dsk = Delayed.__dask_optimize__(targets_dsk, targets_keys)\n\n load_stored = return_stored and not compute\n toks = [str(uuid.uuid1()) for _ in range(len(sources))]\n store_dsk = HighLevelGraph.merge(\n *[\n insert_to_ooc(s, t, lock, r, return_stored, load_stored, tok)\n for s, t, r, tok in zip(sources2, targets2, regions, toks)\n ]\n )\n store_keys = list(store_dsk.keys())\n\n store_dsk = HighLevelGraph.merge(store_dsk, targets_dsk, sources_dsk)\n\n if return_stored:\n load_store_dsk = store_dsk\n if compute:\n store_dlyds = [Delayed(k, store_dsk) for k in store_keys]\n store_dlyds = persist(*store_dlyds, **kwargs)\n store_dsk_2 = HighLevelGraph.merge(*[e.dask for e in store_dlyds])\n\n load_store_dsk = retrieve_from_ooc(store_keys, store_dsk, store_dsk_2)\n\n result = tuple(\n Array(load_store_dsk, \"load-store-%s\" % t, s.chunks, meta=s)\n for s, t in zip(sources, toks)\n )\n\n return result\n else:\n name = \"store-\" + str(uuid.uuid1())\n dsk = HighLevelGraph.merge({name: store_keys}, store_dsk)\n result = Delayed(name, dsk)\n\n if compute:\n result.compute(**kwargs)\n return None\n else:\n return result\n\n\ndef blockdims_from_blockshape(shape, chunks):\n \"\"\"\n\n >>> blockdims_from_blockshape((10, 10), (4, 3))\n ((4, 4, 2), (3, 3, 3, 1))\n >>> blockdims_from_blockshape((10, 0), (4, 0))\n ((4, 4, 2), (0,))\n \"\"\"\n if chunks is None:\n raise TypeError(\"Must supply chunks= keyword argument\")\n if shape is None:\n raise TypeError(\"Must supply shape= keyword argument\")\n if np.isnan(sum(shape)) or np.isnan(sum(chunks)):\n raise ValueError(\n \"Array chunk sizes are unknown. 
shape: %s, chunks: %s%s\"\n % (shape, chunks, unknown_chunk_message)\n )\n if not all(map(is_integer, chunks)):\n raise ValueError(\"chunks can only contain integers.\")\n if not all(map(is_integer, shape)):\n raise ValueError(\"shape can only contain integers.\")\n shape = tuple(map(int, shape))\n chunks = tuple(map(int, chunks))\n return tuple(\n ((bd,) * (d // bd) + ((d % bd,) if d % bd else ()) if d else (0,))\n for d, bd in zip(shape, chunks)\n )\n\n\ndef finalize(results):\n if not results:\n return concatenate3(results)\n results2 = results\n while isinstance(results2, (tuple, list)):\n if len(results2) > 1:\n return concatenate3(results)\n else:\n results2 = results2[0]\n return unpack_singleton(results)\n\n\nCHUNKS_NONE_ERROR_MESSAGE = \"\"\"\nYou must specify a chunks= keyword argument.\nThis specifies the chunksize of your array blocks.\n\nSee the following documentation page for details:\n https://docs.dask.org/en/latest/array-creation.html#chunks\n\"\"\".strip()\n\n\nclass Array(DaskMethodsMixin):\n \"\"\" Parallel Dask Array\n\n A parallel nd-array comprised of many numpy arrays arranged in a grid.\n\n This constructor is for advanced uses only. For normal use see the\n ``da.from_array`` function.\n\n Parameters\n ----------\n dask : dict\n Task dependency graph\n name : string\n Name of array in dask\n shape : tuple of ints\n Shape of the entire array\n chunks: iterable of tuples\n block sizes along each dimension\n dtype : str or dtype\n Typecode or data-type for the new Dask Array\n meta : empty ndarray\n empty ndarray created with same NumPy backend, ndim and dtype as the\n Dask Array being created (overrides dtype)\n\n See Also\n --------\n dask.array.from_array\n \"\"\"\n\n __slots__ = \"dask\", \"_name\", \"_cached_keys\", \"_chunks\", \"_meta\"\n\n def __new__(cls, dask, name, chunks, dtype=None, meta=None, shape=None):\n self = super(Array, cls).__new__(cls)\n assert isinstance(dask, Mapping)\n if not isinstance(dask, HighLevelGraph):\n dask = HighLevelGraph.from_collections(name, dask, dependencies=())\n self.dask = dask\n self.name = str(name)\n meta = meta_from_array(meta, dtype=dtype)\n\n if (\n isinstance(chunks, str)\n or isinstance(chunks, tuple)\n and chunks\n and any(isinstance(c, str) for c in chunks)\n ):\n dt = meta.dtype\n else:\n dt = None\n self._chunks = normalize_chunks(chunks, shape, dtype=dt)\n if self._chunks is None:\n raise ValueError(CHUNKS_NONE_ERROR_MESSAGE)\n\n self._meta = meta_from_array(meta, ndim=self.ndim, dtype=dtype)\n\n for plugin in config.get(\"array_plugins\", ()):\n result = plugin(self)\n if result is not None:\n self = result\n\n return self\n\n def __reduce__(self):\n return (Array, (self.dask, self.name, self.chunks, self.dtype))\n\n def __dask_graph__(self):\n return self.dask\n\n def __dask_layers__(self):\n return (self.name,)\n\n def __dask_keys__(self):\n if self._cached_keys is not None:\n return self._cached_keys\n\n name, chunks, numblocks = self.name, self.chunks, self.numblocks\n\n def keys(*args):\n if not chunks:\n return [(name,)]\n ind = len(args)\n if ind + 1 == len(numblocks):\n result = [(name,) + args + (i,) for i in range(numblocks[ind])]\n else:\n result = [keys(*(args + (i,))) for i in range(numblocks[ind])]\n return result\n\n self._cached_keys = result = keys()\n return result\n\n def __dask_tokenize__(self):\n return self.name\n\n __dask_optimize__ = globalmethod(\n optimize, key=\"array_optimize\", falsey=dont_optimize\n )\n __dask_scheduler__ = staticmethod(threaded.get)\n\n def 
__dask_postcompute__(self):\n return finalize, ()\n\n def __dask_postpersist__(self):\n return Array, (self.name, self.chunks, self.dtype, self._meta)\n\n @property\n def numblocks(self):\n return tuple(map(len, self.chunks))\n\n @property\n def npartitions(self):\n return reduce(mul, self.numblocks, 1)\n\n def compute_chunk_sizes(self):\n \"\"\"\n Compute the chunk sizes for a Dask array. This is especially useful\n when the chunk sizes are unknown (e.g., when indexing one Dask array\n with another).\n\n Notes\n -----\n This function modifies the Dask array in-place.\n\n Examples\n --------\n >>> import dask.array as da\n >>> import numpy as np\n >>> x = da.from_array([-2, -1, 0, 1, 2], chunks=2)\n >>> x.chunks\n ((2, 2, 1),)\n >>> y = x[x <= 0]\n >>> y.chunks\n ((nan, nan, nan),)\n >>> y.compute_chunk_sizes() # in-place computation\n dask.array<getitem, shape=(3,), dtype=int64, chunksize=(2,), chunktype=numpy.ndarray>\n >>> y.chunks\n ((2, 1, 0),)\n\n \"\"\"\n x = self\n chunk_shapes = x.map_blocks(\n _get_chunk_shape,\n dtype=int,\n chunks=tuple(len(c) * (1,) for c in x.chunks) + ((x.ndim,),),\n new_axis=x.ndim,\n )\n\n c = []\n for i in range(x.ndim):\n s = x.ndim * [0] + [i]\n s[i] = slice(None)\n s = tuple(s)\n\n c.append(tuple(chunk_shapes[s]))\n\n # `map_blocks` assigns numpy dtypes\n # cast chunk dimensions back to python int before returning\n x._chunks = tuple(\n [tuple([int(chunk) for chunk in chunks]) for chunks in compute(tuple(c))[0]]\n )\n return x\n\n @property\n def shape(self):\n return tuple(cached_cumsum(c, initial_zero=True)[-1] for c in self.chunks)\n\n @property\n def chunksize(self):\n return tuple(max(c) for c in self.chunks)\n\n @property\n def dtype(self):\n return self._meta.dtype\n\n def _get_chunks(self):\n return self._chunks\n\n def _set_chunks(self, chunks):\n msg = (\n \"Can not set chunks directly\\n\\n\"\n \"Please use the rechunk method instead:\\n\"\n \" x.rechunk({})\\n\\n\"\n \"If trying to avoid unknown chunks, use\\n\"\n \" x.compute_chunk_sizes()\"\n )\n raise TypeError(msg.format(chunks))\n\n chunks = property(_get_chunks, _set_chunks, \"chunks property\")\n\n def __len__(self):\n if not self.chunks:\n raise TypeError(\"len() of unsized object\")\n return sum(self.chunks[0])\n\n def __array_ufunc__(self, numpy_ufunc, method, *inputs, **kwargs):\n out = kwargs.get(\"out\", ())\n for x in inputs + out:\n if not isinstance(x, (np.ndarray, Number, Array)):\n return NotImplemented\n\n if method == \"__call__\":\n if numpy_ufunc is np.matmul:\n from .routines import matmul\n\n # special case until apply_gufunc handles optional dimensions\n return matmul(*inputs, **kwargs)\n if numpy_ufunc.signature is not None:\n from .gufunc import apply_gufunc\n\n return apply_gufunc(\n numpy_ufunc, numpy_ufunc.signature, *inputs, **kwargs\n )\n if numpy_ufunc.nout > 1:\n from . import ufunc\n\n try:\n da_ufunc = getattr(ufunc, numpy_ufunc.__name__)\n except AttributeError:\n return NotImplemented\n return da_ufunc(*inputs, **kwargs)\n else:\n return elemwise(numpy_ufunc, *inputs, **kwargs)\n elif method == \"outer\":\n from . 
import ufunc\n\n try:\n da_ufunc = getattr(ufunc, numpy_ufunc.__name__)\n except AttributeError:\n return NotImplemented\n return da_ufunc.outer(*inputs, **kwargs)\n else:\n return NotImplemented\n\n def __repr__(self):\n \"\"\"\n\n >>> import dask.array as da\n >>> da.ones((10, 10), chunks=(5, 5), dtype='i4')\n dask.array<..., shape=(10, 10), dtype=int32, chunksize=(5, 5), chunktype=numpy.ndarray>\n \"\"\"\n chunksize = str(self.chunksize)\n name = self.name.rsplit(\"-\", 1)[0]\n return \"dask.array<%s, shape=%s, dtype=%s, chunksize=%s, chunktype=%s.%s>\" % (\n name,\n self.shape,\n self.dtype,\n chunksize,\n type(self._meta).__module__.split(\".\")[0],\n type(self._meta).__name__,\n )\n\n def _repr_html_(self):\n table = self._repr_html_table()\n try:\n grid = self.to_svg(size=config.get(\"array.svg.size\", 120))\n except NotImplementedError:\n grid = \"\"\n\n both = [\n \"<table>\",\n \"<tr>\",\n \"<td>\",\n table,\n \"</td>\",\n \"<td>\",\n grid,\n \"</td>\",\n \"</tr>\",\n \"</table>\",\n ]\n return \"\\n\".join(both)\n\n def _repr_html_table(self):\n if \"sparse\" in typename(type(self._meta)):\n nbytes = None\n cbytes = None\n elif not math.isnan(self.nbytes):\n nbytes = format_bytes(self.nbytes)\n cbytes = format_bytes(np.prod(self.chunksize) * self.dtype.itemsize)\n else:\n nbytes = \"unknown\"\n cbytes = \"unknown\"\n\n table = [\n \"<table>\",\n \" <thead>\",\n \" <tr><td> </td><th> Array </th><th> Chunk </th></tr>\",\n \" </thead>\",\n \" <tbody>\",\n \" <tr><th> Bytes </th><td> %s </td> <td> %s </td></tr>\"\n % (nbytes, cbytes)\n if nbytes is not None\n else \"\",\n \" <tr><th> Shape </th><td> %s </td> <td> %s </td></tr>\"\n % (str(self.shape), str(self.chunksize)),\n \" <tr><th> Count </th><td> %d Tasks </td><td> %d Chunks </td></tr>\"\n % (len(self.__dask_graph__()), self.npartitions),\n \" <tr><th> Type </th><td> %s </td><td> %s.%s </td></tr>\"\n % (\n self.dtype,\n type(self._meta).__module__.split(\".\")[0],\n type(self._meta).__name__,\n ),\n \" </tbody>\",\n \"</table>\",\n ]\n return \"\\n\".join(table)\n\n @property\n def ndim(self):\n return len(self.shape)\n\n @property\n def size(self):\n \"\"\" Number of elements in array \"\"\"\n return reduce(mul, self.shape, 1)\n\n @property\n def nbytes(self):\n \"\"\" Number of bytes in array \"\"\"\n return self.size * self.dtype.itemsize\n\n @property\n def itemsize(self):\n \"\"\" Length of one array element in bytes \"\"\"\n return self.dtype.itemsize\n\n @property\n def name(self):\n return self._name\n\n @name.setter\n def name(self, val):\n self._name = val\n # Clear the key cache when the name is reset\n self._cached_keys = None\n\n __array_priority__ = 11 # higher than numpy.ndarray and numpy.matrix\n\n def __array__(self, dtype=None, **kwargs):\n x = self.compute()\n if dtype and x.dtype != dtype:\n x = x.astype(dtype)\n if not isinstance(x, np.ndarray):\n x = np.array(x)\n return x\n\n def __array_function__(self, func, types, args, kwargs):\n import dask.array as module\n\n def handle_nonmatching_names(func, args, kwargs):\n if func not in _HANDLED_FUNCTIONS:\n warnings.warn(\n \"The `{}` function is not implemented by Dask array. \"\n \"You may want to use the da.map_blocks function \"\n \"or something similar to silence this warning. \"\n \"Your code may stop working in a future release.\".format(\n func.__module__ + \".\" + func.__name__\n ),\n FutureWarning,\n )\n # Need to convert to array object (e.g. 
numpy.ndarray or\n # cupy.ndarray) as needed, so we can call the NumPy function\n # again and it gets the chance to dispatch to the right\n # implementation.\n args, kwargs = compute(args, kwargs)\n return func(*args, **kwargs)\n\n return _HANDLED_FUNCTIONS[func](*args, **kwargs)\n\n # First try to find a matching function name. If that doesn't work, we may\n # be dealing with an alias or a function that's simply not in the Dask API.\n # Handle aliases via the _HANDLED_FUNCTIONS dict mapping, and warn otherwise.\n for submodule in func.__module__.split(\".\")[1:]:\n try:\n module = getattr(module, submodule)\n except AttributeError:\n return handle_nonmatching_names(func, args, kwargs)\n\n if not hasattr(module, func.__name__):\n return handle_nonmatching_names(func, args, kwargs)\n\n da_func = getattr(module, func.__name__)\n if da_func is func:\n return handle_nonmatching_names(func, args, kwargs)\n return da_func(*args, **kwargs)\n\n @property\n def _elemwise(self):\n return elemwise\n\n @wraps(store)\n def store(self, target, **kwargs):\n r = store([self], [target], **kwargs)\n\n if kwargs.get(\"return_stored\", False):\n r = r[0]\n\n return r\n\n def to_svg(self, size=500):\n \"\"\" Convert chunks from Dask Array into an SVG Image\n\n Parameters\n ----------\n chunks: tuple\n size: int\n Rough size of the image\n\n Examples\n --------\n >>> x.to_svg(size=500) # doctest: +SKIP\n\n Returns\n -------\n text: An svg string depicting the array as a grid of chunks\n \"\"\"\n from .svg import svg\n\n return svg(self.chunks, size=size)\n\n def to_hdf5(self, filename, datapath, **kwargs):\n \"\"\" Store array in HDF5 file\n\n >>> x.to_hdf5('myfile.hdf5', '/x') # doctest: +SKIP\n\n Optionally provide arguments as though to ``h5py.File.create_dataset``\n\n >>> x.to_hdf5('myfile.hdf5', '/x', compression='lzf', shuffle=True) # doctest: +SKIP\n\n See Also\n --------\n da.store\n h5py.File.create_dataset\n \"\"\"\n return to_hdf5(filename, datapath, self, **kwargs)\n\n def to_dask_dataframe(self, columns=None, index=None, meta=None):\n \"\"\" Convert dask Array to dask Dataframe\n\n Parameters\n ----------\n columns: list or string\n list of column names if DataFrame, single string if Series\n index : dask.dataframe.Index, optional\n An optional *dask* Index to use for the output Series or DataFrame.\n\n The default output index depends on whether the array has any unknown\n chunks. If there are any unknown chunks, the output has ``None``\n for all the divisions (one per chunk). If all the chunks are known,\n a default index with known divsions is created.\n\n Specifying ``index`` can be useful if you're conforming a Dask Array\n to an existing dask Series or DataFrame, and you would like the\n indices to match.\n meta : object, optional\n An optional `meta` parameter can be passed for dask\n to specify the concrete dataframe type to use for partitions of\n the Dask dataframe. By default, pandas DataFrame is used.\n\n See Also\n --------\n dask.dataframe.from_dask_array\n \"\"\"\n from ..dataframe import from_dask_array\n\n return from_dask_array(self, columns=columns, index=index, meta=meta)\n\n def __bool__(self):\n if self.size > 1:\n raise ValueError(\n \"The truth value of a {0} is ambiguous. 
\"\n \"Use a.any() or a.all().\".format(self.__class__.__name__)\n )\n else:\n return bool(self.compute())\n\n __nonzero__ = __bool__ # python 2\n\n def _scalarfunc(self, cast_type):\n if self.size > 1:\n raise TypeError(\"Only length-1 arrays can be converted to Python scalars\")\n else:\n return cast_type(self.compute())\n\n def __int__(self):\n return self._scalarfunc(int)\n\n __long__ = __int__ # python 2\n\n def __float__(self):\n return self._scalarfunc(float)\n\n def __complex__(self):\n return self._scalarfunc(complex)\n\n def __setitem__(self, key, value):\n from .routines import where\n\n if isinstance(key, Array):\n if isinstance(value, Array) and value.ndim > 1:\n raise ValueError(\"boolean index array should have 1 dimension\")\n y = where(key, value, self)\n self._meta = y._meta\n self.dask = y.dask\n self.name = y.name\n self._chunks = y.chunks\n return self\n else:\n raise NotImplementedError(\n \"Item assignment with %s not supported\" % type(key)\n )\n\n def __getitem__(self, index):\n # Field access, e.g. x['a'] or x[['a', 'b']]\n if isinstance(index, str) or (\n isinstance(index, list) and index and all(isinstance(i, str) for i in index)\n ):\n if isinstance(index, str):\n dt = self.dtype[index]\n else:\n dt = _make_sliced_dtype(self.dtype, index)\n\n if dt.shape:\n new_axis = list(range(self.ndim, self.ndim + len(dt.shape)))\n chunks = self.chunks + tuple((i,) for i in dt.shape)\n return self.map_blocks(\n getitem, index, dtype=dt.base, chunks=chunks, new_axis=new_axis\n )\n else:\n return self.map_blocks(getitem, index, dtype=dt)\n\n if not isinstance(index, tuple):\n index = (index,)\n\n from .slicing import (\n normalize_index,\n slice_with_int_dask_array,\n slice_with_bool_dask_array,\n )\n\n index2 = normalize_index(index, self.shape)\n\n dependencies = {self.name}\n for i in index2:\n if isinstance(i, Array):\n dependencies.add(i.name)\n\n if any(isinstance(i, Array) and i.dtype.kind in \"iu\" for i in index2):\n self, index2 = slice_with_int_dask_array(self, index2)\n if any(isinstance(i, Array) and i.dtype == bool for i in index2):\n self, index2 = slice_with_bool_dask_array(self, index2)\n\n if all(isinstance(i, slice) and i == slice(None) for i in index2):\n return self\n\n out = \"getitem-\" + tokenize(self, index2)\n dsk, chunks = slice_array(out, self.name, self.chunks, index2)\n\n graph = HighLevelGraph.from_collections(out, dsk, dependencies=[self])\n\n meta = meta_from_array(self._meta, ndim=len(chunks))\n if np.isscalar(meta):\n meta = np.array(meta)\n\n return Array(graph, out, chunks, meta=meta)\n\n def _vindex(self, key):\n if not isinstance(key, tuple):\n key = (key,)\n if any(k is None for k in key):\n raise IndexError(\n \"vindex does not support indexing with None (np.newaxis), \"\n \"got {}\".format(key)\n )\n if all(isinstance(k, slice) for k in key):\n if all(\n k.indices(d) == slice(0, d).indices(d) for k, d in zip(key, self.shape)\n ):\n return self\n raise IndexError(\n \"vindex requires at least one non-slice to vectorize over \"\n \"when the slices are not over the entire array (i.e, x[:]). \"\n \"Use normal slicing instead when only using slices. Got: {}\".format(key)\n )\n return _vindex(self, *key)\n\n @property\n def vindex(self):\n \"\"\"Vectorized indexing with broadcasting.\n\n This is equivalent to numpy's advanced indexing, using arrays that are\n broadcast against each other. 
This allows for pointwise indexing:\n\n >>> x = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])\n >>> x = from_array(x, chunks=2)\n >>> x.vindex[[0, 1, 2], [0, 1, 2]].compute()\n array([1, 5, 9])\n\n Mixed basic/advanced indexing with slices/arrays is also supported. The\n order of dimensions in the result follows those proposed for\n `ndarray.vindex <https://github.com/numpy/numpy/pull/6256>`_:\n the subspace spanned by arrays is followed by all slices.\n\n Note: ``vindex`` provides more general functionality than standard\n indexing, but it also has fewer optimizations and can be significantly\n slower.\n \"\"\"\n return IndexCallable(self._vindex)\n\n def _blocks(self, index):\n from .slicing import normalize_index\n\n if not isinstance(index, tuple):\n index = (index,)\n if sum(isinstance(ind, (np.ndarray, list)) for ind in index) > 1:\n raise ValueError(\"Can only slice with a single list\")\n if any(ind is None for ind in index):\n raise ValueError(\"Slicing with np.newaxis or None is not supported\")\n index = normalize_index(index, self.numblocks)\n index = tuple(slice(k, k + 1) if isinstance(k, Number) else k for k in index)\n\n name = \"blocks-\" + tokenize(self, index)\n\n new_keys = np.array(self.__dask_keys__(), dtype=object)[index]\n\n chunks = tuple(\n tuple(np.array(c)[i].tolist()) for c, i in zip(self.chunks, index)\n )\n\n keys = product(*(range(len(c)) for c in chunks))\n\n layer = {(name,) + key: tuple(new_keys[key].tolist()) for key in keys}\n\n graph = HighLevelGraph.from_collections(name, layer, dependencies=[self])\n return Array(graph, name, chunks, meta=self)\n\n @property\n def blocks(self):\n \"\"\" Slice an array by blocks\n\n This allows blockwise slicing of a Dask array. You can perform normal\n Numpy-style slicing but now rather than slice elements of the array you\n slice along blocks so, for example, ``x.blocks[0, ::2]`` produces a new\n dask array with every other block in the first row of blocks.\n\n You can index blocks in any way that could index a numpy array of shape\n equal to the number of blocks in each dimension, (available as\n array.numblocks). The dimension of the output array will be the same\n as the dimension of this array, even if integer indices are passed.\n This does not support slicing with ``np.newaxis`` or multiple lists.\n\n Examples\n --------\n >>> import dask.array as da\n >>> x = da.arange(10, chunks=2)\n >>> x.blocks[0].compute()\n array([0, 1])\n >>> x.blocks[:3].compute()\n array([0, 1, 2, 3, 4, 5])\n >>> x.blocks[::2].compute()\n array([0, 1, 4, 5, 8, 9])\n >>> x.blocks[[-1, 0]].compute()\n array([8, 9, 0, 1])\n\n Returns\n -------\n A Dask array\n \"\"\"\n return IndexCallable(self._blocks)\n\n @property\n def partitions(self):\n \"\"\"Slice an array by partitions. Alias of dask array .blocks attribute.\n\n This alias allows you to write agnostic code that works with both\n dask arrays and dask dataframes.\n\n This allows blockwise slicing of a Dask array. You can perform normal\n Numpy-style slicing but now rather than slice elements of the array you\n slice along blocks so, for example, ``x.blocks[0, ::2]`` produces a new\n dask array with every other block in the first row of blocks.\n\n You can index blocks in any way that could index a numpy array of shape\n equal to the number of blocks in each dimension, (available as\n array.numblocks). 
The dimension of the output array will be the same\n as the dimension of this array, even if integer indices are passed.\n This does not support slicing with ``np.newaxis`` or multiple lists.\n\n Examples\n --------\n >>> import dask.array as da\n >>> x = da.arange(10, chunks=2)\n >>> x.partitions[0].compute()\n array([0, 1])\n >>> x.partitions[:3].compute()\n array([0, 1, 2, 3, 4, 5])\n >>> x.partitions[::2].compute()\n array([0, 1, 4, 5, 8, 9])\n >>> x.partitions[[-1, 0]].compute()\n array([8, 9, 0, 1])\n >>> all(x.partitions[:].compute() == x.blocks[:].compute())\n True\n\n Returns\n -------\n A Dask array\n \"\"\"\n return self.blocks\n\n @derived_from(np.ndarray)\n def dot(self, other):\n from .routines import tensordot\n\n return tensordot(self, other, axes=((self.ndim - 1,), (other.ndim - 2,)))\n\n @property\n def A(self):\n return self\n\n @property\n def T(self):\n return self.transpose()\n\n @derived_from(np.ndarray)\n def transpose(self, *axes):\n from .routines import transpose\n\n if not axes:\n axes = None\n elif len(axes) == 1 and isinstance(axes[0], Iterable):\n axes = axes[0]\n if (axes == tuple(range(self.ndim))) or (axes == tuple(range(-self.ndim, 0))):\n # no transpose necessary\n return self\n else:\n return transpose(self, axes=axes)\n\n @derived_from(np.ndarray)\n def ravel(self):\n from .routines import ravel\n\n return ravel(self)\n\n flatten = ravel\n\n @derived_from(np.ndarray)\n def choose(self, choices):\n from .routines import choose\n\n return choose(self, choices)\n\n @derived_from(np.ndarray)\n def reshape(self, *shape):\n from .reshape import reshape\n\n if len(shape) == 1 and not isinstance(shape[0], Number):\n shape = shape[0]\n return reshape(self, shape)\n\n def topk(self, k, axis=-1, split_every=None):\n \"\"\"The top k elements of an array.\n\n See ``da.topk`` for docstring\"\"\"\n from .reductions import topk\n\n return topk(self, k, axis=axis, split_every=split_every)\n\n def argtopk(self, k, axis=-1, split_every=None):\n \"\"\"The indices of the top k elements of an array.\n\n See ``da.argtopk`` for docstring\"\"\"\n from .reductions import argtopk\n\n return argtopk(self, k, axis=axis, split_every=split_every)\n\n def astype(self, dtype, **kwargs):\n \"\"\"Copy of the array, cast to a specified type.\n\n Parameters\n ----------\n dtype : str or dtype\n Typecode or data-type to which the array is cast.\n casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional\n Controls what kind of data casting may occur. Defaults to 'unsafe'\n for backwards compatibility.\n\n * 'no' means the data types should not be cast at all.\n * 'equiv' means only byte-order changes are allowed.\n * 'safe' means only casts which can preserve values are allowed.\n * 'same_kind' means only safe casts or casts within a kind,\n like float64 to float32, are allowed.\n * 'unsafe' means any data conversions may be done.\n copy : bool, optional\n By default, astype always returns a newly allocated array. 
If this\n is set to False and the `dtype` requirement is satisfied, the input\n array is returned instead of a copy.\n \"\"\"\n # Scalars don't take `casting` or `copy` kwargs - as such we only pass\n # them to `map_blocks` if specified by user (different than defaults).\n extra = set(kwargs) - {\"casting\", \"copy\"}\n if extra:\n raise TypeError(\n \"astype does not take the following keyword \"\n \"arguments: {0!s}\".format(list(extra))\n )\n casting = kwargs.get(\"casting\", \"unsafe\")\n dtype = np.dtype(dtype)\n if self.dtype == dtype:\n return self\n elif not np.can_cast(self.dtype, dtype, casting=casting):\n raise TypeError(\n \"Cannot cast array from {0!r} to {1!r}\"\n \" according to the rule \"\n \"{2!r}\".format(self.dtype, dtype, casting)\n )\n return self.map_blocks(chunk.astype, dtype=dtype, astype_dtype=dtype, **kwargs)\n\n def __abs__(self):\n return elemwise(operator.abs, self)\n\n def __add__(self, other):\n return elemwise(operator.add, self, other)\n\n def __radd__(self, other):\n return elemwise(operator.add, other, self)\n\n def __and__(self, other):\n return elemwise(operator.and_, self, other)\n\n def __rand__(self, other):\n return elemwise(operator.and_, other, self)\n\n def __div__(self, other):\n return elemwise(operator.div, self, other)\n\n def __rdiv__(self, other):\n return elemwise(operator.div, other, self)\n\n def __eq__(self, other):\n return elemwise(operator.eq, self, other)\n\n def __gt__(self, other):\n return elemwise(operator.gt, self, other)\n\n def __ge__(self, other):\n return elemwise(operator.ge, self, other)\n\n def __invert__(self):\n return elemwise(operator.invert, self)\n\n def __lshift__(self, other):\n return elemwise(operator.lshift, self, other)\n\n def __rlshift__(self, other):\n return elemwise(operator.lshift, other, self)\n\n def __lt__(self, other):\n return elemwise(operator.lt, self, other)\n\n def __le__(self, other):\n return elemwise(operator.le, self, other)\n\n def __mod__(self, other):\n return elemwise(operator.mod, self, other)\n\n def __rmod__(self, other):\n return elemwise(operator.mod, other, self)\n\n def __mul__(self, other):\n return elemwise(operator.mul, self, other)\n\n def __rmul__(self, other):\n return elemwise(operator.mul, other, self)\n\n def __ne__(self, other):\n return elemwise(operator.ne, self, other)\n\n def __neg__(self):\n return elemwise(operator.neg, self)\n\n def __or__(self, other):\n return elemwise(operator.or_, self, other)\n\n def __pos__(self):\n return self\n\n def __ror__(self, other):\n return elemwise(operator.or_, other, self)\n\n def __pow__(self, other):\n return elemwise(operator.pow, self, other)\n\n def __rpow__(self, other):\n return elemwise(operator.pow, other, self)\n\n def __rshift__(self, other):\n return elemwise(operator.rshift, self, other)\n\n def __rrshift__(self, other):\n return elemwise(operator.rshift, other, self)\n\n def __sub__(self, other):\n return elemwise(operator.sub, self, other)\n\n def __rsub__(self, other):\n return elemwise(operator.sub, other, self)\n\n def __truediv__(self, other):\n return elemwise(operator.truediv, self, other)\n\n def __rtruediv__(self, other):\n return elemwise(operator.truediv, other, self)\n\n def __floordiv__(self, other):\n return elemwise(operator.floordiv, self, other)\n\n def __rfloordiv__(self, other):\n return elemwise(operator.floordiv, other, self)\n\n def __xor__(self, other):\n return elemwise(operator.xor, self, other)\n\n def __rxor__(self, other):\n return elemwise(operator.xor, other, self)\n\n def 
__matmul__(self, other):\n from .routines import matmul\n\n return matmul(self, other)\n\n def __rmatmul__(self, other):\n from .routines import matmul\n\n return matmul(other, self)\n\n def __divmod__(self, other):\n from .ufunc import divmod\n\n return divmod(self, other)\n\n def __rdivmod__(self, other):\n from .ufunc import divmod\n\n return divmod(other, self)\n\n @derived_from(np.ndarray)\n def any(self, axis=None, keepdims=False, split_every=None, out=None):\n from .reductions import any\n\n return any(self, axis=axis, keepdims=keepdims, split_every=split_every, out=out)\n\n @derived_from(np.ndarray)\n def all(self, axis=None, keepdims=False, split_every=None, out=None):\n from .reductions import all\n\n return all(self, axis=axis, keepdims=keepdims, split_every=split_every, out=out)\n\n @derived_from(np.ndarray)\n def min(self, axis=None, keepdims=False, split_every=None, out=None):\n from .reductions import min\n\n return min(self, axis=axis, keepdims=keepdims, split_every=split_every, out=out)\n\n @derived_from(np.ndarray)\n def max(self, axis=None, keepdims=False, split_every=None, out=None):\n from .reductions import max\n\n return max(self, axis=axis, keepdims=keepdims, split_every=split_every, out=out)\n\n @derived_from(np.ndarray)\n def argmin(self, axis=None, split_every=None, out=None):\n from .reductions import argmin\n\n return argmin(self, axis=axis, split_every=split_every, out=out)\n\n @derived_from(np.ndarray)\n def argmax(self, axis=None, split_every=None, out=None):\n from .reductions import argmax\n\n return argmax(self, axis=axis, split_every=split_every, out=out)\n\n @derived_from(np.ndarray)\n def sum(self, axis=None, dtype=None, keepdims=False, split_every=None, out=None):\n from .reductions import sum\n\n return sum(\n self,\n axis=axis,\n dtype=dtype,\n keepdims=keepdims,\n split_every=split_every,\n out=out,\n )\n\n @derived_from(np.ndarray)\n def trace(self, offset=0, axis1=0, axis2=1, dtype=None):\n from .reductions import trace\n\n return trace(self, offset=offset, axis1=axis1, axis2=axis2, dtype=dtype)\n\n @derived_from(np.ndarray)\n def prod(self, axis=None, dtype=None, keepdims=False, split_every=None, out=None):\n from .reductions import prod\n\n return prod(\n self,\n axis=axis,\n dtype=dtype,\n keepdims=keepdims,\n split_every=split_every,\n out=out,\n )\n\n @derived_from(np.ndarray)\n def mean(self, axis=None, dtype=None, keepdims=False, split_every=None, out=None):\n from .reductions import mean\n\n return mean(\n self,\n axis=axis,\n dtype=dtype,\n keepdims=keepdims,\n split_every=split_every,\n out=out,\n )\n\n @derived_from(np.ndarray)\n def std(\n self, axis=None, dtype=None, keepdims=False, ddof=0, split_every=None, out=None\n ):\n from .reductions import std\n\n return std(\n self,\n axis=axis,\n dtype=dtype,\n keepdims=keepdims,\n ddof=ddof,\n split_every=split_every,\n out=out,\n )\n\n @derived_from(np.ndarray)\n def var(\n self, axis=None, dtype=None, keepdims=False, ddof=0, split_every=None, out=None\n ):\n from .reductions import var\n\n return var(\n self,\n axis=axis,\n dtype=dtype,\n keepdims=keepdims,\n ddof=ddof,\n split_every=split_every,\n out=out,\n )\n\n def moment(\n self,\n order,\n axis=None,\n dtype=None,\n keepdims=False,\n ddof=0,\n split_every=None,\n out=None,\n ):\n \"\"\"Calculate the nth centralized moment.\n\n Parameters\n ----------\n order : int\n Order of the moment that is returned, must be >= 2.\n axis : int, optional\n Axis along which the central moment is computed. 
The default is to\n compute the moment of the flattened array.\n dtype : data-type, optional\n Type to use in computing the moment. For arrays of integer type the\n default is float64; for arrays of float types it is the same as the\n array type.\n keepdims : bool, optional\n If this is set to True, the axes which are reduced are left in the\n result as dimensions with size one. With this option, the result\n will broadcast correctly against the original array.\n ddof : int, optional\n \"Delta Degrees of Freedom\": the divisor used in the calculation is\n N - ddof, where N represents the number of elements. By default\n ddof is zero.\n\n Returns\n -------\n moment : ndarray\n\n References\n ----------\n .. [1] Pebay, Philippe (2008), \"Formulas for Robust, One-Pass Parallel\n Computation of Covariances and Arbitrary-Order Statistical Moments\",\n Technical Report SAND2008-6212, Sandia National Laboratories.\n\n \"\"\"\n\n from .reductions import moment\n\n return moment(\n self,\n order,\n axis=axis,\n dtype=dtype,\n keepdims=keepdims,\n ddof=ddof,\n split_every=split_every,\n out=out,\n )\n\n @wraps(map_blocks)\n def map_blocks(self, func, *args, **kwargs):\n return map_blocks(func, self, *args, **kwargs)\n\n def map_overlap(self, func, depth, boundary=None, trim=True, **kwargs):\n \"\"\" Map a function over blocks of the array with some overlap\n\n We share neighboring zones between blocks of the array, then map a\n function, then trim away the neighboring strips.\n\n Parameters\n ----------\n func: function\n The function to apply to each extended block\n depth: int, tuple, or dict\n The number of elements that each block should share with its neighbors\n If a tuple or dict then this can be different per axis\n boundary: str, tuple, dict\n How to handle the boundaries.\n Values include 'reflect', 'periodic', 'nearest', 'none',\n or any constant value like 0 or np.nan\n trim: bool\n Whether or not to trim ``depth`` elements from each block after\n calling the map function.\n Set this to False if your mapping function already does this for you\n **kwargs:\n Other keyword arguments valid in ``map_blocks``\n\n Examples\n --------\n >>> x = np.array([1, 1, 2, 3, 3, 3, 2, 1, 1])\n >>> x = from_array(x, chunks=5)\n >>> def derivative(x):\n ... 
return x - np.roll(x, 1)\n\n >>> y = x.map_overlap(derivative, depth=1, boundary=0)\n >>> y.compute()\n array([ 1, 0, 1, 1, 0, 0, -1, -1, 0])\n\n >>> import dask.array as da\n >>> x = np.arange(16).reshape((4, 4))\n >>> d = da.from_array(x, chunks=(2, 2))\n >>> d.map_overlap(lambda x: x + x.size, depth=1).compute()\n array([[16, 17, 18, 19],\n [20, 21, 22, 23],\n [24, 25, 26, 27],\n [28, 29, 30, 31]])\n\n >>> func = lambda x: x + x.size\n >>> depth = {0: 1, 1: 1}\n >>> boundary = {0: 'reflect', 1: 'none'}\n >>> d.map_overlap(func, depth, boundary).compute() # doctest: +NORMALIZE_WHITESPACE\n array([[12, 13, 14, 15],\n [16, 17, 18, 19],\n [20, 21, 22, 23],\n [24, 25, 26, 27]])\n \"\"\"\n from .overlap import map_overlap\n\n return map_overlap(\n func, self, depth=depth, boundary=boundary, trim=trim, **kwargs\n )\n\n @derived_from(np.ndarray)\n def cumsum(self, axis, dtype=None, out=None):\n from .reductions import cumsum\n\n return cumsum(self, axis, dtype, out=out)\n\n @derived_from(np.ndarray)\n def cumprod(self, axis, dtype=None, out=None):\n from .reductions import cumprod\n\n return cumprod(self, axis, dtype, out=out)\n\n @derived_from(np.ndarray)\n def squeeze(self, axis=None):\n from .routines import squeeze\n\n return squeeze(self, axis)\n\n def rechunk(self, chunks=\"auto\", threshold=None, block_size_limit=None):\n \"\"\" See da.rechunk for docstring \"\"\"\n from . import rechunk # avoid circular import\n\n return rechunk(self, chunks, threshold, block_size_limit)\n\n @property\n def real(self):\n from .ufunc import real\n\n return real(self)\n\n @property\n def imag(self):\n from .ufunc import imag\n\n return imag(self)\n\n def conj(self):\n from .ufunc import conj\n\n return conj(self)\n\n @derived_from(np.ndarray)\n def clip(self, min=None, max=None):\n from .ufunc import clip\n\n return clip(self, min, max)\n\n def view(self, dtype=None, order=\"C\"):\n \"\"\" Get a view of the array as a new data type\n\n Parameters\n ----------\n dtype:\n The dtype by which to view the array.\n The default, None, results in the view having the same data-type\n as the original array.\n order: string\n 'C' or 'F' (Fortran) ordering\n\n This reinterprets the bytes of the array under a new dtype. If that\n dtype does not have the same size as the original array then the shape\n will change.\n\n Beware that both numpy and dask.array can behave oddly when taking\n shape-changing views of arrays under Fortran ordering. Under some\n versions of NumPy this function will fail when taking shape-changing\n views of Fortran ordered arrays if the first dimension has chunks of\n size one.\n \"\"\"\n if dtype is None:\n dtype = self.dtype\n else:\n dtype = np.dtype(dtype)\n mult = self.dtype.itemsize / dtype.itemsize\n\n if order == \"C\":\n chunks = self.chunks[:-1] + (\n tuple(ensure_int(c * mult) for c in self.chunks[-1]),\n )\n elif order == \"F\":\n chunks = (\n tuple(ensure_int(c * mult) for c in self.chunks[0]),\n ) + self.chunks[1:]\n else:\n raise ValueError(\"Order must be one of 'C' or 'F'\")\n\n return self.map_blocks(\n chunk.view, dtype, order=order, dtype=dtype, chunks=chunks\n )\n\n @derived_from(np.ndarray)\n def swapaxes(self, axis1, axis2):\n from .routines import swapaxes\n\n return swapaxes(self, axis1, axis2)\n\n @derived_from(np.ndarray)\n def round(self, decimals=0):\n from .routines import round\n\n return round(self, decimals=decimals)\n\n def copy(self):\n \"\"\"\n Copy array. 
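As an illustrative sketch (assuming ``dask.array`` is imported as ``da``), copying a multi-chunk array reuses the same chunks and task graph rather than duplicating the underlying data:\n\n >>> import dask.array as da # doctest: +SKIP\n >>> x = da.ones((4, 4), chunks=2) # doctest: +SKIP\n >>> y = x.copy() # doctest: +SKIP\n >>> y.chunks == x.chunks # doctest: +SKIP\n True\n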
This is a no-op for dask.arrays, which are immutable\n \"\"\"\n if self.npartitions == 1:\n return self.map_blocks(M.copy)\n else:\n return Array(self.dask, self.name, self.chunks, meta=self)\n\n def __deepcopy__(self, memo):\n c = self.copy()\n memo[id(self)] = c\n return c\n\n def to_delayed(self, optimize_graph=True):\n \"\"\"Convert into an array of ``dask.delayed`` objects, one per chunk.\n\n Parameters\n ----------\n optimize_graph : bool, optional\n If True [default], the graph is optimized before converting into\n ``dask.delayed`` objects.\n\n See Also\n --------\n dask.array.from_delayed\n \"\"\"\n keys = self.__dask_keys__()\n graph = self.__dask_graph__()\n if optimize_graph:\n graph = self.__dask_optimize__(graph, keys) # TODO, don't collape graph\n name = \"delayed-\" + self.name\n graph = HighLevelGraph.from_collections(name, graph, dependencies=())\n L = ndeepmap(self.ndim, lambda k: Delayed(k, graph), keys)\n return np.array(L, dtype=object)\n\n @derived_from(np.ndarray)\n def repeat(self, repeats, axis=None):\n from .creation import repeat\n\n return repeat(self, repeats, axis=axis)\n\n @derived_from(np.ndarray)\n def nonzero(self):\n from .routines import nonzero\n\n return nonzero(self)\n\n def to_zarr(self, *args, **kwargs):\n \"\"\"Save array to the zarr storage format\n\n See https://zarr.readthedocs.io for details about the format.\n\n See function ``to_zarr()`` for parameters.\n \"\"\"\n return to_zarr(self, *args, **kwargs)\n\n def to_tiledb(self, uri, *args, **kwargs):\n \"\"\"Save array to the TileDB storage manager\n\n See function ``to_tiledb()`` for argument documentation.\n\n See https://docs.tiledb.io for details about the format and engine.\n \"\"\"\n from .tiledb_io import to_tiledb\n\n return to_tiledb(self, uri, *args, **kwargs)\n\n\ndef ensure_int(f):\n i = int(f)\n if i != f:\n raise ValueError(\"Could not coerce %f to integer\" % f)\n return i\n\n\ndef normalize_chunks(chunks, shape=None, limit=None, dtype=None, previous_chunks=None):\n \"\"\" Normalize chunks to tuple of tuples\n\n This takes in a variety of input types and information and produces a full\n tuple-of-tuples result for chunks, suitable to be passed to Array or\n rechunk or any other operation that creates a Dask array.\n\n Parameters\n ----------\n chunks: tuple, int, dict, or string\n The chunks to be normalized. See examples below for more details\n shape: Tuple[int]\n The shape of the array\n limit: int (optional)\n The maximum block size to target in bytes,\n if freedom is given to choose\n dtype: np.dtype\n previous_chunks: Tuple[Tuple[int]] optional\n Chunks from a previous array that we should use for inspiration when\n rechunking auto dimensions. 
If not provided but auto-chunking exists\n then auto-dimensions will prefer square-like chunk shapes.\n\n Examples\n --------\n Specify uniform chunk sizes\n\n >>> normalize_chunks((2, 2), shape=(5, 6))\n ((2, 2, 1), (2, 2, 2))\n\n Also passes through fully explicit tuple-of-tuples\n\n >>> normalize_chunks(((2, 2, 1), (2, 2, 2)), shape=(5, 6))\n ((2, 2, 1), (2, 2, 2))\n\n Cleans up lists to tuples\n\n >>> normalize_chunks([[2, 2], [3, 3]])\n ((2, 2), (3, 3))\n\n Expands integer inputs 10 -> (10, 10)\n\n >>> normalize_chunks(10, shape=(30, 5))\n ((10, 10, 10), (5,))\n\n Expands dict inputs\n\n >>> normalize_chunks({0: 2, 1: 3}, shape=(6, 6))\n ((2, 2, 2), (3, 3))\n\n The values -1 and None get mapped to full size\n\n >>> normalize_chunks((5, -1), shape=(10, 10))\n ((5, 5), (10,))\n\n Use the value \"auto\" to automatically determine chunk sizes along certain\n dimensions. This uses the ``limit=`` and ``dtype=`` keywords to\n determine how large to make the chunks. The term \"auto\" can be used\n anywhere an integer can be used. See array chunking documentation for more\n information.\n\n >>> normalize_chunks((\"auto\",), shape=(20,), limit=5, dtype='uint8')\n ((5, 5, 5, 5),)\n\n You can also use byte sizes (see ``dask.utils.parse_bytes``) in place of\n \"auto\" to ask for a particular size\n\n >>> normalize_chunks(\"1kiB\", shape=(2000,), dtype='float32')\n ((250, 250, 250, 250, 250, 250, 250, 250),)\n\n Respects null dimensions\n\n >>> normalize_chunks((), shape=(0, 0))\n ((0,), (0,))\n \"\"\"\n if dtype and not isinstance(dtype, np.dtype):\n dtype = np.dtype(dtype)\n if chunks is None:\n raise ValueError(CHUNKS_NONE_ERROR_MESSAGE)\n if isinstance(chunks, list):\n chunks = tuple(chunks)\n if isinstance(chunks, (Number, str)):\n chunks = (chunks,) * len(shape)\n if isinstance(chunks, dict):\n chunks = tuple(chunks.get(i, None) for i in range(len(shape)))\n if isinstance(chunks, np.ndarray):\n chunks = chunks.tolist()\n if not chunks and shape and all(s == 0 for s in shape):\n chunks = ((0,),) * len(shape)\n\n if (\n shape\n and len(shape) == 1\n and len(chunks) > 1\n and all(isinstance(c, (Number, str)) for c in chunks)\n ):\n chunks = (chunks,)\n\n if shape and len(chunks) != len(shape):\n raise ValueError(\n \"Chunks and shape must be of the same length/dimension. 
\"\n \"Got chunks=%s, shape=%s\" % (chunks, shape)\n )\n if -1 in chunks or None in chunks:\n chunks = tuple(s if c == -1 or c is None else c for c, s in zip(chunks, shape))\n\n # If specifying chunk size in bytes, use that value to set the limit.\n # Verify there is only one consistent value of limit or chunk-bytes used.\n for c in chunks:\n if isinstance(c, str) and c != \"auto\":\n parsed = parse_bytes(c)\n if limit is None:\n limit = parsed\n elif parsed != limit:\n raise ValueError(\n \"Only one consistent value of limit or chunk is allowed.\"\n \"Used %s != %s\" % (parsed, limit)\n )\n # Substitute byte limits with 'auto' now that limit is set.\n chunks = tuple(\"auto\" if isinstance(c, str) and c != \"auto\" else c for c in chunks)\n\n if any(c == \"auto\" for c in chunks):\n chunks = auto_chunks(chunks, shape, limit, dtype, previous_chunks)\n\n if shape is not None:\n chunks = tuple(c if c not in {None, -1} else s for c, s in zip(chunks, shape))\n\n if chunks and shape is not None:\n chunks = sum(\n (\n blockdims_from_blockshape((s,), (c,))\n if not isinstance(c, (tuple, list))\n else (c,)\n for s, c in zip(shape, chunks)\n ),\n (),\n )\n for c in chunks:\n if not c:\n raise ValueError(\n \"Empty tuples are not allowed in chunks. Express \"\n \"zero length dimensions with 0(s) in chunks\"\n )\n\n if shape is not None:\n if len(chunks) != len(shape):\n raise ValueError(\n \"Input array has %d dimensions but the supplied \"\n \"chunks has only %d dimensions\" % (len(shape), len(chunks))\n )\n if not all(\n c == s or (math.isnan(c) or math.isnan(s))\n for c, s in zip(map(sum, chunks), shape)\n ):\n raise ValueError(\n \"Chunks do not add up to shape. \"\n \"Got chunks=%s, shape=%s\" % (chunks, shape)\n )\n\n return tuple(tuple(int(x) if not math.isnan(x) else x for x in c) for c in chunks)\n\n\ndef _compute_multiplier(limit: int, dtype, largest_block: int, result):\n \"\"\"\n Utility function for auto_chunk, to fin how much larger or smaller the ideal\n chunk size is relative to what we have now.\n \"\"\"\n return (\n limit\n / dtype.itemsize\n / largest_block\n / np.prod(list(r if r != 0 else 1 for r in result.values()))\n )\n\n\ndef auto_chunks(chunks, shape, limit, dtype, previous_chunks=None):\n \"\"\" Determine automatic chunks\n\n This takes in a chunks value that contains ``\"auto\"`` values in certain\n dimensions and replaces those values with concrete dimension sizes that try\n to get chunks to be of a certain size in bytes, provided by the ``limit=``\n keyword. 
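For instance (a small sketch of the heuristic, not an exhaustive specification), a single \"auto\" dimension with a 20-byte limit and a 1-byte dtype resolves to a block size of 20 elements; ``normalize_chunks`` then expands that into explicit chunk tuples:\n\n >>> import numpy as np\n >>> auto_chunks(('auto',), shape=(100,), limit=20, dtype=np.dtype('uint8'))\n (20,)\n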
If multiple dimensions are marked as ``\"auto\"`` then they will\n all respond to meet the desired byte limit, trying to respect the aspect\n ratio of their dimensions in ``previous_chunks=``, if given.\n\n Parameters\n ----------\n chunks: Tuple\n A tuple of either dimensions or tuples of explicit chunk dimensions\n Some entries should be \"auto\"\n shape: Tuple[int]\n limit: int, str\n The maximum allowable size of a chunk in bytes\n previous_chunks: Tuple[Tuple[int]]\n\n See also\n --------\n normalize_chunks: for full docstring and parameters\n \"\"\"\n if previous_chunks is not None:\n previous_chunks = tuple(\n c if isinstance(c, tuple) else (c,) for c in previous_chunks\n )\n chunks = list(chunks)\n\n autos = {i for i, c in enumerate(chunks) if c == \"auto\"}\n if not autos:\n return tuple(chunks)\n\n if limit is None:\n limit = config.get(\"array.chunk-size\")\n if isinstance(limit, str):\n limit = parse_bytes(limit)\n\n if dtype is None:\n raise TypeError(\"DType must be known for auto-chunking\")\n\n if dtype.hasobject:\n raise NotImplementedError(\n \"Can not use auto rechunking with object dtype. \"\n \"We are unable to estimate the size in bytes of object data\"\n )\n\n for x in tuple(chunks) + tuple(shape):\n if (\n isinstance(x, Number)\n and np.isnan(x)\n or isinstance(x, tuple)\n and np.isnan(x).any()\n ):\n raise ValueError(\n \"Can not perform automatic rechunking with unknown \"\n \"(nan) chunk sizes.%s\" % unknown_chunk_message\n )\n\n limit = max(1, limit)\n\n largest_block = np.prod(\n [cs if isinstance(cs, Number) else max(cs) for cs in chunks if cs != \"auto\"]\n )\n\n if previous_chunks:\n # Base ideal ratio on the median chunk size of the previous chunks\n result = {a: np.median(previous_chunks[a]) for a in autos}\n\n ideal_shape = []\n for i, s in enumerate(shape):\n chunk_frequencies = frequencies(previous_chunks[i])\n mode, count = max(chunk_frequencies.items(), key=lambda kv: kv[1])\n if mode > 1 and count >= len(previous_chunks[i]) / 2:\n ideal_shape.append(mode)\n else:\n ideal_shape.append(s)\n\n # How much larger or smaller the ideal chunk size is relative to what we have now\n multiplier = _compute_multiplier(limit, dtype, largest_block, result)\n\n last_multiplier = 0\n last_autos = set()\n while (\n multiplier != last_multiplier or autos != last_autos\n ): # while things change\n last_multiplier = multiplier # record previous values\n last_autos = set(autos) # record previous values\n\n # Expand or contract each of the dimensions appropriately\n for a in sorted(autos):\n if ideal_shape[a] == 0:\n result[a] = 0\n continue\n proposed = result[a] * multiplier ** (1 / len(autos))\n if proposed > shape[a]: # we've hit the shape boundary\n autos.remove(a)\n largest_block *= shape[a]\n chunks[a] = shape[a]\n del result[a]\n else:\n result[a] = round_to(proposed, ideal_shape[a])\n\n # recompute how much multiplier we have left, repeat\n multiplier = _compute_multiplier(limit, dtype, largest_block, result)\n\n for k, v in result.items():\n chunks[k] = v\n return tuple(chunks)\n\n else:\n size = (limit / dtype.itemsize / largest_block) ** (1 / len(autos))\n small = [i for i in autos if shape[i] < size]\n if small:\n for i in small:\n chunks[i] = (shape[i],)\n return auto_chunks(chunks, shape, limit, dtype)\n\n for i in autos:\n chunks[i] = round_to(size, shape[i])\n\n return tuple(chunks)\n\n\ndef round_to(c, s):\n \"\"\" Return a chunk dimension that is close to an even multiple or factor\n\n We want values for c that are nicely aligned with s.\n\n If c is 
smaller than s then we want the largest factor of s that is less than the\n desired chunk size, but not less than half, which is too much. If no such\n factor exists then we just go with the original chunk size and accept an\n uneven chunk at the end.\n\n If c is larger than s then we want the largest multiple of s that is still\n smaller than c.\n \"\"\"\n if c <= s:\n try:\n return max(f for f in factors(s) if c / 2 <= f <= c)\n except ValueError: # no matching factors within factor of two\n return max(1, int(c))\n else:\n return c // s * s\n\n\ndef _get_chunk_shape(a):\n s = np.asarray(a.shape, dtype=int)\n return s[len(s) * (None,) + (slice(None),)]\n\n\ndef from_array(\n x,\n chunks=\"auto\",\n name=None,\n lock=False,\n asarray=None,\n fancy=True,\n getitem=None,\n meta=None,\n):\n \"\"\" Create dask array from something that looks like an array\n\n Input must have a ``.shape``, ``.ndim``, ``.dtype`` and support numpy-style slicing.\n\n Parameters\n ----------\n x : array_like\n chunks : int, tuple\n How to chunk the array. Must be one of the following forms:\n\n - A blocksize like 1000.\n - A blockshape like (1000, 1000).\n - Explicit sizes of all blocks along all dimensions like\n ((1000, 1000, 500), (400, 400)).\n - A size in bytes, like \"100 MiB\" which will choose a uniform\n block-like shape\n - The word \"auto\" which acts like the above, but uses a configuration\n value ``array.chunk-size`` for the chunk size\n\n -1 or None as a blocksize indicate the size of the corresponding\n dimension.\n name : str, optional\n The key name to use for the array. Defaults to a hash of ``x``.\n By default, hash uses python's standard sha1. This behaviour can be\n changed by installing cityhash, xxhash or murmurhash. If installed,\n a large-factor speedup can be obtained in the tokenisation step.\n Use ``name=False`` to generate a random name instead of hashing (fast)\n\n .. note::\n\n Because this ``name`` is used as the key in task graphs, you should\n ensure that it uniquely identifies the data contained within. If\n you'd like to provide a descriptive name that is still unique, combine\n the descriptive name with :func:`dask.base.tokenize` of the\n ``array_like``. See :ref:`graphs` for more.\n\n lock : bool or Lock, optional\n If ``x`` doesn't support concurrent reads then provide a lock here, or\n pass in True to have dask.array create one for you.\n asarray : bool, optional\n If True then call np.asarray on chunks to convert them to numpy arrays.\n If False then chunks are passed through unchanged.\n If None (default) then we use True if the ``__array_function__`` method\n is undefined.\n fancy : bool, optional\n If ``x`` doesn't support fancy indexing (e.g. indexing with lists or\n arrays) then set to False. Default is True.\n meta : Array-like, optional\n The metadata for the resulting dask array. 
This is the kind of array\n that will result from slicing the input array.\n Defaults to the input array.\n\n Examples\n --------\n\n >>> x = h5py.File('...')['/data/path'] # doctest: +SKIP\n >>> a = da.from_array(x, chunks=(1000, 1000)) # doctest: +SKIP\n\n If your underlying datastore does not support concurrent reads then include\n the ``lock=True`` keyword argument or ``lock=mylock`` if you want multiple\n arrays to coordinate around the same lock.\n\n >>> a = da.from_array(x, chunks=(1000, 1000), lock=True) # doctest: +SKIP\n\n If your underlying datastore has a ``.chunks`` attribute (as h5py and zarr\n datasets do) then a multiple of that chunk shape will be used if you\n do not provide a chunk shape.\n\n >>> a = da.from_array(x, chunks='auto') # doctest: +SKIP\n >>> a = da.from_array(x, chunks='100 MiB') # doctest: +SKIP\n >>> a = da.from_array(x) # doctest: +SKIP\n\n If providing a name, ensure that it is unique\n\n >>> import dask.base\n >>> token = dask.base.tokenize(x) # doctest: +SKIP\n >>> a = da.from_array('myarray-' + token) # doctest: +SKIP\n \"\"\"\n if isinstance(x, Array):\n raise ValueError(\n \"Array is already a dask array. Use 'asarray' or \" \"'rechunk' instead.\"\n )\n elif is_dask_collection(x):\n warnings.warn(\n \"Passing an object to dask.array.from_array which is already a \"\n \"Dask collection. This can lead to unexpected behavior.\"\n )\n\n if isinstance(x, (list, tuple, memoryview) + np.ScalarType):\n x = np.array(x)\n\n if asarray is None:\n asarray = not hasattr(x, \"__array_function__\")\n\n previous_chunks = getattr(x, \"chunks\", None)\n\n chunks = normalize_chunks(\n chunks, x.shape, dtype=x.dtype, previous_chunks=previous_chunks\n )\n\n if name in (None, True):\n token = tokenize(x, chunks)\n original_name = \"array-original-\" + token\n name = name or \"array-\" + token\n elif name is False:\n original_name = name = \"array-\" + str(uuid.uuid1())\n else:\n original_name = name\n\n if lock is True:\n lock = SerializableLock()\n\n # Always use the getter for h5py etc. Not using isinstance(x, np.ndarray)\n # because np.matrix is a subclass of np.ndarray.\n if type(x) is np.ndarray and all(len(c) == 1 for c in chunks):\n # No slicing needed\n dsk = {(name,) + (0,) * x.ndim: x}\n else:\n if getitem is None:\n if type(x) is np.ndarray and not lock:\n # simpler and cleaner, but missing all the nuances of getter\n getitem = operator.getitem\n elif fancy:\n getitem = getter\n else:\n getitem = getter_nofancy\n\n dsk = getem(\n original_name,\n chunks,\n getitem=getitem,\n shape=x.shape,\n out_name=name,\n lock=lock,\n asarray=asarray,\n dtype=x.dtype,\n )\n dsk[original_name] = x\n\n # Workaround for TileDB, its indexing is 1-based,\n # and doesn't seems to support 0-length slicing\n if x.__class__.__module__.split(\".\")[0] == \"tiledb\" and hasattr(x, \"_ctx_\"):\n return Array(dsk, name, chunks, dtype=x.dtype)\n\n if meta is None:\n meta = x\n\n return Array(dsk, name, chunks, meta=meta, dtype=getattr(x, \"dtype\", None))\n\n\ndef from_zarr(\n url, component=None, storage_options=None, chunks=None, name=None, **kwargs\n):\n \"\"\"Load array from the zarr storage format\n\n See https://zarr.readthedocs.io for details about the format.\n\n Parameters\n ----------\n url: Zarr Array or str or MutableMapping\n Location of the data. A URL can include a protocol specifier like s3://\n for remote data. 
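For example (a sketch; the store locations below are hypothetical):\n\n >>> import dask.array as da # doctest: +SKIP\n >>> x = da.from_zarr('output/data.zarr') # doctest: +SKIP\n >>> y = da.from_zarr('s3://mybucket/data.zarr', storage_options={'anon': True}) # doctest: +SKIP\n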
Can also be any MutableMapping instance, which should\n be serializable if used in multiple processes.\n component: str or None\n If the location is a zarr group rather than an array, this is the\n subcomponent that should be loaded, something like ``'foo/bar'``.\n storage_options: dict\n Any additional parameters for the storage backend (ignored for local\n paths)\n chunks: tuple of ints or tuples of ints\n Passed to ``da.from_array``, allows setting the chunks on\n initialisation, if the chunking scheme in the on-disc dataset is not\n optimal for the calculations to follow.\n name : str, optional\n An optional keyname for the array. Defaults to hashing the input\n kwargs: passed to ``zarr.Array``.\n \"\"\"\n import zarr\n\n storage_options = storage_options or {}\n if isinstance(url, zarr.Array):\n z = url\n elif isinstance(url, str):\n from ..bytes.core import get_mapper\n\n mapper = get_mapper(url, **storage_options)\n z = zarr.Array(mapper, read_only=True, path=component, **kwargs)\n else:\n mapper = url\n z = zarr.Array(mapper, read_only=True, path=component, **kwargs)\n chunks = chunks if chunks is not None else z.chunks\n if name is None:\n name = \"from-zarr-\" + tokenize(z, component, storage_options, chunks, **kwargs)\n return from_array(z, chunks, name=name)\n\n\ndef to_zarr(\n arr,\n url,\n component=None,\n storage_options=None,\n overwrite=False,\n compute=True,\n return_stored=False,\n **kwargs,\n):\n \"\"\"Save array to the zarr storage format\n\n See https://zarr.readthedocs.io for details about the format.\n\n Parameters\n ----------\n arr: dask.array\n Data to store\n url: Zarr Array or str or MutableMapping\n Location of the data. A URL can include a protocol specifier like s3://\n for remote data. Can also be any MutableMapping instance, which should\n be serializable if used in multiple processes.\n component: str or None\n If the location is a zarr group rather than an array, this is the\n subcomponent that should be created/over-written.\n storage_options: dict\n Any additional parameters for the storage backend (ignored for local\n paths)\n overwrite: bool\n If given array already exists, overwrite=False will cause an error,\n where overwrite=True will replace the existing data. 
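For example (an illustrative sketch; ``'output/data.zarr'`` is a hypothetical local path):\n\n >>> import dask.array as da # doctest: +SKIP\n >>> x = da.ones((1000, 1000), chunks=(100, 100)) # doctest: +SKIP\n >>> x.to_zarr('output/data.zarr') # doctest: +SKIP\n >>> x.to_zarr('output/data.zarr', overwrite=True) # doctest: +SKIP\n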
Note that this\n check is done at computation time, not during graph creation.\n compute, return_stored: see ``store()``\n kwargs: passed to the ``zarr.create()`` function, e.g., compression options\n\n Raises\n ------\n ValueError\n If ``arr`` has unknown chunk sizes, which is not supported by Zarr.\n\n See Also\n --------\n dask.array.Array.compute_chunk_sizes\n\n \"\"\"\n import zarr\n\n if np.isnan(arr.shape).any():\n raise ValueError(\n \"Saving a dask array with unknown chunk sizes is not \"\n \"currently supported by Zarr.%s\" % unknown_chunk_message\n )\n\n if isinstance(url, zarr.Array):\n z = url\n if isinstance(z.store, (dict, zarr.DictStore)) and \"distributed\" in config.get(\n \"scheduler\", \"\"\n ):\n raise RuntimeError(\n \"Cannot store into in memory Zarr Array using \"\n \"the Distributed Scheduler.\"\n )\n arr = arr.rechunk(z.chunks)\n return arr.store(z, lock=False, compute=compute, return_stored=return_stored)\n\n if not _check_regular_chunks(arr.chunks):\n raise ValueError(\n \"Attempt to save array to zarr with irregular \"\n \"chunking, please call `arr.rechunk(...)` first.\"\n )\n\n storage_options = storage_options or {}\n\n if isinstance(url, str):\n from ..bytes.core import get_mapper\n\n mapper = get_mapper(url, **storage_options)\n else:\n # assume the object passed is already a mapper\n mapper = url\n\n chunks = [c[0] for c in arr.chunks]\n\n # The zarr.create function has the side-effect of immediately\n # creating metadata on disk. This may not be desired,\n # particularly if compute=False. The caller may be creating many\n # arrays on a slow filesystem, with the desire that any I/O be\n # sharded across workers (not done serially on the originating\n # machine). Or the caller may decide later to not to do this\n # computation, and so nothing should be written to disk.\n z = delayed(zarr.create)(\n shape=arr.shape,\n chunks=chunks,\n dtype=arr.dtype,\n store=mapper,\n path=component,\n overwrite=overwrite,\n **kwargs,\n )\n return arr.store(z, lock=False, compute=compute, return_stored=return_stored)\n\n\ndef _check_regular_chunks(chunkset):\n \"\"\"Check if the chunks are regular\n\n \"Regular\" in this context means that along every axis, the chunks all\n have the same size, except the last one, which may be smaller\n\n Parameters\n ----------\n chunkset: tuple of tuples of ints\n From the ``.chunks`` attribute of an ``Array``\n\n Returns\n -------\n True if chunkset passes, else False\n\n Examples\n --------\n >>> import dask.array as da\n >>> arr = da.zeros(10, chunks=(5, ))\n >>> _check_regular_chunks(arr.chunks)\n True\n\n >>> arr = da.zeros(10, chunks=((3, 3, 3, 1), ))\n >>> _check_regular_chunks(arr.chunks)\n True\n\n >>> arr = da.zeros(10, chunks=((3, 1, 3, 3), ))\n >>> _check_regular_chunks(arr.chunks)\n False\n \"\"\"\n for chunks in chunkset:\n if len(chunks) == 1:\n continue\n if len(set(chunks[:-1])) > 1:\n return False\n if chunks[-1] > chunks[0]:\n return False\n return True\n\n\ndef from_delayed(value, shape, dtype=None, meta=None, name=None):\n \"\"\" Create a dask array from a dask delayed value\n\n This routine is useful for constructing dask arrays in an ad-hoc fashion\n using dask delayed, particularly when combined with stack and concatenate.\n\n The dask array will consist of a single chunk.\n\n Examples\n --------\n >>> import dask\n >>> import dask.array as da\n >>> value = dask.delayed(np.ones)(5)\n >>> array = da.from_delayed(value, (5,), dtype=float)\n >>> array\n dask.array<from-value, shape=(5,), dtype=float64, 
chunksize=(5,), chunktype=numpy.ndarray>\n >>> array.compute()\n array([1., 1., 1., 1., 1.])\n \"\"\"\n from ..delayed import delayed, Delayed\n\n if not isinstance(value, Delayed) and hasattr(value, \"key\"):\n value = delayed(value)\n\n name = name or \"from-value-\" + tokenize(value, shape, dtype, meta)\n dsk = {(name,) + (0,) * len(shape): value.key}\n chunks = tuple((d,) for d in shape)\n # TODO: value._key may not be the name of the layer in value.dask\n # This should be fixed after we build full expression graphs\n graph = HighLevelGraph.from_collections(name, dsk, dependencies=[value])\n return Array(graph, name, chunks, dtype=dtype, meta=meta)\n\n\ndef from_func(func, shape, dtype=None, name=None, args=(), kwargs={}):\n \"\"\" Create dask array in a single block by calling a function\n\n Calling the provided function with func(*args, **kwargs) should return a\n NumPy array of the indicated shape and dtype.\n\n Examples\n --------\n\n >>> a = from_func(np.arange, (3,), dtype='i8', args=(3,))\n >>> a.compute()\n array([0, 1, 2])\n\n This works particularly well when coupled with dask.array functions like\n concatenate and stack:\n\n >>> arrays = [from_func(np.array, (), dtype='i8', args=(n,)) for n in range(5)]\n >>> stack(arrays).compute()\n array([0, 1, 2, 3, 4])\n \"\"\"\n name = name or \"from_func-\" + tokenize(func, shape, dtype, args, kwargs)\n if args or kwargs:\n func = partial(func, *args, **kwargs)\n dsk = {(name,) + (0,) * len(shape): (func,)}\n chunks = tuple((i,) for i in shape)\n return Array(dsk, name, chunks, dtype)\n\n\ndef common_blockdim(blockdims):\n \"\"\" Find the common block dimensions from the list of block dimensions\n\n Currently only implements the simplest possible heuristic: the common\n block-dimension is the only one that does not span fully span a dimension.\n This is a conservative choice that allows us to avoid potentially very\n expensive rechunking.\n\n Assumes that each element of the input block dimensions has all the same\n sum (i.e., that they correspond to dimensions of the same size).\n\n Examples\n --------\n >>> common_blockdim([(3,), (2, 1)])\n (2, 1)\n >>> common_blockdim([(1, 2), (2, 1)])\n (1, 1, 1)\n >>> common_blockdim([(2, 2), (3, 1)]) # doctest: +SKIP\n Traceback (most recent call last):\n ...\n ValueError: Chunks do not align\n \"\"\"\n if not any(blockdims):\n return ()\n non_trivial_dims = set([d for d in blockdims if len(d) > 1])\n if len(non_trivial_dims) == 1:\n return first(non_trivial_dims)\n if len(non_trivial_dims) == 0:\n return max(blockdims, key=first)\n\n if np.isnan(sum(map(sum, blockdims))):\n raise ValueError(\n \"Arrays chunk sizes (%s) are unknown.\\n\\n\"\n \"A possible solution:\\n\"\n \" x.compute_chunk_sizes()\" % blockdims\n )\n\n if len(set(map(sum, non_trivial_dims))) > 1:\n raise ValueError(\"Chunks do not add up to same value\", blockdims)\n\n # We have multiple non-trivial chunks on this axis\n # e.g. (5, 2) and (4, 3)\n\n # We create a single chunk tuple with the same total length\n # that evenly divides both, e.g. (4, 1, 2)\n\n # To accomplish this we walk down all chunk tuples together, finding the\n # smallest element, adding it to the output, and subtracting it from all\n # other elements and remove the element itself. 
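# Worked sketch for (5, 2) and (4, 3): take min(5, 4) = 4 -> out = [4],\n # leaving (1, 2) and (3,); take min(1, 3) = 1 -> out = [4, 1], leaving\n # (2,) and (2,); take min(2, 2) = 2 -> out = [4, 1, 2], both tuples are\n # exhausted, reproducing the (4, 1, 2) example above.\n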
We stop once we have\n # burned through all of the chunk tuples.\n # For efficiency's sake we reverse the lists so that we can pop off the end\n rchunks = [list(ntd)[::-1] for ntd in non_trivial_dims]\n total = sum(first(non_trivial_dims))\n i = 0\n\n out = []\n while i < total:\n m = min(c[-1] for c in rchunks)\n out.append(m)\n for c in rchunks:\n c[-1] -= m\n if c[-1] == 0:\n c.pop()\n i += m\n\n return tuple(out)\n\n\ndef unify_chunks(*args, **kwargs):\n \"\"\"\n Unify chunks across a sequence of arrays\n\n This utility function is used within other common operations like\n ``map_blocks`` and ``blockwise``. It is not commonly used by end-users\n directly.\n\n Parameters\n ----------\n *args: sequence of Array, index pairs\n Sequence like (x, 'ij', y, 'jk', z, 'i')\n\n Examples\n --------\n >>> import dask.array as da\n >>> x = da.ones(10, chunks=((5, 2, 3),))\n >>> y = da.ones(10, chunks=((2, 3, 5),))\n >>> chunkss, arrays = unify_chunks(x, 'i', y, 'i')\n >>> chunkss\n {'i': (2, 3, 2, 3)}\n\n >>> x = da.ones((100, 10), chunks=(20, 5))\n >>> y = da.ones((10, 100), chunks=(4, 50))\n >>> chunkss, arrays = unify_chunks(x, 'ij', y, 'jk', 'constant', None)\n >>> chunkss # doctest: +SKIP\n {'k': (50, 50), 'i': (20, 20, 20, 20, 20), 'j': (4, 1, 3, 2)}\n\n >>> unify_chunks(0, None)\n ({}, [0])\n\n Returns\n -------\n chunkss : dict\n Map like {index: chunks}.\n arrays : list\n List of rechunked arrays.\n\n See Also\n --------\n common_blockdim\n \"\"\"\n if not args:\n return {}, []\n\n arginds = [\n (asanyarray(a) if ind is not None else a, ind) for a, ind in partition(2, args)\n ] # [x, ij, y, jk]\n args = list(concat(arginds)) # [(x, ij), (y, jk)]\n warn = kwargs.get(\"warn\", True)\n\n arrays, inds = zip(*arginds)\n if all(ind is None for ind in inds):\n return {}, list(arrays)\n if all(ind == inds[0] for ind in inds) and all(\n a.chunks == arrays[0].chunks for a in arrays\n ):\n return dict(zip(inds[0], arrays[0].chunks)), arrays\n\n nameinds = []\n blockdim_dict = dict()\n max_parts = 0\n for a, ind in arginds:\n if ind is not None:\n nameinds.append((a.name, ind))\n blockdim_dict[a.name] = a.chunks\n max_parts = max(max_parts, a.npartitions)\n else:\n nameinds.append((a, ind))\n\n chunkss = broadcast_dimensions(nameinds, blockdim_dict, consolidate=common_blockdim)\n nparts = np.prod(list(map(len, chunkss.values())))\n\n if warn and nparts and nparts >= max_parts * 10:\n warnings.warn(\n \"Increasing number of chunks by factor of %d\" % (nparts / max_parts),\n PerformanceWarning,\n stacklevel=3,\n )\n\n arrays = []\n for a, i in arginds:\n if i is None:\n arrays.append(a)\n else:\n chunks = tuple(\n chunkss[j]\n if a.shape[n] > 1\n else a.shape[n]\n if not np.isnan(sum(chunkss[j]))\n else None\n for n, j in enumerate(i)\n )\n if chunks != a.chunks and all(a.chunks):\n arrays.append(a.rechunk(chunks))\n else:\n arrays.append(a)\n return chunkss, arrays\n\n\ndef unpack_singleton(x):\n \"\"\"\n\n >>> unpack_singleton([[[[1]]]])\n 1\n >>> unpack_singleton(np.array(np.datetime64('2000-01-01')))\n array('2000-01-01', dtype='datetime64[D]')\n \"\"\"\n while isinstance(x, (list, tuple)):\n try:\n x = x[0]\n except (IndexError, TypeError, KeyError):\n break\n return x\n\n\ndef block(arrays, allow_unknown_chunksizes=False):\n \"\"\"\n Assemble an nd-array from nested lists of blocks.\n\n Blocks in the innermost lists are concatenated along the last\n dimension (-1), then these are concatenated along the second-last\n dimension (-2), and so on until the outermost list is reached\n\n Blocks can be of 
any dimension, but will not be broadcasted using the normal\n rules. Instead, leading axes of size 1 are inserted, to make ``block.ndim``\n the same for all blocks. This is primarily useful for working with scalars,\n and means that code like ``block([v, 1])`` is valid, where\n ``v.ndim == 1``.\n\n When the nested list is two levels deep, this allows block matrices to be\n constructed from their components.\n\n Parameters\n ----------\n arrays : nested list of array_like or scalars (but not tuples)\n If passed a single ndarray or scalar (a nested list of depth 0), this\n is returned unmodified (and not copied).\n\n Elements shapes must match along the appropriate axes (without\n broadcasting), but leading 1s will be prepended to the shape as\n necessary to make the dimensions match.\n\n allow_unknown_chunksizes: bool\n Allow unknown chunksizes, such as come from converting from dask\n dataframes. Dask.array is unable to verify that chunks line up. If\n data comes from differently aligned sources then this can cause\n unexpected results.\n\n Returns\n -------\n block_array : ndarray\n The array assembled from the given blocks.\n\n The dimensionality of the output is equal to the greatest of:\n * the dimensionality of all the inputs\n * the depth to which the input list is nested\n\n Raises\n ------\n ValueError\n * If list depths are mismatched - for instance, ``[[a, b], c]`` is\n illegal, and should be spelt ``[[a, b], [c]]``\n * If lists are empty - for instance, ``[[a, b], []]``\n\n See Also\n --------\n concatenate : Join a sequence of arrays together.\n stack : Stack arrays in sequence along a new dimension.\n hstack : Stack arrays in sequence horizontally (column wise).\n vstack : Stack arrays in sequence vertically (row wise).\n dstack : Stack arrays in sequence depth wise (along third dimension).\n vsplit : Split array into a list of multiple sub-arrays vertically.\n\n Notes\n -----\n\n When called with only scalars, ``block`` is equivalent to an ndarray\n call. 
So ``block([[1, 2], [3, 4]])`` is equivalent to\n ``array([[1, 2], [3, 4]])``.\n\n This function does not enforce that the blocks lie on a fixed grid.\n ``block([[a, b], [c, d]])`` is not restricted to arrays of the form::\n\n AAAbb\n AAAbb\n cccDD\n\n But is also allowed to produce, for some ``a, b, c, d``::\n\n AAAbb\n AAAbb\n cDDDD\n\n Since concatenation happens along the last axis first, `block` is _not_\n capable of producing the following directly::\n\n AAAbb\n cccbb\n cccDD\n\n Matlab's \"square bracket stacking\", ``[A, B, ...; p, q, ...]``, is\n equivalent to ``block([[A, B, ...], [p, q, ...]])``.\n \"\"\"\n\n # This was copied almost verbatim from numpy.core.shape_base.block\n # See numpy license at https://github.com/numpy/numpy/blob/master/LICENSE.txt\n # or NUMPY_LICENSE.txt within this directory\n\n def atleast_nd(x, ndim):\n x = asanyarray(x)\n diff = max(ndim - x.ndim, 0)\n if diff == 0:\n return x\n else:\n return x[(None,) * diff + (Ellipsis,)]\n\n def format_index(index):\n return \"arrays\" + \"\".join(\"[{}]\".format(i) for i in index)\n\n rec = _Recurser(recurse_if=lambda x: type(x) is list)\n\n # ensure that the lists are all matched in depth\n list_ndim = None\n any_empty = False\n for index, value, entering in rec.walk(arrays):\n if type(value) is tuple:\n # not strictly necessary, but saves us from:\n # - more than one way to do things - no point treating tuples like\n # lists\n # - horribly confusing behaviour that results when tuples are\n # treated like ndarray\n raise TypeError(\n \"{} is a tuple. \"\n \"Only lists can be used to arrange blocks, and np.block does \"\n \"not allow implicit conversion from tuple to ndarray.\".format(\n format_index(index)\n )\n )\n if not entering:\n curr_depth = len(index)\n elif len(value) == 0:\n curr_depth = len(index) + 1\n any_empty = True\n else:\n continue\n\n if list_ndim is not None and list_ndim != curr_depth:\n raise ValueError(\n \"List depths are mismatched. First element was at depth {}, \"\n \"but there is an element at depth {} ({})\".format(\n list_ndim, curr_depth, format_index(index)\n )\n )\n list_ndim = curr_depth\n\n # do this here so we catch depth mismatches first\n if any_empty:\n raise ValueError(\"Lists cannot be empty\")\n\n # convert all the arrays to ndarrays\n arrays = rec.map_reduce(arrays, f_map=asanyarray, f_reduce=list)\n\n # determine the maximum dimension of the elements\n elem_ndim = rec.map_reduce(arrays, f_map=lambda xi: xi.ndim, f_reduce=max)\n ndim = max(list_ndim, elem_ndim)\n\n # first axis to concatenate along\n first_axis = ndim - list_ndim\n\n # Make all the elements the same dimension\n arrays = rec.map_reduce(\n arrays, f_map=lambda xi: atleast_nd(xi, ndim), f_reduce=list\n )\n\n # concatenate innermost lists on the right, outermost on the left\n return rec.map_reduce(\n arrays,\n f_reduce=lambda xs, axis: concatenate(\n list(xs), axis=axis, allow_unknown_chunksizes=allow_unknown_chunksizes\n ),\n f_kwargs=lambda axis: dict(axis=(axis + 1)),\n axis=first_axis,\n )\n\n\ndef concatenate(seq, axis=0, allow_unknown_chunksizes=False):\n \"\"\"\n Concatenate arrays along an existing axis\n\n Given a sequence of dask Arrays form a new dask Array by stacking them\n along an existing dimension (axis=0 by default)\n\n Parameters\n ----------\n seq: list of dask.arrays\n axis: int\n Dimension along which to align all of the arrays\n allow_unknown_chunksizes: bool\n Allow unknown chunksizes, such as come from converting from dask\n dataframes. 
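For example (an illustrative sketch; assumes pandas and ``dask.dataframe`` are installed, and relies on ``ddf.values`` producing an array with unknown row-chunk sizes):\n\n >>> import pandas as pd # doctest: +SKIP\n >>> import dask.dataframe as dd # doctest: +SKIP\n >>> import dask.array as da # doctest: +SKIP\n >>> ddf = dd.from_pandas(pd.DataFrame({'a': range(10)}), npartitions=2) # doctest: +SKIP\n >>> arr = ddf.values # doctest: +SKIP\n >>> x = da.concatenate([arr, arr], axis=0, allow_unknown_chunksizes=True) # doctest: +SKIP\n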
Dask.array is unable to verify that chunks line up. If\n data comes from differently aligned sources then this can cause\n unexpected results.\n\n Examples\n --------\n\n Create slices\n\n >>> import dask.array as da\n >>> import numpy as np\n\n >>> data = [from_array(np.ones((4, 4)), chunks=(2, 2))\n ... for i in range(3)]\n\n >>> x = da.concatenate(data, axis=0)\n >>> x.shape\n (12, 4)\n\n >>> da.concatenate(data, axis=1).shape\n (4, 12)\n\n Result is a new dask Array\n\n See Also\n --------\n stack\n \"\"\"\n from . import wrap\n\n seq = [asarray(a) for a in seq]\n\n if not seq:\n raise ValueError(\"Need array(s) to concatenate\")\n\n seq_metas = [meta_from_array(s) for s in seq]\n _concatenate = concatenate_lookup.dispatch(\n type(max(seq_metas, key=lambda x: getattr(x, \"__array_priority__\", 0)))\n )\n meta = _concatenate(seq_metas, axis=axis)\n\n # Promote types to match meta\n seq = [a.astype(meta.dtype) for a in seq]\n\n # Find output array shape\n ndim = len(seq[0].shape)\n shape = tuple(\n sum((a.shape[i] for a in seq)) if i == axis else seq[0].shape[i]\n for i in range(ndim)\n )\n\n # Drop empty arrays\n seq2 = [a for a in seq if a.size]\n if not seq2:\n seq2 = seq\n\n if axis < 0:\n axis = ndim + axis\n if axis >= ndim:\n msg = (\n \"Axis must be less than than number of dimensions\"\n \"\\nData has %d dimensions, but got axis=%d\"\n )\n raise ValueError(msg % (ndim, axis))\n\n n = len(seq2)\n if n == 0:\n try:\n return wrap.empty_like(meta, shape=shape, chunks=shape, dtype=meta.dtype)\n except TypeError:\n return wrap.empty(shape, chunks=shape, dtype=meta.dtype)\n elif n == 1:\n return seq2[0]\n\n if not allow_unknown_chunksizes and not all(\n i == axis or all(x.shape[i] == seq2[0].shape[i] for x in seq2)\n for i in range(ndim)\n ):\n if any(map(np.isnan, seq2[0].shape)):\n raise ValueError(\n \"Tried to concatenate arrays with unknown\"\n \" shape %s.\\n\\nTwo solutions:\\n\"\n \" 1. Force concatenation pass\"\n \" allow_unknown_chunksizes=True.\\n\"\n \" 2. 
Compute shapes with \"\n \"[x.compute_chunk_sizes() for x in seq]\" % str(seq2[0].shape)\n )\n raise ValueError(\"Shapes do not align: %s\", [x.shape for x in seq2])\n\n inds = [list(range(ndim)) for i in range(n)]\n for i, ind in enumerate(inds):\n ind[axis] = -(i + 1)\n\n uc_args = list(concat(zip(seq2, inds)))\n _, seq2 = unify_chunks(*uc_args, warn=False)\n\n bds = [a.chunks for a in seq2]\n\n chunks = (\n seq2[0].chunks[:axis]\n + (sum([bd[axis] for bd in bds], ()),)\n + seq2[0].chunks[axis + 1 :]\n )\n\n cum_dims = [0] + list(accumulate(add, [len(a.chunks[axis]) for a in seq2]))\n\n names = [a.name for a in seq2]\n\n name = \"concatenate-\" + tokenize(names, axis)\n keys = list(product([name], *[range(len(bd)) for bd in chunks]))\n\n values = [\n (names[bisect(cum_dims, key[axis + 1]) - 1],)\n + key[1 : axis + 1]\n + (key[axis + 1] - cum_dims[bisect(cum_dims, key[axis + 1]) - 1],)\n + key[axis + 2 :]\n for key in keys\n ]\n\n dsk = dict(zip(keys, values))\n graph = HighLevelGraph.from_collections(name, dsk, dependencies=seq2)\n\n return Array(graph, name, chunks, meta=meta)\n\n\ndef load_store_chunk(x, out, index, lock, return_stored, load_stored):\n \"\"\"\n A function inserted in a Dask graph for storing a chunk.\n\n Parameters\n ----------\n x: array-like\n An array (potentially a NumPy one)\n out: array-like\n Where to store results too.\n index: slice-like\n Where to store result from ``x`` in ``out``.\n lock: Lock-like or False\n Lock to use before writing to ``out``.\n return_stored: bool\n Whether to return ``out``.\n load_stored: bool\n Whether to return the array stored in ``out``.\n Ignored if ``return_stored`` is not ``True``.\n\n Examples\n --------\n\n >>> a = np.ones((5, 6))\n >>> b = np.empty(a.shape)\n >>> load_store_chunk(a, b, (slice(None), slice(None)), False, False, False)\n \"\"\"\n\n result = None\n if return_stored and not load_stored:\n result = out\n\n if lock:\n lock.acquire()\n try:\n if x is not None:\n out[index] = np.asanyarray(x)\n if return_stored and load_stored:\n result = out[index]\n finally:\n if lock:\n lock.release()\n\n return result\n\n\ndef store_chunk(x, out, index, lock, return_stored):\n return load_store_chunk(x, out, index, lock, return_stored, False)\n\n\ndef load_chunk(out, index, lock):\n return load_store_chunk(None, out, index, lock, True, True)\n\n\ndef insert_to_ooc(\n arr, out, lock=True, region=None, return_stored=False, load_stored=False, tok=None\n):\n \"\"\"\n Creates a Dask graph for storing chunks from ``arr`` in ``out``.\n\n Parameters\n ----------\n arr: da.Array\n A dask array\n out: array-like\n Where to store results too.\n lock: Lock-like or bool, optional\n Whether to lock or with what (default is ``True``,\n which means a ``threading.Lock`` instance).\n region: slice-like, optional\n Where in ``out`` to store ``arr``'s results\n (default is ``None``, meaning all of ``out``).\n return_stored: bool, optional\n Whether to return ``out``\n (default is ``False``, meaning ``None`` is returned).\n load_stored: bool, optional\n Whether to handling loading from ``out`` at the same time.\n Ignored if ``return_stored`` is not ``True``.\n (default is ``False``, meaning defer to ``return_stored``).\n tok: str, optional\n Token to use when naming keys\n\n Examples\n --------\n >>> import dask.array as da\n >>> d = da.ones((5, 6), chunks=(2, 3))\n >>> a = np.empty(d.shape)\n >>> insert_to_ooc(d, a) # doctest: +SKIP\n \"\"\"\n\n if lock is True:\n lock = Lock()\n\n slices = slices_from_chunks(arr.chunks)\n if region:\n slices = 
[fuse_slice(region, slc) for slc in slices]\n\n name = \"store-%s\" % (tok or str(uuid.uuid1()))\n func = store_chunk\n args = ()\n if return_stored and load_stored:\n name = \"load-%s\" % name\n func = load_store_chunk\n args = args + (load_stored,)\n\n dsk = {\n (name,) + t[1:]: (func, t, out, slc, lock, return_stored) + args\n for t, slc in zip(core.flatten(arr.__dask_keys__()), slices)\n }\n\n return dsk\n\n\ndef retrieve_from_ooc(keys, dsk_pre, dsk_post=None):\n \"\"\"\n Creates a Dask graph for loading stored ``keys`` from ``dsk``.\n\n Parameters\n ----------\n keys: Sequence\n A sequence containing Dask graph keys to load\n dsk_pre: Mapping\n A Dask graph corresponding to a Dask Array before computation\n dsk_post: Mapping, optional\n A Dask graph corresponding to a Dask Array after computation\n\n Examples\n --------\n >>> import dask.array as da\n >>> d = da.ones((5, 6), chunks=(2, 3))\n >>> a = np.empty(d.shape)\n >>> g = insert_to_ooc(d, a)\n >>> retrieve_from_ooc(g.keys(), g) # doctest: +SKIP\n \"\"\"\n\n if not dsk_post:\n dsk_post = {k: k for k in keys}\n\n load_dsk = {\n (\"load-\" + k[0],) + k[1:]: (load_chunk, dsk_post[k]) + dsk_pre[k][3:-1]\n for k in keys\n }\n\n return load_dsk\n\n\ndef asarray(a, **kwargs):\n \"\"\"Convert the input to a dask array.\n\n Parameters\n ----------\n a : array-like\n Input data, in any form that can be converted to a dask array.\n\n Returns\n -------\n out : dask array\n Dask array interpretation of a.\n\n Examples\n --------\n >>> import dask.array as da\n >>> import numpy as np\n >>> x = np.arange(3)\n >>> da.asarray(x)\n dask.array<array, shape=(3,), dtype=int64, chunksize=(3,), chunktype=numpy.ndarray>\n\n >>> y = [[1, 2, 3], [4, 5, 6]]\n >>> da.asarray(y)\n dask.array<array, shape=(2, 3), dtype=int64, chunksize=(2, 3), chunktype=numpy.ndarray>\n \"\"\"\n if isinstance(a, Array):\n return a\n elif hasattr(a, \"to_dask_array\"):\n return a.to_dask_array()\n elif type(a).__module__.startswith(\"xarray.\") and hasattr(a, \"data\"):\n return asarray(a.data)\n elif isinstance(a, (list, tuple)) and any(isinstance(i, Array) for i in a):\n return stack(a)\n elif not isinstance(getattr(a, \"shape\", None), Iterable):\n a = np.asarray(a)\n return from_array(a, getitem=getter_inline, **kwargs)\n\n\ndef asanyarray(a):\n \"\"\"Convert the input to a dask array.\n\n Subclasses of ``np.ndarray`` will be passed through as chunks unchanged.\n\n Parameters\n ----------\n a : array-like\n Input data, in any form that can be converted to a dask array.\n\n Returns\n -------\n out : dask array\n Dask array interpretation of a.\n\n Examples\n --------\n >>> import dask.array as da\n >>> import numpy as np\n >>> x = np.arange(3)\n >>> da.asanyarray(x)\n dask.array<array, shape=(3,), dtype=int64, chunksize=(3,), chunktype=numpy.ndarray>\n\n >>> y = [[1, 2, 3], [4, 5, 6]]\n >>> da.asanyarray(y)\n dask.array<array, shape=(2, 3), dtype=int64, chunksize=(2, 3), chunktype=numpy.ndarray>\n \"\"\"\n if isinstance(a, Array):\n return a\n elif hasattr(a, \"to_dask_array\"):\n return a.to_dask_array()\n elif type(a).__module__.startswith(\"xarray.\") and hasattr(a, \"data\"):\n return asanyarray(a.data)\n elif isinstance(a, (list, tuple)) and any(isinstance(i, Array) for i in a):\n a = stack(a)\n elif not isinstance(getattr(a, \"shape\", None), Iterable):\n a = np.asanyarray(a)\n return from_array(a, chunks=a.shape, getitem=getter_inline, asarray=False)\n\n\ndef is_scalar_for_elemwise(arg):\n \"\"\"\n\n >>> is_scalar_for_elemwise(42)\n True\n >>> 
is_scalar_for_elemwise('foo')\n True\n >>> is_scalar_for_elemwise(True)\n True\n >>> is_scalar_for_elemwise(np.array(42))\n True\n >>> is_scalar_for_elemwise([1, 2, 3])\n True\n >>> is_scalar_for_elemwise(np.array([1, 2, 3]))\n False\n >>> is_scalar_for_elemwise(from_array(np.array(0), chunks=()))\n False\n >>> is_scalar_for_elemwise(np.dtype('i4'))\n True\n \"\"\"\n # the second half of shape_condition is essentially just to ensure that\n # dask series / frame are treated as scalars in elemwise.\n maybe_shape = getattr(arg, \"shape\", None)\n shape_condition = not isinstance(maybe_shape, Iterable) or any(\n is_dask_collection(x) for x in maybe_shape\n )\n\n return (\n np.isscalar(arg)\n or shape_condition\n or isinstance(arg, np.dtype)\n or (isinstance(arg, np.ndarray) and arg.ndim == 0)\n )\n\n\ndef broadcast_shapes(*shapes):\n \"\"\"\n Determines output shape from broadcasting arrays.\n\n Parameters\n ----------\n shapes : tuples\n The shapes of the arguments.\n\n Returns\n -------\n output_shape : tuple\n\n Raises\n ------\n ValueError\n If the input shapes cannot be successfully broadcast together.\n \"\"\"\n if len(shapes) == 1:\n return shapes[0]\n out = []\n for sizes in zip_longest(*map(reversed, shapes), fillvalue=-1):\n if np.isnan(sizes).any():\n dim = np.nan\n else:\n dim = 0 if 0 in sizes else np.max(sizes)\n if any(i not in [-1, 0, 1, dim] and not np.isnan(i) for i in sizes):\n raise ValueError(\n \"operands could not be broadcast together with \"\n \"shapes {0}\".format(\" \".join(map(str, shapes)))\n )\n out.append(dim)\n return tuple(reversed(out))\n\n\ndef elemwise(op, *args, **kwargs):\n \"\"\" Apply elementwise function across arguments\n\n Respects broadcasting rules\n\n Examples\n --------\n >>> elemwise(add, x, y) # doctest: +SKIP\n >>> elemwise(sin, x) # doctest: +SKIP\n\n See Also\n --------\n blockwise\n \"\"\"\n out = kwargs.pop(\"out\", None)\n if not set([\"name\", \"dtype\"]).issuperset(kwargs):\n msg = \"%s does not take the following keyword arguments %s\"\n raise TypeError(\n msg % (op.__name__, str(sorted(set(kwargs) - set([\"name\", \"dtype\"]))))\n )\n\n args = [np.asarray(a) if isinstance(a, (list, tuple)) else a for a in args]\n\n shapes = []\n for arg in args:\n shape = getattr(arg, \"shape\", ())\n if any(is_dask_collection(x) for x in shape):\n # Want to excluded Delayed shapes and dd.Scalar\n shape = ()\n shapes.append(shape)\n\n shapes = [s if isinstance(s, Iterable) else () for s in shapes]\n out_ndim = len(\n broadcast_shapes(*shapes)\n ) # Raises ValueError if dimensions mismatch\n expr_inds = tuple(range(out_ndim))[::-1]\n\n need_enforce_dtype = False\n if \"dtype\" in kwargs:\n dt = kwargs[\"dtype\"]\n else:\n # We follow NumPy's rules for dtype promotion, which special cases\n # scalars and 0d ndarrays (which it considers equivalent) by using\n # their values to compute the result dtype:\n # https://github.com/numpy/numpy/issues/6240\n # We don't inspect the values of 0d dask arrays, because these could\n # hold potentially very expensive calculations. 
Instead, we treat\n # them just like other arrays, and if necessary cast the result of op\n # to match.\n vals = [\n np.empty((1,) * max(1, a.ndim), dtype=a.dtype)\n if not is_scalar_for_elemwise(a)\n else a\n for a in args\n ]\n try:\n dt = apply_infer_dtype(op, vals, {}, \"elemwise\", suggest_dtype=False)\n except Exception:\n return NotImplemented\n need_enforce_dtype = any(\n not is_scalar_for_elemwise(a) and a.ndim == 0 for a in args\n )\n\n name = kwargs.get(\"name\", None) or \"%s-%s\" % (funcname(op), tokenize(op, dt, *args))\n\n blockwise_kwargs = dict(dtype=dt, name=name, token=funcname(op).strip(\"_\"))\n if need_enforce_dtype:\n blockwise_kwargs[\"enforce_dtype\"] = dt\n blockwise_kwargs[\"enforce_dtype_function\"] = op\n op = _enforce_dtype\n result = blockwise(\n op,\n expr_inds,\n *concat(\n (a, tuple(range(a.ndim)[::-1]) if not is_scalar_for_elemwise(a) else None)\n for a in args\n ),\n **blockwise_kwargs,\n )\n\n return handle_out(out, result)\n\n\ndef handle_out(out, result):\n \"\"\" Handle out parameters\n\n If out is a dask.array then this overwrites the contents of that array with\n the result\n \"\"\"\n if isinstance(out, tuple):\n if len(out) == 1:\n out = out[0]\n elif len(out) > 1:\n raise NotImplementedError(\"The out parameter is not fully supported\")\n else:\n out = None\n if isinstance(out, Array):\n if out.shape != result.shape:\n raise ValueError(\n \"Mismatched shapes between result and out parameter. \"\n \"out=%s, result=%s\" % (str(out.shape), str(result.shape))\n )\n out._chunks = result.chunks\n out.dask = result.dask\n out._meta = result._meta\n out.name = result.name\n elif out is not None:\n msg = (\n \"The out parameter is not fully supported.\"\n \" Received type %s, expected Dask Array\" % type(out).__name__\n )\n raise NotImplementedError(msg)\n else:\n return result\n\n\ndef _enforce_dtype(*args, **kwargs):\n \"\"\"Calls a function and converts its result to the given dtype.\n\n The parameters have deliberately been given unwieldy names to avoid\n clashes with keyword arguments consumed by blockwise\n\n A dtype of `object` is treated as a special case and not enforced,\n because it is used as a dummy value in some places when the result will\n not be a block in an Array.\n\n Parameters\n ----------\n enforce_dtype : dtype\n Result dtype\n enforce_dtype_function : callable\n The wrapped function, which will be passed the remaining arguments\n \"\"\"\n dtype = kwargs.pop(\"enforce_dtype\")\n function = kwargs.pop(\"enforce_dtype_function\")\n\n result = function(*args, **kwargs)\n if hasattr(result, \"dtype\") and dtype != result.dtype and dtype != object:\n if not np.can_cast(result, dtype, casting=\"same_kind\"):\n raise ValueError(\n \"Inferred dtype from function %r was %r \"\n \"but got %r, which can't be cast using \"\n \"casting='same_kind'\"\n % (funcname(function), str(dtype), str(result.dtype))\n )\n if np.isscalar(result):\n # scalar astype method doesn't take the keyword arguments, so\n # have to convert via 0-dimensional array and back.\n result = result.astype(dtype)\n else:\n try:\n result = result.astype(dtype, copy=False)\n except TypeError:\n # Missing copy kwarg\n result = result.astype(dtype)\n return result\n\n\ndef broadcast_to(x, shape, chunks=None):\n \"\"\"Broadcast an array to a new shape.\n\n Parameters\n ----------\n x : array_like\n The array to broadcast.\n shape : tuple\n The shape of the desired array.\n chunks : tuple, optional\n If provided, then the result will use these chunks instead of the same\n chunks as 
the source array. Setting chunks explicitly as part of\n broadcast_to is more efficient than rechunking afterwards. Chunks are\n only allowed to differ from the original shape along dimensions that\n are new on the result or have size 1 the input array.\n\n Returns\n -------\n broadcast : dask array\n\n See Also\n --------\n :func:`numpy.broadcast_to`\n \"\"\"\n x = asarray(x)\n shape = tuple(shape)\n\n if x.shape == shape and (chunks is None or chunks == x.chunks):\n return x\n\n ndim_new = len(shape) - x.ndim\n if ndim_new < 0 or any(\n new != old for new, old in zip(shape[ndim_new:], x.shape) if old != 1\n ):\n raise ValueError(\"cannot broadcast shape %s to shape %s\" % (x.shape, shape))\n\n if chunks is None:\n chunks = tuple((s,) for s in shape[:ndim_new]) + tuple(\n bd if old > 1 else (new,)\n for bd, old, new in zip(x.chunks, x.shape, shape[ndim_new:])\n )\n else:\n chunks = normalize_chunks(\n chunks, shape, dtype=x.dtype, previous_chunks=x.chunks\n )\n for old_bd, new_bd in zip(x.chunks, chunks[ndim_new:]):\n if old_bd != new_bd and old_bd != (1,):\n raise ValueError(\n \"cannot broadcast chunks %s to chunks %s: \"\n \"new chunks must either be along a new \"\n \"dimension or a dimension of size 1\" % (x.chunks, chunks)\n )\n\n name = \"broadcast_to-\" + tokenize(x, shape, chunks)\n dsk = {}\n\n enumerated_chunks = product(*(enumerate(bds) for bds in chunks))\n for new_index, chunk_shape in (zip(*ec) for ec in enumerated_chunks):\n old_index = tuple(\n 0 if bd == (1,) else i for bd, i in zip(x.chunks, new_index[ndim_new:])\n )\n old_key = (x.name,) + old_index\n new_key = (name,) + new_index\n dsk[new_key] = (np.broadcast_to, old_key, quote(chunk_shape))\n\n graph = HighLevelGraph.from_collections(name, dsk, dependencies=[x])\n return Array(graph, name, chunks, dtype=x.dtype)\n\n\n@derived_from(np)\ndef broadcast_arrays(*args, **kwargs):\n subok = bool(kwargs.pop(\"subok\", False))\n\n to_array = asanyarray if subok else asarray\n args = tuple(to_array(e) for e in args)\n\n if kwargs:\n raise TypeError(\"unsupported keyword argument(s) provided\")\n\n # Unify uneven chunking\n inds = [list(reversed(range(x.ndim))) for x in args]\n uc_args = concat(zip(args, inds))\n _, args = unify_chunks(*uc_args, warn=False)\n\n shape = broadcast_shapes(*(e.shape for e in args))\n chunks = broadcast_chunks(*(e.chunks for e in args))\n\n result = [broadcast_to(e, shape=shape, chunks=chunks) for e in args]\n\n return result\n\n\ndef offset_func(func, offset, *args):\n \"\"\" Offsets inputs by offset\n\n >>> double = lambda x: x * 2\n >>> f = offset_func(double, (10,))\n >>> f(1)\n 22\n >>> f(300)\n 620\n \"\"\"\n\n def _offset(*args):\n args2 = list(map(add, args, offset))\n return func(*args2)\n\n with ignoring(Exception):\n _offset.__name__ = \"offset_\" + func.__name__\n\n return _offset\n\n\ndef chunks_from_arrays(arrays):\n \"\"\" Chunks tuple from nested list of arrays\n\n >>> x = np.array([1, 2])\n >>> chunks_from_arrays([x, x])\n ((2, 2),)\n\n >>> x = np.array([[1, 2]])\n >>> chunks_from_arrays([[x], [x]])\n ((1, 1), (2,))\n\n >>> x = np.array([[1, 2]])\n >>> chunks_from_arrays([[x, x]])\n ((1,), (2, 2))\n\n >>> chunks_from_arrays([1, 1])\n ((1, 1),)\n \"\"\"\n if not arrays:\n return ()\n result = []\n dim = 0\n\n def shape(x):\n try:\n return x.shape\n except AttributeError:\n return (1,)\n\n while isinstance(arrays, (list, tuple)):\n result.append(tuple([shape(deepfirst(a))[dim] for a in arrays]))\n arrays = arrays[0]\n dim += 1\n return tuple(result)\n\n\ndef deepfirst(seq):\n \"\"\" 
First element in a nested list\n\n >>> deepfirst([[[1, 2], [3, 4]], [5, 6], [7, 8]])\n 1\n \"\"\"\n if not isinstance(seq, (list, tuple)):\n return seq\n else:\n return deepfirst(seq[0])\n\n\ndef shapelist(a):\n \"\"\" Get the shape of nested list \"\"\"\n if type(a) is list:\n return tuple([len(a)] + list(shapelist(a[0])))\n else:\n return ()\n\n\ndef reshapelist(shape, seq):\n \"\"\" Reshape iterator to nested shape\n\n >>> reshapelist((2, 3), range(6))\n [[0, 1, 2], [3, 4, 5]]\n \"\"\"\n if len(shape) == 1:\n return list(seq)\n else:\n n = int(len(seq) / shape[0])\n return [reshapelist(shape[1:], part) for part in partition(n, seq)]\n\n\ndef transposelist(arrays, axes, extradims=0):\n \"\"\" Permute axes of nested list\n\n >>> transposelist([[1,1,1],[1,1,1]], [2,1])\n [[[1, 1], [1, 1], [1, 1]]]\n\n >>> transposelist([[1,1,1],[1,1,1]], [2,1], extradims=1)\n [[[[1], [1]], [[1], [1]], [[1], [1]]]]\n \"\"\"\n if len(axes) != ndimlist(arrays):\n raise ValueError(\"Length of axes should equal depth of nested arrays\")\n if extradims < 0:\n raise ValueError(\"`newdims` should be positive\")\n if len(axes) > len(set(axes)):\n raise ValueError(\"`axes` should be unique\")\n\n ndim = max(axes) + 1\n shape = shapelist(arrays)\n newshape = [\n shape[axes.index(i)] if i in axes else 1 for i in range(ndim + extradims)\n ]\n\n result = list(core.flatten(arrays))\n return reshapelist(newshape, result)\n\n\ndef stack(seq, axis=0, allow_unknown_chunksizes=False):\n \"\"\"\n Stack arrays along a new axis\n\n Given a sequence of dask arrays, form a new dask array by stacking them\n along a new dimension (axis=0 by default)\n\n Parameters\n ----------\n seq: list of dask.arrays\n axis: int\n Dimension along which to align all of the arrays\n allow_unknown_chunksizes: bool\n Allow unknown chunksizes, such as come from converting from dask\n dataframes. Dask.array is unable to verify that chunks line up. If\n data comes from differently aligned sources then this can cause\n unexpected results.\n\n Examples\n --------\n\n Create slices\n\n >>> import dask.array as da\n >>> import numpy as np\n\n >>> data = [from_array(np.ones((4, 4)), chunks=(2, 2))\n ... for i in range(3)]\n\n >>> x = da.stack(data, axis=0)\n >>> x.shape\n (3, 4, 4)\n\n >>> da.stack(data, axis=1).shape\n (4, 3, 4)\n\n >>> da.stack(data, axis=-1).shape\n (4, 4, 3)\n\n Result is a new dask Array\n\n See Also\n --------\n concatenate\n \"\"\"\n from . import wrap\n\n seq = [asarray(a) for a in seq]\n\n if not seq:\n raise ValueError(\"Need array(s) to stack\")\n if not allow_unknown_chunksizes and not all(x.shape == seq[0].shape for x in seq):\n idx = first(i for i in enumerate(seq) if i[1].shape != seq[0].shape)\n raise ValueError(\n \"Stacked arrays must have the same shape. 
\"\n \"The first array had shape {0}, while array \"\n \"{1} has shape {2}.\".format(seq[0].shape, idx[0] + 1, idx[1].shape)\n )\n\n meta = np.stack([meta_from_array(a) for a in seq], axis=axis)\n seq = [x.astype(meta.dtype) for x in seq]\n\n ndim = meta.ndim - 1\n if axis < 0:\n axis = ndim + axis + 1\n shape = tuple(\n len(seq)\n if i == axis\n else (seq[0].shape[i] if i < axis else seq[0].shape[i - 1])\n for i in range(meta.ndim)\n )\n\n seq2 = [a for a in seq if a.size]\n if not seq2:\n seq2 = seq\n\n n = len(seq2)\n if n == 0:\n try:\n return wrap.empty_like(meta, shape=shape, chunks=shape, dtype=meta.dtype)\n except TypeError:\n return wrap.empty(shape, chunks=shape, dtype=meta.dtype)\n\n ind = list(range(ndim))\n uc_args = list(concat((x, ind) for x in seq2))\n _, seq2 = unify_chunks(*uc_args)\n\n assert len(set(a.chunks for a in seq2)) == 1 # same chunks\n chunks = seq2[0].chunks[:axis] + ((1,) * n,) + seq2[0].chunks[axis:]\n\n names = [a.name for a in seq2]\n name = \"stack-\" + tokenize(names, axis)\n keys = list(product([name], *[range(len(bd)) for bd in chunks]))\n\n inputs = [\n (names[key[axis + 1]],) + key[1 : axis + 1] + key[axis + 2 :] for key in keys\n ]\n values = [\n (\n getitem,\n inp,\n (slice(None, None, None),) * axis\n + (None,)\n + (slice(None, None, None),) * (ndim - axis),\n )\n for inp in inputs\n ]\n\n layer = dict(zip(keys, values))\n graph = HighLevelGraph.from_collections(name, layer, dependencies=seq2)\n\n return Array(graph, name, chunks, meta=meta)\n\n\ndef concatenate3(arrays):\n \"\"\" Recursive np.concatenate\n\n Input should be a nested list of numpy arrays arranged in the order they\n should appear in the array itself. Each array should have the same number\n of dimensions as the desired output and the nesting of the lists.\n\n >>> x = np.array([[1, 2]])\n >>> concatenate3([[x, x, x], [x, x, x]])\n array([[1, 2, 1, 2, 1, 2],\n [1, 2, 1, 2, 1, 2]])\n\n >>> concatenate3([[x, x], [x, x], [x, x]])\n array([[1, 2, 1, 2],\n [1, 2, 1, 2],\n [1, 2, 1, 2]])\n \"\"\"\n from .utils import IS_NEP18_ACTIVE\n\n # We need this as __array_function__ may not exist on older NumPy versions.\n # And to reduce verbosity.\n NDARRAY_ARRAY_FUNCTION = getattr(np.ndarray, \"__array_function__\", None)\n\n arrays = concrete(arrays)\n if not arrays:\n return np.empty(0)\n\n advanced = max(\n core.flatten(arrays, container=(list, tuple)),\n key=lambda x: getattr(x, \"__array_priority__\", 0),\n )\n\n if IS_NEP18_ACTIVE and not all(\n NDARRAY_ARRAY_FUNCTION\n is getattr(arr, \"__array_function__\", NDARRAY_ARRAY_FUNCTION)\n for arr in arrays\n ):\n try:\n x = unpack_singleton(arrays)\n return _concatenate2(arrays, axes=tuple(range(x.ndim)))\n except TypeError:\n pass\n\n if concatenate_lookup.dispatch(type(advanced)) is not np.concatenate:\n x = unpack_singleton(arrays)\n return _concatenate2(arrays, axes=list(range(x.ndim)))\n\n ndim = ndimlist(arrays)\n if not ndim:\n return arrays\n chunks = chunks_from_arrays(arrays)\n shape = tuple(map(sum, chunks))\n\n def dtype(x):\n try:\n return x.dtype\n except AttributeError:\n return type(x)\n\n result = np.empty(shape=shape, dtype=dtype(deepfirst(arrays)))\n\n for (idx, arr) in zip(slices_from_chunks(chunks), core.flatten(arrays)):\n if hasattr(arr, \"ndim\"):\n while arr.ndim < ndim:\n arr = arr[None, ...]\n result[idx] = arr\n\n return result\n\n\ndef concatenate_axes(arrays, axes):\n \"\"\" Recursively call np.concatenate along axes \"\"\"\n if len(axes) != ndimlist(arrays):\n raise ValueError(\"Length of axes should equal depth 
of nested arrays\")\n\n extradims = max(0, deepfirst(arrays).ndim - (max(axes) + 1))\n return concatenate3(transposelist(arrays, axes, extradims=extradims))\n\n\ndef to_hdf5(filename, *args, **kwargs):\n \"\"\" Store arrays in HDF5 file\n\n This saves several dask arrays into several datapaths in an HDF5 file.\n It creates the necessary datasets and handles clean file opening/closing.\n\n >>> da.to_hdf5('myfile.hdf5', '/x', x) # doctest: +SKIP\n\n or\n\n >>> da.to_hdf5('myfile.hdf5', {'/x': x, '/y': y}) # doctest: +SKIP\n\n Optionally provide arguments as though to ``h5py.File.create_dataset``\n\n >>> da.to_hdf5('myfile.hdf5', '/x', x, compression='lzf', shuffle=True) # doctest: +SKIP\n\n This can also be used as a method on a single Array\n\n >>> x.to_hdf5('myfile.hdf5', '/x') # doctest: +SKIP\n\n See Also\n --------\n da.store\n h5py.File.create_dataset\n \"\"\"\n if len(args) == 1 and isinstance(args[0], dict):\n data = args[0]\n elif len(args) == 2 and isinstance(args[0], str) and isinstance(args[1], Array):\n data = {args[0]: args[1]}\n else:\n raise ValueError(\"Please provide {'/data/path': array} dictionary\")\n\n chunks = kwargs.pop(\"chunks\", True)\n\n import h5py\n\n with h5py.File(filename, mode=\"a\") as f:\n dsets = [\n f.require_dataset(\n dp,\n shape=x.shape,\n dtype=x.dtype,\n chunks=tuple([c[0] for c in x.chunks]) if chunks is True else chunks,\n **kwargs,\n )\n for dp, x in data.items()\n ]\n store(list(data.values()), dsets)\n\n\ndef interleave_none(a, b):\n \"\"\"\n\n >>> interleave_none([0, None, 2, None], [1, 3])\n (0, 1, 2, 3)\n \"\"\"\n result = []\n i = j = 0\n n = len(a) + len(b)\n while i + j < n:\n if a[i] is not None:\n result.append(a[i])\n i += 1\n else:\n result.append(b[j])\n i += 1\n j += 1\n return tuple(result)\n\n\ndef keyname(name, i, okey):\n \"\"\"\n\n >>> keyname('x', 3, [None, None, 0, 2])\n ('x', 3, 0, 2)\n \"\"\"\n return (name, i) + tuple(k for k in okey if k is not None)\n\n\ndef _vindex(x, *indexes):\n \"\"\"Point wise indexing with broadcasting.\n\n >>> x = np.arange(56).reshape((7, 8))\n >>> x\n array([[ 0, 1, 2, 3, 4, 5, 6, 7],\n [ 8, 9, 10, 11, 12, 13, 14, 15],\n [16, 17, 18, 19, 20, 21, 22, 23],\n [24, 25, 26, 27, 28, 29, 30, 31],\n [32, 33, 34, 35, 36, 37, 38, 39],\n [40, 41, 42, 43, 44, 45, 46, 47],\n [48, 49, 50, 51, 52, 53, 54, 55]])\n\n >>> d = from_array(x, chunks=(3, 4))\n >>> result = _vindex(d, [0, 1, 6, 0], [0, 1, 0, 7])\n >>> result.compute()\n array([ 0, 9, 48, 7])\n \"\"\"\n indexes = replace_ellipsis(x.ndim, indexes)\n\n nonfancy_indexes = []\n reduced_indexes = []\n for i, ind in enumerate(indexes):\n if isinstance(ind, Number):\n nonfancy_indexes.append(ind)\n elif isinstance(ind, slice):\n nonfancy_indexes.append(ind)\n reduced_indexes.append(slice(None))\n else:\n nonfancy_indexes.append(slice(None))\n reduced_indexes.append(ind)\n\n nonfancy_indexes = tuple(nonfancy_indexes)\n reduced_indexes = tuple(reduced_indexes)\n\n x = x[nonfancy_indexes]\n\n array_indexes = {}\n for i, (ind, size) in enumerate(zip(reduced_indexes, x.shape)):\n if not isinstance(ind, slice):\n ind = np.array(ind, copy=True)\n if ind.dtype.kind == \"b\":\n raise IndexError(\"vindex does not support indexing with boolean arrays\")\n if ((ind >= size) | (ind < -size)).any():\n raise IndexError(\n \"vindex key has entries out of bounds for \"\n \"indexing along axis %s of size %s: %r\" % (i, size, ind)\n )\n ind %= size\n array_indexes[i] = ind\n\n if array_indexes:\n x = _vindex_array(x, array_indexes)\n\n return x\n\n\ndef _vindex_array(x, 
dict_indexes):\n \"\"\"Point wise indexing with only NumPy Arrays.\"\"\"\n\n try:\n broadcast_indexes = np.broadcast_arrays(*dict_indexes.values())\n except ValueError as e:\n # note: error message exactly matches numpy\n shapes_str = \" \".join(str(a.shape) for a in dict_indexes.values())\n raise IndexError(\n \"shape mismatch: indexing arrays could not be \"\n \"broadcast together with shapes \" + shapes_str\n ) from e\n broadcast_shape = broadcast_indexes[0].shape\n\n lookup = dict(zip(dict_indexes, broadcast_indexes))\n flat_indexes = [\n lookup[i].ravel().tolist() if i in lookup else None for i in range(x.ndim)\n ]\n flat_indexes.extend([None] * (x.ndim - len(flat_indexes)))\n\n flat_indexes = [\n list(index) if index is not None else index for index in flat_indexes\n ]\n bounds = [list(accumulate(add, (0,) + c)) for c in x.chunks]\n bounds2 = [b for i, b in zip(flat_indexes, bounds) if i is not None]\n axis = _get_axis(flat_indexes)\n token = tokenize(x, flat_indexes)\n out_name = \"vindex-merge-\" + token\n\n points = list()\n for i, idx in enumerate(zip(*[i for i in flat_indexes if i is not None])):\n block_idx = [\n np.searchsorted(b, ind, \"right\") - 1 for b, ind in zip(bounds2, idx)\n ]\n inblock_idx = [\n ind - bounds2[k][j] for k, (ind, j) in enumerate(zip(idx, block_idx))\n ]\n points.append((i, tuple(block_idx), tuple(inblock_idx)))\n\n chunks = [c for i, c in zip(flat_indexes, x.chunks) if i is None]\n chunks.insert(0, (len(points),) if points else (0,))\n chunks = tuple(chunks)\n\n if points:\n per_block = groupby(1, points)\n per_block = dict((k, v) for k, v in per_block.items() if v)\n\n other_blocks = list(\n product(\n *[\n list(range(len(c))) if i is None else [None]\n for i, c in zip(flat_indexes, x.chunks)\n ]\n )\n )\n\n full_slices = [slice(None, None) if i is None else None for i in flat_indexes]\n\n name = \"vindex-slice-\" + token\n vindex_merge_name = \"vindex-merge-\" + token\n dsk = {}\n for okey in other_blocks:\n for i, key in enumerate(per_block):\n dsk[keyname(name, i, okey)] = (\n _vindex_transpose,\n (\n _vindex_slice,\n (x.name,) + interleave_none(okey, key),\n interleave_none(\n full_slices, list(zip(*pluck(2, per_block[key])))\n ),\n ),\n axis,\n )\n dsk[keyname(vindex_merge_name, 0, okey)] = (\n _vindex_merge,\n [list(pluck(0, per_block[key])) for key in per_block],\n [keyname(name, i, okey) for i in range(len(per_block))],\n )\n\n result_1d = Array(\n HighLevelGraph.from_collections(out_name, dsk, dependencies=[x]),\n out_name,\n chunks,\n x.dtype,\n )\n return result_1d.reshape(broadcast_shape + result_1d.shape[1:])\n\n # output has a zero dimension, just create a new zero-shape array with the\n # same dtype\n from .wrap import empty\n\n result_1d = empty(\n tuple(map(sum, chunks)), chunks=chunks, dtype=x.dtype, name=out_name\n )\n return result_1d.reshape(broadcast_shape + result_1d.shape[1:])\n\n\ndef _get_axis(indexes):\n \"\"\" Get axis along which point-wise slicing results lie\n\n This is mostly a hack because I can't figure out NumPy's rule on this and\n can't be bothered to go reading.\n\n >>> _get_axis([[1, 2], None, [1, 2], None])\n 0\n >>> _get_axis([None, [1, 2], [1, 2], None])\n 1\n >>> _get_axis([None, None, [1, 2], [1, 2]])\n 2\n \"\"\"\n ndim = len(indexes)\n indexes = [slice(None, None) if i is None else [0] for i in indexes]\n x = np.empty((2,) * ndim)\n x2 = x[tuple(indexes)]\n return x2.shape.index(1)\n\n\ndef _vindex_slice(block, points):\n \"\"\" Pull out point-wise slices from block \"\"\"\n points = [p if isinstance(p, 
slice) else list(p) for p in points]\n return block[tuple(points)]\n\n\ndef _vindex_transpose(block, axis):\n \"\"\" Rotate block so that points are on the first dimension \"\"\"\n axes = [axis] + list(range(axis)) + list(range(axis + 1, block.ndim))\n return block.transpose(axes)\n\n\ndef _vindex_merge(locations, values):\n \"\"\"\n\n >>> locations = [0], [2, 1]\n >>> values = [np.array([[1, 2, 3]]),\n ... np.array([[10, 20, 30], [40, 50, 60]])]\n\n >>> _vindex_merge(locations, values)\n array([[ 1, 2, 3],\n [40, 50, 60],\n [10, 20, 30]])\n \"\"\"\n locations = list(map(list, locations))\n values = list(values)\n\n n = sum(map(len, locations))\n\n shape = list(values[0].shape)\n shape[0] = n\n shape = tuple(shape)\n\n dtype = values[0].dtype\n\n x = np.empty(shape, dtype=dtype)\n\n ind = [slice(None, None) for i in range(x.ndim)]\n for loc, val in zip(locations, values):\n ind[0] = loc\n x[tuple(ind)] = val\n\n return x\n\n\ndef to_npy_stack(dirname, x, axis=0):\n \"\"\" Write dask array to a stack of .npy files\n\n This partitions the dask.array along one axis and stores each block along\n that axis as a single .npy file in the specified directory\n\n Examples\n --------\n >>> x = da.ones((5, 10, 10), chunks=(2, 4, 4)) # doctest: +SKIP\n >>> da.to_npy_stack('data/', x, axis=0) # doctest: +SKIP\n\n The ``.npy`` files store numpy arrays for ``x[0:2], x[2:4], and x[4:5]``\n respectively, as is specified by the chunk size along the zeroth axis::\n\n $ tree data/\n data/\n |-- 0.npy\n |-- 1.npy\n |-- 2.npy\n |-- info\n\n The ``info`` file stores the dtype, chunks, and axis information of the array.\n You can load these stacks with the ``da.from_npy_stack`` function.\n\n >>> y = da.from_npy_stack('data/') # doctest: +SKIP\n\n See Also\n --------\n from_npy_stack\n \"\"\"\n\n chunks = tuple((c if i == axis else (sum(c),)) for i, c in enumerate(x.chunks))\n xx = x.rechunk(chunks)\n\n if not os.path.exists(dirname):\n os.mkdir(dirname)\n\n meta = {\"chunks\": chunks, \"dtype\": x.dtype, \"axis\": axis}\n\n with open(os.path.join(dirname, \"info\"), \"wb\") as f:\n pickle.dump(meta, f)\n\n name = \"to-npy-stack-\" + str(uuid.uuid1())\n dsk = {\n (name, i): (np.save, os.path.join(dirname, \"%d.npy\" % i), key)\n for i, key in enumerate(core.flatten(xx.__dask_keys__()))\n }\n\n graph = HighLevelGraph.from_collections(name, dsk, dependencies=[xx])\n compute_as_if_collection(Array, graph, list(dsk))\n\n\ndef from_npy_stack(dirname, mmap_mode=\"r\"):\n \"\"\" Load dask array from stack of npy files\n\n See ``da.to_npy_stack`` for docstring\n\n Parameters\n ----------\n dirname: string\n Directory of .npy files\n mmap_mode: (None or 'r')\n Read data in memory map mode\n \"\"\"\n with open(os.path.join(dirname, \"info\"), \"rb\") as f:\n info = pickle.load(f)\n\n dtype = info[\"dtype\"]\n chunks = info[\"chunks\"]\n axis = info[\"axis\"]\n\n name = \"from-npy-stack-%s\" % dirname\n keys = list(product([name], *[range(len(c)) for c in chunks]))\n values = [\n (np.load, os.path.join(dirname, \"%d.npy\" % i), mmap_mode)\n for i in range(len(chunks[axis]))\n ]\n dsk = dict(zip(keys, values))\n\n return Array(dsk, name, chunks, dtype)\n\n\nfrom .utils import meta_from_array\n"
] | [
[
"numpy.can_cast",
"numpy.asarray",
"numpy.isnan",
"numpy.median",
"numpy.dtype",
"numpy.ones",
"numpy.max",
"numpy.asanyarray",
"numpy.isscalar",
"numpy.searchsorted",
"numpy.errstate",
"numpy.prod",
"numpy.array",
"numpy.empty"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
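The `code` entry of the record above is a slice of `dask.array`'s core module (`elemwise`, `broadcast_shapes`, `broadcast_to`, `stack`, and the npy-stack helpers). As a quick illustration of what those internals support, here is a minimal sketch using only the public `dask.array` API; it assumes `dask` and `numpy` are installed, and the shapes/chunks below are arbitrary examples, not values taken from the record.

```python
# Minimal sketch exercising the public wrappers around the helpers shown above
# (stack adds a new axis with size-1 chunks; broadcast_to expands size-1 dims).
import dask.array as da

x = da.ones((4, 4), chunks=(2, 2))                  # 2 x 2 grid of 2 x 2 blocks
s = da.stack([x, x, x], axis=0)                     # new leading axis of length 3
assert s.shape == (3, 4, 4)
assert s.chunks == ((1, 1, 1), (2, 2), (2, 2))      # stack() inserts ((1,) * n,) at the new axis

b = da.broadcast_to(x[:1, :], (4, 4))               # broadcast a (1, 4) slice back to (4, 4)
assert b.shape == (4, 4)
print(s.compute().sum(), b.compute().sum())         # 48.0 16.0
```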
cophus/PhaseContrastTomographySolver | [
"e75cfd5af5cc0fdf363d3754c22d91f4c2dec8e8",
"e75cfd5af5cc0fdf363d3754c22d91f4c2dec8e8"
] | [
"transform.py",
"forward.py"
] | [
"\"\"\"\nTransform functions for Tomography in Numpy, Scipy, Torch, and Skimage\nEstimates affine transform between measured image and predicted image\nhttps://github.com/scikit-image/scikit-image\n\nDavid Ren [email protected]\n\nDec 28, 2020\n\"\"\"\n\nimport numpy as np\nimport torch\nfrom skimage.registration import optical_flow_tvl1\nfrom skimage import transform\nimport scipy.optimize as sop\n\nclass ImageTransformOpticalFlow():\n \"\"\"\n Class written to register stack of images for AET.\n Uses correlation based method to determine subpixel shift between predicted and measured images.\n Input parameters:\n - shape: shape of the image\n \"\"\" \n def __init__(self, shape, method=\"optical_flow\"):\n self.shape = shape\n self.x_lin, self.y_lin = np.meshgrid(np.arange(self.shape[1]), np.arange(self.shape[0]))\n self.xy_lin = np.concatenate((self.x_lin[np.newaxis,], self.y_lin[np.newaxis,])).astype('float32')\n \n\n def _coordinate_warp(self, transform_vec, xy_lin, xy_flow):\n transform_vec = transform_vec.astype('float32')\n rot_mat = [np.cos(transform_vec[0]), \\\n -np.sin(transform_vec[0]), \\\n np.sin(transform_vec[0]), \\\n np.cos(transform_vec[0])]\n xy_predict = np.zeros_like(xy_lin)\n xy_predict[0,] = rot_mat[0] * xy_lin[0,] + rot_mat[1] * xy_lin[1,] + transform_vec[1]\n xy_predict[1,] = rot_mat[2] * xy_lin[0,] + rot_mat[3] * xy_lin[1,] + transform_vec[2]\n resid = xy_predict - xy_flow\n f_val = 0.5 * np.sum(resid.transpose((1,2,0)).flatten() ** 2)\n f_grad = []\n #theta\n f_grad.append(np.sum((rot_mat[1] * xy_lin[0,] * resid[0,]).flatten()) +\\\n np.sum((-rot_mat[0] * xy_lin[1,] * resid[0,]).flatten()) + \\\n np.sum((rot_mat[0] * xy_lin[0,] * resid[1,]).flatten()) + \\\n np.sum((rot_mat[1] * xy_lin[1,] * resid[1,]).flatten()))\n #dx\n f_grad.append(np.sum((resid[0,]).flatten()))\n #dy\n f_grad.append(np.sum((resid[1,]).flatten()))\n f_grad = np.array(f_grad)\n return f_val.astype('float64'), np.array(f_grad).astype('float64')\n\n def _estimate_single(self, predicted, measured):\n assert predicted.shape == self.shape\n assert measured.shape == self.shape\n flow = optical_flow_tvl1(predicted, measured)\n flow[[1,0],] = flow[[0,1],]\n xy_flow = self.xy_lin - flow\n _Afunc_coord_warp = lambda transform_vec: self._coordinate_warp(transform_vec, self.xy_lin, xy_flow) \n\n #estimate transform matrix from optical flow\n results = sop.fmin_l_bfgs_b(_Afunc_coord_warp, np.array([0.0,0,0]))\n transform_final = results[0]\n if results[2][\"warnflag\"]:\n transform_final *= 0.0\n print(\"Transform estimation not converged\")\n\n #inverse warp measured image\n transform_mat = np.array([np.cos(transform_final[0]), \\\n -np.sin(transform_final[0]), \\\n np.sin(transform_final[0]), \\\n np.cos(transform_final[0]), \\\n transform_final[1], \\\n transform_final[2]]) \n aff_mat = np.array([transform_mat[[0,1,4]], transform_mat[[2,3,5]],[0,0,1]])\n tform = transform.AffineTransform(matrix = aff_mat)\n measured_warp = transform.warp(measured, tform.inverse, cval = 1.0)\n\n return measured_warp, transform_final\n\n def estimate(self, predicted_stack, measured_stack):\n assert predicted_stack.shape == measured_stack.shape\n transform_vec_list = np.zeros((3,measured_stack.shape[2]), dtype=\"float32\")\n\n #Change from torch array to numpy array\n flag_predicted_gpu = predicted_stack.is_cuda\n if flag_predicted_gpu:\n predicted_stack = predicted_stack.cpu()\n\n flag_measured_gpu = measured_stack.is_cuda\n if flag_measured_gpu:\n measured_stack = measured_stack.cpu() \n \n predicted_np = 
np.array(predicted_stack.detach())\n measured_np = np.array(measured_stack.detach())\n \n #For each image, estimate the affine transform error\n for img_idx in range(measured_np.shape[2]):\n measured_np[...,img_idx], transform_vec = self._estimate_single(predicted_np[...,img_idx], \\\n measured_np[...,img_idx])\n transform_vec_list[...,img_idx] = transform_vec\n \n #Change data back to torch tensor format\n if flag_predicted_gpu:\n predicted_stack = predicted_stack.cuda()\n\n measured_np = torch.tensor(measured_np)\n if flag_measured_gpu:\n measured_stack = measured_stack.cuda() \n measured_np = measured_np.cuda()\n\n return measured_np, torch.tensor(transform_vec_list)\n",
"\"\"\"\ntop level module for pytorch\n\nDavid Ren [email protected]\n\nSeptember 16, 2019\n\"\"\"\n\nimport sys\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\ntorch.set_printoptions(precision=10)\n\n#data\nfrom torch.utils.data import Dataset\nfrom torch.utils.data import DataLoader\n\nimport torch.optim as optim\nimport operators as op\nimport utilities\nimport shift\nimport transform\nfrom aperture import Pupil\nfrom propagation import SingleSlicePropagation, Defocus, MultislicePropagation\nfrom regularizers import Regularizer\n\nimport scipy.io as sio\nimport numpy as np\nbin_obj = utilities.BinObject.apply\ncomplex_exp = op.ComplexExp.apply\ncomplex_mul = op.ComplexMul.apply\ncomplex_abs = op.ComplexAbs.apply\nfield_defocus = Defocus.apply\n\nclass TorchTomographySolver:\n\tdef __init__(self, **kwargs):\n\t\t\"\"\"\n\t\tCreating tomography solver object.\n\t\tRequired Args:\n\t\t\tshape: shape of the object in [y, x, z]\n\t\t\tvoxel_size: size of voxel in [y, x, z]\n\t\t\twavelength: wavelength of probing wave, scalar\n\t\t\tsigma: sigma used in calculating transmittance function (exp(1i * sigma * object)), scalar\n\t\t\ttilt_angles: an array of sample rotation angles\n\t\t\tdefocus_list: an array of defocus values\n\n\t\tOptional Args [default]\n\t\t\tamplitude_measurements: measurements for reconstruction, not needed for forward evaluation of the model only [None]\n\t\t\tnumerical_aperture: numerical aperture of the system, scalar [1.0]\n\t\t\tbinning_factor: bins the number of slices together to save computation, scalar [1]\n\t\t\tpad_size: padding reconstruction from measurements in [dy,dx], final size will be measurement.shape + 2*[dy, dx], [0, 0]\n\t\t\tshuffle: random shuffle of measurements, boolean [True]\n\t\t\tpupil: inital value for the pupil function [None]\n\t\t\tmaxitr: maximum number of iterations [100]\n\t\t\tstep_size: step_size for each gradient update [0.1]\n\t\t\tmomentum: [0.0 NOTIMPLEMENTED]\n\n\n\t\t\t-- transform alignment parameters (currently only support rigid body transform alignment) -- \n\t\t\ttransform_align: whether to turn on transform alignment, boolean, [False]\n\t\t\tta_method: \"optical_flow\"\n\t\t\tta_start_iteration: alignment process will not start until then, int, [0]\n\n\t\t\t-- Shift alignment parameters -- \n\t\t\tshift_align: whether to turn on alignment, boolean, [False]\n\t\t\tsa_method: shift alignment method, can be \"gradient\", \"hybrid_correlation\", \"cross_correlation\", or \"phase_correlation\", string, [\"gradient\"]\n\t\t\tsa_step_size: step_size of shift parameters, float, [0.1]\n\t\t\tsa_start_iteration: alignment process will not start until then, int, [0]\n\n\t\t\t-- Defocus refinement parameters -- \n\t\t\tdefocus_refine: whether to turn on defocus refinement for each measurement, boolean, [False]\n\t\t\tdr_method: defocus refinement method, can be \"gradient\", string, [\"gradient\"]\n\t\t\tdr_step_size: step_size of defocus refinement parameters, float, [0.1]\n\t\t\tdr_start_iteration: refinement process will not start until then, int, [0]\n\n\t\t\t-- regularizer parameters --\n\t\t\tregularizer_total_variation: boolean [False]\n\t\t\tregularizer_total_variation_gpu: boolean [False]\n\t\t\tregularizer_total_variation_parameter: controls amount of total variation, scalar or vector of length maxitr. 
[scalar 1.0]\n\t\t\tregularizer_total_variation_maxitr: number of iterations for total variation, integer [15]\n\t\t\tregularizer_total_variation_order: differential order, scalar [1], higher order not yet implemented\n\t\t\tregularizer_pure_real: boolean [False]\n\t\t\tregularizer_pure_imag: boolean [False]\n\t\t\tregularizer_pure_amplitude: boolean [False]\n\t\t\tregularizer_pure_phase: boolean [False]\n\t\t\tregularizer_positivity_real: boolean [False]\n\t\t\tregularizer_positivity_imag: boolean [False]\n\t\t\tregularizer_negativity_real: boolean [False]\n\t\t\tregularizer_negativity_imag: boolean [False]\n\t\t\tregularizer_dtype: torch dtype class [torch.float32]\n\t\t\"\"\"\n\t\t\n\t\tself.shape \t\t\t = kwargs.get(\"shape\")\n\t\t\n\t\tself.shuffle\t\t = kwargs.get(\"shuffle\", True)\n\t\tself.optim_max_itr = kwargs.get(\"maxitr\", 100)\n\t\tself.optim_step_size = kwargs.get(\"step_size\", 0.1)\n\t\tself.optim_momentum = kwargs.get(\"momentum\", 0.0)\n\n\t\tself.obj_update_iterations = kwargs.get(\"obj_update_iterations\", np.arange(self.optim_max_itr))\n\n\t\t#parameters for transform alignment\n\t\tself.transform_align = kwargs.get(\"transform_align\", False)\n\t\tself.ta_method = kwargs.get(\"ta_method\", \"optical_flow\")\n\t\tself.ta_start_iteration = kwargs.get(\"ta_start_iteration\", 0)\n\n\t\t#parameters for shift alignment\n\t\tself.shift_align = kwargs.get(\"shift_align\", False)\n\t\tself.sa_method = kwargs.get(\"sa_method\", \"gradient\")\n\t\tself.sa_step_size = kwargs.get(\"sa_step_size\", 0.1)\n\t\tself.sa_start_iteration = kwargs.get(\"sa_start_iteration\", 0)\n\n\t\t#parameters for defocus refinement\n\t\tself.defocus_refine = kwargs.get(\"defocus_refine\", False)\n\t\tself.dr_method = kwargs.get(\"dr_method\", \"gradient\")\n\t\tself.dr_step_size = kwargs.get(\"dr_step_size\", 0.1)\n\t\tself.dr_start_iteration = kwargs.get(\"dr_start_iteration\", 0)\t\t\n\n\t\tif not shift.is_valid_method(self.sa_method):\n\t\t\traise ValueError('Shift alignment method not valid.')\n\t\tif self.shift_align and shift.is_correlation_method(self.sa_method):\n\t\t\tself.shift_obj\t\t = shift.ImageShiftCorrelationBased(kwargs[\"amplitude_measurements\"].shape[0:2], \\\n\t\t\t\t\t\t\t\t\t\t \t\t\t\t\t upsample_factor = 10, method = self.sa_method, \\\n\t\t\t\t\t\t\t\t\t\t\t \t\t\t\t\t device=torch.device('cpu'))\n\n\t\tif self.transform_align:\n\t\t\tself.transform_obj = transform.ImageTransformOpticalFlow(kwargs[\"amplitude_measurements\"].shape[0:2],\\\n\t\t\t\t \t\t\t\t\t\t\t\t\t\t\t method = self.ta_method)\n\n\t\tself.dataset \t = AETDataset(**kwargs)\n\t\tself.num_defocus\t = self.dataset.get_all_defocus_lists().shape[0]\n\t\tself.num_rotation = len(self.dataset.tilt_angles)\n\t\tself.tomography_obj = PhaseContrastScattering(**kwargs)\n\t\tself.regularizer_obj = Regularizer(**kwargs)\n\t\tself.rotation_obj\t = utilities.ImageRotation(self.shape, axis = 0)\n\t\t\n\t\tself.cost_function = nn.MSELoss(reduction='sum')\n \t\n\tdef run(self, obj_init=None, forward_only=False, callback=None):\n\t\t\"\"\"\n\t\trun tomography solver\n\t\tArgs:\n\t\tforward_only: True -- only runs forward model on estimated object\n\t\t\t\t\t False -- runs reconstruction\n\t\t\"\"\"\n\t\tif forward_only:\n\t\t\tassert obj_init is not None\n\t\t\tself.shuffle = False\n\t\t\tamplitude_list = []\n\t\t\n\t\tself.dataloader = DataLoader(self.dataset, batch_size = 1, shuffle=self.shuffle)\n\n\t\terror = []\n \t#initialize object\n\t\tself.obj = obj_init\n\t\tif self.obj is None:\n\t\t\tself.obj = 
op.r2c(torch.zeros(self.shape).cuda())\n\t\telse:\n\t\t\tif not self.obj.is_cuda:\n\t\t\t\tself.obj = self.obj.cuda()\n\t\t\tif len(self.obj.shape) == 3:\n\t\t\t\tself.obj = op.r2c(self.obj)\n\t\t\n\t\t#initialize shift parameters\n\t\tself.yx_shifts = None\n\t\tif self.shift_align:\n\t\t\tself.sa_pixel_count = []\n\t\t\tself.yx_shift_all = []\n\t\t\tself.yx_shifts = torch.zeros((2, self.num_defocus, self.num_rotation))\n\n\t\tif self.transform_align:\n\t\t\tself.xy_transform_all = []\n\t\t\tself.xy_transforms = torch.zeros((3, self.num_defocus, self.num_rotation))\n\n\t\t# TEMPP\n\t\t# defocus_list_grad = torch.zeros((self.num_defocus, self.num_rotation), dtype = torch.float32)\n\n\t\t#begin iteration\n\t\tfor itr_idx in range(self.optim_max_itr):\n\t\t\tsys.stdout.flush()\n\t\t\trunning_cost = 0.0\n\t\t\t#defocus_list_grad[:] = 0.0\n\t\t\tif self.shift_align and itr_idx >= self.sa_start_iteration:\n\t\t\t\trunning_sa_pixel_count = 0.0\n\t\t\tfor data_idx, data in enumerate(self.dataloader, 0):\n\t \t\t#parse data\n\t\t\t\tif not forward_only:\n\t\t\t\t\tamplitudes, rotation_angle, defocus_list, rotation_idx = data\n\t\t\t\t\tamplitudes = torch.squeeze(amplitudes)\n\t\t\t\t\tif len(amplitudes.shape) < 3:\n\t\t\t\t\t\tamplitudes = amplitudes.unsqueeze(-1)\n\n\t\t\t\telse:\n\t\t\t\t\trotation_angle, defocus_list, rotation_idx = data[-3:]\n\t\t\t\t#prepare tilt specific parameters\n\t\t\t\tdefocus_list = torch.flatten(defocus_list).cuda()\n\t\t\t\trotation_angle = rotation_angle.item()\n\t\t\t\tyx_shift = None\n\t\t\t\tif self.shift_align and self.sa_method == \"gradient\" and itr_idx >= self.sa_start_iteration:\n\t\t\t\t\tyx_shift = self.yx_shifts[:,:,rotation_idx]\n\t\t\t\t\tyx_shift = yx_shift.cuda()\n\t\t\t\t\tyx_shift.requires_grad_()\n\t\t\t\tif self.defocus_refine and self.dr_method == \"gradient\" and itr_idx >= self.dr_start_iteration:\n\t\t\t\t\tdefocus_list.requires_grad_()\t\t\t\t\t\n\t\t\t\t#rotate object\n\t\t\t\tif data_idx == 0:\n\t\t\t\t\tself.obj = self.rotation_obj.forward(self.obj, rotation_angle)\n\t\t\t\telse:\n\t\t\t\t\tif abs(rotation_angle - previous_angle) > 90:\n\t\t\t\t\t\tself.obj = self.rotation_obj.forward(self.obj, -1 * previous_angle)\n\t\t\t\t\t\tself.obj = self.rotation_obj.forward(self.obj, rotation_angle)\n\t\t\t\t\telse:\t\t\n\t\t\t\t\t\tself.obj = self.rotation_obj.forward(self.obj, rotation_angle - previous_angle)\t\t\t\t\t\n\t\t\t\tif not forward_only:\n\t\t\t\t\t#define optimizer\n\t\t\t\t\toptimizer_params = []\n\t\t\t\t\t\n\t\t\t\t\tif itr_idx in self.obj_update_iterations:\n\t\t\t\t\t\tself.obj.requires_grad_()\n\t\t\t\t\t\toptimizer_params.append({'params': self.obj, 'lr': self.optim_step_size})\n\t\t\t\t\tif self.shift_align and self.sa_method == \"gradient\" and itr_idx >= self.sa_start_iteration:\n\t\t\t\t\t\toptimizer_params.append({'params': yx_shift, 'lr': self.sa_step_size})\n\t\t\t\t\tif self.defocus_refine and self.dr_method == \"gradient\" and itr_idx >= self.dr_start_iteration:\n\t\t\t\t\t\toptimizer_params.append({'params': defocus_list, 'lr': self.dr_step_size})\n\t\t\t\t\toptimizer = optim.SGD(optimizer_params)\n\t\t\t\t\n\t\t\t\t#forward scattering\n\t\t\t\testimated_amplitudes = self.tomography_obj(self.obj, defocus_list, yx_shift)\n\n\t\t\t\t#Correlation based shift estimation\n\t\t\t\tif self.shift_align and shift.is_correlation_method(self.sa_method) and itr_idx >= self.sa_start_iteration:\n\t\t\t\t\tif abs(rotation_angle) - 0.0 > 1e-2:\n\t\t\t\t\t\tamplitudes, yx_shift, _ = self.shift_obj.estimate(estimated_amplitudes, 
amplitudes)\n\t\t\t\t\t\tyx_shift = yx_shift.unsqueeze(-1)\n\t\t\t\t\t\tself.dataset.update_amplitudes(amplitudes, rotation_idx)\n\t\t\t\tif self.transform_align and itr_idx >= self.ta_start_iteration:\n\t\t\t\t\tif abs(rotation_angle) - 0.0 > 1e-2:\n\t\t\t\t\t\tamplitudes, xy_transform = self.transform_obj.estimate(estimated_amplitudes, amplitudes)\t\t\t\t\t\t\n\t\t\t\t\t\txy_transform = xy_transform.unsqueeze(-1)\n\t\t\t\t\t\tself.dataset.update_amplitudes(amplitudes, rotation_idx)\n\t\t\t\tif not forward_only:\n\n\t\t \t\t#compute cost\n\t\t\t\t\tcost = self.cost_function(estimated_amplitudes, amplitudes.cuda())\n\t\t\t\t\trunning_cost += cost.item()\n\n\t\t\t\t\t#backpropagation\n\t\t\t\t\tcost.backward()\n\t\t\t\t\t#update object\n\t\t\t\t\t# if itr_idx >= self.dr_start_iteration:\n\t\t\t\t\t# \t# print(torch.norm(defocus_list.grad.data))\n\t\t\t\t\t# \tdefocus_list_grad[:,data_idx] = defocus_list.grad.data * self.dr_step_size\n\t\t\t\t\toptimizer.step()\n\t\t\t\t\toptimizer.zero_grad()\n\t\t\t\t\tdel cost\n\t\t\t\telse:\n\t\t\t\t\t#store measurement\n\t\t\t\t\tamplitude_list.append(estimated_amplitudes.cpu().detach())\n\t\t\t\tdel estimated_amplitudes\n\t\t\t\tself.obj.requires_grad = False\n\n\t\t\t\t#keep track of shift alignment for the tilt\n\t\t\t\tif self.shift_align and itr_idx >= self.sa_start_iteration:\n\t\t\t\t\tif yx_shift is not None:\n\t\t\t\t\t\tyx_shift.requires_grad = False \n\t\t\t\t\t\tif abs(rotation_angle) - 0.0 > 1e-2:\n\t\t\t\t\t\t\tself.yx_shifts[:,:,rotation_idx] = yx_shift[:].cpu()\n\t\t\t\t\t\t\trunning_sa_pixel_count += torch.sum(torch.abs(yx_shift.cpu().flatten()))\n\t\t\t\t\n\t\t\t\t#keep track of transform alignment for the tilt\n\t\t\t\tif self.transform_align and itr_idx >= self.ta_start_iteration:\n\t\t\t\t\tif abs(rotation_angle) - 0.0 > 1e-2:\t\n\t\t\t\t\t\tself.xy_transforms[...,rotation_idx] = xy_transform[:].cpu()\n\n\t\t\t\t#keep track of defocus alignment for the tilt\n\t\t\t\tif self.defocus_refine and itr_idx >= self.dr_start_iteration:\n\t\t\t\t\tdefocus_list.requires_grad = False\n\t\t\t\t\tself.dataset.update_defocus_list(defocus_list[:].cpu().detach(), rotation_idx)\n\n\t\t\t\tprevious_angle = rotation_angle\n\t\t\t\t\n\t\t\t\t#rotate object back\n\t\t\t\tif data_idx == (self.dataset.__len__() - 1):\n\t\t\t\t\tprevious_angle = 0.0\n\t\t\t\t\tself.obj = self.rotation_obj.forward(self.obj, -1.0*rotation_angle)\n\t\t\t\tprint(\"Rotation {:03d}/{:03d}.\".format(data_idx+1, self.dataset.__len__()), end=\"\\r\")\n\t\t\t\n\t\t\t#apply regularization\n\t\t\tamplitudes = None\n\t\t\ttorch.cuda.empty_cache()\n\t\t\tif itr_idx in self.obj_update_iterations:\n\t\t\t\tself.obj = self.regularizer_obj.apply(self.obj)\n\t\t\terror.append(running_cost)\n\n\t\t\t#keep track of shift alignment results\n\t\t\tif self.shift_align and itr_idx >= self.sa_start_iteration:\n\t\t\t\tself.sa_pixel_count.append(running_sa_pixel_count)\t\t\n\t\t\t\tself.yx_shift_all.append(np.array(self.yx_shifts).copy())\n\t\t\t\n\t\t\t#keep track of transform alignment results\n\t\t\tif self.transform_align and itr_idx >= self.ta_start_iteration:\n\t\t\t\tself.xy_transform_all.append(np.array(self.xy_transforms).copy())\n\n\t\t\tif callback is not None:\n\t\t\t\tcallback(self.obj.cpu().detach(), error)\n\t\t\t\t#TEMPPPPP\n\t\t\t\t# callback(defocus_list_grad, self.dataset.get_all_defocus_lists(), error)\n\t\t\tif forward_only and itr_idx == 0:\n\t\t\t\treturn torch.cat([torch.unsqueeze(amplitude_list[idx],-1) for idx in range(len(amplitude_list))], 
axis=-1)\n\t\t\tprint(\"Iteration {:03d}/{:03d}. Error: {:03f}\".format(itr_idx+1, self.optim_max_itr, np.log10(running_cost)))\n\n\t\tself.defocus_list = self.dataset.get_all_defocus_lists()\n\t\treturn self.obj.cpu().detach(), error\n\nclass AETDataset(Dataset):\n\tdef __init__(self, amplitude_measurements=None, tilt_angles=[0], defocus_list=None, **kwargs):\n\t\t\"\"\"\n\t\tArgs:\n\t\t transform (callable, optional): Optional transform to be applied\n\t\t on a sample.\n\t\t\"\"\"\n\t\tself.amplitude_measurements = amplitude_measurements\n\t\tif self.amplitude_measurements is not None:\n\t\t\tself.amplitude_measurements = amplitude_measurements.astype(\"float32\")\n\t\tif tilt_angles is not None:\n\t\t\tself.tilt_angles = tilt_angles * 1.0\n\t\tif defocus_list is not None:\n\t\t\tif not torch.is_tensor(defocus_list):\n\t\t\t\tdefocus_list = torch.tensor(defocus_list)\n\t\t\tif len(defocus_list.shape) == 1:\n\t\t\t\tself.defocus_list = defocus_list.unsqueeze(1).repeat(1, len(self.tilt_angles)) * 1.0\n\t\t\telif len(defocus_list.shape) == 2:\n\t\t\t\tassert defocus_list.shape[1] == len(tilt_angles)\n\t\t\t\tself.defocus_list = defocus_list * 1.0\n\t\t\telse:\n\t\t\t\traise ValueError('Invalid defocus_list shape.')\n\n\tdef __len__(self):\n\t\treturn self.tilt_angles.shape[0]\n\n\tdef __getitem__(self, idx):\n #X x Y x #defocus\n\t\tif self.amplitude_measurements is not None:\n\t\t\treturn self.amplitude_measurements[...,idx], self.tilt_angles[idx], self.defocus_list[:,idx], idx\n\t\telse:\n\t\t\treturn self.tilt_angles[idx], self.defocus_list[:,idx], idx\n\n\tdef update_defocus_list(self,defocus_list, idx):\n\t\tself.defocus_list[:,idx] = defocus_list.unsqueeze(-1)\n\t\treturn\n\n\tdef update_amplitudes(self, amplitudes, idx):\n\t\tself.amplitude_measurements[...,idx] = amplitudes\n\t\treturn\n\n\tdef get_all_defocus_lists(self):\n\t\treturn self.defocus_list \n\n\tdef get_all_measurements(self):\n\t\treturn self.amplitude_measurements\n\n\nclass PhaseContrastScattering(nn.Module):\n\n\tdef __init__(self, shape, voxel_size, wavelength, sigma=None, binning_factor=1, pad_size=[0,0], **kwargs):\n\t\t\"\"\"\n\t\tPhase contrast scattering model\n\t\tStarts from a plane wave, 3D object, and a list of defocus distance (in Angstrom).\n\t\tComputes intensity phase contrast image after electron scatters through the sample using multislice algorithm\n\t\tRequired Args:\n\t\t\tshape: shape of the object in [y, x, z]\n\t\t\tvoxel_size: size of voxel in [y, x, z]\n\t\t\twavelength: wavelength of probing wave, scalar\n\n\t\tOptional Args [default]:\n\t\t\tsigma: sigma used in calculating transmittance function (exp(1i * sigma * object)), scalar [None]\n\t\t\tbinning_factor: bins the number of slices together to save computation (loses accuracy), scalar [1]\n\t\t\tpad_size: padding reconstruction from measurements in [dy,dx], final size will be measurement.shape + 2*[dy, dx], [0, 0]\n\t\t\"\"\"\n\t\tsuper(PhaseContrastScattering, self).__init__()\n\t\tself.binning_factor = binning_factor\n\t\tself.shape = shape\n\t\tself.pad_size = pad_size\n\t\tself.voxel_size = voxel_size\n\t\tself.wavelength = wavelength\n\t\t\n\t\t#forward propagation\n\t\tself.shape_prop = self.shape.copy()\n\t\tself.shape_prop[2] //= self.binning_factor\n\t\tself.voxel_size_prop = self.voxel_size.copy()\n\t\tself.voxel_size_prop[2] *= self.binning_factor\n\t\tself._propagation = MultislicePropagation(self.shape_prop, self.voxel_size_prop, self.wavelength, **kwargs)\n\t\t\n\t\tself.sigma = sigma\n\t\tif self.sigma is 
None:\n\t\t\tself.sigma = (2 * np.pi / self.wavelength) * self.voxel_size_prop[2]\n\n\t\t#filter with aperture\n\t\tself._pupil = Pupil(self.shape[0:2], self.voxel_size[0], self.wavelength, **kwargs)\n\n\t\t#defocus operator\n\t\t# self._defocus = Defocus()\n\n\t\t#shift correction\n\t\tself._shift = shift.ImageShiftGradientBased(self.shape[0:2], **kwargs)\n\n\tdef forward(self, obj, defocus_list, yx_shift=None):\n\t\t#bin object\n\t\tobj = bin_obj(obj, self.binning_factor)\n\t\t#raise to transmittance\n\t\tobj = complex_exp(complex_mul(op._j, self.sigma * obj))\n\t\t#forward propagation & defocus\n\t\tfield = self._propagation(obj)\n\t\t#pupil\n\t\tfield = self._pupil(field)\n\t\t#defocus\t\t\n\t\tfield = field_defocus(field, self._propagation.propagate.kernel_phase, defocus_list)\n\t\t# field = self._defocus(field, self._propagation.propagate.kernel_phase, defocus_list)\n\t\t#shift\n\t\tfield = self._shift(field, yx_shift)\n\t\t#crop\n\t\tfield = F.pad(field, (0,0,0,0, \\\n\t\t\t\t\t\t\t -1 * self.pad_size[1], -1 * self.pad_size[1], \\\n\t\t\t\t\t\t\t -1 * self.pad_size[0], -1 * self.pad_size[0]))\n\t\t#compute amplitude\n\t\tamplitudes = complex_abs(field)\n\n\t\treturn amplitudes\n\n\n\n\n\n\n\n"
] | [
[
"numpy.arange",
"numpy.cos",
"torch.tensor",
"numpy.sin",
"numpy.concatenate",
"numpy.zeros_like",
"numpy.array",
"numpy.zeros"
],
[
"torch.zeros",
"torch.squeeze",
"torch.set_printoptions",
"numpy.arange",
"torch.utils.data.DataLoader",
"torch.cuda.empty_cache",
"torch.is_tensor",
"torch.tensor",
"torch.flatten",
"torch.unsqueeze",
"numpy.log10",
"torch.optim.SGD",
"torch.device",
"numpy.array",
"torch.nn.MSELoss",
"torch.nn.functional.pad"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
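The `transform.py` file in the record above estimates a rigid-body correction `[theta, dx, dy]` from dense optical flow and then minimizes the coordinate residual with L-BFGS-B. The sketch below (plain NumPy; the helper name `warp_coords` is hypothetical and not part of the repo) only shows the coordinate model that `_coordinate_warp` fits, i.e. rotation by `theta` plus translation by `(dx, dy)`.

```python
# Hypothetical illustration of the rigid-body coordinate model fitted in
# _coordinate_warp: xy_predict = R(theta) @ xy + [dx, dy]; the solver minimizes
# 0.5 * ||xy_predict - xy_flow||^2 over (theta, dx, dy).
import numpy as np

def warp_coords(transform_vec, xy):
    theta, dx, dy = transform_vec
    rot = np.array([[np.cos(theta), -np.sin(theta)],
                    [np.sin(theta),  np.cos(theta)]])
    return rot @ xy + np.array([[dx], [dy]])

# 2 x N grid of pixel coordinates, analogous to self.xy_lin in the class above
xy = np.stack(np.meshgrid(np.arange(4), np.arange(4))).reshape(2, -1).astype("float32")
xy_flow = warp_coords([0.05, 1.5, -0.5], xy)        # pretend the optical flow produced this target
resid = warp_coords([0.0, 0.0, 0.0], xy) - xy_flow  # residual at the zero initialization
print(0.5 * np.sum(resid ** 2))                     # objective value that L-BFGS-B would reduce
```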
mnassar/deep-learning | [
"b69617993b2e67cfd5635460d1a295e91b6c66d6"
] | [
"tv-script-generation/problem_unittests.py"
] | [
"import numpy as np\nimport tensorflow as tf\nfrom tensorflow.contrib import rnn\n\n\ndef _print_success_message():\n print('Tests Passed')\n\n\ndef test_create_lookup_tables(create_lookup_tables):\n with tf.Graph().as_default():\n test_text = '''\n Moe_Szyslak Moe's Tavern Where the elite meet to drink\n Bart_Simpson Eh yeah hello is Mike there Last name Rotch\n Moe_Szyslak Hold on I'll check Mike Rotch Mike Rotch Hey has anybody seen Mike Rotch lately\n Moe_Szyslak Listen you little puke One of these days I'm gonna catch you and I'm gonna carve my name on your back with an ice pick\n Moe_Szyslak Whats the matter Homer You're not your normal effervescent self\n Homer_Simpson I got my problems Moe Give me another one\n Moe_Szyslak Homer hey you should not drink to forget your problems\n Barney_Gumble Yeah you should only drink to enhance your social skills'''\n\n test_text = test_text.lower()\n test_text = test_text.split()\n\n vocab_to_int, int_to_vocab = create_lookup_tables(test_text)\n\n # Check types\n assert isinstance(vocab_to_int, dict),\\\n 'vocab_to_int is not a dictionary.'\n assert isinstance(int_to_vocab, dict),\\\n 'int_to_vocab is not a dictionary.'\n\n # Compare lengths of dicts\n assert len(vocab_to_int) == len(int_to_vocab),\\\n 'Length of vocab_to_int and int_to_vocab don\\'t match. ' \\\n 'vocab_to_int is length {}. int_to_vocab is length {}'.format(len(vocab_to_int), len(int_to_vocab))\n\n # Make sure the dicts have the same words\n vocab_to_int_word_set = set(vocab_to_int.keys())\n int_to_vocab_word_set = set(int_to_vocab.values())\n\n assert not (vocab_to_int_word_set - int_to_vocab_word_set),\\\n 'vocab_to_int and int_to_vocab don\\'t have the same words.' \\\n '{} found in vocab_to_int, but not in int_to_vocab'.format(vocab_to_int_word_set - int_to_vocab_word_set)\n assert not (int_to_vocab_word_set - vocab_to_int_word_set),\\\n 'vocab_to_int and int_to_vocab don\\'t have the same words.' \\\n '{} found in int_to_vocab, but not in vocab_to_int'.format(int_to_vocab_word_set - vocab_to_int_word_set)\n\n # Make sure the dicts have the same word ids\n vocab_to_int_word_id_set = set(vocab_to_int.values())\n int_to_vocab_word_id_set = set(int_to_vocab.keys())\n\n assert not (vocab_to_int_word_id_set - int_to_vocab_word_id_set),\\\n 'vocab_to_int and int_to_vocab don\\'t contain the same word ids.' \\\n '{} found in vocab_to_int, but not in int_to_vocab'.format(vocab_to_int_word_id_set - int_to_vocab_word_id_set)\n assert not (int_to_vocab_word_id_set - vocab_to_int_word_id_set),\\\n 'vocab_to_int and int_to_vocab don\\'t contain the same word ids.' \\\n '{} found in int_to_vocab, but not in vocab_to_int'.format(int_to_vocab_word_id_set - vocab_to_int_word_id_set)\n\n # Make sure the dicts make the same lookup\n missmatches = [(word, id, id, int_to_vocab[id]) for word, id in vocab_to_int.items() if int_to_vocab[id] != word]\n\n assert not missmatches,\\\n 'Found {} missmatche(s). First missmatch: vocab_to_int[{}] = {} and int_to_vocab[{}] = {}'.format(\n len(missmatches),\n *missmatches[0])\n\n assert len(vocab_to_int) > len(set(test_text))/2,\\\n 'The length of vocab seems too small. 
Found a length of {}'.format(len(vocab_to_int))\n\n _print_success_message()\n\n\ndef test_get_batches(get_batches):\n with tf.Graph().as_default():\n test_batch_size = 128\n test_seq_length = 5\n test_int_text = list(range(1000*test_seq_length))\n batches = get_batches(test_int_text, test_batch_size, test_seq_length)\n\n # Check type\n assert isinstance(batches, np.ndarray),\\\n 'Batches is not a Numpy array'\n\n # Check shape\n assert batches.shape == (7, 2, 128, 5),\\\n 'Batches returned wrong shape. Found {}'.format(batches.shape)\n\n for x in range(batches.shape[2]):\n assert np.array_equal(batches[0,0,x], np.array(range(x * 35, x * 35 + batches.shape[3]))),\\\n 'Batches returned wrong contents. For example, input sequence {} in the first batch was {}'.format(x, batches[0,0,x])\n assert np.array_equal(batches[0,1,x], np.array(range(x * 35 + 1, x * 35 + 1 + batches.shape[3]))),\\\n 'Batches returned wrong contents. For example, target sequence {} in the first batch was {}'.format(x, batches[0,1,x])\n\n\n last_seq_target = (test_batch_size-1) * 35 + 31\n last_seq = np.array(range(last_seq_target, last_seq_target+ batches.shape[3]))\n last_seq[-1] = batches[0,0,0,0]\n\n assert np.array_equal(batches[-1,1,-1], last_seq),\\\n 'The last target of the last batch should be the first input of the first batch. Found {} but expected {}'.format(batches[-1,1,-1], last_seq)\n\n _print_success_message()\n\n\ndef test_tokenize(token_lookup):\n with tf.Graph().as_default():\n symbols = set(['.', ',', '\"', ';', '!', '?', '(', ')', '--', '\\n'])\n token_dict = token_lookup()\n\n # Check type\n assert isinstance(token_dict, dict), \\\n 'Returned type is {}.'.format(type(token_dict))\n\n # Check symbols\n missing_symbols = symbols - set(token_dict.keys())\n unknown_symbols = set(token_dict.keys()) - symbols\n\n assert not missing_symbols, \\\n 'Missing symbols: {}'.format(missing_symbols)\n assert not unknown_symbols, \\\n 'Unknown symbols: {}'.format(unknown_symbols)\n\n # Check values type\n bad_value_type = [type(val) for val in token_dict.values() if not isinstance(val, str)]\n\n assert not bad_value_type,\\\n 'Found token as {} type.'.format(bad_value_type[0])\n\n # Check for spaces\n key_has_spaces = [k for k in token_dict.keys() if ' ' in k]\n val_has_spaces = [val for val in token_dict.values() if ' ' in val]\n\n assert not key_has_spaces,\\\n 'The key \"{}\" includes spaces. Remove spaces from keys and values'.format(key_has_spaces[0])\n assert not val_has_spaces,\\\n 'The value \"{}\" includes spaces. Remove spaces from keys and values'.format(val_has_spaces[0])\n\n # Check for symbols in values\n symbol_val = ()\n for symbol in symbols:\n for val in token_dict.values():\n if symbol in val:\n symbol_val = (symbol, val)\n\n assert not symbol_val,\\\n 'Don\\'t use a symbol that will be replaced in your tokens. Found the symbol {} in value {}'.format(*symbol_val)\n\n _print_success_message()\n\n\ndef test_get_inputs(get_inputs):\n with tf.Graph().as_default():\n input_data, targets, lr = get_inputs()\n\n # Check type\n assert input_data.op.type == 'Placeholder',\\\n 'Input not a Placeholder.'\n assert targets.op.type == 'Placeholder',\\\n 'Targets not a Placeholder.'\n assert lr.op.type == 'Placeholder',\\\n 'Learning Rate not a Placeholder.'\n\n # Check name\n assert input_data.name == 'input:0',\\\n 'Input has bad name. 
Found name {}'.format(input_data.name)\n\n # Check rank\n input_rank = 0 if input_data.get_shape() == None else len(input_data.get_shape())\n targets_rank = 0 if targets.get_shape() == None else len(targets.get_shape())\n lr_rank = 0 if lr.get_shape() == None else len(lr.get_shape())\n\n assert input_rank == 2,\\\n 'Input has wrong rank. Rank {} found.'.format(input_rank)\n assert targets_rank == 2,\\\n 'Targets has wrong rank. Rank {} found.'.format(targets_rank)\n assert lr_rank == 0,\\\n 'Learning Rate has wrong rank. Rank {} found'.format(lr_rank)\n\n _print_success_message()\n\n\ndef test_get_init_cell(get_init_cell):\n with tf.Graph().as_default():\n test_batch_size_ph = tf.placeholder(tf.int32, [])\n test_rnn_size = 256\n\n cell, init_state = get_init_cell(test_batch_size_ph, test_rnn_size)\n\n # Check type\n assert isinstance(cell, tf.contrib.rnn.MultiRNNCell),\\\n 'Cell is wrong type. Found {} type'.format(type(cell))\n\n # Check for name attribute\n assert hasattr(init_state, 'name'),\\\n 'Initial state doesn\\'t have the \"name\" attribute. Try using `tf.identity` to set the name.'\n\n # Check name\n assert init_state.name == 'initial_state:0',\\\n 'Initial state doesn\\'t have the correct name. Found the name {}'.format(init_state.name)\n\n _print_success_message()\n\n\ndef test_get_embed(get_embed):\n with tf.Graph().as_default():\n embed_shape = [50, 5, 256]\n test_input_data = tf.placeholder(tf.int32, embed_shape[:2])\n test_vocab_size = 27\n test_embed_dim = embed_shape[2]\n\n embed = get_embed(test_input_data, test_vocab_size, test_embed_dim)\n\n # Check shape\n assert embed.shape == embed_shape,\\\n 'Wrong shape. Found shape {}'.format(embed.shape)\n\n _print_success_message()\n\n\ndef test_build_rnn(build_rnn):\n with tf.Graph().as_default():\n test_rnn_size = 256\n test_rnn_layer_size = 2\n test_cell = rnn.MultiRNNCell([rnn.BasicLSTMCell(test_rnn_size) for _ in range(test_rnn_layer_size)])\n\n test_inputs = tf.placeholder(tf.float32, [None, None, test_rnn_size])\n outputs, final_state = build_rnn(test_cell, test_inputs)\n\n # Check name\n assert hasattr(final_state, 'name'),\\\n 'Final state doesn\\'t have the \"name\" attribute. Try using `tf.identity` to set the name.'\n assert final_state.name == 'final_state:0',\\\n 'Final state doesn\\'t have the correct name. Found the name {}'.format(final_state.name)\n\n # Check shape\n assert outputs.get_shape().as_list() == [None, None, test_rnn_size],\\\n 'Outputs has wrong shape. Found shape {}'.format(outputs.get_shape())\n assert final_state.get_shape().as_list() == [test_rnn_layer_size, 2, None, test_rnn_size],\\\n 'Final state wrong shape. Found shape {}'.format(final_state.get_shape())\n\n _print_success_message()\n\n\ndef test_build_nn(build_nn):\n with tf.Graph().as_default():\n test_input_data_shape = [None, 5]\n test_input_data = tf.placeholder(tf.int32, test_input_data_shape)\n test_rnn_size = 256\n test_embed_dim = 300\n test_rnn_layer_size = 2\n test_vocab_size = 27\n test_cell = rnn.MultiRNNCell([rnn.BasicLSTMCell(test_rnn_size) for _ in range(test_rnn_layer_size)])\n\n logits, final_state = build_nn(test_cell, test_rnn_size, test_input_data, test_vocab_size, test_embed_dim)\n\n # Check name\n assert hasattr(final_state, 'name'), \\\n 'Final state doesn\\'t have the \"name\" attribute. Are you using build_rnn?'\n assert final_state.name == 'final_state:0', \\\n 'Final state doesn\\'t have the correct name. Found the name {}. 
Are you using build_rnn?'.format(final_state.name)\n\n # Check Shape\n assert logits.get_shape().as_list() == test_input_data_shape + [test_vocab_size], \\\n 'Outputs has wrong shape. Found shape {}'.format(logits.get_shape())\n assert final_state.get_shape().as_list() == [test_rnn_layer_size, 2, None, test_rnn_size], \\\n 'Final state wrong shape. Found shape {}'.format(final_state.get_shape())\n\n _print_success_message()\n\n\ndef test_get_tensors(get_tensors):\n test_graph = tf.Graph()\n with test_graph.as_default():\n test_input = tf.placeholder(tf.int32, name='input')\n test_initial_state = tf.placeholder(tf.int32, name='initial_state')\n test_final_state = tf.placeholder(tf.int32, name='final_state')\n test_probs = tf.placeholder(tf.float32, name='probs')\n\n input_text, initial_state, final_state, probs = get_tensors(test_graph)\n\n # Check correct tensor\n assert input_text == test_input,\\\n 'Test input is wrong tensor'\n assert initial_state == test_initial_state, \\\n 'Initial state is wrong tensor'\n assert final_state == test_final_state, \\\n 'Final state is wrong tensor'\n assert probs == test_probs, \\\n 'Probabilities is wrong tensor'\n\n _print_success_message()\n\n\ndef test_pick_word(pick_word):\n with tf.Graph().as_default():\n test_probabilities = np.array([0.1, 0.8, 0.05, 0.05])\n test_int_to_vocab = {word_i: word for word_i, word in enumerate(['this', 'is', 'a', 'test'])}\n\n pred_word = pick_word(test_probabilities, test_int_to_vocab)\n\n # Check type\n assert isinstance(pred_word, str),\\\n 'Predicted word is wrong type. Found {} type.'.format(type(pred_word))\n\n # Check word is from vocab\n assert pred_word in test_int_to_vocab.values(),\\\n 'Predicted word not found in int_to_vocab.'\n\n\n _print_success_message()\n\n"
] | [
[
"tensorflow.Graph",
"numpy.array_equal",
"tensorflow.contrib.rnn.BasicLSTMCell",
"tensorflow.placeholder",
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"1.12",
"1.4",
"1.13",
"1.5",
"1.7",
"1.0",
"1.2"
]
}
] |
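The `test_get_batches` check in the record above pins down the expected batch layout exactly: 5000 token ids with `batch_size=128` and `seq_length=5` must yield shape `(7, 2, 128, 5)` (7 = 5000 // (128 * 5)), input sequences striding by 35 = 7 * 5, targets shifted by one, and the final target wrapping around to the first input. The following is one possible `get_batches` that satisfies those assertions; it is a hypothetical reference implementation, not the original project's solution.

```python
# One get_batches sketch consistent with the assertions in test_get_batches above.
import numpy as np

def get_batches(int_text, batch_size, seq_length):
    n_batches = len(int_text) // (batch_size * seq_length)
    xdata = np.array(int_text[: n_batches * batch_size * seq_length])
    ydata = np.roll(xdata, -1)                            # targets = inputs shifted by 1, wrapping
    x = xdata.reshape(batch_size, n_batches, seq_length)  # row r strides by n_batches * seq_length
    y = ydata.reshape(batch_size, n_batches, seq_length)
    return np.stack([x, y], axis=1).transpose(2, 1, 0, 3) # (n_batches, 2, batch_size, seq_length)

batches = get_batches(list(range(5000)), 128, 5)
print(batches.shape)            # (7, 2, 128, 5)
print(batches[0, 0, 1])         # [35 36 37 38 39]
print(batches[-1, 1, -1])       # [4476 4477 4478 4479    0]
```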
otosense/oplot | [
"5b4b4b96ebfa5486501c02e7051d1c11b1c3b86c"
] | [
"oplot/multiplots.py"
] | [
"\"\"\"Drawing multiple plots in a single figure\"\"\"\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom matplotlib import gridspec\n\n\ndef make_space_above(axes, topmargin=1):\n \"\"\" increase figure size to make topmargin (in inches) space for\n titles, without changing the axes sizes\"\"\"\n\n fig = axes.flatten()[0].figure\n s = fig.subplotpars\n w, h = fig.get_size_inches()\n\n figh = h - (1 - s.top) * h + topmargin\n fig.subplots_adjust(bottom=s.bottom * h / figh, top=1 - topmargin / figh)\n fig.set_figheight(figh)\n\n\ndef ax_func_to_plot(\n list_func_per_ax,\n n_per_row=3,\n title=None,\n title_font_size=10,\n width=15,\n height_row=10,\n saving_path=None,\n x_labels=None,\n y_labels=None,\n outer_axis_labels_only=False,\n dpi=200,\n plot=True,\n h_pad=0,\n w_pad=0,\n title_offset=0,\n):\n \"\"\"\n Draw one grid of plots from the individual plots\n\n :param list_func_per_ax: a list/generator of functions, each taking an ax object as an input and plotting something on it\n :param n_per_row: number of plots per row\n :param title: global title of the plot\n :param title_font_size: font size of the global title\n :param width: width of the global plot\n :param height_row: height of each row\n :param saving_path: path where to save the plot, can be left to none in which case the plot is not saved\n :param x_labels: label of the x axis\n :param y_labels: label of the y axis\n :param outer_axis_labels_only: if set to true, only the axis labels on the left column and bottom row will show\n :return:\n \"\"\"\n\n n_rows = int(np.ceil(len(list_func_per_ax) / n_per_row))\n fig, axes = plt.subplots(\n nrows=n_rows,\n ncols=n_per_row,\n figsize=(width, height_row * n_rows),\n squeeze=False,\n )\n\n # fig.legend(fancybox=True, framealpha=1, shadow=True, borderpad=1)\n fig.suptitle(title, fontsize=title_font_size)\n\n for idx, ax in enumerate(axes.flat):\n if idx < len(list_func_per_ax):\n ax.set(xlabel=x_labels, ylabel=y_labels)\n\n if outer_axis_labels_only:\n for idx, ax in enumerate(axes.flat):\n if idx < len(list_func_per_ax):\n ax.label_outer()\n\n for idx, (ax, func) in enumerate(zip(axes.flatten(), list_func_per_ax)):\n if idx < len(list_func_per_ax):\n func(ax=ax)\n\n # Delete the remaining empty plots if any\n for i in range(len(list_func_per_ax), n_rows * n_per_row):\n fig.delaxes(axes.flatten()[i])\n\n handles, labels = ax.get_legend_handles_labels()\n fig.legend(handles, labels, loc=1)\n plt.tight_layout(h_pad=h_pad, w_pad=w_pad)\n\n make_space_above(axes, topmargin=title_offset)\n\n if saving_path:\n fig.savefig(saving_path, dpi=dpi)\n if plot:\n plt.show()\n\n\ndef multiplot_with_max_size(\n list_func_per_ax,\n max_plot_per_file=60,\n n_per_row=3,\n title=None,\n title_font_size=10,\n width=15,\n height_row=10,\n saving_path_format=None,\n x_labels=None,\n y_labels=None,\n outer_axis_labels_only=False,\n dpi=300,\n plot=True,\n):\n \"\"\"\n Same as ax_func_to_plot but saves on several files\n :param max_plot_per_file: the maximum number of plots per file\n \"\"\"\n\n n_files, n_remainder_rows = divmod(len(list_func_per_ax), max_plot_per_file)\n file_idx = 0\n for file_idx in range(n_files):\n funcs = list_func_per_ax[\n file_idx * max_plot_per_file : (file_idx + 1) * max_plot_per_file\n ]\n if saving_path_format:\n saving_path = saving_path_format.format(file_idx)\n else:\n saving_path = None\n ax_func_to_plot(\n funcs,\n n_per_row=n_per_row,\n title=title,\n title_font_size=title_font_size,\n width=width,\n height_row=height_row,\n saving_path=saving_path,\n 
x_labels=x_labels,\n y_labels=y_labels,\n outer_axis_labels_only=outer_axis_labels_only,\n )\n file_idx += 1\n if saving_path_format:\n saving_path = saving_path_format.format(file_idx)\n else:\n saving_path = None\n funcs = list_func_per_ax[-n_remainder_rows:]\n ax_func_to_plot(\n funcs,\n n_per_row=n_per_row,\n title=title,\n title_font_size=title_font_size,\n width=width,\n height_row=height_row,\n saving_path=saving_path,\n x_labels=x_labels,\n y_labels=y_labels,\n outer_axis_labels_only=outer_axis_labels_only,\n dpi=dpi,\n plot=plot,\n )\n\n\n# # Example of usage\n# if __name__ == '__main__':\n# def ax_func(ax):\n# ax.plot([1, 5, 3])\n# ax.set_title('test_test')\n#\n#\n# ax_func_to_plot([ax_func] * 6, title='Test', x_labels='x_name_here', y_labels='something',\n# outer_axis_labels_only=True)\n"
] | [
[
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.show"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
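A minimal usage sketch for the ax_func_to_plot helper defined in the oplot/multiplots.py entry above, assuming that module is importable as oplot.multiplots; the sine-wave plotting functions and their names are illustrative only, not part of the library.

import numpy as np
from oplot.multiplots import ax_func_to_plot  # assumes the module above is on the path


def make_sine_plotter(freq):
    # Each item passed to ax_func_to_plot must be a function taking an ax and drawing on it.
    def plot_sine(ax):
        x = np.linspace(0, 2 * np.pi, 200)
        ax.plot(x, np.sin(freq * x), label=f"freq={freq}")
        ax.set_title(f"freq={freq}")
    return plot_sine


# Six plots on a 3-per-row grid, with axis labels only on the outer row/column.
ax_func_to_plot(
    [make_sine_plotter(f) for f in range(1, 7)],
    n_per_row=3,
    title="Sine waves",
    x_labels="x",
    y_labels="sin(freq * x)",
    outer_axis_labels_only=True,
)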
ICASSP-2020-Robustness-Tutorial/Robust-Signal-Processing-Toolbox-Python | [
"293f4281fdbd475549aa42eae9fe615976af27a0",
"293f4281fdbd475549aa42eae9fe615976af27a0",
"293f4281fdbd475549aa42eae9fe615976af27a0"
] | [
"robustsp/Regression/enetpath.py",
"robustsp/Covariance/.ipynb_checkpoints/spatmed-checkpoint.py",
"robustsp/AuxiliaryFunctions/wtuk.py"
] | [
"'''\n enethpath computes the elastic net (EN) regularization path (over grid \n of penalty parameter values). Uses pathwise CCD algorithm. \n INPUT: \n y : Numeric 1darray of size N (output, respones)\n X : Nnumeric 2darray of size N x p. Each row represents one \n observation, and each column represents one predictor (feature). \n intcpt: Logical flag to indicate if intercept is in the model\n alpha : Numeric scalar, elastic net tuning parameter in the range [0,1].\n If not given then use alpha = 1 (Lasso)\n eps: Positive scalar, the ratio of the smallest to the \n largest Lambda value in the grid. Default is eps = 10^-4. \n L : Positive integer, the number of lambda values EN/Lasso uses. \n Default is L=100. \n printitn: print iteration number (default = 0, no printing)\n OUTPUT:\n B : Fitted EN/Lasso regression coefficients, a p-by-(L+1) matrix, \n where p is the number of predictors (columns) in X, and L is \n the number of Lambda values. If intercept is in the model, then\n B is (p+1)-by-(L+1) matrix, with first element the intercept.\n stats : Dictionary with following fields: \n Lambda = lambda parameters in ascending order\n MSE = Mean squared error (MSE)\n BIC = Bayesian information criterion values \n'''\nimport numpy as np\nfrom robustsp.Regression.enet import enet\n\ndef enetpath(yx,Xx,alpha=1,L=120,eps=10**-3,intcpt=True,printitn=0):\n\n # ensure inputs are ndarrays\n Xc = np.copy(np.asarray(Xx))\n y = np.copy(np.asarray(yx))\n if len(y.shape) == 2: y = y.flatten()\n n,p = Xc.shape\n\n # if intercept is in the model, center the data\n if intcpt:\n meanX = np.mean(Xc,axis=0)\n meany = np.mean(y)\n Xc -= meanX\n y -= meany\n \n \n if printitn > 0:\n print('enetpath: using alpha = %.1f \\n' % alpha)\n\n sdX = np.sqrt(np.sum(Xc*np.conj(Xc),axis=0)) \n Xc /= sdX\n \n lam0 = np.linalg.norm(Xc.T @ y,np.inf)/alpha # smallest penalty value giving zero solution\n \n lamgrid = eps**(np.arange(0,L+1,1)/L) * lam0 # grid of penalty values\n\n B = np.zeros([p,L+1])\n\n for jj in range(L):\n B[:,jj+1], b = enet(y,Xc,B[:,jj], lamgrid[jj+1], alpha, printitn)\n\n B[np.abs(B) < 5e-8] = 0\n\n DF = np.sum([np.abs(B)!=0],axis=1) # non-zero values in each column\n\n if n > p:\n MSE = np.sum(np.abs(np.repeat(y[:,np.newaxis],L+1,axis=1)\n -Xc@B)**2,axis=0) *(1/(n-DF-1))\n BIC = n * np.log(MSE) + DF * np.log(n)\n else:\n MSE = []\n BIC = []\n\n B = B / sdX[:,None]\n if intcpt:\n B = np.vstack([meany - meanX @ B, B])\n\n stats = {'MSE':MSE,'BIC':BIC,'Lambda':lamgrid} \n\n\n return B, stats",
"'''\n Computes the spatial median based on (real or complex) data matrix X.\n INPUT:\n X: Numeric data matrix of size N x p. Each row represents one \n observation, and each column represents one variable \n printitn : print iteration number (default = 0, no printing)\n\n OUTPUT\n smed: Spatial median estimate\n'''\n\nimport numpy as np\n\ndef spatmed(X,printitn=0,iterMAX = 500,EPS=1e-6,TOL=1e-5):\n l = np.sum(X*np.conj(X),axis=1)\n X = X[l!=0,:]\n n = len(X)\n \n smed0 = np.median(X) if np.isrealobj(X) else np.mean(X)\n norm0 = np.linalg.norm(smed0)\n \n for it in range(iterMAX):\n Xc = X - smed0\n l = np.sqrt(np.sum(Xc*np.conj(Xc),axis=1))\n l[l<EPS] = EPS\n Xpsi = Xc / l\n update = np.sum(Xpsi,axis=0)/sum(1/l)\n smed = smed0 + update\n \n dis = np.linalg.norm(update,ord=2)/norm0\n \n if printitn>0 and (i+1) % printitn == 0: print('At iter = %.3d, dis =%.7f \\n' % (i,dis))\n \n if dis <= TOL: break\n smed0 = smed\n norm0 = np.linalg.norm(smed,ord=2)\n return smed",
"'''\nTukeys's weight function w: \nabsx is N x 1 data vector which can be complex\nor real and threshold contant cl\n'''\nimport numpy as np\ndef wtuk(absx,cl):\n return np.square(1-np.square(absx/cl)) * (absx<=cl)"
] | [
[
"numpy.log",
"numpy.conj",
"numpy.abs",
"numpy.asarray",
"numpy.arange",
"numpy.linalg.norm",
"numpy.mean",
"numpy.repeat",
"numpy.zeros",
"numpy.vstack"
],
[
"numpy.conj",
"numpy.median",
"numpy.linalg.norm",
"numpy.mean",
"numpy.isrealobj",
"numpy.sum"
],
[
"numpy.square"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
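A self-contained numpy sketch of the Weiszfeld-type fixed-point iteration that spatmed implements in the robustsp entry above, run on a small random real-valued matrix; the column-wise median starting point and the tolerance values are illustrative choices rather than the toolbox defaults.

import numpy as np

rng = np.random.default_rng(0)
X = rng.normal(size=(100, 3))        # N observations, p variables

smed = np.median(X, axis=0)          # starting point: column-wise median
for _ in range(500):
    diff = X - smed
    dist = np.sqrt(np.sum(diff * diff, axis=1))
    dist = np.maximum(dist, 1e-6)    # guard against zero distances
    update = np.sum(diff / dist[:, None], axis=0) / np.sum(1.0 / dist)
    smed = smed + update
    if np.linalg.norm(update) / np.linalg.norm(smed) <= 1e-5:
        break
print(smed)                          # close to the origin for this centered data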
neuropoly/ivadomed | [
"e5f14c02a6c73d9360eee130ff39f0a364e0a697",
"e5f14c02a6c73d9360eee130ff39f0a364e0a697"
] | [
"ivadomed/uncertainty.py",
"testing/unit_tests/test_bounding_box.py"
] | [
"import nibabel as nib\nfrom tqdm import tqdm\nfrom scipy.ndimage import label, generate_binary_structure\nfrom pathlib import Path\nimport json\nimport numpy as np\nfrom ivadomed import postprocessing as imed_postpro\nfrom typing import List\n\n\ndef run_uncertainty(image_folder):\n \"\"\"Compute uncertainty from model prediction.\n\n This function loops across the model predictions (nifti masks) and estimates the uncertainty from the Monte Carlo\n samples. Both voxel-wise and structure-wise uncertainty are estimates.\n\n Args:\n image_folder (str): Folder containing the Monte Carlo samples.\n \"\"\"\n # list subj_acq prefixes\n subj_acq_lst = [file.name.split('_pred')[0] for file in Path(image_folder).iterdir()\n if file.name.endswith('.nii.gz') and '_pred' in file.name]\n # remove duplicates\n subj_acq_lst = list(set(subj_acq_lst))\n # keep only the images where unc has not been computed yet\n subj_acq_lst = [file for file in subj_acq_lst if not Path(image_folder, file + '_unc-cv.nii.gz').is_file()]\n\n # loop across subj_acq\n for subj_acq in tqdm(subj_acq_lst, desc=\"Uncertainty Computation\"):\n # hard segmentation from MC samples\n fname_pred: Path = Path(image_folder, subj_acq + '_pred.nii.gz')\n # fname for soft segmentation from MC simulations\n fname_soft: Path = Path(image_folder, subj_acq + '_soft.nii.gz')\n # find Monte Carlo simulations\n fname_pred_lst: List[str] = []\n for file in Path(image_folder).iterdir():\n if subj_acq + '_pred_' in file.name and ('_painted' not in file.name) and ('_color' not in file.name):\n fname_pred_lst.append(str(file))\n\n # if final segmentation from Monte Carlo simulations has not been generated yet\n if not fname_pred.is_file() or not fname_soft.is_file():\n # threshold used for the hard segmentation\n thr = 1. / len(fname_pred_lst) # 1 for all voxels where at least on MC sample predicted 1\n # average then argmax\n combine_predictions(fname_pred_lst, str(fname_pred), str(fname_soft), thr=thr)\n\n fname_unc_vox = Path(image_folder, subj_acq + '_unc-vox.nii.gz')\n if not fname_unc_vox.is_file():\n # compute voxel-wise uncertainty map\n voxelwise_uncertainty(fname_pred_lst, str(fname_unc_vox))\n\n fname_unc_struct = Path(image_folder, subj_acq + '_unc.nii.gz')\n if not Path(image_folder, subj_acq + '_unc-cv.nii.gz').is_file():\n # compute structure-wise uncertainty\n structurewise_uncertainty(fname_pred_lst, str(fname_pred), str(fname_unc_vox), str(fname_unc_struct))\n\n\ndef combine_predictions(fname_lst, fname_hard, fname_prob, thr=0.5):\n \"\"\"Combine predictions from Monte Carlo simulations.\n\n Combine predictions from Monte Carlo simulations and save the resulting as:\n (1) `fname_prob`, a soft segmentation obtained by averaging the Monte Carlo samples.\n (2) `fname_hard`, a hard segmentation obtained thresholding with `thr`.\n\n Args:\n fname_lst (list of str): List of the Monte Carlo samples.\n fname_hard (str): Filename for the output hard segmentation.\n fname_prob (str): Filename for the output soft segmentation.\n thr (float): Between 0 and 1. 
Used to threshold the soft segmentation and generate the hard segmentation.\n \"\"\"\n # collect all MC simulations\n mc_data = np.array([nib.load(fname).get_fdata() for fname in fname_lst])\n first_file_header = nib.load(fname_lst[0]).header\n\n # average over all the MC simulations\n data_prob = np.mean(mc_data, axis=0)\n # save prob segmentation\n nib_prob = nib.Nifti1Image(\n dataobj=data_prob,\n affine=first_file_header.get_best_affine(),\n header=first_file_header.copy()\n )\n nib.save(nib_prob, fname_prob)\n\n # argmax operator\n data_hard = imed_postpro.threshold_predictions(data_prob, thr=thr).astype(np.uint8)\n # save hard segmentation\n nib_hard = nib.Nifti1Image(\n dataobj=data_hard,\n affine=first_file_header.get_best_affine(),\n header=first_file_header.copy()\n )\n nib.save(nib_hard, fname_hard)\n\n\ndef voxelwise_uncertainty(fname_lst, fname_out, eps=1e-5):\n \"\"\"Estimate voxel wise uncertainty.\n\n Voxel-wise uncertainty is estimated as entropy over all N MC probability maps, and saved in `fname_out`.\n\n Args:\n fname_lst (list of str): List of the Monte Carlo samples.\n fname_out (str): Output filename.\n eps (float): Epsilon value to deal with np.log(0).\n \"\"\"\n # collect all MC simulations\n mc_data = np.array([nib.load(fname).get_fdata() for fname in fname_lst])\n affine = nib.load(fname_lst[0]).header.get_best_affine()\n\n # entropy\n unc = np.repeat(np.expand_dims(mc_data, -1), 2, -1) # n_it, x, y, z, 2\n unc[..., 0] = 1 - unc[..., 1]\n unc = -np.sum(np.mean(unc, 0) * np.log(np.mean(unc, 0) + eps), -1)\n\n # Clip values to 0\n unc[unc < 0] = 0\n\n # save uncertainty map\n nib_unc = nib.Nifti1Image(unc, affine)\n nib.save(nib_unc, fname_out)\n\n\ndef structurewise_uncertainty(fname_lst, fname_hard, fname_unc_vox, fname_out):\n \"\"\"Estimate structure wise uncertainty.\n\n Structure-wise uncertainty from N MC probability maps (`fname_lst`) and saved in `fname_out` with the following\n suffixes:\n\n * '-cv.nii.gz': coefficient of variation\n * '-iou.nii.gz': intersection over union\n * '-avgUnc.nii.gz': average voxel-wise uncertainty within the structure.\n\n Args:\n fname_lst (list of str): List of the Monte Carlo samples.\n fname_hard (str): Filename of the hard segmentation, which is used to compute the `avgUnc` by providing a mask\n of the structures.\n fname_unc_vox (str): Filename of the voxel-wise uncertainty, which is used to compute the `avgUnc`.\n fname_out (str): Output filename.\n \"\"\"\n # 18-connectivity\n bin_struct = np.array(generate_binary_structure(3, 2))\n\n # load hard segmentation\n nib_hard = nib.load(fname_hard)\n data_hard = nib_hard.get_fdata()\n # Label each object of each class\n data_hard_labeled = [label(data_hard[..., i_class], structure=bin_struct)[0] for i_class in\n range(data_hard.shape[-1])]\n\n # load all MC simulations (in mc_dict[\"mc_data\"]) and label them (in mc_dict[\"mc_labeled\"])\n mc_dict = {\"mc_data\": [], \"mc_labeled\": []}\n for fname in fname_lst:\n data = nib.load(fname).get_fdata()\n mc_dict[\"mc_data\"].append([data[..., i_class] for i_class in range(data.shape[-1])])\n\n labeled_list = [label(data[..., i_class], structure=bin_struct)[0] for i_class in range(data.shape[-1])]\n mc_dict[\"mc_labeled\"].append(labeled_list)\n\n # load uncertainty map\n data_uncVox = nib.load(fname_unc_vox).get_fdata()\n\n # Init output arrays\n data_iou, data_cv, data_avgUnc = np.zeros(data_hard.shape), np.zeros(data_hard.shape), np.zeros(data_hard.shape)\n\n # Loop across classes\n for i_class in range(data_hard.shape[-1]):\n 
# Hard segmentation of the i_class that has been labeled\n data_hard_labeled_class = data_hard_labeled[i_class]\n # Get number of objects in\n l, l_count = np.unique(data_hard_labeled_class, return_counts=True)\n\n # Get all non zero labels and exclude structure of 1 pixel\n labels = l[l_count != 1][1:]\n # Loop across objects\n for i_obj in labels:\n # select the current structure, remaining voxels are set to zero\n data_hard_labeled_class_obj = (np.array(data_hard_labeled_class) == i_obj).astype(int)\n\n # Get object coordinates\n xx_obj, yy_obj, zz_obj = np.where(data_hard_labeled_class_obj)\n\n # Loop across the MC samples and mask the structure of interest\n data_class_obj_mc = []\n for i_mc in range(len(fname_lst)):\n # Get index of the structure of interest in the MC sample labeled\n i_mc_labels, i_mc_counts = np.unique(data_hard_labeled_class_obj * mc_dict[\"mc_labeled\"][i_mc][i_class],\n return_counts=True)\n i_mc_label = i_mc_labels[np.argmax(i_mc_counts[1:]) + 1] if len(i_mc_counts) > 1 else 0\n\n data_tmp = np.zeros(mc_dict[\"mc_data\"][i_mc][i_class].shape)\n # If i_mc_label is zero, it means the structure is not present in this mc_sample\n if i_mc_label > 0:\n data_tmp[mc_dict[\"mc_labeled\"][i_mc][i_class] == i_mc_label] = 1.\n\n data_class_obj_mc.append(data_tmp.astype(np.bool))\n\n # COMPUTE IoU\n # Init intersection and union\n intersection = np.logical_and(data_class_obj_mc[0], data_class_obj_mc[1])\n union = np.logical_or(data_class_obj_mc[0], data_class_obj_mc[1])\n # Loop across remaining MC samples\n for i_mc in range(2, len(data_class_obj_mc)):\n intersection = np.logical_and(intersection, data_class_obj_mc[i_mc])\n union = np.logical_or(union, data_class_obj_mc[i_mc])\n # Compute float\n iou = np.sum(intersection) * 1. / np.sum(union)\n # assign uncertainty value to the structure\n data_iou[xx_obj, yy_obj, zz_obj, i_class] = iou\n\n # COMPUTE COEFFICIENT OF VARIATION\n # List of volumes for each MC sample\n vol_mc_lst = [np.sum(data_class_obj_mc[i_mc]) for i_mc in range(len(data_class_obj_mc))]\n # Mean volume\n mu_mc = np.mean(vol_mc_lst)\n # STD volume\n sigma_mc = np.std(vol_mc_lst)\n # Coefficient of variation\n cv = sigma_mc / mu_mc\n # assign uncertainty value to the structure\n data_cv[xx_obj, yy_obj, zz_obj, i_class] = cv\n\n # COMPUTE AVG VOXEL WISE UNC\n avgUnc = np.mean(data_uncVox[xx_obj, yy_obj, zz_obj, i_class])\n # assign uncertainty value to the structure\n data_avgUnc[xx_obj, yy_obj, zz_obj, i_class] = avgUnc\n\n # save nifti files\n fname_iou = fname_out.split('.nii.gz')[0] + '-iou.nii.gz'\n fname_cv = fname_out.split('.nii.gz')[0] + '-cv.nii.gz'\n fname_avgUnc = fname_out.split('.nii.gz')[0] + '-avgUnc.nii.gz'\n\n nib_iou = nib.Nifti1Image(\n dataobj=data_iou,\n affine=nib_hard.header.get_best_affine(),\n header=nib_hard.header.copy()\n )\n nib_cv = nib.Nifti1Image(\n dataobj=data_cv,\n affine=nib_hard.header.get_best_affine(),\n header=nib_hard.header.copy()\n )\n nib_avgUnc = nib.Nifti1Image(\n data_avgUnc,\n affine=nib_hard.header.get_best_affine(),\n header=nib_hard.header.copy()\n )\n\n nib.save(nib_iou, fname_iou)\n nib.save(nib_cv, fname_cv)\n nib.save(nib_avgUnc, fname_avgUnc)\n",
"import json\nimport shutil\nimport pickle\nfrom pathlib import Path\n\nimport numpy as np\nimport pytest\n\nfrom ivadomed.loader.bids_dataframe import BidsDataframe\nfrom ivadomed.loader import loader as imed_loader\nfrom ivadomed.object_detection import utils as imed_obj_detect\nimport logging\nfrom testing.unit_tests.t_utils import create_tmp_dir, __data_testing_dir__, __tmp_dir__, download_data_testing_test_files\nfrom testing.common_testing_util import remove_tmp_dir\nlogger = logging.getLogger(__name__)\n\nBATCH_SIZE = 8\nPATH_OUTPUT = Path(__tmp_dir__, \"log\")\n\n\ndef setup_function():\n create_tmp_dir()\n\n\[email protected]('train_lst', [['sub-unf01_T2w.nii.gz']])\[email protected]('target_lst', [[\"_lesion-manual\"]])\[email protected]('config', [\n {\n \"object_detection_params\": {\n \"object_detection_path\": \"object_detection\",\n \"safety_factor\": [1.0, 1.0, 1.0],\n \"path_output\": str(PATH_OUTPUT)\n },\n \"transforms_params\": {\n \"NumpyToTensor\": {}},\n \"roi_params\": {\"suffix\": \"_seg-manual\", \"slice_filter_roi\": 10},\n \"contrast_params\": {\"contrast_lst\": ['T2w'], \"balance\": {}},\n \"multichannel\": False,\n \"model_params\": {\"name\": \"Unet\"},\n }, {\n \"object_detection_params\": {\n \"object_detection_path\": \"object_detection\",\n \"safety_factor\": [1.0, 1.0, 1.0],\n \"path_output\": str(PATH_OUTPUT)\n },\n \"transforms_params\": {\"NumpyToTensor\": {}},\n \"roi_params\": {\"suffix\": \"_seg-manual\", \"slice_filter_roi\": 10},\n \"contrast_params\": {\"contrast_lst\": ['T2w'], \"balance\": {}},\n \"Modified3DUNet\": {\n \"applied\": True,\n \"length_3D\": [16, 16, 16],\n \"stride_3D\": [1, 1, 1],\n \"attention\": False,\n \"n_filters\": 8\n },\n \"multichannel\": False,\n \"model_params\": {\"name\": \"Unet\"},\n }])\ndef test_bounding_box(download_data_testing_test_files, train_lst, target_lst, config):\n # Create mask\n mask_coord = [20, 40, 20, 90, 0, 25]\n mx1, mx2, my1, my2, mz1, mz2 = mask_coord\n mask = np.zeros((96, 96, 96))\n mask[mx1:mx2 + 1, my1:my2 + 1, mz1:mz2 + 1] = 1\n coord = imed_obj_detect.get_bounding_boxes(mask)\n assert coord[0] == mask_coord\n\n loader_params = {\n \"data_list\": train_lst,\n \"dataset_type\": \"training\",\n \"requires_undo\": False,\n \"path_data\": [__data_testing_dir__],\n \"target_suffix\": target_lst,\n \"extensions\": [\".nii.gz\"],\n \"slice_filter_params\": {\"filter_empty_mask\": False, \"filter_empty_input\": True},\n \"patch_filter_params\": {\"filter_empty_mask\": False, \"filter_empty_input\": False},\n \"slice_axis\": \"axial\"\n }\n\n if \"Modified3DUNet\" in config:\n config['model_params'][\"name\"] = \"Modified3DUNet\"\n config['model_params'].update(config[\"Modified3DUNet\"])\n\n bounding_box_dict = {}\n bounding_box_path = Path(PATH_OUTPUT, 'bounding_boxes.json')\n if not Path(PATH_OUTPUT).exists():\n PATH_OUTPUT.mkdir(parents=True, exist_ok=True)\n current_dir = Path.cwd()\n sub = train_lst[0].split('_')[0]\n contrast = config['contrast_params']['contrast_lst'][0]\n bb_path = str(Path(current_dir, __data_testing_dir__, sub, \"anat\", sub + \"_\" + contrast + \".nii.gz\"))\n bounding_box_dict[bb_path] = coord\n with open(bounding_box_path, 'w') as fp:\n json.dump(bounding_box_dict, fp, indent=4)\n\n # Update loader_params with config\n loader_params.update(config)\n\n bids_df = BidsDataframe(loader_params, __tmp_dir__, derivatives=True)\n\n ds = imed_loader.load_dataset(bids_df, **loader_params)\n\n handler = ds.handlers if \"Modified3DUNet\" in config else ds.indexes\n for index in 
range(len(handler)):\n\n if \"Modified3DUNet\" in config:\n if ds.disk_cache:\n path_seg_pair, _ = handler[index]\n with path_seg_pair.open('rb') as f:\n seg_pair = pickle.load(f)\n else:\n seg_pair, _ = handler[index]\n assert seg_pair['input'][0].shape[-3:] == (mx2 - mx1, my2 - my1, mz2 - mz1)\n else:\n if ds.disk_cache:\n path_seg_pair = handler[index]\n with path_seg_pair.open('rb') as f:\n seg_pair, _ = pickle.load(f)\n else:\n seg_pair, _ = handler[index]\n assert seg_pair['input'][0].shape[-2:] == (mx2 - mx1, my2 - my1)\n\n shutil.rmtree(PATH_OUTPUT)\n\n\ndef test_adjust_bb_size():\n test_coord = (0, 10, 0, 10, 0, 10)\n res = imed_obj_detect.adjust_bb_size(test_coord, (2, 2, 2), True)\n assert(res == [0, 20, 0, 20, 0, 20])\n\n\ndef test_compute_bb_statistics(download_data_testing_test_files):\n \"\"\"Check to make sure compute_bb_statistics runs.\"\"\"\n imed_obj_detect.compute_bb_statistics(str(Path(__data_testing_dir__,\n \"bounding_box_dict.json\")))\n\n\ndef teardown_function():\n remove_tmp_dir()\n"
] | [
[
"numpy.expand_dims",
"numpy.sum",
"numpy.unique",
"scipy.ndimage.generate_binary_structure",
"numpy.logical_or",
"scipy.ndimage.label",
"numpy.std",
"numpy.argmax",
"numpy.mean",
"numpy.array",
"numpy.logical_and",
"numpy.zeros",
"numpy.where"
],
[
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.7",
"1.0",
"0.10",
"1.2",
"0.14",
"0.19",
"1.5",
"0.12",
"0.17",
"0.13",
"1.6",
"1.4",
"1.9",
"1.3",
"1.10",
"0.15",
"0.18",
"0.16",
"1.8"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
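A stripped-down numpy sketch of the voxel-wise uncertainty computed by voxelwise_uncertainty in the ivadomed entry above: the binary entropy of the mean Monte Carlo foreground probability. A random volume stands in for the nifti predictions loaded in the original code.

import numpy as np

rng = np.random.default_rng(0)
mc_data = rng.random((10, 32, 32, 32))     # N Monte Carlo probability maps (stand-in data)

eps = 1e-5
p_fg = mc_data.mean(axis=0)                # mean foreground probability per voxel
p = np.stack([1.0 - p_fg, p_fg], axis=-1)  # per-voxel [background, foreground] probabilities
entropy = -np.sum(p * np.log(p + eps), axis=-1)
entropy = np.clip(entropy, 0.0, None)      # clip tiny negatives introduced by eps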
AghilasSini/AT-Annotator | [
"532c6de0fe143e2b6ace0d382cc79f1f0f2cf941",
"532c6de0fe143e2b6ace0d382cc79f1f0f2cf941",
"532c6de0fe143e2b6ace0d382cc79f1f0f2cf941",
"532c6de0fe143e2b6ace0d382cc79f1f0f2cf941"
] | [
"cnn-for-sentence-classification/train_keras.py",
"dimension_reduction/autoencoder_dimension_reduction.py",
"classification/rcnn_classifier_v0.py",
"parameter_optimization/random_Forest_using_GridSearchCV.py"
] | [
"import numpy as np\nimport codecs\nimport os\nimport random\n\nfrom keras import backend as K\nfrom keras.models import Model\nfrom keras.layers.embeddings import Embedding\nfrom keras.layers import Input, Dense, Lambda, Permute, Dropout\nfrom keras.layers import Conv2D, MaxPooling1D\n\ndef load_data(fpath, label):\n data = []\n with codecs.open(fpath, 'r', 'utf-8', errors='ignore') as f:\n lines = f.readlines()\n for l in lines:\n l = l.rstrip()\n data.append((l.split(' '), label))\n return data\n\ndef vectorize(data, sentence_maxlen, w2i):\n vec_data = []\n labels = []\n for d, label in data:\n vec = [w2i[w] for w in d if w in w2i]\n pad_len = max(0, sentence_maxlen - len(vec))\n vec += [0] * pad_len\n vec_data.append(vec)\n \n labels.append(label)\n vec_data = np.array(vec_data)\n labels = np.array(labels)\n return vec_data, labels\n\ndef load_glove_weights(glove_dir, embd_dim, vocab_size, word_index):\n embeddings_index = {}\n f = open(os.path.join(glove_dir, 'glove.6B.' + str(embd_dim) + 'd.txt'))\n for line in f:\n values = line.split()\n word = values[0]\n coefs = np.asarray(values[1:], dtype='float32')\n embeddings_index[word] = coefs\n f.close()\n\n print('Found %s word vectors.' % len(embeddings_index)) \n embedding_matrix = np.zeros((vocab_size, embd_dim))\n print('embed_matrix.shape', embedding_matrix.shape)\n for word, i in word_index.items():\n embedding_vector = embeddings_index.get(word)\n if embedding_vector is not None:\n # words not found in embedding index will be all-zeros.\n embedding_matrix[i] = embedding_vector\n\n return embedding_matrix\n\npos = load_data('./dataset/rt-polaritydata/rt-polarity.pos', 1)\nneg = load_data('./dataset/rt-polaritydata/rt-polarity.neg', 0)\ndata = pos + neg\n\nsentence_maxlen = max(map(len, (d for d, _ in data)))\nprint('sentence maxlen', sentence_maxlen)\n\nvocab = []\nfor d, _ in data:\n for w in d:\n if w not in vocab: vocab.append(w)\nvocab = sorted(vocab)\nvocab_size = len(vocab)\nprint('vocab size', len(vocab))\nw2i = {w:i for i,w in enumerate(vocab)}\n\nrandom.shuffle(data)\nvecX, vecY = vectorize(data, sentence_maxlen, w2i)\nn_data = len(vecX)\nsplit_ind = (int)(n_data * 0.9)\ntrainX, trainY = vecX[:split_ind], vecY[:split_ind]\ntestX, testY = vecX[split_ind:], vecY[split_ind:]\n\nembd_dim = 300\nglove_embd_w = load_glove_weights('./dataset', embd_dim, vocab_size, w2i)\n\ndef Net(vocab_size, embd_size, sentence_maxlen, glove_embd_w):\n sentence = Input((sentence_maxlen,), name='SentenceInput')\n \n # embedding\n embd_layer = Embedding(input_dim=vocab_size, \n output_dim=embd_size, \n weights=[glove_embd_w], \n trainable=False,\n name='shared_embd')\n embd_sentence = embd_layer(sentence)\n embd_sentence = Permute((2,1))(embd_sentence)\n embd_sentence = Lambda(lambda x: K.expand_dims(x, -1))(embd_sentence)\n \n # cnn\n cnn = Conv2D(1, \n kernel_size=(3, sentence_maxlen),\n activation='relu')(embd_sentence)\n cnn = Lambda(lambda x: K.sum(x, axis=3))(cnn)\n cnn = MaxPooling1D(3)(cnn)\n cnn = Lambda(lambda x: K.sum(x, axis=2))(cnn)\n out = Dense(1, activation='sigmoid')(cnn)\n\n model = Model(inputs=sentence, outputs=out, name='sentence_claccification')\n model.compile(optimizer='adagrad', loss='binary_crossentropy', metrics=['accuracy']) \n return model\n\nmodel = Net(vocab_size, embd_dim, sentence_maxlen, glove_embd_w)\nprint(model.summary())\n\nmodel.fit(trainX, trainY,\n batch_size=32,\n epochs=10,\n validation_data=(testX, testY)\n )\n",
"from keras.layers import Input, Dense\nfrom keras.models import Model\nimport nltk\nimport sklearn_crfsuite\nfrom sklearn_crfsuite import metrics\nimport pandas as pd\n\nfrom sklearn.preprocessing import label_binarize\nimport string\n# nltk.download('conll2002')\nflatten = lambda l: [item for sublist in l for item in sublist]\n\nprint(__doc__)\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom itertools import cycle\n\nimport os \nimport sys\n\n\nfrom sklearn.preprocessing import LabelEncoder\nfrom math import sqrt\nfrom sklearn.metrics import mean_squared_error\n\nfrom sklearn import svm, datasets\nfrom sklearn.metrics import roc_curve, auc\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import label_binarize\nfrom sklearn.multiclass import OneVsRestClassifier\nfrom scipy import interp\nfrom sklearn.metrics import roc_auc_score\nimport argparse\nimport matplotlib.cm as cm\n\nimport codecs\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom sklearn.decomposition import NMF\n# nltk.corpus.conll2002.fileids()\n\nfrom tqdm import tqdm_notebook as tqdm\nfrom tqdm import trange\n\n\nfrom sklearn.cluster import KMeans\nfrom sklearn import metrics\nfrom scipy.spatial.distance import cdist\n\n\nfrom sklearn.metrics import silhouette_samples, silhouette_score\nfrom sklearn.metrics import confusion_matrix\n\n\nfrom sklearn.decomposition import PCA \nfrom sklearn.preprocessing import StandardScaler\n\n\nfrom sklearn.cluster import KMeans\nfrom sklearn.metrics import silhouette_samples, silhouette_score\nfrom sklearn.metrics import confusion_matrix\n\nfrom sklearn.preprocessing import scale\n\n\nfrom gensim.models.word2vec import Word2Vec\n\nimport gensim\nimport random\nfrom collections import OrderedDict\n\n\nfrom sklearn.model_selection import KFold\n\n\n# classifier information\nfrom keras.layers import Dropout, Dense\nfrom keras.models import Sequential\nfrom sklearn.metrics import roc_curve, auc\nfrom sklearn.preprocessing import label_binarize\nfrom sklearn.multiclass import OneVsRestClassifier\nfrom scipy import interp\nfrom sklearn.metrics import roc_auc_score\nfrom sklearn.metrics import accuracy_score\nLabeledSentence = gensim.models.doc2vec.LabeledSentence\nimport hdbscan\n\n\ndef model_ae(X_train,x_test,n=600,encoding_dim=2):\n\t# this is the size of our encoded representations\n\tencoding_dim = 2\n\t# http://gradientdescending.com/pca-vs-autoencoders-for-dimensionality-reduction/\n\t# r program\n\n\n\t# this is our input placeholder\n\tinput = Input(shape=(n,))\n\t# \"encoded\" is the encoded representation of the input\n\tencoded = Dense(encoding_dim, activation='relu')(input)\n\n\t# \"decoded\" is the lossy reconstruction of the input\n\tdecoded = Dense(n, activation='sigmoid')(encoded)\n\n\n\t# this model maps an input to its reconstruction\n\tautoencoder = Model(input, decoded)\n\n\n\n\t# this model maps an input to its encoded representation\n\tencoder = Model(input, encoded)\n\n\n\n\tencoded_input = Input(shape=(encoding_dim,))\n\t# retrieve the last layer of the autoencoder model\n\tdecoder_layer = autoencoder.layers[-1]\n\t# create the decoder model\n\tdecoder = Model(encoded_input, decoder_layer(encoded_input))\n\n\n\tautoencoder.compile(optimizer='adadelta', loss='binary_crossentropy')\n\n\tautoencoder.fit(X_train, X_train,\n\t\tepochs=50,\n\t\tbatch_size=256,\n\t\tshuffle=True,\n\t\tvalidation_data=(x_test, x_test))\n\tpredicted = encoder.predict(x_test)\n\n\n\tprint(predicted)\n\n\n\n\n\treturn autoencoder\n\n\ndef 
main():\n\tparser = argparse.ArgumentParser(description=\"\")\n\n\t# Add options\n\tparser.add_argument(\"-v\", \"--verbosity\", action=\"count\", default=0,\n\t\t\t\t\t\thelp=\"increase output verbosity\")\n\n\t# Add arguments\n\t\n\t\n\tparser.add_argument(\"input_file\", help=\"The input file to be projected\")\n\t# parser.add_argument(\"speech_feats_file\", help=\"The input file to be projected\")\n\t# parser.add_argument(\"out_path_file\", help=\"The input file to be projected\")\n\targs = parser.parse_args()\n\tdf_=pd.read_csv(args.input_file)\n\tprint(df_.head())\n\tdf_doc2vec=df_.copy()\n\tdf_doc2vec=df_doc2vec.drop(['utterance'], axis=1)\n\tprint(df_doc2vec.columns.to_list())\n\n\t# df_['sentence_label']=sentence_emotion_labeling\n\n\tprint('loading the database')\n\tprint(df_doc2vec.head())\n\tfrom sklearn.preprocessing import scale\n\ttrain_vecs = scale(df_doc2vec)\n\tprint('scaling the data')\n\n\t## X_train,x_test,Y_train,y_test=train_test_split(X,Y,test_size=0.2)\n\tX_train,x_test,Y_train,y_test=train_test_split(train_vecs, df_['utterance'].to_list(),test_size=0.2)\n\t\n\tmodel=model_ae(X_train,x_test,train_vecs.shape[1],2)\n\t# print(model.summary())\n\t\n\t# model.fit(X_train, X_train,\n\t\t# epochs=50,\n\t\t# batch_size=256,\n\t\t# shuffle=True,\n\t\t# validation_data=(x_test, x_test))\n\t# predicted = model.predict(x_test)\n\t# print(predicted)\nif __name__ == '__main__':\n\tmain()",
"from keras.preprocessing import sequence\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Dropout, Activation\nfrom keras.layers import Embedding\nfrom keras.layers import GRU\nfrom keras.layers import Conv1D, MaxPooling1D\nfrom keras.datasets import imdb\nfrom sklearn.datasets import fetch_20newsgroups\nimport numpy as np\nfrom sklearn import metrics\nfrom keras.preprocessing.text import Tokenizer\nfrom keras.preprocessing.sequence import pad_sequences\nimport argparse\nimport glob\nimport os\nimport sys\nimport re\nimport pandas as pd\nfrom keras.layers import LSTM\n\nfrom sklearn.model_selection import train_test_split\n\n\n\ndef loadData_Tokenizer(X_train, X_test,MAX_NB_WORDS=75000,MAX_SEQUENCE_LENGTH=500):\n\tnp.random.seed(7)\n\ttext = np.concatenate((X_train, X_test), axis=0)\n\ttext = np.array(text)\n\ttokenizer = Tokenizer(num_words=MAX_NB_WORDS)\n\ttokenizer.fit_on_texts(text)\n\tsequences = tokenizer.texts_to_sequences(text)\n\tword_index = tokenizer.word_index\n\ttext = pad_sequences(sequences, maxlen=MAX_SEQUENCE_LENGTH)\n\tprint('Found %s unique tokens.' % len(word_index))\n\tindices = np.arange(text.shape[0])\n\t# np.random.shuffle(indices)\n\ttext = text[indices]\n\tprint(text.shape)\n\tX_train = text[0:len(X_train), ]\n\tX_test = text[len(X_train):, ]\n\tembeddings_index = {}\n\tf = open(\"/home/aghilas/Workspace/Experiments/SynPaFlex-Code/ml_template/classification/dataset/synpaflex_w2v.txt\",encoding=\"utf8\")\n\tfor line in f:\n\t\tvalues = line.split()\n\t\tword = values[0]\n\t\ttry:\n\t\t\tcoefs = np.asarray(values[1:], dtype='float32')\n\t\texcept:\n\t\t\tpass\n\t\tembeddings_index[word] = coefs\n\n\tf.close()\n\tprint('Total %s word vectors.' % len(embeddings_index))\n\treturn (X_train, X_test, word_index,embeddings_index)\n\n\ndef Build_Model_RCNN_Text(word_index, embeddings_index, nclasses, MAX_SEQUENCE_LENGTH=500, EMBEDDING_DIM=100): \n\tkernel_size = 2\n\tfilters = 256\n\tpool_size = 2\n\tgru_node = 256 \n\tembedding_matrix = np.random.random((len(word_index) + 1, EMBEDDING_DIM))\n\tfor word, i in word_index.items():\n\t\tembedding_vector = embeddings_index.get(word)\n\t\tif embedding_vector is not None:\n\t\t\t# words not found in embedding index will be all-zeros.\n\t\t\tif len(embedding_matrix[i]) !=len(embedding_vector):\n\t\t\t\tprint(\"could not broadcast input array from shape\",str(len(embedding_matrix[i])),\n\t\t\t\t\t\"into shape\",str(len(embedding_vector)),\" Please make sure your\"\n\t\t\t\t\t\" EMBEDDING_DIM is equal to embedding_vector file ,GloVe,\")\n\t\t\t\texit(1)\n\t\t\tembedding_matrix[i] = embedding_vector \n\tmodel = Sequential()\n\tmodel.add(Embedding(len(word_index) + 1,\n\t\t\t\t\t\t\t\tEMBEDDING_DIM,\n\t\t\t\t\t\t\t\tweights=[embedding_matrix],\n\t\t\t\t\t\t\t\tinput_length=MAX_SEQUENCE_LENGTH,\n\t\t\t\t\t\t\t\ttrainable=True))\n\tmodel.add(Dropout(0.25))\n\tmodel.add(Conv1D(filters, kernel_size, activation='relu'))\n\tmodel.add(MaxPooling1D(pool_size=pool_size))\n\tmodel.add(Conv1D(filters, kernel_size, activation='relu'))\n\tmodel.add(MaxPooling1D(pool_size=pool_size))\n\tmodel.add(Conv1D(filters, kernel_size, activation='relu'))\n\tmodel.add(MaxPooling1D(pool_size=pool_size))\n\tmodel.add(Conv1D(filters, kernel_size, activation='relu'))\n\tmodel.add(MaxPooling1D(pool_size=pool_size))\n\tmodel.add(LSTM(gru_node, return_sequences=True, recurrent_dropout=0.2))\n\tmodel.add(LSTM(gru_node, return_sequences=True, recurrent_dropout=0.2))\n\tmodel.add(LSTM(gru_node, return_sequences=True, 
recurrent_dropout=0.2))\n\tmodel.add(LSTM(gru_node, recurrent_dropout=0.2))\n\tmodel.add(Dense(1024,activation='relu'))\n\tmodel.add(Dense(nclasses))\n\tmodel.add(Activation('softmax'))\n\tmodel.compile(loss='sparse_categorical_crossentropy',\n\t\t\t\t optimizer='adam',\n\t\t\t\t metrics=['accuracy'])\n\n\treturn model\n\ndef clean_text(Text):\n\tText = re.sub(\"\\n\", \" \", Text)\n\tText = re.sub(\"--\", \"\", Text)\n\tText = re.sub(\"\\.\\.\\.\", \".\", Text)\n\tText = Text.lower()\n\t# Text = re.split(\"[.!?]\", Text)\n\tSent = re.split(\"\\W\", Text)\n\tSent = [Token for Token in Sent if Token]\n\treturn Sent\n\n\ndef build_args():\n\tparser=argparse.ArgumentParser(description='')\n\tparser.add_argument('path', type=str, nargs=1, help='data input')\n\treturn parser.parse_args()\n\n\ndef main():\n\targs=build_args()\n\t# \n\t# newsgroups_train = fetch_20newsgroups(subset='train')\n\t# newsgroups_test = fetch_20newsgroups(subset='test')\n\t# X_train = newsgroups_train.data\n\t# X_test = newsgroups_test.data\n\t# y_train = newsgroups_train.target\n\t# y_test = newsgroups_test.target\n\n\t# \n\t# Parsing arguments\n\t\n\n\tdataset=pd.read_csv(args.path[0],sep='\\t')\n\n\n\tconvert_dict={\n\t'label':'category',\n\t\t\t\n\t}\n\t# # print(cat_list)\n\tdataset = dataset.astype(convert_dict)\n\tdataset['label_cat'] = dataset.label.cat.codes\n\tdata=[]\n\tfor frame in dataset['text'].to_list():\n\t\tdata.append(clean_text(frame))\n\n\t\n\ttarget_data=dataset['label_cat'].to_list()\n\t\n\tX_train, X_test, y_train, y_test = train_test_split(data, target_data, test_size=0.3)\n\n\t#\n\tX_train_Glove,X_test_Glove, word_index,embeddings_index = loadData_Tokenizer(X_train,X_test)\n\n\tmodel_RCNN = Build_Model_RCNN_Text(word_index,embeddings_index, 20)\n\tmodel_RCNN.summary()\n\tmodel_RCNN.fit(X_train_Glove, y_train,\n\t\t\t\t\t\t\t validation_data=(X_test_Glove, y_test),\n\t\t\t\t\t\t\t epochs=15,\n\t\t\t\t\t\t\t batch_size=128,\n\t\t\t\t\t\t\t verbose=2)\n\tpredicted = model_RCNN.predict(X_test_Glove)\n\n\tpredicted = np.argmax(predicted, axis=1)\n\t\n\tprint(metrics.classification_report(y_test, predicted))\n\n\nif __name__ == '__main__':\n\tmain()",
"import numpy as np # linear algebra\nimport pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)\nimport re\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nfrom scipy.stats import chi2_contingency\n\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.model_selection import cross_val_score\nfrom sklearn.model_selection import GridSearchCV\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.metrics import accuracy_score\nfrom sklearn.svm import SVC\n\n\n\n\nfrom subprocess import check_output\nprint(check_output([\"ls\", \"./input\"]).decode(\"utf8\"))\n\ntrain=pd.read_csv(\"./input/train.csv\")\ntest=pd.read_csv(\"./input/test.csv\")\n\nprint(\"Train dataset has {} samples and {} attributes\".format(*train.shape))\nprint(\"Test dataset has {} samples and {} attributes\".format(*test.shape))\n\nprint(train.head())\n\n\nfig , ax = plt.subplots(figsize=(6,4))\nsns.countplot(x='Survived', data=train)\nplt.title(\"Count of Survival\")\nplt.show()\n\n\nn=len(train)\nsurv_0=len(train[train['Survived']==0])\nsurv_1=len(train[train['Survived']==1])\n\nprint(\"% of passanger survived in train dataset: \",surv_1*100/n)\nprint(\"% of passanger not survived in train dataset: \",surv_0*100/n)\n\n\ncat=['Pclass','Sex','Embarked']\nnum=['Age','SibSp','Parch','Fare']\n\n\ncorr_df=train[num] #New dataframe to calculate correlation between numeric features\ncor= corr_df.corr(method='pearson')\nprint(cor)\n\nfig, ax =plt.subplots(figsize=(8, 6))\nplt.title(\"Correlation Plot\")\nsns.heatmap(cor, mask=np.zeros_like(cor, dtype=np.bool), cmap=sns.diverging_palette(220, 10, as_cmap=True),\n square=True, ax=ax)\nplt.show()\n\n\ncsq=chi2_contingency(pd.crosstab(train['Survived'], train['Sex']))\nprint(\"P-value: \",csq[1])\n\n\ncsq2=chi2_contingency(pd.crosstab(train['Survived'], train['Embarked']))\nprint(\"P-value: \",csq2[1])\n\n\n\ncsq3=chi2_contingency(pd.crosstab(train['Survived'], train['Pclass']))\nprint(\"P-value: \",csq3[1])\n\n\nfig, ax=plt.subplots(figsize=(8,6))\nsns.countplot(x='Survived', data=train, hue='Sex')\nax.set_ylim(0,500)\nplt.title(\"Impact of Sex on Survived\")\nplt.show()\n\n\nfig, ax=plt.subplots(figsize=(8,6))\nsns.countplot(x='Survived', data=train, hue='Embarked')\nax.set_ylim(0,500)\nplt.title(\"Impact of Embarked on Survived\")\nplt.show()\n\n\nfig, ax=plt.subplots(figsize=(8,6))\nsns.countplot(x='Survived', data=train, hue='Pclass')\nax.set_ylim(0,400)\nplt.title(\"Impact of Pclass on Survived\")\nplt.show()\n\nfig, ax=plt.subplots(1,figsize=(8,6))\nsns.boxplot(x='Survived',y='Fare', data=train)\nax.set_ylim(0,300)\nplt.title(\"Survived vs Fare\")\nplt.show()\n\nprint(train.isnull().sum())\n\nprint(test.isnull().sum())\n\ntrain['Age'].describe()\n\n\nmed=np.nanmedian(train['Age'])\ntrain['Age']=train['Age'].fillna(med)\ntest['Age']=test['Age'].fillna(med)\n\ntrain['Cabin'].value_counts()\n\n\ntrain['Cabin']=train['Cabin'].fillna(0)\ntest['Cabin']=test['Cabin'].fillna(0)\n\ntrain['Embarked'].value_counts()\n\n\ntrain['Cabin']=train['Cabin'].fillna(\"S\")\n\ntrain['Fare'].describe()\n\nmed=np.nanmedian(train['Fare'])\ntest['Fare']=test['Fare'].fillna(med)\n\n\ntrain['hasCabin']=train['Cabin'].apply(lambda x: 0 if x==0 else 1)\ntest['hasCabin']=test['Cabin'].apply(lambda x: 0 if x==0 else 1)\n\n\ntrain['FamilyMem']=train.apply(lambda x: x['SibSp']+x['Parch'], axis=1)\ntest['FamilyMem']=test.apply(lambda x: x['SibSp']+x['Parch'], axis=1)\n\n\n\ndef get_title(name):\n title_search = re.search(' ([A-Za-z]+)\\.', name)\n if 
title_search:\n return title_search.group(1)\n return \"\"\n\ntrain['title']=train['Name'].apply(get_title)\ntest['title']=test['Name'].apply(get_title)\n\n\ntitle_lev1=list(train['title'].value_counts().reset_index()['index'])\ntitle_lev2=list(test['title'].value_counts().reset_index()['index'])\n\n\n\ntitle_lev=list(set().union(title_lev1, title_lev2))\nprint(title_lev)\n\n\ntrain['title']=pd.Categorical(train['title'], categories=title_lev)\ntest['title']=pd.Categorical(test['title'], categories=title_lev)\n\ncols=['Pclass','Sex','Embarked','hasCabin','title']\nfcol=['Pclass','Sex','Embarked','hasCabin','title','Age','FamilyMem','Fare']\n\n\n\nfor c in cols:\n train[c]=train[c].astype('category')\n test[c]=test[c].astype('category')\n\ntrain_df=train[fcol]\ntest_df=test[fcol]\n\ntrain_df=pd.get_dummies(train_df, columns=cols, drop_first=True)\ntest_df=pd.get_dummies(test_df, columns=cols, drop_first=True)\n\n\ny=train['Survived']\n\nx_train, x_test, y_train, y_test = train_test_split(train_df, y, test_size=0.3, random_state=42)\n\n\nrfc=RandomForestClassifier(random_state=42)\n\n\nparam_grid = { \n 'n_estimators': [200, 500],\n 'max_features': ['auto', 'sqrt', 'log2'],\n 'max_depth' : [4,5,6,7,8],\n 'criterion' :['gini', 'entropy']\n}\n\n\nCV_rfc = GridSearchCV(estimator=rfc, param_grid=param_grid, cv= 5)\nCV_rfc.fit(x_train, y_train)\n\n\nprint(CV_rfc.best_params_)\n\n\nrfc1=RandomForestClassifier(random_state=42, max_features='auto', n_estimators= 200, max_depth=8, criterion='gini')\nrfc1.fit(x_train, y_train)\n\npred=rfc1.predict(x_test)\n\nprint(\"Accuracy for Random Forest on CV data: \",accuracy_score(y_test,pred))\n\nop_rf=rfc1.predict(test_df)\n\nop=pd.DataFrame(test['PassengerId'])\nop['Survived']=op_rf\n\nop.to_csv(\"op_rf.csv\", index=False)"
] | [
[
"numpy.asarray",
"numpy.array",
"numpy.zeros"
],
[
"sklearn.preprocessing.scale",
"pandas.read_csv"
],
[
"pandas.read_csv",
"numpy.random.seed",
"numpy.asarray",
"numpy.arange",
"sklearn.model_selection.train_test_split",
"numpy.concatenate",
"numpy.argmax",
"numpy.array",
"sklearn.metrics.classification_report"
],
[
"pandas.crosstab",
"sklearn.model_selection.GridSearchCV",
"pandas.read_csv",
"numpy.nanmedian",
"matplotlib.pyplot.title",
"sklearn.ensemble.RandomForestClassifier",
"pandas.Categorical",
"sklearn.metrics.accuracy_score",
"matplotlib.pyplot.subplots",
"sklearn.model_selection.train_test_split",
"pandas.DataFrame",
"numpy.zeros_like",
"matplotlib.pyplot.show",
"pandas.get_dummies"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
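A compact sketch of the random-forest grid search performed in random_Forest_using_GridSearchCV.py above, using a synthetic dataset in place of the Titanic CSVs, which are not included in this entry; max_features='auto' from the original grid is omitted here because newer scikit-learn releases reject it.

from sklearn.datasets import make_classification
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score
from sklearn.model_selection import GridSearchCV, train_test_split

# Synthetic stand-in for the engineered Titanic features used in the original script.
X, y = make_classification(n_samples=500, n_features=10, random_state=42)
x_train, x_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=42)

param_grid = {
    'n_estimators': [200, 500],
    'max_features': ['sqrt', 'log2'],
    'max_depth': [4, 5, 6, 7, 8],
    'criterion': ['gini', 'entropy'],
}
CV_rfc = GridSearchCV(estimator=RandomForestClassifier(random_state=42), param_grid=param_grid, cv=5)
CV_rfc.fit(x_train, y_train)

print(CV_rfc.best_params_)
pred = CV_rfc.best_estimator_.predict(x_test)
print('Accuracy on held-out data:', accuracy_score(y_test, pred))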
ostap-viniavskyi/acme | [
"8fbae90217557a35e1d773aa63ab80890e799765",
"8fbae90217557a35e1d773aa63ab80890e799765",
"8fbae90217557a35e1d773aa63ab80890e799765"
] | [
"acme/agents/tf/svg0_prior/networks.py",
"acme/agents/jax/d4pg/networks.py",
"examples/atari/scratch/jax_rnn.py"
] | [
"# python3\n# Copyright 2018 DeepMind Technologies Limited. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Shared helpers for different experiment flavours.\"\"\"\n\nimport functools\nfrom typing import Mapping, Sequence, Optional\n\nfrom acme import specs\nfrom acme import types\nfrom acme.agents.tf.svg0_prior import utils as svg0_utils\nfrom acme.tf import networks\nfrom acme.tf import utils as tf2_utils\n\nimport numpy as np\nimport sonnet as snt\n\n\ndef make_default_networks(\n action_spec: specs.BoundedArray,\n policy_layer_sizes: Sequence[int] = (256, 256, 256),\n critic_layer_sizes: Sequence[int] = (512, 512, 256),\n) -> Mapping[str, types.TensorTransformation]:\n \"\"\"Creates networks used by the agent.\"\"\"\n\n # Get total number of action dimensions from action spec.\n num_dimensions = np.prod(action_spec.shape, dtype=int)\n\n policy_network = snt.Sequential([\n tf2_utils.batch_concat,\n networks.LayerNormMLP(policy_layer_sizes, activate_final=True),\n networks.MultivariateNormalDiagHead(\n num_dimensions,\n tanh_mean=True,\n min_scale=0.3,\n init_scale=0.7,\n fixed_scale=False,\n use_tfd_independent=False)\n ])\n # The multiplexer concatenates the (maybe transformed) observations/actions.\n multiplexer = networks.CriticMultiplexer(\n action_network=networks.ClipToSpec(action_spec))\n critic_network = snt.Sequential([\n multiplexer,\n networks.LayerNormMLP(critic_layer_sizes, activate_final=True),\n networks.NearZeroInitializedLinear(1),\n ])\n\n return {\n \"policy\": policy_network,\n \"critic\": critic_network,\n }\n\n\ndef make_network_with_prior(\n action_spec: specs.BoundedArray,\n policy_layer_sizes: Sequence[int] = (200, 100),\n critic_layer_sizes: Sequence[int] = (400, 300),\n prior_layer_sizes: Sequence[int] = (200, 100),\n policy_keys: Optional[Sequence[str]] = None,\n prior_keys: Optional[Sequence[str]] = None,\n) -> Mapping[str, types.TensorTransformation]:\n \"\"\"Creates networks used by the agent.\"\"\"\n\n # Get total number of action dimensions from action spec.\n num_dimensions = np.prod(action_spec.shape, dtype=int)\n flatten_concat_policy = functools.partial(\n svg0_utils.batch_concat_selection, concat_keys=policy_keys)\n flatten_concat_prior = functools.partial(\n svg0_utils.batch_concat_selection, concat_keys=prior_keys)\n\n policy_network = snt.Sequential([\n flatten_concat_policy,\n networks.LayerNormMLP(policy_layer_sizes, activate_final=True),\n networks.MultivariateNormalDiagHead(\n num_dimensions,\n tanh_mean=True,\n min_scale=0.1,\n init_scale=0.7,\n fixed_scale=False,\n use_tfd_independent=False)\n ])\n # The multiplexer concatenates the (maybe transformed) observations/actions.\n multiplexer = networks.CriticMultiplexer(\n observation_network=flatten_concat_policy,\n action_network=networks.ClipToSpec(action_spec))\n critic_network = snt.Sequential([\n multiplexer,\n networks.LayerNormMLP(critic_layer_sizes, activate_final=True),\n networks.NearZeroInitializedLinear(1),\n ])\n prior_network = snt.Sequential([\n 
flatten_concat_prior,\n networks.LayerNormMLP(prior_layer_sizes, activate_final=True),\n networks.MultivariateNormalDiagHead(\n num_dimensions,\n tanh_mean=True,\n min_scale=0.1,\n init_scale=0.7,\n fixed_scale=False,\n use_tfd_independent=False)\n ])\n return {\n \"policy\": policy_network,\n \"critic\": critic_network,\n \"prior\": prior_network,\n }\n",
"# python3\n# Copyright 2018 DeepMind Technologies Limited. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"D4PG networks definition.\"\"\"\n\nimport dataclasses\nfrom typing import Sequence\n\nfrom acme import specs\nfrom acme import types\nfrom acme.agents.jax import actor_core as actor_core_lib\nfrom acme.agents.jax.d4pg import config as d4pg_config\nfrom acme.jax import networks as networks_lib\nfrom acme.jax import utils\nimport haiku as hk\nimport jax.numpy as jnp\nimport numpy as np\nimport rlax\n\n\[email protected]\nclass D4PGNetworks:\n \"\"\"Network and pure functions for the D4PG agent..\"\"\"\n policy_network: networks_lib.FeedForwardNetwork\n critic_network: networks_lib.FeedForwardNetwork\n\n\ndef get_default_behavior_policy(\n networks: D4PGNetworks,\n config: d4pg_config.D4PGConfig) -> actor_core_lib.FeedForwardPolicy:\n \"\"\"Selects action according to the training policy.\"\"\"\n def behavior_policy(params: networks_lib.Params, key: networks_lib.PRNGKey,\n observation: types.NestedArray):\n action = networks.policy_network.apply(params, observation)\n if config.sigma != 0:\n action = rlax.add_gaussian_noise(key, action, config.sigma)\n return action\n\n return behavior_policy\n\n\ndef get_default_eval_policy(\n networks: D4PGNetworks) -> actor_core_lib.FeedForwardPolicy:\n \"\"\"Selects action according to the training policy.\"\"\"\n def behavior_policy(params: networks_lib.Params, key: networks_lib.PRNGKey,\n observation: types.NestedArray):\n del key\n action = networks.policy_network.apply(params, observation)\n return action\n return behavior_policy\n\n\ndef make_networks(\n spec: specs.EnvironmentSpec,\n policy_layer_sizes: Sequence[int] = (300, 200),\n critic_layer_sizes: Sequence[int] = (400, 300),\n vmin: float = -150.,\n vmax: float = 150.,\n num_atoms: int = 51,\n) -> D4PGNetworks:\n \"\"\"Creates networks used by the agent.\"\"\"\n\n action_spec = spec.actions\n\n num_dimensions = np.prod(action_spec.shape, dtype=int)\n critic_atoms = jnp.linspace(vmin, vmax, num_atoms)\n\n def _actor_fn(obs):\n network = hk.Sequential([\n utils.batch_concat,\n networks_lib.LayerNormMLP(policy_layer_sizes, activate_final=True),\n networks_lib.NearZeroInitializedLinear(num_dimensions),\n networks_lib.TanhToSpec(action_spec),\n ])\n return network(obs)\n\n def _critic_fn(obs, action):\n network = hk.Sequential([\n utils.batch_concat,\n networks_lib.LayerNormMLP(layer_sizes=[*critic_layer_sizes, num_atoms]),\n ])\n value = network([obs, action])\n return value, critic_atoms\n\n policy = hk.without_apply_rng(hk.transform(_actor_fn))\n critic = hk.without_apply_rng(hk.transform(_critic_fn))\n\n # Create dummy observations and actions to create network parameters.\n dummy_action = utils.zeros_like(spec.actions)\n dummy_obs = utils.zeros_like(spec.observations)\n dummy_action = utils.add_batch_dim(dummy_action)\n dummy_obs = utils.add_batch_dim(dummy_obs)\n\n return D4PGNetworks(\n policy_network=networks_lib.FeedForwardNetwork(\n lambda rng: 
policy.init(rng, dummy_obs), policy.apply),\n critic_network=networks_lib.FeedForwardNetwork(\n lambda rng: critic.init(rng, dummy_obs, dummy_action), critic.apply))\n",
"import jax\nimport haiku as hk\nimport numpy as np\nimport jax.numpy as jnp\nimport optax\nimport jax.nn as nn\nfrom functools import partial\n\n\nclass RNN(hk.Module):\n def __init__(self):\n super(RNN, self).__init__()\n self.gru_core = hk.GRU(64)\n self.fc_head = hk.Linear(1)\n\n def __call__(self, x: jnp.array, state):\n x, new_state = self.gru_core(x, state)\n return self.fc_head(x), new_state\n\n def initial_state(self, batch_size: int):\n return self.gru_core.initial_state(batch_size)\n\n def unroll(self, x, state):\n # x - [T, B, E]\n embeddings, new_states = hk.static_unroll(self.gru_core, x, state)\n embeddings = self.fc_head(embeddings)\n print(embeddings.shape, new_states.shape)\n return embeddings, new_states\n\n\ndef forward_fn(x: jnp.array, state):\n model = RNN()\n return model(x, state)\n\n\ndef initial_state_fn(batch_size):\n model = RNN()\n return model.initial_state(batch_size)\n\n\ndef unroll_fn(x, state):\n model = RNN()\n return model.unroll(x, state)\n\n\ndef data_gen():\n batch_size = 32\n seq_len = 64\n n_features = 3\n noise = 0.0\n ma_coeffs = np.array([])\n\n w = np.array([1.0, 2.0, 3.0])\n b = 1.\n\n while True:\n xs = np.random.randn(batch_size, n_features)\n ys = np.dot(xs, w) + noise * np.random.randn(batch_size)\n yield jnp.array(xs), jnp.array(ys)\n\n\ndef train():\n rng_key = jax.random.PRNGKey(42)\n lr = 0.001\n\n model = hk.without_apply_rng(hk.transform(model_forward))\n params = model.init(rng_key, x=jnp.ones((1, 3)))\n\n optimizer = optax.adam(learning_rate=lr)\n opt_state = optimizer.init(params)\n\n def mse(params, xs, ys):\n ys_pred = model.apply(params, xs)\n return jnp.mean((ys - ys_pred) ** 2)\n\n @partial(jax.jit, static_argnames=['lr'])\n def update(params, opt_state, xs, ys, lr):\n loss, grads = jax.value_and_grad(mse)(params, xs, ys)\n updates, opt_state = optimizer.update(grads, opt_state, params)\n params = optax.apply_updates(params, updates)\n return params, opt_state, loss\n\n losses = []\n for i, (xs, ys) in enumerate(data_gen(), start=1):\n ys_pred = model.apply(params, xs)\n params, opt_state, loss = update(params, opt_state, xs, ys, lr)\n losses.append(loss)\n\n if i % 100 == 0:\n print(np.mean(losses[-100:]))\n losses = []\n\n\nif __name__ == '__main__':\n # xs, ys = next(data_gen())\n # print(xs, ys)\n # train()\n model_hk = hk.without_apply_rng(hk.transform(forward_fn))\n model_init_hk = hk.without_apply_rng(hk.transform(initial_state_fn))\n model_unroll_hk = hk.without_apply_rng(hk.transform(unroll_fn))\n\n rng_key = jax.random.PRNGKey(42)\n\n state_init_params = model_init_hk.init(rng_key, batch_size=2)\n state = model_init_hk.apply(state_init_params, batch_size=2)\n\n params = model_hk.init(rng_key, jnp.ones((2, 10)), state)\n y, state = model_hk.apply(params, jnp.ones((2, 10)), state)\n\n model_unroll_hk.apply(params, jnp.ones((3, 2, 10)), state)\n\n\n # state = model_init_hk.apply()"
] | [
[
"numpy.prod"
],
[
"numpy.prod"
],
[
"numpy.dot",
"numpy.array",
"numpy.random.randn",
"numpy.mean"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
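The train() function in the examples/atari/scratch/jax_rnn.py entry above refers to an undefined model_forward; the following self-contained sketch reproduces the same haiku/optax training loop with a hypothetical single-layer module standing in for it, fitting the linear target that data_gen generates.

import haiku as hk
import jax
import jax.numpy as jnp
import numpy as np
import optax


def model_forward(x):
    # Hypothetical stand-in for the undefined model_forward in the scratch file.
    return hk.Linear(1)(x)


model = hk.without_apply_rng(hk.transform(model_forward))
params = model.init(jax.random.PRNGKey(42), jnp.ones((1, 3)))

optimizer = optax.adam(1e-2)
opt_state = optimizer.init(params)


def mse(params, xs, ys):
    pred = model.apply(params, xs).squeeze(-1)
    return jnp.mean((pred - ys) ** 2)


@jax.jit
def update(params, opt_state, xs, ys):
    # One gradient step: compute loss/grads, transform them, apply the update.
    loss, grads = jax.value_and_grad(mse)(params, xs, ys)
    updates, opt_state = optimizer.update(grads, opt_state, params)
    return optax.apply_updates(params, updates), opt_state, loss


w, b = np.array([1.0, 2.0, 3.0]), 1.0
for step in range(500):
    xs = np.random.randn(32, 3)
    ys = xs @ w + b
    params, opt_state, loss = update(params, opt_state, jnp.array(xs), jnp.array(ys))
    if step % 100 == 0:
        print(step, float(loss))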
chappers/multiagent-particle-envs | [
"5c56a0cc4241fb2e5a6f4dc62bf1735862d6e30c"
] | [
"examples/ppo_agent.py"
] | [
"\"\"\"\nExample using this environment with stable_baseline library\n\nfrom stable_baselines.common.vec_env import DummyVecEnv\nenv = DummyVecEnv([lambda: env])\n\nmodel = PPO2(MlpPolicy, env, verbose=1)\nmodel.learn(total_timesteps=25000)\nmodel.save(\"ppo2_cartpole\")\n\n# Enjoy trained agent\nmodel = PPO2.load(\"ppo2_cartpole\")\n\nobs = env.reset()\nwhile True:\n action, _states = model.predict(obs)\n obs, rewards, dones, info = env.step(action)\n env.render()\n\"\"\"\n\nimport gym\nimport numpy as np\nimport copy\n\nfrom stable_baselines.common.policies import MlpPolicy\nfrom stable_baselines.common.vec_env import DummyVecEnv\nfrom stable_baselines import PPO2\nfrom multiagent.environment import MultiAgentEnv\nimport multiagent.scenarios as scenarios\nfrom stable_baselines.common.base_class import BaseRLModel\nfrom stable_baselines.common.runners import AbstractEnvRunner\n\nfrom collections import OrderedDict\nimport numpy as np\n\nfrom stable_baselines.common.vec_env import VecEnv\nfrom stable_baselines.common.vec_env.util import (\n copy_obs_dict,\n dict_to_obs,\n obs_space_info,\n)\n\nscenario_name = \"simple_tag\"\nnum_adversaries = 5\n\n\ndef to_categorical(action, shape=5):\n if type(action) is list:\n action = action[0]\n z = np.zeros(shape)\n z[action] = 1\n return z\n\n\nclass SingleAgentEnv(gym.Env):\n def __init__(self, observation_space, action_space):\n self.observation_space = observation_space\n self.action_space = action_space\n\n def step(self):\n return self\n\n def reset(self):\n return self\n\n\ndef env_splitter(multi_env):\n \"\"\"\n Takes in multiagentenv, and spits out each env individually?\n \"\"\"\n return [\n SingleAgentEnv(obs_space, act_space)\n for obs_space, act_space in zip(\n multi_env.observation_space, multi_env.action_space\n )\n ]\n\n\nclass MultiRunner(object):\n def __init__(self, *, env, models, n_steps, gamma, lam):\n \"\"\"\n A runner to learn the policy of an environment for a model\n :param env: (Gym environment) The environment to learn from\n :param model: (list[Model]) The model to learn\n :param n_steps: (int) The number of steps to run for each environment\n :param gamma: (float) Discount factor\n :param lam: (float) Factor for trade-off of bias vs variance for Generalized Advantage Estimator\n \"\"\"\n self.lam = lam\n self.gamma = gamma\n\n # super().__init__(env=env, model=models, n_steps=n_steps)\n self.env = env\n self.model = model\n n_env = 1 # env.num_envs\n\n self.batch_ob_shape = []\n self.obs = []\n for idx, env_observation_space in enumerate(env.observation_space):\n self.batch_ob_shape.append((n_env * n_steps,) + env.observation_space.shape)\n self.obs.append(\n np.zeros(\n (n_env,) + env.observation_space.shape,\n dtype=env.observation_space.dtype.name,\n )\n )\n\n obs_reset = env.reset()\n for idx, x in enumerate(obs_reset):\n self.obs[idx][:] = x\n self.n_steps = n_steps\n self.states = [x.initial_state for x in self.model] # get states...\n self.dones = [False for _ in range(n_env)]\n\n def run(self):\n \"\"\"\n Run a learning step of the model\n :return:\n - observations: (list[np.ndarray]) the observations\n - rewards: (np.ndarray) the rewards\n - masks: (numpy bool) whether an episode is over or not\n - actions: (np.ndarray) the actions\n - values: (np.ndarray) the value function output\n - negative log probabilities: (np.ndarray)\n - states: (np.ndarray) the internal states of the recurrent policies\n - infos: (dict) the extra information of the model\n \"\"\"\n # mb stands for minibatch\n mb_obs, mb_rewards, 
mb_actions, mb_values, mb_dones, mb_neglogpacs = (\n [],\n [],\n [],\n [],\n [],\n [],\n )\n mb_states = self.states\n ep_infos = []\n for _ in range(self.n_steps):\n actions = []\n values = []\n states = []\n neglogpacs = []\n for idx, agent in enumerate(self.model):\n actions_, values_, states_, neglogpacs_ = self.model.step(\n self.obs[idx].reshape(1, -1), self.states[idx], self.dones\n )\n actions.append(actions_)\n values.append(values_)\n states.append(states_)\n neglogpacs.append(neglogpacs_)\n mb_obs.append(copy.copy(self.obs))\n mb_actions.append(actions)\n mb_values.append(values)\n mb_neglogpacs.append(neglogpacs)\n mb_dones.append(self.dones)\n clipped_actions = copy.copy(actions)\n\n # Clip the actions to avoid out of bound error - do this by agent\n # we will skip this for now...\n \"\"\"\n clipped_actions = []\n for idx, agent in enumerate(self.model):\n if isinstance(self.env.action_space, gym.spaces.Box):\n clipped_actions.append(np.clip(actions, self.env.action_space.low, self.env.action_space.high))\n \"\"\"\n\n self.obs[:], rewards, self.dones, infos = self.env.step(clipped_actions)\n for info in infos:\n maybe_ep_info = info.get(\"episode\")\n if maybe_ep_info is not None:\n ep_infos.append(maybe_ep_info)\n mb_rewards.append(rewards)\n # batch of steps to batch of rollouts\n mb_obs = np.asarray(mb_obs, dtype=self.obs.dtype)\n mb_rewards = np.asarray(mb_rewards, dtype=np.float32)\n mb_actions = np.asarray(mb_actions)\n mb_values = np.asarray(mb_values, dtype=np.float32)\n mb_neglogpacs = np.asarray(mb_neglogpacs, dtype=np.float32)\n mb_dones = np.asarray(mb_dones, dtype=np.bool)\n last_values = self.model.value(self.obs, self.states, self.dones)\n # discount/bootstrap off value fn\n mb_advs = np.zeros_like(mb_rewards)\n true_reward = np.copy(mb_rewards)\n last_gae_lam = 0\n for step in reversed(range(self.n_steps)):\n if step == self.n_steps - 1:\n nextnonterminal = 1.0 - self.dones\n nextvalues = last_values\n else:\n nextnonterminal = 1.0 - mb_dones[step + 1]\n nextvalues = mb_values[step + 1]\n delta = (\n mb_rewards[step]\n + self.gamma * nextvalues * nextnonterminal\n - mb_values[step]\n )\n mb_advs[step] = last_gae_lam = (\n delta + self.gamma * self.lam * nextnonterminal * last_gae_lam\n )\n mb_returns = mb_advs + mb_values\n\n mb_obs, mb_returns, mb_dones, mb_actions, mb_values, mb_neglogpacs, true_reward = map(\n swap_and_flatten,\n (\n mb_obs,\n mb_returns,\n mb_dones,\n mb_actions,\n mb_values,\n mb_neglogpacs,\n true_reward,\n ),\n )\n\n return (\n mb_obs,\n mb_returns,\n mb_dones,\n mb_actions,\n mb_values,\n mb_neglogpacs,\n mb_states,\n ep_infos,\n true_reward,\n )\n\n\nscenario = scenarios.load(scenario_name + \".py\").Scenario()\n# create world\nworld = scenario.make_world()\nenv = MultiAgentEnv(world, scenario.reset_world, scenario.reward, scenario.observation)\n# multi_env = DummyVecMultiEnv([lambda: env]) # TODO and implement??\nsplit_env = env_splitter(env)\nagents = [PPO2(MlpPolicy, DummyVecEnv([lambda: x]), verbose=1) for x in split_env]\n\n# based on these agents on the parent world we want to act and observe it.\nobs_reset = env.reset()\nstates = [x.initial_state for x in agents]\ndones = [False for _ in range(1)]\n\nactions = []\nfor idx in range(len(env.agents)):\n action = agents[idx].step(obs_reset[idx].reshape(1, -1), states[idx], False)\n actions.append(action[0][0])\n\n# see https://github.com/openai/multiagent-particle-envs/blob/master/multiagent/policy.py\n# for how to construct this...\nenv.step(\n [np.concatenate([to_categorical(x), 
np.zeros(env.world.dim_c)]) for x in actions]\n)\n\n\nobs_shape_n = [env.observation_space[i].shape for i in range(env.n)]\nnum_adversaries = env.n # min(env.n, arglist.num_adversaries)\nnum_adversaries = 0\n\n# get the trainers to train using PPO\n"
] | [
[
"numpy.asarray",
"numpy.copy",
"numpy.zeros",
"numpy.zeros_like"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
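Note on the record above (the multi-agent PPO runner): its run() method pipes the collected minibatch arrays through swap_and_flatten, but that helper is neither defined nor imported in the file. Below is a minimal sketch of the helper as it is conventionally written in stable-baselines-style PPO runners; treating this as the author's intended implementation is an assumption.

import numpy as np

def swap_and_flatten(arr):
    # Conventional PPO-runner helper (assumed): swap the time and environment
    # axes of a rollout array, then merge them into a single batch axis.
    shape = arr.shape
    return arr.swapaxes(0, 1).reshape(shape[0] * shape[1], *shape[2:])

# Quick check: 4 steps x 2 envs x 3 features -> a batch of 8 rows of 3 features.
rollout = np.arange(24, dtype=np.float32).reshape(4, 2, 3)
assert swap_and_flatten(rollout).shape == (8, 3)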
YerongLi2/LTVRR | [
"ec3be058da9c4f2f68d7c4dfb759209748732b93"
] | [
"lib/roi_data/fast_rcnn_rel.py"
] | [
"# Copyright (c) 2017-present, Facebook, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n##############################################################################\n\"\"\"Construct minibatches for Fast R-CNN training. Handles the minibatch blobs\nthat are specific to Fast R-CNN. Other blobs that are generic to RPN, etc.\nare handled by their respecitive roi_data modules.\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport numpy as np\nimport numpy.random as npr\nimport logging\n\nfrom core.config import cfg\nimport utils.boxes as box_utils\nimport utils.blob as blob_utils\nimport utils.fpn as fpn_utils\n\n\nlogger = logging.getLogger(__name__)\n\n\ndef add_rel_blobs(blobs, im_scales, roidb):\n \"\"\"Add blobs needed for training Fast R-CNN style models.\"\"\"\n # Sample training RoIs from each image and append them to the blob lists\n for im_i, entry in enumerate(roidb):\n frcn_blobs = _sample_pairs(entry, im_scales[im_i], im_i)\n for k, v in frcn_blobs.items():\n blobs[k].append(v)\n # Concat the training blob lists into tensors\n for k, v in blobs.items():\n if isinstance(v, list) and len(v) > 0:\n blobs[k] = np.concatenate(v)\n \n if cfg.FPN.FPN_ON and cfg.FPN.MULTILEVEL_ROIS:\n _add_rel_multilevel_rois(blobs)\n\n return True\n\n\ndef _sample_pairs(roidb, im_scale, batch_idx):\n \"\"\"Generate a random sample of RoIs comprising foreground and background\n examples.\n \"\"\"\n fg_pairs_per_image = cfg.TRAIN.FG_REL_SIZE_PER_IM\n pairs_per_image = int(cfg.TRAIN.FG_REL_SIZE_PER_IM / cfg.TRAIN.FG_REL_FRACTION) # need much more pairs since it's quadratic\n max_pair_overlaps = roidb['max_pair_overlaps']\n\n gt_pair_inds = np.where(max_pair_overlaps > 1.0 - 1e-4)[0]\n fg_pair_inds = np.where((max_pair_overlaps >= cfg.TRAIN.FG_THRESH) &\n (max_pair_overlaps <= 1.0 - 1e-4))[0]\n \n fg_pairs_per_this_image = np.minimum(fg_pairs_per_image, gt_pair_inds.size + fg_pair_inds.size)\n # Sample foreground regions without replacement\n # if rel_pos_inds.size > 0 and rel_pos_inds.size > fg_rois_per_image - rel_gt_inds.size:\n\n if fg_pair_inds.size > 0 and fg_pair_inds.size > (fg_pairs_per_this_image - gt_pair_inds.size) \\\n and fg_pairs_per_this_image > gt_pair_inds.size:\n fg_pair_inds = npr.choice(\n fg_pair_inds, size=(fg_pairs_per_this_image - gt_pair_inds.size), replace=False)\n fg_pair_inds = np.append(fg_pair_inds, gt_pair_inds)\n\n # Label is the class each RoI has max overlap with\n fg_prd_labels = roidb['max_prd_classes'][fg_pair_inds]\n blob_dict = dict(\n fg_prd_labels_int32=fg_prd_labels.astype(np.int32, copy=False))\n \n bg_pair_inds = np.where((max_pair_overlaps < cfg.TRAIN.BG_THRESH_HI))[0]\n \n # Compute number of background RoIs to take from this image (guarding\n # against there being fewer than desired)\n bg_pairs_per_this_image = pairs_per_image - fg_pairs_per_this_image\n bg_pairs_per_this_image = np.minimum(bg_pairs_per_this_image, bg_pair_inds.size)\n 
# Sample foreground regions without replacement\n if bg_pair_inds.size > 0:\n bg_pair_inds = npr.choice(\n bg_pair_inds, size=bg_pairs_per_this_image, replace=False)\n keep_pair_inds = np.append(fg_pair_inds, bg_pair_inds)\n all_prd_labels = np.zeros(keep_pair_inds.size, dtype=np.int32)\n all_prd_labels[:fg_pair_inds.size] = fg_prd_labels + 1 # class should start from 1 # size 311\n\n blob_dict['all_prd_labels_int32'] = all_prd_labels.astype(np.int32, copy=False)\n blob_dict['fg_size'] = np.array([fg_pair_inds.size], dtype=np.int32) # this is used to check if there is at least one fg to learn\n\n sampled_sbj_boxes = roidb['sbj_boxes'][keep_pair_inds]\n sampled_obj_boxes = roidb['obj_boxes'][keep_pair_inds]\n # Scale rois and format as (batch_idx, x1, y1, x2, y2)\n sampled_sbj_rois = sampled_sbj_boxes * im_scale\n sampled_obj_rois = sampled_obj_boxes * im_scale\n repeated_batch_idx = batch_idx * blob_utils.ones((keep_pair_inds.shape[0], 1))\n sampled_sbj_rois = np.hstack((repeated_batch_idx, sampled_sbj_rois))\n sampled_obj_rois = np.hstack((repeated_batch_idx, sampled_obj_rois))\n blob_dict['sbj_rois'] = sampled_sbj_rois\n blob_dict['obj_rois'] = sampled_obj_rois\n sampled_rel_rois = box_utils.rois_union(sampled_sbj_rois, sampled_obj_rois)\n blob_dict['rel_rois'] = sampled_rel_rois\n if cfg.MODEL.USE_FREQ_BIAS or cfg.MODEL.USE_SEPARATE_SO_SCORES:\n sbj_labels = roidb['max_sbj_classes'][keep_pair_inds]\n obj_labels = roidb['max_obj_classes'][keep_pair_inds]\n blob_dict['all_sbj_labels_int32'] = sbj_labels.astype(np.int32, copy=False) # 1703\n blob_dict['all_obj_labels_int32'] = obj_labels.astype(np.int32, copy=False) # 1703\n\n return blob_dict\n\n\ndef _add_rel_multilevel_rois(blobs):\n \"\"\"By default training RoIs are added for a single feature map level only.\n When using FPN, the RoIs must be distributed over different FPN levels\n according the level assignment heuristic (see: modeling.FPN.\n map_rois_to_fpn_levels).\n \"\"\"\n lvl_min = cfg.FPN.ROI_MIN_LEVEL\n lvl_max = cfg.FPN.ROI_MAX_LEVEL\n\n def _distribute_rois_over_fpn_levels(rois_blob_names):\n \"\"\"Distribute rois over the different FPN levels.\"\"\"\n # Get target level for each roi\n # Recall blob rois are in (batch_idx, x1, y1, x2, y2) format, hence take\n # the box coordinates from columns 1:5\n lowest_target_lvls = None\n for rois_blob_name in rois_blob_names:\n target_lvls = fpn_utils.map_rois_to_fpn_levels(\n blobs[rois_blob_name][:, 1:5], lvl_min, lvl_max)\n if lowest_target_lvls is None:\n lowest_target_lvls = target_lvls\n else:\n lowest_target_lvls = np.minimum(lowest_target_lvls, target_lvls)\n for rois_blob_name in rois_blob_names:\n # Add per FPN level roi blobs named like: <rois_blob_name>_fpn<lvl>\n fpn_utils.add_multilevel_roi_blobs(\n blobs, rois_blob_name, blobs[rois_blob_name], lowest_target_lvls, lvl_min,\n lvl_max)\n\n _distribute_rois_over_fpn_levels(['sbj_rois'])\n _distribute_rois_over_fpn_levels(['obj_rois'])\n _distribute_rois_over_fpn_levels(['rel_rois'])"
] | [
[
"numpy.hstack",
"numpy.minimum",
"numpy.random.choice",
"numpy.concatenate",
"numpy.append",
"numpy.array",
"numpy.zeros",
"numpy.where"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
Cryaaa/Master-Thesis-Repository | [
"a887fd9dd95c32ce9275d14ec6583bd19cd8bc15"
] | [
"Code/Generating video data/embryo serosa kmeans finsterwalde making the pictures.py"
] | [
"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Sep 28 15:24:40 2021\n\n@author: ryans\n\"\"\"\nimport tribolium_clustering as tc\nimport pyclesperanto_prototype as cle\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport umap\nimport hdbscan\nimport napari\n\n\ndef nice_screenshots_of_1_timepoint(dataset, prediction_list, timepoint, \n cmap, save_data_location, name, rotations):\n import napari\n import pyclesperanto_prototype as cle\n from qtpy.QtCore import QTimer\n \n label_image = dataset.get_labels(timepoint)\n intensity_image = dataset.get_intensity_image(timepoint)\n cum_indices = dataset.cumulative_label_lengths()\n \n prediction = prediction_list[cum_indices[timepoint]:cum_indices[timepoint+1]]\n prop = dataset.get_regionprops_timepoint(timepoint)\n\n regprop_with_predict = pd.concat([prop,pd.DataFrame(prediction, columns = ['prediction'],\n index = prop.index)], axis = 1)\n regprop_with_predict.to_csv(save_data_location + 'regprops with ' + name +' t{}.csv'.format(timepoint))\n\n cluster_image = tc.generate_parametric_cluster_image(label_image,cle.push(label_image),prediction)\n \n for i,rot in enumerate(rotations):\n with napari.gui_qt() as app:\n viewer = napari.Viewer(ndisplay=3)\n viewer.add_image(intensity_image, rotate= rot)\n viewer.add_labels(cluster_image, rotate= rot, color = cmap)\n\n viewer.screenshot(save_data_location + name +' rotation{}'.format(i) + ' t{}.tif'.format(timepoint))\n\n time_in_msec = 1000\n QTimer().singleShot(time_in_msec, app.quit)\n viewer.close()\n\n\n\nfolder = 'D:/Uni/MSTER TUD/Master Thesis/output data/Finsterwalde Gastrulation Labels (new timeframe)//'\nfinster = tc.processed_dataset(folder )\n\npred_location = 'C:/Users/ryans/OneDrive/Documents/Master Thesis/Documents/Figures/embryo serosa video files//'\nfinster_prediction_scaled = np.load(pred_location + 'finsterwalde_scaled_prediction.npy')\nfinster_prediction_unscaled = np.load(pred_location + 'finsterwalde_scaled_prediction.npy')\n\n\n\nimage_output_folder = pred_location + 'finster/'\nrotations_finster = [(0,170,0),(0,0,0)]\ncmap_napari = tc.napari_label_cmap()\n\n \nfor time in range(30,32):\n nice_screenshots_of_1_timepoint(finster,finster_prediction_unscaled,\n time,cmap_napari,image_output_folder, \n 'embryo serosa Kmeans unscaled'\n ,rotations_finster) \n"
] | [
[
"numpy.load",
"pandas.DataFrame"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
jmaberk/RGPUCB | [
"a14cc524fa10bc90166ba0955c611a1c46d0f779"
] | [
"RGP-UCB/prada_bayes_opt/bayesian_optimization_function.py"
] | [
"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue Mar 29 11:49:58 2016\r\n\r\n\"\"\"\r\n\r\nfrom __future__ import division\r\nimport numpy as np\r\n#from sklearn.gaussian_process import GaussianProcess\r\nfrom scipy.optimize import minimize\r\nfrom prada_bayes_opt.acquisition_functions import AcquisitionFunction, unique_rows\r\n#from visualization import Visualization\r\nfrom prada_bayes_opt.prada_gaussian_process import PradaGaussianProcess\r\n#from prada_gaussian_process import PradaMultipleGaussianProcess\r\n\r\nfrom prada_bayes_opt.acquisition_maximization import acq_max\r\nfrom prada_bayes_opt.acquisition_maximization import acq_max_thompson\r\nfrom prada_bayes_opt.acquisition_maximization import acq_max_global\r\nfrom sklearn.metrics.pairwise import euclidean_distances\r\nfrom scipy.spatial.distance import pdist\r\nfrom scipy.spatial.distance import squareform\r\nfrom scipy import optimize\r\nfrom scipy import stats\r\nfrom pyDOE import lhs\r\nimport matplotlib.pyplot as plt\r\nfrom cycler import cycler\r\nimport time\r\nimport math\r\n\r\n\r\n#@author: Julian\r\n\r\n#==============================================================================\r\n#==============================================================================\r\n#==============================================================================\r\n#==============================================================================\r\ncounter = 0\r\n\r\n###############################################################################\r\nclass PradaBayOptFn(object):\r\n\r\n def __init__(self, gp_params, func_params, acq_params, experiment_num, seed):\r\n \"\"\" \r\n Input parameters\r\n ----------\r\n \r\n gp_params: GP parameters\r\n gp_params.l: to compute the kernel\r\n gp_params.theta: paramater for DGP-UCB gamma distribution\r\n gp_params.delta: to compute the kernel\r\n \r\n func_params: function to optimize\r\n func_params.init bound: initial bounds for parameters\r\n func_params.bounds: bounds on parameters \r\n func_params.func: a function to be optimized\r\n \r\n \r\n acq_params: acquisition function, \r\n acq_params.acq_func['name']=['ei','ucb','poi','lei']\r\n ,acq['kappa'] for ucb, acq['k'] for lei\r\n acq_params.opt_toolbox: optimization toolbox 'nlopt','direct','scipy'\r\n \r\n experiment_num: the interation of the GP method. 
Used to make sure each \r\n independant stage of the experiment uses different \r\n initial conditions\r\n seed: Variable used as part of a seed to generate random initial points\r\n \r\n Returns\r\n -------\r\n dim: dimension\r\n scalebounds: bound used thoughout the BO algorithm\r\n time_opt: will record the time spent on optimization\r\n gp: Gaussian Process object\r\n \"\"\"\r\n\r\n self.experiment_num=experiment_num\r\n np.random.seed(self.experiment_num*seed)\r\n self.seed=seed\r\n \r\n # Prior distribution paramaters for the DDB method\r\n self.theta=1\r\n # Find number of parameters\r\n bounds=func_params['bounds']\r\n if 'init_bounds' not in func_params:\r\n init_bounds=bounds\r\n else:\r\n init_bounds=func_params['init_bounds']\r\n # Find input dimention\r\n self.dim = len(bounds)\r\n self.radius=np.ones([self.dim,1])\r\n\r\n # Generate bound array\r\n scalebounds=np.array([np.zeros(self.dim), np.ones(self.dim)])\r\n self.scalebounds=scalebounds.T\r\n \r\n # find function to be optimized\r\n self.f = func_params['f']\r\n\r\n # acquisition function type\r\n \r\n self.acq=acq_params['acq_func']\r\n self.delta=acq_params[\"delta\"]\r\n self.acq['max_iterations']=acq_params['max_iterations']\r\n self.acq['num_initial_points']=acq_params['num_initial_points']\r\n self.acq['iterations_num']=acq_params['iterations_num']\r\n \r\n # Other checks\r\n if 'debug' not in self.acq:\r\n self.acq['debug']=0 \r\n if 'stopping' not in acq_params:\r\n self.stopping_criteria=0\r\n else:\r\n self.stopping_criteria=acq_params['stopping']\r\n if 'optimize_gp' not in acq_params:\r\n self.optimize_gp=0\r\n else: \r\n self.optimize_gp=acq_params['optimize_gp']\r\n if 'marginalize_gp' not in acq_params:\r\n self.marginalize_gp=0\r\n else: \r\n self.marginalize_gp=acq_params['marginalize_gp']\r\n \r\n # optimization toolbox\r\n if 'opt_toolbox' not in acq_params:\r\n if self.acq['name']=='ei_reg':\r\n self.opt_toolbox='unbounded'\r\n else:\r\n self.opt_toolbox='scipy'\r\n else:\r\n self.opt_toolbox=acq_params['opt_toolbox']\r\n self.iteration_factor=acq_params['iteration_factor']\r\n # store X in original scale\r\n self.X_original= None\r\n\r\n # store X in 0-1 scale\r\n self.X = None\r\n \r\n # store y=f(x)\r\n # (y - mean)/(max-min)\r\n self.Y = None\r\n \r\n # y original scale\r\n self.Y_original = None\r\n \r\n # value of the acquisition function at the selected point\r\n self.alpha_Xt=None\r\n self.Tau_Xt=None\r\n \r\n self.time_opt=0\r\n\r\n self.k_Neighbor=2\r\n \r\n # Gaussian Process class\r\n self.gp=PradaGaussianProcess(gp_params)\r\n self.gp_params=gp_params\r\n #self.gp.theta=gp_params['theta']\r\n # acquisition function\r\n self.acq_func = None\r\n \r\n # stop condition\r\n self.stop_flag=0\r\n self.logmarginal=0\r\n \r\n # xt_suggestion, caching for Consensus\r\n self.xstars=[]\r\n self.ystars=np.zeros((2,1))\r\n \r\n # l vector for marginalization GP\r\n self.l_vector =[]\r\n \r\n def init(self,gp_params, n_init_points=3):\r\n \"\"\" \r\n Input parameters\r\n ----------\r\n gp_params: Gaussian Process structure \r\n n_init_points: # init points\r\n \"\"\"\r\n # set seed to allow for reproducible results\r\n np.random.seed(self.experiment_num*self.seed)\r\n print(self.experiment_num)\r\n #Generate initial points on grid\r\n l=np.zeros([n_init_points,self.dim])\r\n bound_length=self.scalebounds[0,1]-self.scalebounds[0,0]\r\n for d in range(0,self.dim):\r\n l[:,d]=lhs(n_init_points)[:,0]\r\n self.X=np.asarray(l)+self.scalebounds[:,0] \r\n self.X=self.X*bound_length #initial inouts\r\n 
print(\"starting points={}\".format(self.X))\r\n y_init=self.f(self.X)\r\n y_init=np.reshape(y_init,(n_init_points,1))\r\n self.Y_original = np.asarray(y_init) #initial outputs \r\n print('initial_bound={}'.format(self.scalebounds))\r\n \r\n def maximize(self,gp_params):\r\n \"\"\"\r\n Main optimization method.\r\n\r\n Input parameters\r\n ----------\r\n gp_params: parameter for Gaussian Process\r\n\r\n Returns\r\n -------\r\n x: recommented point for evaluation\r\n \"\"\"\r\n\r\n if self.stop_flag==1:\r\n return\r\n \r\n if self.acq['name']=='random':\r\n x_max = [np.random.uniform(x[0], x[1], size=1) for x in self.scalebounds]\r\n x_max=np.asarray(x_max)\r\n x_max=x_max.T\r\n self.X_original=np.vstack((self.X_original, x_max))\r\n # evaluate Y using original X\r\n \r\n self.Y_original = np.append(self.Y_original, self.f(x_max))\r\n \r\n # update Y after change Y_original\r\n self.Y=(self.Y_original-np.mean(self.Y_original))/np.std(self.Y_original)\r\n \r\n self.time_opt=np.hstack((self.time_opt,0))\r\n return \r\n\r\n # init a new Gaussian Process\r\n self.gp=PradaGaussianProcess(gp_params)\r\n if self.gp.KK_x_x_inv ==[]:\r\n self.Y=(self.Y_original-np.mean(self.Y_original))/np.std(self.Y_original)\r\n # Find unique rows of X to avoid GP from breaking\r\n ur = unique_rows(self.X)\r\n self.gp.fit(self.X[ur], self.Y[ur])\r\n\r\n \r\n acq=self.acq\r\n self.acq_func = AcquisitionFunction(self.acq,self.delta)\r\n if acq['debug']==1:\r\n logmarginal=self.gp.log_marginal_lengthscale(gp_params['l'],gp_params['noise_delta'])\r\n print(gp_params['l'])\r\n print(\"log marginal before optimizing ={:.4f}\".format(logmarginal))\r\n self.logmarginal=logmarginal\r\n \r\n if logmarginal<-999999:\r\n logmarginal=self.gp.log_marginal_lengthscale(gp_params['l'],gp_params['noise_delta'])\r\n\r\n if self.optimize_gp==1 and len(self.Y)%2*self.dim==0 and len(self.Y)>5*self.dim:\r\n\r\n print(\"Initial length scale={}\".format(gp_params['l']))\r\n newl = self.gp.optimize_lengthscale(gp_params['l'],gp_params['noise_delta'],self.scalebounds)\r\n gp_params['l']=newl\r\n print(\"New length scale={}\".format(gp_params['l']))\r\n\r\n # init a new Gaussian Process after optimizing hyper-parameter\r\n self.gp=PradaGaussianProcess(gp_params)\r\n # Find unique rows of X to avoid GP from breaking\r\n ur = unique_rows(self.X)\r\n self.gp.fit(self.X[ur], self.Y[ur])\r\n \r\n # Set acquisition function\r\n start_opt=time.time()\r\n\r\n y_max = self.Y.max() \r\n \r\n if 'xstars' not in globals():\r\n xstars=[]\r\n \r\n self.xstars=xstars\r\n\r\n self.acq['xstars']=xstars\r\n self.acq['WW']=False\r\n self.acq['WW_dim']=False\r\n self.acq_func = AcquisitionFunction(self.acq,self.delta)\r\n\r\n if acq['name']==\"thompson\":\r\n x_max = acq_max_thompson(gp=self.gp,y_max=y_max,bounds=self.scalebounds)\r\n else:\r\n x_max = acq_max(ac=self.acq_func.acq_kind,gp=self.gp,y_max=y_max,bounds=self.scalebounds,opt_toolbox=self.opt_toolbox,seeds=self.xstars)\r\n \r\n \r\n\r\n \r\n val_acq=self.acq_func.acq_kind(x_max,self.gp,y_max)\r\n #print x_max\r\n #print val_acq\r\n if self.stopping_criteria!=0 and val_acq<self.stopping_criteria:\r\n val_acq=self.acq_func.acq_kind(x_max,self.gp,y_max)\r\n\r\n self.stop_flag=1\r\n print(\"Stopping Criteria is violated. 
Stopping Criteria is {:.15f}\".format(self.stopping_criteria))\r\n \r\n \r\n self.alpha_Xt= np.append(self.alpha_Xt,val_acq)\r\n \r\n mean,var=self.gp.predict(x_max, eval_MSE=True)\r\n var.flags['WRITEABLE']=True\r\n var[var<1e-20]=0\r\n #self.Tau_Xt= np.append(self.Tau_Xt,val_acq/var)\r\n \r\n # record the optimization time\r\n finished_opt=time.time()\r\n elapse_opt=finished_opt-start_opt\r\n self.time_opt=np.hstack((self.time_opt,elapse_opt))\r\n \r\n # store X \r\n self.X = np.vstack((self.X, x_max.reshape((1, -1))))\r\n\r\n # evaluate Y using original X\r\n self.Y_original = np.append(self.Y_original, self.f(x_max))\r\n \r\n # update Y after change Y_original\r\n self.Y=(self.Y_original-np.mean(self.Y_original))/np.std(self.Y_original)\r\n\r\n self.experiment_num=self.experiment_num+1"
] | [
[
"numpy.hstack",
"numpy.random.seed",
"numpy.reshape",
"numpy.asarray",
"numpy.ones",
"numpy.append",
"numpy.std",
"numpy.mean",
"numpy.random.uniform",
"numpy.zeros",
"numpy.vstack"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
zoeimogen/AoC2019 | [
"44ffc08a38cb07273d7c4fd49200fb7912d4a1cb"
] | [
"tests/day09_test.py"
] | [
"#!/usr/bin/python3\n'''Advent of Code 2019 Day 5 tests'''\nimport unittest\nimport os\nimport sys\nimport numpy\nsys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))\nfrom aoc2019 import intcode # pylint: disable=wrong-import-position\n\nclass TestUM(unittest.TestCase):\n '''Tests from day nine, although we actually test intcode rather than day09.py'''\n def test_day09part1(self) -> None:\n '''Part one tests'''\n prg = [109, 1, 204, -1, 1001, 100, 1, 100, 1008, 100, 16, 101, 1006, 101, 0, 99]\n prg.extend((map(int, numpy.zeros(100))))\n pgm = intcode.Program('standard', prg)\n self.assertEqual(pgm.run(), prg[:16])\n\n prg = [1102, 34915192, 34915192, 7, 4, 7, 99, 0]\n prg.extend((map(int, numpy.zeros(100))))\n pgm = intcode.Program('standard', prg)\n output = pgm.run()[0]\n self.assertEqual(len(f\"{output}\"), 16)\n\n prg = [104, 1125899906842624, 99]\n pgm = intcode.Program('standard', prg)\n self.assertEqual(pgm.run()[0], prg[1])\n"
] | [
[
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
timsque/deep-histopath | [
"a91619cc5b20c5a760d72d89124e558306ef5fc3"
] | [
"resnet.py"
] | [
"\"\"\"Custom ResNet model with pre-activation residual blocks.\n\nHe K, Zhang X, Ren S, Sun J. Identity Mappings in Deep Residual\nNetworks. arXiv.org. 2016.\n\nAuthor: Mike Dusenberry\n\"\"\"\nimport tensorflow as tf\n\n\ndef res_block(xin, dbottle, dout, k, stride):\n \"\"\"A residual block.\n\n This implements the \"pre-activation\" formulation of a residual block,\n as discussed in:\n\n He K, Zhang X, Ren S, Sun J. Identity Mappings in Deep Residual\n Networks. arXiv.org. 2016.\n\n Args:\n xin: Input tensor.\n dbottle: Bottleneck depth.\n dout: Output depth.\n k: Integer kernel size.\n stride: Integer stride.\n\n Returns:\n Output tensor for the block.\n \"\"\"\n depth_axis = 3 if tf.keras.backend.image_data_format() == 'channels_last' else 1\n din = tf.shape(xin)[depth_axis] # input depth\n he_init = tf.keras.initializers.VarianceScaling(scale=2.0, mode='fan_in', distribution='normal')\n\n # TODO: ReLUs have been quite successful, but it still seems like it could be a problem due to\n # gradient stopping at ReLU zero values. Perhaps look into leaky ReLUs, ELUs, etc.\n\n # conv 1x1\n x = tf.keras.layers.BatchNormalization(axis=depth_axis, momentum=0.9, epsilon=1e-4)(xin)\n x = tf.keras.layers.Activation('relu')(x)\n x = tf.keras.layers.Conv2D(\n dbottle, (1, 1), strides=(stride, stride), kernel_initializer=he_init)(x)\n\n # conv 3x3\n x = tf.keras.layers.BatchNormalization(axis=depth_axis, momentum=0.9, epsilon=1e-4)(x)\n x = tf.keras.layers.Activation('relu')(x)\n x = tf.keras.layers.Conv2D(dbottle, (k, k), padding='same', kernel_initializer=he_init)(x)\n\n # conv 1x1\n x = tf.keras.layers.BatchNormalization(axis=depth_axis, momentum=0.9, epsilon=1e-4)(x)\n x = tf.keras.layers.Activation('relu')(x)\n x = tf.keras.layers.Conv2D(dout, (1, 1), kernel_initializer=he_init)(x)\n\n # shortcut\n if din == dout: # identity shortcut for same input/output depths\n shortcut = xin\n else: # conv shortcut to change depth (usually to increase depth)\n shortcut = tf.keras.layers.Conv2D(\n dout, (1, 1), strides=(stride, stride), kernel_initializer=he_init)(xin)\n\n x = tf.keras.layers.add([x, shortcut])\n\n return x\n\n\ndef ResNet(xin, shape): # camel case makes it feel like a class -- eventually we'll subclass Model\n \"\"\"Custom ResNet model with pre-activation residual blocks.\n\n Reference:\n\n He K, Zhang X, Ren S, Sun J. Identity Mappings in Deep Residual\n Networks. arXiv.org. 
2016.\n\n Args:\n xin: Input tensor.\n shape: Integer tuple of length 3 containing the shape of a single\n example.\n\n Returns:\n A Keras Model.\n\n Example:\n ```\n import tensorflow as tf\n import numpy as np\n import resnet\n\n shape = (64, 64, 3)\n xin = tf.placeholder(tf.float32, shape=(None, *shape))\n model = resnet.ResNet(xin, shape)\n\n model.summary()\n\n sess = tf.Session()\n sess.run(tf.global_variables_initializer())\n out = sess.run(model.output, feed_dict={xin: np.random.randn(10, *shape)})\n print(out)\n ```\n \"\"\"\n # TODO: `tf.keras.layers` -> `tf.layers`\n assert len(shape) == 3\n depth_axis = 3 if tf.keras.backend.image_data_format() == 'channels_last' else 1\n\n d = [16, 32, 64, 128] # depths (must be divisible by 4)\n db = [int(depth/4) for depth in d] # bottleneck depths\n n = 3 # num layers at each depth\n\n # input & conv\n with tf.variable_scope(\"beg\"):\n xin = tf.keras.layers.Input(tensor=xin, shape=shape) # shape (h,w,c)\n he_init = tf.keras.initializers.VarianceScaling(scale=2.0, mode='fan_in', distribution='normal')\n x = tf.keras.layers.Conv2D(\n d[0], (3, 3), strides=(2, 2),\n padding='same', kernel_initializer=he_init)(xin) # shape (h/2,w/2,d[0])\n\n # stage 1\n with tf.variable_scope(\"stage1\"):\n x = res_block(x, db[0], d[1], 3, 1) # shape (h/2,w/2,d[1]) <-- increase depth\n for i in range(n-1):\n x = res_block(x, db[1], d[1], 3, 1) # shape (h/2,w/2,d[1])\n\n # stage 2\n with tf.variable_scope(\"stage2\"):\n x = res_block(x, db[1], d[2], 3, 2) # shape (h/4,w/4,d[2]) <-- increase depth, cut spatial size\n for i in range(n-1):\n x = res_block(x, db[2], d[2], 3, 1) # shape (h/4,w/4,d[2])\n\n # stage 3\n with tf.variable_scope(\"stage3\"):\n x = res_block(x, db[2], d[3], 3, 2) # shape (h/8,w/8,d[3]) <-- increase depth, cut spatial size\n for i in range(n-1):\n x = res_block(x, db[3], d[3], 3, 1) # shape (h/8,w/8,d[3])\n\n # final functions\n with tf.variable_scope(\"end\"):\n x = tf.keras.layers.BatchNormalization(\n axis=depth_axis, momentum=0.9, epsilon=1e-4)(x) # shape (h/8,w/8,d[3])\n x = tf.keras.layers.Activation('relu')(x) # shape (h/8,w/8,d[3])\n if shape[1] == 64:\n x = tf.keras.layers.AvgPool2D((8, 8))(x) # shape (h/64,w/64,d[3])\n elif shape[1] == 128:\n x = tf.keras.layers.AvgPool2D((16, 16))(x) # shape (h/128,w/128,d[3]) NOTE: assumes 128x128\n elif shape[1] == 100:\n x = tf.keras.layers.AvgPool2D((12, 12))(x) # shape (h/100,w/100,d[3]) NOTE: assumes 100x100\n else:\n # Note for potential surgery reasons, we won't use global pooling\n #x = tf.keras.layers.GlobalAvgPool2D()(x) # shape (h/64,w/64,d[3])\n raise Exception(\"patch size unsupported\")\n init = tf.keras.initializers.VarianceScaling(scale=1.0, mode='fan_in', distribution='normal')\n # TODO: this is a binary classification problem so optimizing a loss derived from a Bernoulli\n # distribution is appropriate. however, would the dynamics of the training algorithm be more\n # stable if we treated this as a multi-class classification problem and derived a loss from a\n # Multinomial distribution with two classes (and a single trial)? 
it would be\n # over-parameterized, but then again, the deep net itself is already heavily parameterized.\n x = tf.keras.layers.Conv2D(\n 1, (1, 1), kernel_initializer=init)(x) # shape (h/64,w/64,1) <-- could use this for surgery\n #2, (1, 1), kernel_initializer=init)(x) # shape (h/64,w/64,2) <-- could use this for surgery\n x = tf.keras.layers.Flatten()(x) # shape ((h/64)*(w/64)*1) <-- normally will be a single value\n\n # create model (106 functions)\n model = tf.keras.Model(xin, x, name='resnet')\n\n return model\n\n"
] | [
[
"tensorflow.keras.layers.Activation",
"tensorflow.keras.backend.image_data_format",
"tensorflow.shape",
"tensorflow.keras.layers.Conv2D",
"tensorflow.keras.Model",
"tensorflow.keras.layers.BatchNormalization",
"tensorflow.variable_scope",
"tensorflow.keras.initializers.VarianceScaling",
"tensorflow.keras.layers.AvgPool2D",
"tensorflow.keras.layers.add",
"tensorflow.keras.layers.Flatten",
"tensorflow.keras.layers.Input"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"2.7",
"2.6",
"2.4",
"2.3",
"2.5",
"2.2"
]
}
] |
hqucms/onnxruntime | [
"6e4e76414639f50836a64546603c8957227857b0"
] | [
"docs/python/examples/plot_backend.py"
] | [
"# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License.\n\n\"\"\"\n\n.. _l-example-backend-api:\n\nONNX Runtime Backend for ONNX\n=============================\n\n*ONNX Runtime* extends the \n`onnx backend API <https://github.com/onnx/onnx/blob/master/docs/ImplementingAnOnnxBackend.md>`_\nto run predictions using this runtime.\nLet's use the API to compute the prediction\nof a simple logistic regression model.\n\"\"\"\nimport numpy as np\nfrom onnxruntime import datasets\nimport onnxruntime.backend as backend\nfrom onnx import load\n\nname = datasets.get_example(\"logreg_iris.onnx\")\nmodel = load(name)\n\nrep = backend.prepare(model, 'CPU')\nx = np.array([[-1.0, -2.0]], dtype=np.float32)\nlabel, proba = rep.run(x)\nprint(\"label={}\".format(label))\nprint(\"probabilities={}\".format(proba))\n\n########################################\n# The device depends on how the package was compiled,\n# GPU or CPU.\nfrom onnxruntime import get_device\nprint(get_device())\n\n########################################\n# The backend can also directly load the model\n# without using *onnx*.\n\nrep = backend.prepare(name, 'CPU')\nx = np.array([[-1.0, -2.0]], dtype=np.float32)\nlabel, proba = rep.run(x)\nprint(\"label={}\".format(label))\nprint(\"probabilities={}\".format(proba))\n\n#######################################\n# The backend API is implemented by other frameworks\n# and makes it easier to switch between multiple runtimes\n# with the same API.\n"
] | [
[
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
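As a small companion to the onnxruntime backend example above: the ONNX backend interface also defines a supports_device check, which the sketch below uses to pick a device before calling prepare. The reuse of the bundled logreg_iris.onnx example and the CPU fallback are assumptions for illustration, not part of the original record.

import numpy as np
import onnxruntime.backend as backend
from onnxruntime import datasets

name = datasets.get_example("logreg_iris.onnx")

# Prefer GPU only if this onnxruntime build actually supports it.
device = "GPU" if backend.supports_device("GPU") else "CPU"
rep = backend.prepare(name, device)
label, proba = rep.run(np.array([[-1.0, -2.0]], dtype=np.float32))
print("device={} label={} probabilities={}".format(device, label, proba))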
qdpham/machine-learning-engineering-for-production-public | [
"e02961fa72cd5d009bfb699c2e76594b9dbad1bf"
] | [
"course4/week3-ungraded-labs/C4_W3_Lab_4_Github_Actions/app/main.py"
] | [
"# added by QDP to run the test using the CI/CD pipeline\nimport pickle\nimport numpy as np\nfrom typing import List\nfrom fastapi import FastAPI\nfrom pydantic import BaseModel, conlist\n\n\n\napp = FastAPI(title=\"Predicting Wine Class with batching\")\n\n# Open classifier in global scope\nwith open(\"models/wine-95-fixed.pkl\", \"rb\") as file:\n clf = pickle.load(file)\n\n\nclass Wine(BaseModel):\n batches: List[conlist(item_type=float, min_items=13, max_items=13)]\n\n\[email protected](\"/predict\")\ndef predict(wine: Wine):\n batches = wine.batches\n np_batches = np.array(batches)\n pred = clf.predict(np_batches).tolist()\n return {\"Prediction\": pred}\n"
] | [
[
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
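The FastAPI record above only shows the server side of the batched wine classifier. A hypothetical client call is sketched below; the host, port, and feature values are assumptions, but the payload shape (a "batches" list whose rows hold exactly 13 floats) follows from the Wine model in the record.

import requests

# Hypothetical local address; the service above does not pin a host or port.
url = "http://127.0.0.1:8000/predict"
payload = {"batches": [[1.0] * 13, [2.0] * 13]}  # each batch row needs exactly 13 features
response = requests.post(url, json=payload)
print(response.json())  # expected shape: {"Prediction": [<class>, <class>]}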
world2vec/coref | [
"a314773215f7f59e3dc1cb14034460a23734b291"
] | [
"coref_ops.py"
] | [
"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nimport os\n\nimport tensorflow as tf\nfrom tensorflow.python import pywrap_tensorflow\n\ncur_dir = os.path.dirname(os.path.realpath(__file__))\n\ncoref_op_library = tf.load_op_library(os.path.join(cur_dir, \"coref_kernels.so\"))\n\nextract_spans = coref_op_library.extract_spans\ntf.NotDifferentiable(\"ExtractSpans\")\n"
] | [
[
"tensorflow.NotDifferentiable"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.4",
"1.5",
"1.7",
"0.12",
"1.0",
"1.2"
]
}
] |
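The coref_ops.py record above loads a prebuilt coref_kernels.so with tf.load_op_library, but how that shared library gets compiled is outside the record. The sketch below only prints the usual TensorFlow custom-op build flags (available via tf.sysconfig in TF 1.4+); the g++ invocation mentioned in the comment is an assumption about a typical build, not this repo's actual command.

import tensorflow as tf

# Flags a typical custom-op build would pass to the compiler/linker, e.g.
# g++ -std=c++11 -shared coref_kernels.cc -o coref_kernels.so -fPIC <flags>.
print(" ".join(tf.sysconfig.get_compile_flags()))
print(" ".join(tf.sysconfig.get_link_flags()))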
graphcore/poprithms | [
"9975a6a343891e3c5f8968a9507261c1185029ed"
] | [
"poprithms/tests/regression/schedule/shift/summarize.py"
] | [
"# Copyright (c) 2020 Graphcore Ltd. All rights reserved.\nimport matplotlib.pyplot as mpl\nimport matplotlib.gridspec as gridspec\nimport numpy as np\nimport sys\nimport os\n\n\ndef run(logsDir, plotsDir=\".\"):\n \"\"\"\n logsDir : \n -- where to read all log files from (.txt and .log extensions). \n This is the data which will be plotted.\n plotsDir : \n -- where to write pdf figures to.\n \"\"\"\n\n lines = []\n for fn in [\n os.path.join(logsDir, x) for x in os.listdir(logsDir)\n if \".txt\" in x or \".log\" in x\n ]:\n filly = open(fn, \"r\")\n lines += filly.readlines()\n\n print(\"In run with \", len(lines), \" lines\")\n\n records = {}\n description = \"\"\n settingsString = \"\"\n for l in lines:\n if \"description\" in l:\n if (description):\n if (description not in records.keys()):\n records[description] = {}\n if settingsString not in records[description]:\n records[description][settingsString] = []\n records[description][settingsString].append({\n \"timeInit\":\n timeInit,\n \"timeShift\":\n timeShift,\n \"nOpsBefore\":\n nOpsBefore,\n \"nOpsAfter\":\n nOpsAfter\n })\n description = l.split(\"=\")[1].strip()\n settingsString = \"\"\n\n elif \"timeInitialize\" in l:\n timeInit = float(l.split(\"=\")[1].split()[0].strip())\n elif \"timeShift\" in l:\n timeShift = float(l.split(\"=\")[1].split()[0].strip())\n elif \"nOpsBefore\" in l:\n nOpsBefore = int(l.split(\"=\")[1])\n elif \"nOpsAfter\" in l:\n nOpsAfter = int(l.split(\"=\")[1])\n else:\n #shorten the string for cleaner figure legend:\n if \"logTime=\" in l:\n l = l.split(\"logTime=\")[1].split(\"at\")[0]\n settingsString += l\n\n nPlots = len(records.keys())\n for i, k in enumerate(records.keys()):\n gs1 = gridspec.GridSpec(1, 1)\n mpl.subplot(gs1[0:1, 0:1])\n mpl.title(k)\n mpl.ylabel(\"time [s]\")\n mpl.xlabel(\"number of Ops\")\n for summary in records[k].keys():\n rs = records[k][summary]\n ax = mpl.gca()\n ax.set_xscale('log', basex=2)\n ax.set_yscale('log', basey=2)\n\n label = summary.replace('\\n', ' ').replace(\"logging=0 \",\n \"\").replace(\n \"tieBreaker=\", \"\")\n\n mpl.plot([x[\"nOpsBefore\"] for x in rs],\n [x[\"timeShift\"] + x[\"timeInit\"] for x in rs],\n linestyle=\":\",\n marker=\"o\",\n label=label)\n mpl.legend(loc=\"lower right\")\n\n plotfilename = os.path.join(plotsDir, \"%s.pdf\" % (k, ))\n print(\"Saving figure at \", plotfilename)\n mpl.savefig(plotfilename)\n\n\nif __name__ == \"__main__\":\n # expected use is something like\n # >> python3 summarize.py logs/ plots/\n if (len(sys.argv) != 3):\n raise RuntimeError(\n \"Expected 2 arguments: (0) where the log files are and (1) where to store pdf plots\"\n )\n\n logsDir = sys.argv[1]\n plotsDir = sys.argv[2]\n run(logsDir, plotsDir)\n"
] | [
[
"matplotlib.pyplot.legend",
"matplotlib.pyplot.gca",
"matplotlib.pyplot.title",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.subplot",
"matplotlib.gridspec.GridSpec",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.ylabel"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
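The summarize.py record above reads whatever .txt/.log files sit in its logs directory, but the expected log format is only implicit in the parser. The sketch below writes a minimal file the parser would accept; the file name, description, and timing values are made up for illustration. Note that the parser only flushes a block when it meets the next description= line, hence the trailing sentinel.

import os

os.makedirs("logs", exist_ok=True)
with open(os.path.join("logs", "example.log"), "w") as f:
    for n_ops, t_init, t_shift in [(128, 0.01, 0.05), (256, 0.02, 0.12)]:
        f.write("description=demo-graph\n")
        f.write("tieBreaker=GREEDY logging=0\n")          # folded into the legend label
        f.write("timeInitialize={} [s]\n".format(t_init))
        f.write("timeShift={} [s]\n".format(t_shift))
        f.write("nOpsBefore={}\n".format(n_ops))
        f.write("nOpsAfter={}\n".format(n_ops))
    f.write("description=end\n")  # sentinel so the last block above is recorded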
IanYeung/ReCp | [
"1a7ace0e1ca3c262e24a222f3f0ab0d5674e9410",
"1a7ace0e1ca3c262e24a222f3f0ab0d5674e9410"
] | [
"basicsr/archs/iconvsr_arch.py",
"basicsr/utils/img_util.py"
] | [
"import torch\nfrom torch import nn as nn\nfrom torch.nn import functional as F\n\nfrom basicsr.utils.registry import ARCH_REGISTRY\nfrom .spynet_arch import SpyNet\nfrom .basicvsr_arch import ConvResBlock, PSUpsample\nfrom .edvr_arch import PredeblurModule, PCDAlignment, TSAFusion\nfrom .arch_util import ResidualBlockNoBN, flow_warp, make_layer\n\n\n@ARCH_REGISTRY.register()\nclass IconVSR(nn.Module):\n \"\"\"IconVSR network for video super-resolution.\n Args:\n num_feat (int): Channel number of intermediate features. \n Default: 64.\n num_block (int): Block number of residual blocks in each propagation branch.\n Default: 30.\n keyframe_stride (int): Number determining the keyframes. If stride=5,\n then the (0, 5, 10, 15, ...)-th frame will be the keyframes.\n Default: 5.\n temporal_padding (int): Number of frames to be padded at two ends of\n the sequence. 2 for REDS and 3 for Vimeo-90K. Default: 2\n spynet_path (str): The path of Pre-trained SPyNet model.\n Default: None.\n \"\"\"\n def __init__(self, \n num_feat=64, num_block=30, \n keyframe_stride=5, temporal_padding=2, \n spynet_path=None):\n super(IconVSR, self).__init__()\n\n self.num_feat = num_feat\n self.t_pad = temporal_padding\n self.kframe_stride = keyframe_stride\n\n self.edvr = EDVRExtractor(num_frame=temporal_padding*2 + 1,\n center_frame_idx=temporal_padding)\n \n # Flow-based Feature Alignment\n self.spynet = SpyNet(load_path=spynet_path)\n\n # Coupled Propagation and Information-refill\n self.backward_fuse = nn.Conv2d(num_feat * 2, num_feat, kernel_size=3, stride=1, padding=1, bias=True)\n self.backward_resblocks = ConvResBlock(num_feat + 3, num_feat, num_block)\n\n self.forward_fuse = nn.Conv2d(num_feat * 2, num_feat, kernel_size=3, stride=1, padding=1, bias=True)\n self.forward_resblocks = ConvResBlock(num_feat + 3, num_feat, num_block)\n\n # Pixel-Shuffle Upsampling\n self.up1 = PSUpsample(num_feat, num_feat, scale_factor=2)\n self.up2 = PSUpsample(num_feat, 64, scale_factor=2)\n\n # The channel of the tail layers is 64\n self.conv_hr = nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=1)\n self.conv_last = nn.Conv2d(64, 3, kernel_size=3, stride=1, padding=1)\n\n # Global Residual Learning\n self.img_up = nn.Upsample(scale_factor=4, mode='bilinear', align_corners=False)\n\n # Activation Function\n self.lrelu = nn.LeakyReLU(negative_slope=0.1, inplace=True)\n\n def comp_flow(self, lrs):\n \"\"\"Compute optical flow using SPyNet for feature warping.\n\n Args:\n lrs (tensor): LR frames, the shape is (n, t, c, h, w)\n\n Return:\n tuple(Tensor): Optical flow. \n forward_flow refers to the flow from current frame to the previous frame. \n backward_flow is the flow from current frame to the next frame.\n \"\"\"\n n, t, c, h, w = lrs.size()\n forward_lrs = lrs[:, 1:, :, :, :].reshape(-1, c, h, w) # 'n t c h w -> (n t) c h w'\n backward_lrs = lrs[:, :-1, :, :, :].reshape(-1, c, h, w) # 'n t c h w -> (n t) c h w')\n \n forward_flow = self.spynet(forward_lrs, backward_lrs).view(n, t-1, 2, h, w)\n backward_flow = self.spynet(backward_lrs, forward_lrs).view(n, t-1, 2, h, w)\n\n return forward_flow, backward_flow\n\n def extract_refill_features(self, lrs, keyframe_idx):\n \"\"\"Compute the features for information refill.\n\n We use EDVR-M to extract features from the selected keyframes\n and its neighbor. 
The window size in EDVR-M is 5 for REDS and\n 7 for Vimeo-90K (following the settings in EDVR).\n\n Args:\n lrs (Tensor): The input LR sequence with shape (n, t, c, h, w).\n keyframe_idx (list[int]): List of the indices of the selected\n keyframes.\n\n Returns:\n dict: The features for information-refill. The keys are the\n corresponding index.\n\n \"\"\"\n lrs_start = lrs[:, 1+self.t_pad : 1+self.t_pad*2].flip(1)\n lrs_end = lrs[:, -1-self.t_pad*2 : -1-self.t_pad].flip(1)\n lrs = torch.cat([lrs_start, lrs, lrs_end], dim=1)\n num_frame = 2 * self.t_pad + 1\n\n refill_feat = {}\n for i in keyframe_idx:\n refill_feat[i] = self.edvr(lrs[:, i:i + num_frame].contiguous())\n return refill_feat\n \n def spatial_padding(self, lrs):\n \"\"\" Apply spatial pdding.\n\n Since the PCD module in EDVR requires a resolution of a multiple of 4, \n we use reflect padding on the LR frame to match the requirements..\n\n Args:\n lrs (Tensor): Input LR sequence with shape (n, t, c, h, w).\n\n Returns:\n Tensor: Padded LR sequence with shape (n, t, c, h_pad, w_pad).\n\n \"\"\"\n n, t, c, h, w = lrs.size()\n\n pad_h = (4 - h % 4) % 4\n pad_w = (4 - w % 4) % 4\n\n # padding\n lrs = lrs.view(-1, c, h, w)\n lrs = F.pad(lrs, [0, pad_w, 0, pad_h], mode='reflect')\n\n return lrs.view(n, t, c, h + pad_h, w + pad_w)\n \n def forward(self, lrs):\n n, t, c, h_in, w_in = lrs.size()\n assert h_in >= 64 and w_in >= 64, (\n 'The height and width of input should be at least 64, '\n f'but got {h_in} and {w_in}.')\n \n # Padding\n lrs = self.spatial_padding(lrs)\n h, w = lrs.size(3), lrs.size(4)\n\n # get the keyframe for information-refill\n keyframe_idx = list(range(0, t, self.kframe_stride))\n if keyframe_idx[-1] != t-1:\n keyframe_idx.append(t-1) # the last frame is a keyframe\n \n # compute flow and refill\n forward_flow, backward_flow = self.comp_flow(lrs)\n refill_feat = self.extract_refill_features(lrs, keyframe_idx)\n\n # backward propgation\n rlt = []\n feat_prop = lrs.new_zeros(n, self.num_feat, h, w)\n for i in range(t-1, -1, -1):\n curr_lr = lrs[:, i, :, :, ]\n if i < t-1:\n flow = backward_flow[:, i, :, :, :]\n feat_prop = flow_warp(feat_prop, flow.permute(0, 2, 3, 1))\n if i in keyframe_idx:\n feat_prop = torch.cat([feat_prop, refill_feat[i]], dim=1)\n feat_prop = self.backward_fuse(feat_prop)\n feat_prop = torch.cat([feat_prop, curr_lr], dim=1)\n feat_prop = self.backward_resblocks(feat_prop)\n rlt.append(feat_prop)\n rlt = rlt[::-1]\n\n # forward propgation\n feat_prop = torch.zeros_like(feat_prop)\n for i in range(0, t):\n curr_lr = lrs[:, i, :, :, :]\n if i > 0:\n flow = forward_flow[:, i-1, :, :, :]\n feat_prop = flow_warp(feat_prop, flow.permute(0, 2, 3, 1))\n if i in keyframe_idx:\n feat_prop = torch.cat([feat_prop, refill_feat[i]], dim=1)\n feat_prop = self.forward_fuse(feat_prop)\n feat_prop = torch.cat([curr_lr, rlt[i], feat_prop], dim=1)\n feat_prop = self.forward_resblocks(feat_prop)\n\n # Upsampling\n sr_rlt = self.lrelu(self.up1(sr_rlt))\n sr_rlt = self.lrelu(self.up2(sr_rlt))\n sr_rlt = self.lrelu(self.conv_hr(sr_rlt))\n sr_rlt = self.conv_last(sr_rlt)\n\n # Global Residual Learning\n base = self.img_up(curr_lr)\n\n sr_rlt += base\n rlt[i] = sr_rlt\n return torch.stack(rlt, dim=1)[:, :, :, :4 * h_in, :4 * w_in]\n\n\nclass EDVRExtractor(nn.Module):\n \"\"\"EDVR feature extractor for information-refill in IconVSR.\n\n We use EDVR-M in IconVSR.\n\n Paper:\n EDVR: Video Restoration with Enhanced Deformable Convolutional Networks.\n\n Args:\n num_in_ch (int): Channel number of input image. 
Default: 3.\n num_out_ch (int): Channel number of output image. Default: 3.\n num_feat (int): Channel number of intermediate features. Default: 64.\n num_frame (int): Number of input frames. Default: 5.\n deformable_groups (int): Deformable groups. Defaults: 8.\n num_extract_block (int): Number of blocks for feature extraction.\n Default: 5.\n center_frame_idx (int): The index of center frame. Frame counting from\n 0. Default: Middle of input frames.\n hr_in (bool): Whether the input has high resolution. Default: False.\n with_predeblur (bool): Whether has predeblur module.\n Default: False.\n with_tsa (bool): Whether has TSA module. Default: True.\n \"\"\"\n def __init__(self, num_in_ch=3, num_out_ch=3, num_feat=64, num_frame=5,\n deformable_groups=8, num_extract_block=5,\n center_frame_idx=None, hr_in=None, \n with_predeblur=False, with_tsa=True):\n super(EDVRExtractor, self).__init__()\n\n if center_frame_idx is None:\n self.center_frame_idx = num_frame // 2\n else:\n self.center_frame_idx = center_frame_idx\n \n self.hr_in = hr_in\n self.with_predeblur = with_predeblur\n self.with_tsa = with_tsa\n\n # extract features for each frame\n if self.with_predeblur:\n self.pre_deblur = PredeblurModule(num_feat=num_feat, hr_in=self.hr_in)\n self.conv_1x1 = nn.Conv2d(num_feat, num_feat, kernel_size=1, stride=1, padding=0, bias=True)\n else:\n self.conv_first = nn.Conv2d(num_in_ch, num_feat, kernel_size=3, stride=1, padding=1)\n \n # extract pyramid features \n self.feature_extraction = make_layer(ResidualBlockNoBN, num_extract_block, num_feat=num_feat)\n self.conv_l2_1 = nn.Conv2d(num_feat, num_feat, kernel_size=3, stride=2, padding=1)\n self.conv_l2_2 = nn.Conv2d(num_feat, num_feat, kernel_size=3, stride=1, padding=1)\n self.conv_l3_1 = nn.Conv2d(num_feat, num_feat, kernel_size=3, stride=2, padding=1)\n self.conv_l3_2 = nn.Conv2d(num_feat, num_feat, kernel_size=3, stride=1, padding=1)\n\n # pcd and tsa module\n self.pcd_align = PCDAlignment(num_feat=num_feat, deformable_groups=deformable_groups)\n \n if self.with_tsa:\n self.fusion = TSAFusion(\n num_feat=num_feat,\n num_frame=num_frame,\n center_frame_idx=self.center_frame_idx)\n else:\n self.fusion = nn.Conv2d(num_frame * num_feat, num_feat, 1, 1)\n\n # activation function\n self.lrelu = nn.LeakyReLU(negative_slope=0.1, inplace=True)\n \n def forward(self, x):\n n, t, c, h, w = x.size()\n\n if self.hr_in:\n assert h % 16 == 0 and w % 16 == 0, (\n 'The height and width must be multiple of 16.')\n else:\n assert h % 4 == 0 and w % 4 == 0, (\n 'The height and width must be multiple of 4.')\n \n # extract features for each frame\n # Level 1\n if self.with_predeblur:\n feat_l1 = self.conv_1x1(self.pre_deblur(x.view(-1, c, h, w)))\n if self.hr_in:\n h, w = h // 4, w // 4\n else:\n feat_l1 = self.lrelu(self.conv_first(x.view(-1, c, h, w)))\n \n feat_l1 = self.feature_extraction(feat_l1)\n\n # Level 2\n feat_l2 = self.lrelu(self.conv_l2_1(feat_l1))\n feat_l2 = self.lrelu(self.conv_l2_2(feat_l2))\n\n # Level 3\n feat_l3 = self.lrelu(self.conv_l3_1(feat_l2))\n feat_l3 = self.lrelu(self.conv_l3_2(feat_l3))\n\n feat_l1 = feat_l1.view(n, t, -1, h, w)\n feat_l2 = feat_l2.view(n, t, -1, h // 2, w // 2)\n feat_l3 = feat_l3.view(n, t, -1, h // 4, w // 4)\n\n # PCD alignment\n ref_feat_l = [ # reference feature list\n feat_l1[:, self.center_frame_idx, :, :, :].clone(),\n feat_l2[:, self.center_frame_idx, :, :, :].clone(),\n feat_l3[:, self.center_frame_idx, :, :, :].clone()\n ]\n aligned_feat = []\n for i in range(t):\n nbr_feat_l = [ # neighboring feature 
list\n feat_l1[:, i, :, :, :].clone(), feat_l2[:, i, :, :, :].clone(),\n feat_l3[:, i, :, :, :].clone()\n ]\n aligned_feat.append(self.pcd_align(nbr_feat_l, ref_feat_l))\n aligned_feat = torch.stack(aligned_feat, dim=1) # (n, t, c, h, w)\n\n if not self.with_tsa:\n aligned_feat = aligned_feat.view(n, -1, h, w)\n feat = self.fusion(aligned_feat)\n\n return feat\n\n\nif __name__ == '__main__':\n model = IconVSR()\n lrs = torch.randn(3, 4, 3, 64, 64)\n rlt = model(lrs)\n print(rlt.size())\n \n\n \n\n\n",
"import cv2\nimport math\nimport numpy as np\nimport os\nimport torch\nfrom torchvision.utils import make_grid\n\n\ndef normalize(imgs):\n\n def _norm(img):\n return img.astype(np.float32) / 255.\n\n if isinstance(imgs, list):\n return [_norm(img) for img in imgs]\n else:\n return _norm(imgs)\n\n\ndef img2tensor(imgs, bgr2rgb=True, float32=True):\n \"\"\"Numpy array to tensor.\n\n Args:\n imgs (list[ndarray] | ndarray): Input images.\n bgr2rgb (bool): Whether to change bgr to rgb.\n float32 (bool): Whether to change to float32.\n\n Returns:\n list[tensor] | tensor: Tensor images. If returned results only have\n one element, just return tensor.\n \"\"\"\n\n def _totensor(img, bgr2rgb, float32):\n if img.shape[2] == 3 and bgr2rgb:\n img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n img = torch.from_numpy(img.transpose(2, 0, 1))\n if float32:\n img = img.float()\n return img\n\n if isinstance(imgs, list):\n return [_totensor(img, bgr2rgb, float32) for img in imgs]\n else:\n return _totensor(imgs, bgr2rgb, float32)\n\n\ndef tensor2img(tensor, rgb2bgr=True, out_type=np.uint8, min_max=(0, 1)):\n \"\"\"Convert torch Tensors into image numpy arrays.\n\n After clamping to [min, max], values will be normalized to [0, 1].\n\n Args:\n tensor (Tensor or list[Tensor]): Accept shapes:\n 1) 4D mini-batch Tensor of shape (B x 3/1 x H x W);\n 2) 3D Tensor of shape (3/1 x H x W);\n 3) 2D Tensor of shape (H x W).\n Tensor channel should be in RGB order.\n rgb2bgr (bool): Whether to change rgb to bgr.\n out_type (numpy type): output types. If ``np.uint8``, transform outputs\n to uint8 type with range [0, 255]; otherwise, float type with\n range [0, 1]. Default: ``np.uint8``.\n min_max (tuple[int]): min and max values for clamp.\n\n Returns:\n (Tensor or list): 3D ndarray of shape (H x W x C) OR 2D ndarray of\n shape (H x W). The channel order is BGR.\n \"\"\"\n if not (torch.is_tensor(tensor) or\n (isinstance(tensor, list)\n and all(torch.is_tensor(t) for t in tensor))):\n raise TypeError(\n f'tensor or list of tensors expected, got {type(tensor)}')\n\n if torch.is_tensor(tensor):\n tensor = [tensor]\n result = []\n for _tensor in tensor:\n _tensor = _tensor.squeeze(0).float().detach().cpu().clamp_(*min_max)\n _tensor = (_tensor - min_max[0]) / (min_max[1] - min_max[0])\n\n n_dim = _tensor.dim()\n if n_dim == 4:\n img_np = make_grid(\n _tensor, nrow=int(math.sqrt(_tensor.size(0))),\n normalize=False).numpy()\n img_np = img_np.transpose(1, 2, 0)\n if rgb2bgr:\n img_np = cv2.cvtColor(img_np, cv2.COLOR_RGB2BGR)\n elif n_dim == 3:\n img_np = _tensor.numpy()\n img_np = img_np.transpose(1, 2, 0)\n if img_np.shape[2] == 1: # gray image\n img_np = np.squeeze(img_np, axis=2)\n else:\n if rgb2bgr:\n img_np = cv2.cvtColor(img_np, cv2.COLOR_RGB2BGR)\n elif n_dim == 2:\n img_np = _tensor.numpy()\n else:\n raise TypeError('Only support 4D, 3D or 2D tensor. '\n f'But received with dimension: {n_dim}')\n if out_type == np.uint8:\n # Unlike MATLAB, numpy.unit8() WILL NOT round by default.\n img_np = (img_np * 255.0).round()\n img_np = img_np.astype(out_type)\n result.append(img_np)\n if len(result) == 1:\n result = result[0]\n return result\n\n\ndef imfrombytes(content, flag='color', float32=False):\n \"\"\"Read an image from bytes.\n\n Args:\n content (bytes): Image bytes got from files or other streams.\n flag (str): Flags specifying the color type of a loaded image,\n candidates are `color`, `grayscale` and `unchanged`.\n float32 (bool): Whether to change to float32., If True, will also norm\n to [0, 1]. 
Default: False.\n\n Returns:\n ndarray: Loaded image array.\n \"\"\"\n img_np = np.frombuffer(content, np.uint8)\n imread_flags = {\n 'color': cv2.IMREAD_COLOR,\n 'grayscale': cv2.IMREAD_GRAYSCALE,\n 'unchanged': cv2.IMREAD_UNCHANGED\n }\n img = cv2.imdecode(img_np, imread_flags[flag])\n if float32:\n img = img.astype(np.float32) / 255.\n return img\n\n\ndef imread(path, flag='color', float32=False):\n \"\"\"\n read image by cv2 or from lmdb\n return: Numpy float32, HWC, BGR, [0,1]\n \"\"\"\n imread_flags = {\n 'color': cv2.IMREAD_COLOR,\n 'grayscale': cv2.IMREAD_GRAYSCALE,\n 'unchanged': cv2.IMREAD_UNCHANGED\n }\n img = cv2.imread(path, imread_flags[flag])\n if float32:\n img = img.astype(np.float32) / 255.\n return img\n\n\ndef imwrite(img, file_path, params=None, auto_mkdir=True):\n \"\"\"Write image to file.\n\n Args:\n img (ndarray): Image array to be written.\n file_path (str): Image file path.\n params (None or list): Same as opencv's :func:`imwrite` interface.\n auto_mkdir (bool): If the parent folder of `file_path` does not exist,\n whether to create it automatically.\n\n Returns:\n bool: Successful or not.\n \"\"\"\n if auto_mkdir:\n dir_name = os.path.abspath(os.path.dirname(file_path))\n os.makedirs(dir_name, exist_ok=True)\n return cv2.imwrite(file_path, img, params)\n\n\ndef crop_border(imgs, crop_border):\n \"\"\"Crop borders of images.\n\n Args:\n imgs (list[ndarray] | ndarray): Images with shape (h, w, c).\n crop_border (int): Crop border for each end of height and weight.\n\n Returns:\n list[ndarray]: Cropped images.\n \"\"\"\n if crop_border == 0:\n return imgs\n else:\n if isinstance(imgs, list):\n return [\n v[crop_border:-crop_border, crop_border:-crop_border, ...]\n for v in imgs\n ]\n else:\n return imgs[crop_border:-crop_border, crop_border:-crop_border,\n ...]\n"
] | [
[
"torch.cat",
"torch.randn",
"torch.nn.Conv2d",
"torch.zeros_like",
"torch.nn.Upsample",
"torch.nn.LeakyReLU",
"torch.stack",
"torch.nn.functional.pad"
],
[
"numpy.frombuffer",
"numpy.squeeze",
"torch.is_tensor"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
JulianoLagana/MT3 | [
"c1270e1de5b8d68eab50a797d16061310fa95d97"
] | [
"src/modules/position_encoder.py"
] | [
"import torch\nfrom torch import nn, Tensor\nimport math\n\nclass LearnedPositionEncoder(nn.Module):\n \"\"\"\n Learned Position Encoder. Takes tensor of positional indicies and converts to learned embeddings \n \"\"\"\n\n def __init__(self, n_timesteps, d_model):\n super().__init__()\n self.embeddor = nn.Embedding(n_timesteps, d_model) # lookup table, each with vector of size d_model \n nn.init.uniform_(self.embeddor.weight)\n\n def forward(self, pos_indicies):\n pos_indicies = pos_indicies.long()\n return self.embeddor(pos_indicies)\n\n\nclass PositionEmbeddingSine(nn.Module):\n \"\"\"\n This is a more standard version of the position embedding, very similar to the one\n used by the Attention is all you need paper, generalized to work on images.\n \"\"\"\n def __init__(self,params, temperature=10000, scale=2*math.pi):\n super().__init__()\n self.params=params\n self.num_pos_feats = params.arch.d_model\n self.temperature = temperature\n self.scale = scale\n self.max_time = params.data_generation.n_timesteps\n\n def forward(self, proposals):\n proposals = proposals + 1\n dim_t = torch.arange(self.num_pos_feats, dtype=torch.float32, device=proposals.device)\n dim_t = self.temperature ** (2 * (dim_t // 2) / self.num_pos_feats)\n # N, L\n proposals = proposals / self.max_time * self.scale\n # N, L, num_pos_feats\n pos = proposals[:, :, None] / dim_t\n # N, L, 2, num_pos_feats/2, 2\n pos = torch.stack((pos[:, :, 0::2].sin(), pos[:, :, 1::2].cos()), dim=3).flatten(2)\n # N, L, num_pos_feats*2\n return pos\n\n\n"
] | [
[
"torch.nn.init.uniform_",
"torch.nn.Embedding",
"torch.arange"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
bfabiandev/atom3d | [
"b2499ff743be2e851c286cabf64696682abffa44",
"b2499ff743be2e851c286cabf64696682abffa44",
"b2499ff743be2e851c286cabf64696682abffa44"
] | [
"atom3d/util/graph.py",
"benchmarking/pytorch_geometric/train_resdel_gnn.py",
"benchmarking/cormorant/train_mutation.py"
] | [
"import numpy as np\nimport scipy.spatial as ss\nimport torch\n\nimport atom3d.util.formats as fo\n\n# PDB atom names -- these include co-crystallized metals\nprot_atoms = ['C', 'H', 'O', 'N', 'S', 'P', 'ZN', 'NA', 'FE', 'CA', 'MN', 'NI', 'CO', 'MG', 'CU', 'CL', 'SE', 'F']\n# RDKit molecule atom names\nmol_atoms = ['C', 'N', 'O', 'S', 'F', 'Si', 'P', 'Cl', 'Br', 'Mg', 'Na',\n 'Ca', 'Fe', 'As', 'Al', 'I', 'B', 'V', 'K', 'Tl', 'Yb',\n 'Sb', 'Sn', 'Ag', 'Pd', 'Co', 'Se', 'Ti', 'Zn', 'H', # H?\n 'Li', 'Ge', 'Cu', 'Au', 'Ni', 'Cd', 'In', 'Mn', 'Zr',\n 'Cr', 'Pt', 'Hg', 'Pb']\n# Residue names\nresidues = ['ALA', 'CYS', 'ASP', 'GLU', 'PHE', 'GLY', 'HIS', 'ILE', 'LYS', 'LEU', 'MET', 'ASN', 'PRO', 'GLN', 'ARG',\n 'SER', 'THR', 'VAL', 'TRP', 'TYR']\n\n\ndef prot_df_to_graph(df, feat_col='element', allowable_feats=prot_atoms, edge_dist_cutoff=4.5):\n r\"\"\"\n Converts protein in dataframe representation to a graph compatible with Pytorch-Geometric, where each node is an atom.\n\n :param df: Protein structure in dataframe format.\n :type df: pandas.DataFrame\n :param node_col: Column of dataframe to find node feature values. For example, for atoms use ``feat_col=\"element\"`` and for residues use ``feat_col=\"resname\"``\n :type node_col: str, optional\n :param allowable_feats: List containing all possible values of node type, to be converted into 1-hot node features. \n Any elements in ``feat_col`` that are not found in ``allowable_feats`` will be added to an appended \"unknown\" bin (see :func:`atom3d.util.graph.one_of_k_encoding_unk`).\n :type allowable_feats: list, optional\n :param edge_dist_cutoff: Maximum distance cutoff (in Angstroms) to define an edge between two atoms, defaults to 4.5.\n :type edge_dist_cutoff: float, optional\n\n :return: tuple containing\n\n - node_feats (torch.FloatTensor): Features for each node, one-hot encoded by values in ``allowable_feats``.\n\n - edges (torch.LongTensor): Edges in COO format\n\n - edge_weights (torch.LongTensor): Edge weights, defined as a function of distance between atoms given by :math:`w_{i,j} = \\frac{1}{d(i,j)}`, where :math:`d(i, j)` is the Euclidean distance between node :math:`i` and node :math:`j`.\n\n - node_pos (torch.FloatTensor): x-y-z coordinates of each node\n :rtype: Tuple\n \"\"\" \n\n node_pos = torch.FloatTensor(df[['x', 'y', 'z']].to_numpy())\n\n kd_tree = ss.KDTree(node_pos)\n edge_tuples = list(kd_tree.query_pairs(edge_dist_cutoff))\n edges = torch.LongTensor(edge_tuples).t().contiguous()\n\n node_feats = torch.FloatTensor([one_of_k_encoding_unk(e, allowable_feats) for e in df[feat_col]])\n edge_weights = torch.FloatTensor(\n [1.0 / (np.linalg.norm(node_pos[i] - node_pos[j]) + 1e-5) for i, j in edge_tuples]).view(-1, 1)\n # feats = F.one_hot(elems, num_classes=len(atom_int_dict))\n \n return node_feats, edges, edge_weights.view(-1), node_pos\n\n\ndef mol_df_to_graph(mol, allowable_atoms=mol_atoms):\n \"\"\"\n Converts molecule to a graph compatible with Pytorch-Geometric\n\n TODO: Change to operate on dataframe representation instead of Mol object\n\n :param mol: Molecule structure in RDKit format\n :type mol: rdkit.Chem.rdchem.Mol\n :param allowable_atoms: List containing allowable atom types\n :type allowable_atoms: list[str], optional\n\n :return: Tuple containing \\n\n - node_feats (torch.FloatTensor): Features for each node, one-hot encoded by atom type in ``allowable_atoms``.\n - edges (torch.LongTensor): Edges from chemical bond graph in COO format.\n - edge_feats (torch.FloatTensor): Edge features given by bond type. 
Single = 1.0, Double = 2.0, Triple = 3.0, Aromatic = 1.5.\n - node_pos (torch.FloatTensor): x-y-z coordinates of each node.\n \"\"\"\n node_pos = torch.FloatTensor(fo.get_coordinates_of_conformer(mol))\n bonds = fo.get_bonds_matrix_from_mol(mol)\n edge_tuples = np.argwhere(bonds)\n edges = torch.LongTensor(edge_tuples).t().contiguous()\n\n node_feats = torch.FloatTensor([one_of_k_encoding_unk(a.GetSymbol(), mol_atoms) for a in mol.GetAtoms()])\n edge_feats = torch.FloatTensor([bonds[i, j] for i, j in edge_tuples]).view(-1, 1)\n\n return node_feats, edges, edge_feats, node_pos\n\n\ndef combine_graphs(graph1, graph2, edges_between=True, edges_between_dist=4.5):\n \"\"\"Combine two graphs into one, optionally adding edges between the two graphs using :func:`atom3d.util.graph.edges_between_graphs`. Node features are concatenated in the feature dimension, to distinguish which nodes came from which graph.\n\n :param graph1: One of the graphs to be combined, in the format returned by :func:`atom3d.util.graph.prot_df_to_graph` or :func:`atom3d.util.graph.mol_df_to_graph`.\n :type graph1: Tuple\n :param graph2: The other graph to be combined, in the format returned by :func:`atom3d.util.graph.prot_df_to_graph` or :func:`atom3d.util.graph.mol_df_to_graph`.\n :type graph2: Tuple\n :param edges_between: Indicates whether to add new edges between graphs, defaults to True.\n :type edges_between: bool, optional\n :param edges_between_dist: Distance cutoff in Angstroms for adding edges between graphs, defaults to 4.5.\n :type edges_between_dist: float, optional\n :return: Tuple containing \\n\n - node_feats (torch.FloatTensor): Features for each node in the combined graph, concatenated along the feature dimension.\\n\n - edges (torch.LongTensor): Edges of combined graph in COO format, including edges from two input graphs and edges between them, if specified.\\n\n - edge_weights (torch.FloatTensor): Concatenated edge features from two input graphs and edges between them, if specified.\\n\n - node_pos (torch.FloatTensor): x-y-z coordinates of each node in combined graph.\n :rtype: Tuple\n \"\"\" \n node_feats1, edges1, edge_feats1, pos1 = graph1\n node_feats2, edges2, edge_feats2, pos2 = graph2\n\n dummy_node_feats1 = torch.zeros(pos1.shape[0], node_feats2.shape[1])\n dummy_node_feats2 = torch.zeros(pos2.shape[0], node_feats1.shape[1])\n node_feats1 = torch.cat((node_feats1, dummy_node_feats1), dim=1)\n node_feats2 = torch.cat((dummy_node_feats2, node_feats2), dim=1)\n\n edges2 += pos1.shape[0]\n\n node_pos = torch.cat((pos1, pos2), dim=0)\n node_feats = torch.cat((node_feats1, node_feats2), dim=0)\n\n if edges_between:\n edges_between, edge_feats_between = edges_between_graphs(pos1, pos2)\n edge_feats = torch.cat((edge_feats1, edge_feats2, edge_feats_between), dim=0)\n edges = torch.cat((edges1, edges2, edges_between), dim=1)\n else:\n edge_feats = torch.cat((edge_feats1, edge_feats2), dim=0)\n edges = torch.cat((edges1, edges2), dim=1)\n\n return node_feats, edges, edge_feats, node_pos\n\n\ndef edges_between_graphs(pos1, pos2, dist=4.5):\n \"\"\"calculates edges between nodes in two separate graphs using a specified cutoff distance.\n\n :param pos1: x-y-z node coordinates from Graph 1\n :type pos1: torch.FloatTensor or numpy.ndarray\n :param pos2: x-y-z node coordinates from Graph 2\n :type pos2: torch.FloatTensor or numpy.ndarray\n :return: Tuple containing\\n\n - edges (torch.LongTensor): Edges between two graphs, in COO format.\\n\n - edge_weights (torch.FloatTensor): Edge weights between two 
graphs.\\n\n :rtype: Tuple\n \"\"\" \n tree1 = ss.KDTree(pos1)\n tree2 = ss.KDTree(pos2)\n res = tree1.query_ball_tree(tree2, r=dist)\n edges = []\n edge_weights = []\n for i, contacts in enumerate(res):\n if len(contacts) == 0:\n continue\n for j in contacts:\n edges.append((i, j + pos1.shape[0]))\n edge_weights.append(np.linalg.norm(pos1[i] - pos2[j]))\n\n edges = torch.LongTensor(edges).t().contiguous()\n edge_weights = torch.FloatTensor(edge_weights).view(-1, 1)\n return edges, edge_weights \n\n\ndef adjust_graph_indices(graph):\n \"\"\"Adjusts indices into graphs for concatenated multi-graph batches. Specifically, if each graph in the batch has a different selection index defined relative to that graph, the index is adjusted to be defined relative to the batch indexing.\n\n :param graph: Pytorch-geometric graph object representing a batch of graphs. Assumed to have a ``select_idx`` attribute set, specifying a node index for each graph\n :type graph: torch_geometric.data.Data\n :return: Same graph with selection indices adjusted\n :rtype: torch_geometric.data.Data\n \"\"\" \n batch_size = len(graph.n_nodes)\n total_n = 0\n for i in range(batch_size-1):\n n_nodes = graph.n_nodes[i].item()\n total_n += n_nodes\n graph.select_idx[i+1] += total_n\n return graph\n\n\n# below functions are adapted from DeepChem repository:\ndef one_of_k_encoding(x, allowable_set):\n \"\"\"Converts input to 1-hot encoding given a set of allowable values.\"\"\"\n if x not in allowable_set:\n raise Exception(\"input {0} not in allowable set{1}:\".format(x, allowable_set))\n return list(map(lambda s: x == s, allowable_set))\n\n\ndef one_of_k_encoding_unk(x, allowable_set):\n \"\"\"Converts input to 1-hot encoding given a set of allowable values. Additionally maps inputs not in the allowable set to the last element.\"\"\"\n if x not in allowable_set:\n x = allowable_set[-1]\n return list(map(lambda s: x == s, allowable_set))\n",
"import argparse\nimport datetime\nimport os\nimport time\n\nimport dotenv as de\nimport numpy as np\n\nde.load_dotenv()\n\nimport torch\nimport torch.nn as nn\nfrom torch_geometric.data import DataLoader\nfrom torch_geometric.nn import GCNConv\nfrom torch.nn import Linear\nimport torch.nn.functional as F\n\n\n# import atom3d.util.datatypes as dt\nimport resdel_dataloader as dl\n\n\n\nclass GCN(torch.nn.Module):\n def __init__(self, num_features, hidden_dim):\n super(GCN, self).__init__()\n self.conv1 = GCNConv(num_features, hidden_dim)\n self.bn1 = nn.BatchNorm1d(hidden_dim)\n self.conv2 = GCNConv(hidden_dim, hidden_dim*2)\n self.bn2 = nn.BatchNorm1d(hidden_dim*2)\n self.conv3 = GCNConv(hidden_dim*2, hidden_dim*4)\n self.bn3 = nn.BatchNorm1d(hidden_dim*4)\n self.conv4 = GCNConv(hidden_dim*4, hidden_dim*4)\n self.bn4 = nn.BatchNorm1d(hidden_dim*4)\n self.conv5 = GCNConv(hidden_dim*4, hidden_dim*2)\n self.bn5 = nn.BatchNorm1d(hidden_dim*2)\n self.fc1 = Linear(hidden_dim*2, hidden_dim*2)\n self.fc2 = Linear(hidden_dim*2, 20)\n\n\n def forward(self, x, edge_index, edge_weight, ca_idx, batch):\n x = self.conv1(x, edge_index, edge_weight)\n x = F.relu(x)\n x = self.bn1(x)\n x = self.conv2(x, edge_index, edge_weight)\n x = F.relu(x)\n x = self.bn2(x)\n x = self.conv3(x, edge_index, edge_weight)\n x = F.relu(x)\n x = self.bn3(x)\n x = self.conv4(x, edge_index, edge_weight)\n x = self.bn4(x)\n x = F.relu(x)\n x = self.conv5(x, edge_index, edge_weight)\n x = self.bn5(x)\n # x = global_add_pool(x, batch)\n x = torch.index_select(x, 0, ca_idx)\n x = F.relu(x)\n x = F.relu(self.fc1(x))\n x = F.dropout(x, p=0.25, training=self.training)\n return self.fc2(x)\n\n\ndef get_acc(logits, label, cm=None):\n pred = torch.argmax(logits, 1)\n acc = float((pred == label).sum(-1)) / label.size()[0]\n return acc\n\n# from pytorch ...\ndef get_top_k_acc(output, target, k=3):\n \"\"\"Computes the accuracy over the k top predictions for the specified values of k\"\"\"\n with torch.no_grad():\n batch_size = target.size(0)\n\n _, pred = output.topk(k, 1, True, True)\n pred = pred.t()\n correct = pred.eq(target.view(1, -1).expand_as(pred))\n\n res = []\n correct_k = correct[:k].view(-1).float().sum(0, keepdim=True)\n #res.append(correct_k.mul_(100.0 / batch_size))\n return correct_k.mul_(1.0 / batch_size).item()\n\ndef adjust_graph_indices(graph):\n batch_size = len(graph.n_nodes)\n total_n = 0\n for i in range(batch_size-1):\n n_nodes = graph.n_nodes[i].item()\n total_n += n_nodes\n graph.ca_idx[i+1] += total_n\n return graph\n\[email protected]_grad()\ndef test(model, loader, criterion, device, batch_size):\n model.eval()\n\n losses = []\n avg_acc = []\n avg_top_k_acc = []\n for i, graph in enumerate(loader):\n graph = graph.to(device)\n if len(graph.ca_idx) != batch_size:\n # print(f'skipping batch, {len(graph1.ca_idx)} CA atoms with batch size {batch_size}')\n continue\n graph = adjust_graph_indices(graph)\n out = model(graph.x, graph.edge_index, graph.edge_attr.view(-1), graph.ca_idx, graph.batch)\n loss = criterion(out, graph.y)\n acc = get_acc(out, graph.y)\n top_k_acc = get_top_k_acc(out, graph.y, k=3)\n losses.append(loss.item())\n avg_acc.append(acc)\n avg_top_k_acc.append(top_k_acc)\n\n return np.mean(losses), np.mean(avg_acc), np.mean(avg_top_k_acc)\n\n\ndef train(data_dir, device, log_dir, checkpoint, seed=None, test_mode=False):\n\n epochs = 5\n batch_size = 64\n in_channels = 5\n learning_rate = 1e-4\n reg = 5e-6\n \n if not os.path.exists(os.path.join(log_dir, 'params.txt')):\n with 
open(os.path.join(log_dir, 'log.txt'), 'w') as f:\n f.write(f'Epochs: {epochs}\\n')\n f.write(f'Batch size: {batch_size}\\n')\n f.write(f'Learning rate: {learning_rate}\\n')\n\n train_set = dl.Resdel_Dataset_PTG(os.path.join(data_dir, 'train'))\n train_loader = DataLoader(train_set, batch_size=batch_size, num_workers=8, shuffle=True)\n val_set = dl.Resdel_Dataset_PTG(os.path.join(data_dir, 'val'))\n val_loader = DataLoader(val_set, batch_size=batch_size, num_workers=8, shuffle=True)\n\n for graph in train_loader:\n num_features = graph.num_features\n break\n\n model = GCN(num_features, hidden_dim=64)\n model.to(device)\n # if torch.cuda.device_count() > 1:\n # print('using', torch.cuda.device_count(), 'GPUs')\n # parallel = True\n # model = DataParallel(model)\n model.train()\n\n optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)#, weight_decay=reg)\n criterion = nn.CrossEntropyLoss()\n criterion.to(device)\n\n if checkpoint:\n cpt = torch.load(checkpoint, map_location=device)\n model.load_state_dict(cpt['model_state_dict'])\n optimizer.load_state_dict(cpt['optimizer_state_dict'])\n\n best_val_loss = 999\n best_val_idx = 0\n print_frequency = 100\n\n for epoch in range(1, epochs+1):\n print(f'EPOCH {epoch}\\n------------')\n\n start = time.time()\n\n for it, graph in enumerate(train_loader):\n graph = graph.to(device)\n if len(graph.ca_idx) != batch_size:\n # print(f'skipping batch, {len(graph1.ca_idx)} CA atoms with batch size {batch_size}')\n continue\n graph = adjust_graph_indices(graph)\n optimizer.zero_grad()\n out = model(graph.x, graph.edge_index, graph.edge_attr.view(-1), graph.ca_idx, graph.batch)\n train_loss = criterion(out, graph.y)\n train_loss.backward()\n optimizer.step()\n\n\n if it % print_frequency == 0:\n elapsed = time.time() - start\n print(f'Epoch {epoch}, iter {it}, train loss {train_loss}, avg it/sec {print_frequency / elapsed}')\n start = time.time()\n print('validating...')\n curr_val_loss, val_acc, val_top_k_acc = test(model, val_loader, criterion, device, batch_size)\n # logger.info('{:03d}\\t{}\\t{:.7f}\\t{:.7f}\\t{:.7f}\\t{:.7f}\\n'.format(epoch, it, train_loss, curr_val_loss, val_acc, val_top_k_acc))\n # print('{:03d}\\t{}\\t{:.7f}\\t{:.7f}\\t{:.7f}\\t{:.7f}\\n'.format(epoch, it, train_loss, curr_val_loss, val_acc, v'al_top_k_acc))\n print(f'Epoch {epoch}, iter {it}, val loss {curr_val_loss}, val acc {val_acc}, val top 3 acc {val_top_k_acc}')\n\n if curr_val_loss < best_val_loss:\n\n # save best validation score and iteration number\n best_val_loss = curr_val_loss\n best_val_idx = it\n # overwrite best model\n torch.save({\n 'epoch': epoch,\n 'model_state_dict': model.state_dict(),\n 'optimizer_state_dict': optimizer.state_dict(),\n 'loss': train_loss,\n }, os.path.join(log_dir, f'best_weights.pt'))\n\n model.train()\n\n if test_mode:\n print('testing...')\n test_set = dl.Resdel_Dataset_PTG(os.path.join(data_dir, 'test_unbalanced'))\n test_loader = DataLoader(test_set, batch_size=batch_size, num_workers=8, shuffle=True)\n for graph in test_loader:\n num_features = graph.num_features\n break\n model = GCN(num_features, hidden_dim=64).to(device)\n model.eval()\n cpt = torch.load(os.path.join(log_dir, f'best_weights.pt'))\n model.load_state_dict(cpt['model_state_dict'])\n test_loss, test_acc, test_top_k_acc = test(model, test_loader, criterion, device, batch_size)\n print('Test loss: {:7f}, Test Accuracy {:.4f}, Top 3 Accuracy {:4f}, F1 Score {:4f}'.format(test_loss, test_acc, test_top_k_acc, test_f1))\n return test_loss, test_acc, 
test_top_k_acc\n\n return best_val_loss\n\n\nif __name__=='__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('--mode', type=str, default='train')\n parser.add_argument('--log_dir', type=str, default=None)\n parser.add_argument('--checkpoint', type=str, default=None)\n args = parser.parse_args()\n\n device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n log_dir = args.log_dir\n\n base_dir = '../../data/residue_deletion'\n data_dir = os.environ['SC_DIR']+'atom3d/graph_pt'\n\n if args.mode == 'train':\n if log_dir is None:\n now = datetime.datetime.now().strftime(\"%Y-%m-%d-%H-%M-%S\")\n log_dir = os.path.join(base_dir, 'logs_cnn', now)\n else:\n log_dir = os.path.join(base_dir, 'logs_cnn', log_dir)\n if not os.path.exists(log_dir):\n os.makedirs(log_dir)\n train(data_dir, device, log_dir, args.checkpoint)\n elif args.mode == 'test':\n test_loss_list = []\n acc_list = []\n f1_list = []\n for seed in np.random.randint(0, 100, size=3):\n print('seed:', seed)\n log_dir = os.path.join(base_dir, 'logs_cnn', f'test_{seed}')\n if not os.path.exists(log_dir):\n os.makedirs(log_dir)\n np.random.seed(seed)\n torch.manual_seed(seed)\n test_loss, test_acc, test_top_k_acc, test_f1 = train(data_dir, device, log_dir, args.checkpoint, seed=seed, test_mode=True)\n test_loss_list.append(test_loss)\n acc_list.append(test_acc)\n f1_list.append(test_f1)\n print(f'Avg test_loss: {np.mean(test_loss_list)}, St.Dev test_loss {np.std(test_loss_list)}, \\\n Avg accuracy {np.mean(acc_list)}, St.Dev accuracy {np.std(acc_list)},\\\n Avg F1 {np.mean(f1_list)}, St.Dev F1 {np.std(f1_list)}')\n",
"#\n# Cormorant training script for the residue deletion dataset\n#\n\nimport logging\n\nimport torch\nfrom cormorant.data.collate import collate_siamese\nfrom cormorant.data.utils import initialize_datasets\nfrom cormorant.engine import Engine\nfrom cormorant.engine import init_argparse, init_file_paths, init_logger, init_cuda\nfrom cormorant.engine import init_optimizer, init_scheduler\nfrom cormorant.models import CormorantMutation\nfrom cormorant.models.autotest import cormorant_tests\nfrom torch.utils.data import DataLoader\n\n# This makes printing tensors more readable.\ntorch.set_printoptions(linewidth=1000, threshold=100000)\n\nlogger = logging.getLogger('')\n\ndef main():\n\n # Initialize arguments -- Just\n args = init_argparse('mutation')\n\n # Initialize file paths\n args = init_file_paths(args)\n\n # Initialize logger\n init_logger(args)\n\n # Initialize device and data type\n device, dtype = init_cuda(args)\n\n # Initialize dataloader\n args, datasets, num_species, charge_scale = initialize_datasets(args, args.datadir, 'mutation', \n force_download=args.force_download,\n ignore_check=args.ignore_check\n )\n\n # Construct PyTorch dataloaders from datasets\n dataloaders = {split: DataLoader(dataset,\n batch_size=args.batch_size,\n shuffle=args.shuffle if (split == 'train') else False,\n num_workers=args.num_workers,\n collate_fn=collate_siamese)\n for split, dataset in datasets.items()}\n\n # Initialize model\n model = CormorantMutation(args.maxl, args.max_sh, args.num_cg_levels, args.num_channels, num_species,\n args.cutoff_type, args.hard_cut_rad, args.soft_cut_rad, args.soft_cut_width,\n args.weight_init, args.level_gain, args.charge_power, args.basis_set,\n charge_scale, args.gaussian_mask, num_classes=args.num_classes,\n device=device, dtype=dtype)\n\n # Initialize the scheduler and optimizer\n optimizer = init_optimizer(args, model)\n scheduler, restart_epochs = init_scheduler(args, optimizer)\n\n # Define cross-entropy as the loss function.\n loss_fn = torch.nn.functional.cross_entropy\n\n # Apply the covariance and permutation invariance tests.\n cormorant_tests(model, dataloaders['train'], args, charge_scale=charge_scale, siamese=True)\n\n # Instantiate the training class\n trainer = Engine(args, dataloaders, model, loss_fn, optimizer, scheduler, restart_epochs, device, dtype, task='classification', clip_value=None) # 0.1\n print('Initialized a',trainer.task,'trainer with clip value',trainer.clip_value)\n\n # Load from checkpoint file. If no checkpoint file exists, automatically does nothing.\n trainer.load_checkpoint()\n\n # Train model.\n trainer.train()\n\n # Test predictions on best model and also last checkpointed model.\n trainer.evaluate()\n\nif __name__ == '__main__':\n main()\n\n"
] | [
[
"torch.LongTensor",
"torch.zeros",
"torch.cat",
"numpy.linalg.norm",
"numpy.argwhere",
"scipy.spatial.KDTree",
"torch.FloatTensor"
],
[
"torch.nn.BatchNorm1d",
"torch.nn.CrossEntropyLoss",
"numpy.random.seed",
"torch.nn.functional.dropout",
"torch.load",
"torch.manual_seed",
"torch.nn.Linear",
"numpy.std",
"torch.nn.functional.relu",
"torch.no_grad",
"numpy.mean",
"torch.cuda.is_available",
"numpy.random.randint",
"torch.index_select",
"torch.argmax"
],
[
"torch.set_printoptions",
"torch.utils.data.DataLoader"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.7",
"1.0",
"0.10",
"1.2",
"0.14",
"0.19",
"1.5",
"0.12",
"0.17",
"0.13",
"1.6",
"1.4",
"1.9",
"1.3",
"1.10",
"0.15",
"0.18",
"0.16",
"1.8"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
Darel13712/rs_metrics | [
"bf1c2f6e02537508255bbf675c48a14f512e51de"
] | [
"tests/test_metrics.py"
] | [
"import numpy as np\nimport pandas as pd\nimport pytest\n\nfrom rs_metrics.metrics import _ndcg_score\nfrom rs_metrics import *\nfrom rs_metrics.statistics import item_pop\n\n\ndef test_dcg_score_1():\n assert _ndcg_score([1], [1], 1) == 1\n\n\ndef test_dcg_score_0():\n assert _ndcg_score([1], [0], 1) == 0\n\n\ndef test_dcg_score_half():\n idcg2 = (1 / np.log2(2) + 1 / np.log2(3))\n dcg = 1 / np.log2(3)\n assert _ndcg_score([1, 2], [0, 2], 2) == dcg / idcg2\n\n\ndef test_ndcg_test_less_than_k():\n y_true = {1: [1, 2, 3]}\n assert ndcg(y_true, y_true, 5) == ndcg(y_true, y_true, 3) == 1\n\n\ndef test_ndcg():\n y_true = {1: [1, 2], 2: [1, 2]}\n y_pred = {1: [1, 2], 2: [0, 0]}\n assert ndcg(y_true, y_pred, 2) == 0.5\n\n\ndef test_ndcg_pandas():\n y_true = pd.DataFrame([[1, 1], [1, 2]], columns=['user_idx', 'item_id'])\n y_pred = pd.DataFrame([[1, 1], [1, 0]], columns=['user_idx', 'item_id'])\n idcg2 = (1 / np.log2(2) + 1 / np.log2(3))\n dcg = 1 / np.log2(2)\n assert ndcg(y_true, y_pred, 2, user_col='user_idx') == dcg / idcg2\n\n\ndef test_a_ndcg_one_user():\n y_true = {1: [1, 2, 3]}\n y_pred = {1: [1, 2, 3]}\n sp = {1: [{1}, {2}, {3}]}\n assert a_ndcg(y_true, y_pred, sp, 3) == 1\n\n\ndef test_a_ndcg():\n y_true = {1: [1, 2, 3], 2: [1, 2, 3]}\n y_pred = {1: [1, 2, 3], 2: [0, 0, 0]}\n sp = {1: [{1, 2}, {3}], 2: [{1, 2, 3}]}\n u1_score = (1 + 0.4/np.log2(3) + 1/np.log2(4)) / (1 + 1/np.log2(3) + 0.4/np.log2(4))\n answer = (u1_score + 0) / 2\n assert a_ndcg(y_true, y_pred, sp, 3, 0.6) == answer\n\n\ndef test_hitrate():\n y_true = {1: [1, 2], 2: [1, 2]}\n y_pred = {1: [0, 1], 2: [0, 0]}\n assert hitrate(y_true, y_pred, 2) == 0.5\n\n\ndef test_precision():\n y_true = {1: [1, 0, 0, 2], 2: [1, 2]}\n y_pred = {1: [1, 2], 2: [1, 3]}\n assert precision(y_true, y_pred, 2) == 0.75\n\n\ndef test_recall():\n y_true = {1: [1, 2], 2: [1, 2]}\n y_pred = {1: [1, 3], 2: [0, 0]}\n assert recall(y_true, y_pred, 2) == 0.25\n\n\ndef test_mrr():\n y_true = {1: [1, 2], 2: [1, 2]}\n y_pred = {1: [1, 3], 2: [0, 1]}\n assert mrr(y_true, y_pred, 2) == 0.75\n\n\ndef test_map():\n y_true = {1: [1, 2], 2: [1, 2]}\n y_pred = {1: [1, 3], 2: [0, 1]}\n assert mapr(y_true, y_pred, 2) == 0.75\n\n\ndef test_mar():\n y_true = {1: [1, 2], 2: [1, 2]}\n y_pred = {1: [1, 3], 2: [0, 1]}\n assert mar(y_true, y_pred, 2) == 0.25\n\n\ndef test_coverage():\n items = [1, 2, 3, 4]\n pred = {1: [1, 2], 2: [2, 5]}\n assert coverage(items, pred) == 0.5\n\n\[email protected]\ndef log():\n return pd.DataFrame({'user_id': [1, 1, 2], 'item_id': [1, 2, 2]})\n\n\ndef test_item_pop(log):\n pops = item_pop(log)\n assert sum(pops) == 1.5\n\n\ndef test_popularity(log):\n pred = {1: [2], 2: [1]}\n assert popularity(log, pred, 2) == 0.75\n\n\ndef test_surprisal():\n df = pd.DataFrame({'user_id': [1, 2], 'item_id': [1, 2]})\n pred = {1: [2], 2: [1]}\n assert surprisal(df, pred, 2) == 1\n"
] | [
[
"numpy.log2",
"pandas.DataFrame"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
aytackanaci/deep-vehicle-reid | [
"9f951288a38f8b295b5c77cc6c9b26f0632ecea3"
] | [
"train_imgreid_dpfl_large_batch.py"
] | [
"from __future__ import print_function\nfrom __future__ import division\n\nimport os\nimport sys\nimport time\nimport datetime\nimport os.path as osp\nimport numpy as np\n\nimport torch\nimport torch.nn as nn\nimport torch.backends.cudnn as cudnn\nfrom torch.nn import functional as F\nfrom torch.optim import lr_scheduler\n\nfrom tensorboardX import SummaryWriter\n\nfrom args import argument_parser, image_dataset_kwargs, optimizer_kwargs\nfrom torchreid.data_manager import ImageDataManager\nfrom torchreid import models\nfrom torchreid.losses import CrossEntropyLoss, DeepSupervision\nfrom torchreid.utils.iotools import save_checkpoint, check_isfile\nfrom torchreid.utils.avgmeter import AverageMeter\nfrom torchreid.utils.loggers import Logger, RankLogger\nfrom torchreid.utils.torchtools import count_num_param, open_all_layers, open_specified_layers\nfrom torchreid.utils.reidtools import visualize_ranked_results\nfrom torchreid.utils.generaltools import set_random_seed\nfrom torchreid.eval_metrics import evaluate, accuracy\nfrom torchreid.optimizers import init_optimizer\n\ndef exp_name(cfg):\n name = [\n 'e_' + cfg.prefix,\n 'S_' + '-'.join(cfg.source_names),\n 'T_' + '-'.join(cfg.target_names),\n cfg.arch,\n 'E',\n '' if cfg.resume == '' else 'r',\n '' if cfg.fixbase_epoch is 0 else 'warmup' + str(cfg.fixbase_epoch),\n str(cfg.stepsize),\n 'm' + str(cfg.max_epoch),\n 'P',\n 'b' + str(cfg.train_batch_size),\n cfg.optim,\n 'lr' + str(cfg.lr),\n 'wd' + str(cfg.weight_decay),\n ]\n\n return '_'.join(name)\n\n# read config\nparser = argument_parser()\nargs = parser.parse_args()\nargs.fixbase_epoch = 0\nargs.arch = 'dpfl'\nargs.save_dir = exp_name(args)\n\n\ndef main():\n global args\n\n set_random_seed(args.seed)\n if not args.use_avai_gpus: os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_devices\n use_gpu = torch.cuda.is_available()\n if args.use_cpu: use_gpu = False\n log_name = 'log_test.txt' if args.evaluate else 'log_train.txt'\n sys.stdout = Logger(osp.join(args.save_dir, log_name))\n print(\"==========\\nArgs:{}\\n==========\".format(args))\n\n if use_gpu:\n print(\"Currently using GPU {}\".format(args.gpu_devices))\n cudnn.benchmark = True\n else:\n print(\"Currently using CPU, however, GPU is highly recommended\")\n\n print(\"Initializing MultiScale data manager\")\n assert args.train_batch_size % args.train_loss_batch_size == 0, \"'{}' is not divisable by {}\".format(args.train_loss_batch_size, args.train_loss_batch_size)\n dm = ImageDataManager(use_gpu, scales=[224,160], **image_dataset_kwargs(args))\n trainloader, testloader_dict = dm.return_dataloaders()\n # sys.exit(0)\n\n print(\"Initializing model: {}\".format(args.arch))\n model = models.init_model(name=args.arch, num_classes=dm.num_train_pids, input_size=args.width, loss={'xent'}, use_gpu=use_gpu)\n print(\"Model size: {:.3f} M\".format(count_num_param(model)))\n # print(model)\n\n criterion = CrossEntropyLoss(num_classes=dm.num_train_pids, use_gpu=use_gpu, label_smooth=args.label_smooth)\n optimizer = init_optimizer(model.parameters(), **optimizer_kwargs(args))\n scheduler = lr_scheduler.MultiStepLR(optimizer, milestones=args.stepsize, gamma=args.gamma)\n # # scheduler = lr_scheduler.ReduceLROnPlateau(optimizer, mode='min', factor=0.1, patience=3, verbose=True, threshold=1e-04)\n\n if args.load_weights and check_isfile(args.load_weights): # load pretrained weights but ignore layers that don't match in size\n checkpoint = torch.load(args.load_weights)\n pretrain_dict = checkpoint['state_dict']\n model_dict = model.state_dict()\n 
pretrain_dict = {k: v for k, v in pretrain_dict.items() if k in model_dict and model_dict[k].size() == v.size()}\n model_dict.update(pretrain_dict)\n model.load_state_dict(model_dict)\n print(\"Loaded pretrained weights from '{}'\".format(args.load_weights))\n\n if args.resume and check_isfile(args.resume):\n checkpoint = torch.load(args.resume)\n model.load_state_dict(checkpoint['state_dict'])\n args.start_epoch = checkpoint['epoch'] + 1\n print(\"Loaded checkpoint from '{}'\".format(args.resume))\n print(\"- start_epoch: {}\\n- rank1: {}\".format(args.start_epoch, checkpoint['rank1']))\n\n if use_gpu:\n model = nn.DataParallel(model).cuda()\n\n if args.evaluate:\n print(\"Evaluate only\")\n\n for name in args.target_names:\n print(\"Evaluating {} ...\".format(name))\n queryloader = testloader_dict[name]['query']\n galleryloader = testloader_dict[name]['gallery']\n test_set = dm.return_testdataset_by_name(name)\n rank1, mAP = test(model, test_set, name, queryloader, galleryloader, use_gpu, visualize=args.visualize_ranks)\n\n return\n\n start_time = time.time()\n ranklogger = RankLogger(args.source_names, args.target_names)\n maplogger = RankLogger(args.source_names, args.target_names)\n train_time = 0\n\n\n # Tensorboard\n writer = SummaryWriter(log_dir=osp.join('runs', args.save_dir))\n print(\"=> Start training\")\n\n\n if args.fixbase_epoch > 0:\n print(\"Train {} for {} epochs while keeping other layers frozen\".format(args.open_layers, args.fixbase_epoch))\n initial_optim_state = optimizer.state_dict()\n\n for epoch in range(args.fixbase_epoch):\n start_train_time = time.time()\n loss, prec1 = train(epoch, model, criterion, optimizer, trainloader, writer, use_gpu, fixbase=True)\n writer.add_scalar('train/loss', loss, epoch+1)\n writer.add_scalar('train/prec1', prec1, epoch+1)\n print('Epoch: [{:02d}] [Average Loss:] {:.4f}\\t [Average Prec.:] {:.2%}'.format(epoch+1, loss, prec1))\n train_time += round(time.time() - start_train_time)\n\n print(\"Done. 
All layers are open to train for {} epochs\".format(args.max_epoch))\n optimizer.load_state_dict(initial_optim_state)\n\n args.start_epoch += args.fixbase_epoch\n args.max_epoch += args.fixbase_epoch\n\n for epoch in range(args.start_epoch, args.max_epoch):\n start_train_time = time.time()\n loss, prec1 = train(epoch, model, criterion, optimizer, trainloader, writer, use_gpu)\n writer.add_scalar('train/loss', loss, epoch+1)\n writer.add_scalar('train/prec1', prec1, epoch+1)\n print('Epoch: [{:02d}] [Average Loss:] {:.4f}\\t [Average Prec.:] {:.2%}'.format(epoch+1, loss, prec1))\n train_time += round(time.time() - start_train_time)\n\n scheduler.step()\n\n if (epoch + 1) > args.start_eval and args.eval_freq > 0 and (epoch + 1) % args.eval_freq == 0 or (epoch + 1) == args.max_epoch:\n print(\"=> Test\")\n\n for name in args.target_names:\n print(\"Evaluating {} ...\".format(name))\n queryloader = testloader_dict[name]['query']\n galleryloader = testloader_dict[name]['gallery']\n\n test_set = dm.return_testdataset_by_name(name)\n\n if epoch+1 == args.max_epoch:\n rank1, mAP = test(model, test_set, name, queryloader, galleryloader, use_gpu, visualize=True)\n else:\n rank1, mAP = test(model, test_set, name, queryloader, galleryloader, use_gpu)\n\n writer.add_scalar(name + '_test/top1', rank1, epoch+1)\n writer.add_scalar(name + '_test/mAP', mAP, epoch+1)\n\n ranklogger.write(name, epoch + 1, rank1)\n maplogger.write(name, epoch + 1, mAP)\n\n if use_gpu:\n state_dict = model.module.state_dict()\n else:\n state_dict = model.state_dict()\n\n save_checkpoint({\n 'state_dict': state_dict,\n 'rank1': rank1,\n 'epoch': epoch,\n }, False, osp.join(args.save_dir, 'checkpoint_ep' + str(epoch + 1) + '.pth.tar'))\n\n\n # save last checkpoint\n save_checkpoint({\n 'state_dict': state_dict,\n 'rank1': rank1,\n 'epoch': epoch,\n }, False, osp.join(args.save_dir, 'checkpoint_ep' + str(epoch + 1) + '.pth.tar'))\n\n elapsed = round(time.time() - start_time)\n elapsed = str(datetime.timedelta(seconds=elapsed))\n train_time = str(datetime.timedelta(seconds=train_time))\n print(\"Finished. Total elapsed time (h:m:s): {}. 
Training time (h:m:s): {}.\".format(elapsed, train_time))\n ranklogger.show_summary()\n maplogger.show_summary()\n\n\ndef train(epoch, model, criterion, optimizer, trainloader, writer, use_gpu, fixbase=False):\n losses = AverageMeter()\n precisions = AverageMeter()\n batch_time = AverageMeter()\n data_time = AverageMeter()\n epoch_iterations = len(trainloader)\n\n model.train()\n\n if fixbase or args.always_fixbase:\n open_specified_layers(model, args.open_layers)\n else:\n open_all_layers(model)\n\n end = time.time()\n for batch_idx, ((img1, img2), pids, _, _) in enumerate(trainloader):\n data_time.update(time.time() - end)\n\n if use_gpu:\n img1, img2, pids = img1.cuda(), img2.cuda(), pids.cuda()\n\n y_large, y_small, y_joint = model(img1, img2)\n\n loss_batch = args.train_loss_batch_size\n how_many_mini = args.train_batch_size // loss_batch\n for mini_idx in range(how_many_mini):\n\n start_index = mini_idx * loss_batch\n end_index = start_index + loss_batch\n\n mini_y_large = y_large[start_index:end_index, :]\n mini_y_small = y_small[start_index:end_index, :]\n mini_y_joint = y_joint[start_index:end_index, :]\n mini_pids = pids[start_index:end_index]\n\n loss_large = criterion(mini_y_large, mini_pids)\n loss_small = criterion(mini_y_small, mini_pids)\n loss_joint = criterion(mini_y_joint, mini_pids)\n\n joint_prob = F.softmax(mini_y_joint, dim=1)\n loss_joint_large = criterion(mini_y_large, joint_prob, one_hot=True)\n loss_joint_small = criterion(mini_y_small, joint_prob, one_hot=True)\n\n total_loss_large = loss_large + loss_joint_large #+\n total_loss_small = loss_small + loss_joint_small #+\n total_loss_joint = loss_joint #+\n\n prec, = accuracy(mini_y_joint.data, mini_pids.data)\n prec1 = prec[0] # get top 1\n\n optimizer.zero_grad()\n\n # total_loss_large.backward(retain_graph=True)\n # total_loss_small.backward(retain_graph=True)\n # total_loss_joint.backward()\n # sum losses\n loss = total_loss_joint + total_loss_small + total_loss_large\n loss.backward(retain_graph=True)\n\n optimizer.step()\n\n loss_iter = epoch*epoch_iterations+batch_idx*how_many_mini+mini_idx\n writer.add_scalar('iter/loss_small', loss_small, loss_iter)\n writer.add_scalar('iter/loss_large', loss_large, loss_iter)\n writer.add_scalar('iter/loss_joint', loss_joint, loss_iter)\n writer.add_scalar('iter/loss_joint_small', loss_joint_small, loss_iter)\n writer.add_scalar('iter/loss_joint_large', loss_joint_large, loss_iter)\n writer.add_scalar('iter/total_loss_small', total_loss_small, loss_iter)\n writer.add_scalar('iter/total_loss_large', total_loss_large, loss_iter)\n writer.add_scalar('iter/total_loss_joint', total_loss_joint, loss_iter)\n writer.add_scalar('iter/loss', loss, loss_iter)\n\n\n losses.update(loss.item(), pids.size(0))\n precisions.update(prec1, pids.size(0))\n\n if (batch_idx*how_many_mini+mini_idx + 1) % args.print_freq == 0:\n print('Epoch: [{0:02d}][{1}/{2}]\\t'\n 'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\\t'\n 'Data {data_time.val:.4f} ({data_time.avg:.4f})\\t'\n 'Loss {loss.val:.4f} ({loss.avg:.4f})\\t'\n 'Prec {prec.val:.2%} ({prec.avg:.2%})\\t'.format(\n epoch + 1, batch_idx + 1, len(trainloader), batch_time=batch_time,\n data_time=data_time, loss=losses, prec=precisions))\n\n batch_time.update(time.time() - end)\n end = time.time()\n\n return losses.avg, precisions.avg\n\n\ndef test(model, test_set, name, queryloader, galleryloader, use_gpu, ranks=[1, 5, 10, 20], visualize=False):\n batch_time = AverageMeter()\n\n model.eval()\n\n with torch.no_grad():\n qf, q_pids, q_camids = 
[], [], []\n for batch_idx, ((img1, img2), pids, camids, _) in enumerate(queryloader):\n if use_gpu: img1, img2 = img1.cuda(), img2.cuda()\n\n end = time.time()\n features = model(img1, img2)\n batch_time.update(time.time() - end)\n\n features = features.data.cpu()\n qf.append(features)\n q_pids.extend(pids)\n q_camids.extend(camids)\n qf = torch.cat(qf, 0)\n q_pids = np.asarray(q_pids)\n q_camids = np.asarray(q_camids)\n\n print(\"Extracted features for query set, obtained {}-by-{} matrix\".format(qf.size(0), qf.size(1)))\n\n gf, g_pids, g_camids = [], [], []\n end = time.time()\n for batch_idx, ((img1, img2), pids, camids, _) in enumerate(galleryloader):\n if use_gpu: img1, img2 = img1.cuda(), img2.cuda()\n\n end = time.time()\n features = model(img1, img2)\n batch_time.update(time.time() - end)\n\n features = features.data.cpu()\n gf.append(features)\n g_pids.extend(pids)\n g_camids.extend(camids)\n gf = torch.cat(gf, 0)\n g_pids = np.asarray(g_pids)\n g_camids = np.asarray(g_camids)\n\n print(\"Extracted features for gallery set, obtained {}-by-{} matrix\".format(gf.size(0), gf.size(1)))\n\n print(\"=> BatchTime(s)/BatchSize(img): {:.3f}/{}\".format(batch_time.avg, args.test_batch_size))\n\n m, n = qf.size(0), gf.size(0)\n distmat = torch.pow(qf, 2).sum(dim=1, keepdim=True).expand(m, n) + \\\n torch.pow(gf, 2).sum(dim=1, keepdim=True).expand(n, m).t()\n distmat.addmm_(1, -2, qf, gf.t())\n distmat = distmat.numpy()\n\n print(\"Computing CMC and mAP\")\n cmc, mAP, all_AP = evaluate(distmat, q_pids, g_pids, q_camids, g_camids, use_metric_cuhk03=args.use_metric_cuhk03)\n\n if visualize:\n visualize_ranked_results(\n distmat, all_AP, test_set, name,\n save_path=args.save_dir,\n topk=100\n )\n\n print(\"Results ----------\")\n print(\"mAP: {:.1%}\".format(mAP))\n print(\"CMC curve\")\n for r in ranks:\n print(\"Rank-{:<3}: {:.1%}\".format(r, cmc[r-1]))\n print(\"------------------\")\n\n return cmc[0], mAP\n\n\nif __name__ == '__main__':\n main()\n"
] | [
[
"torch.optim.lr_scheduler.MultiStepLR",
"torch.nn.functional.softmax",
"torch.cat",
"torch.load",
"numpy.asarray",
"torch.no_grad",
"torch.cuda.is_available",
"torch.nn.DataParallel",
"torch.pow"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
dhruv9vats/stingray | [
"e952762ebc098de42d8decf2d0df34f9e9b0c200",
"e952762ebc098de42d8decf2d0df34f9e9b0c200"
] | [
"stingray/crossspectrum.py",
"stingray/io.py"
] | [
"import copy\nimport warnings\nfrom collections.abc import Iterable, Iterator\n\nimport numpy as np\nimport scipy\nimport scipy.optimize\nimport scipy.stats\n\nfrom stingray.exceptions import StingrayError\nfrom stingray.gti import bin_intervals_from_gtis, check_gtis, cross_two_gtis\nfrom stingray.largememory import createChunkedSpectra, saveData\nfrom stingray.utils import genDataPath, rebin_data, rebin_data_log, simon\n\nfrom .events import EventList\nfrom .lightcurve import Lightcurve\nfrom .utils import show_progress\n\n# location of factorial moved between scipy versions\ntry:\n from scipy.misc import factorial\nexcept ImportError:\n from scipy.special import factorial\n\ntry:\n from pyfftw.interfaces.scipy_fft import fft, fftfreq\nexcept ImportError:\n warnings.warn(\"pyfftw not installed. Using standard scipy fft\")\n from scipy.fft import fft, fftfreq\n\n__all__ = [\n \"Crossspectrum\", \"AveragedCrossspectrum\", \"coherence\", \"time_lag\",\n \"cospectra_pvalue\", \"normalize_crossspectrum\"\n]\n\n\ndef normalize_crossspectrum(unnorm_power, tseg, nbins, nphots1, nphots2, norm=\"none\", power_type=\"real\"):\n \"\"\"\n Normalize the real part of the cross spectrum to Leahy, absolute rms^2,\n fractional rms^2 normalization, or not at all.\n\n Parameters\n ----------\n unnorm_power: numpy.ndarray\n The unnormalized cross spectrum.\n\n tseg: int\n The length of the Fourier segment, in seconds.\n\n nbins : int\n Number of bins in the light curve\n\n nphots1 : int\n Number of photons in the light curve no. 1\n\n nphots2 : int\n Number of photons in the light curve no. 2\n\n Other parameters\n ----------------\n norm : str\n One of `'leahy'` (Leahy+83), `'frac'` (fractional rms), `'abs'`\n (absolute rms)\n\n power_type : str\n One of `'real'` (real part), `'all'` (all complex powers), `'abs'`\n (absolute value)\n\n Returns\n -------\n power: numpy.nd.array\n The normalized co-spectrum (real part of the cross spectrum). For\n 'none' normalization, imaginary part is returned as well.\n \"\"\"\n\n # The \"effective\" counts/bin is the geometrical mean of the counts/bin\n # of the two light curves. Same goes for counts/second in meanrate.\n\n log_nphots1 = np.log(nphots1)\n log_nphots2 = np.log(nphots2)\n\n actual_nphots = np.float64(np.sqrt(np.exp(log_nphots1 + log_nphots2)))\n\n if power_type == \"all\":\n c_num = unnorm_power\n elif power_type == \"real\":\n c_num = unnorm_power.real\n elif power_type == \"absolute\":\n c_num = np.absolute(unnorm_power)\n else:\n raise ValueError(\"`power_type` not recognized!\")\n\n if norm.lower() == 'leahy':\n power = c_num * 2. / actual_nphots\n\n elif norm.lower() == 'frac':\n meancounts1 = nphots1 / nbins\n meancounts2 = nphots2 / nbins\n\n actual_mean = np.sqrt(meancounts1 * meancounts2)\n\n assert actual_mean > 0.0, \\\n \"Mean count rate is <= 0. Something went wrong.\"\n\n c = c_num / float(nbins ** 2.)\n power = c * 2. * tseg / (actual_mean ** 2.0)\n\n elif norm.lower() == 'abs':\n meanrate = np.sqrt(nphots1 * nphots2) / tseg\n\n power = c_num * 2. 
* meanrate / actual_nphots\n\n elif norm.lower() == 'none':\n power = unnorm_power\n\n else:\n raise ValueError(\"Value for `norm` not recognized.\")\n\n return power\n\n\ndef normalize_crossspectrum_gauss(\n unnorm_power, mean_flux, var, dt, N, norm=\"none\", power_type=\"real\"):\n \"\"\"\n Normalize the real part of the cross spectrum to Leahy, absolute rms^2,\n fractional rms^2 normalization, or not at all.\n\n Parameters\n ----------\n unnorm_power: numpy.ndarray\n The unnormalized cross spectrum.\n\n mean_flux: float\n The mean flux of the light curve (if a cross spectrum, the geometrical\n mean of the flux in the two channels)\n\n var: float\n The variance of the light curve (if a cross spectrum, the geometrical\n mean of the variance in the two channels)\n\n dt: float\n The sampling time of the light curve\n\n N: int\n The number of bins in the light curve\n\n Other parameters\n ----------------\n norm : str\n One of `'leahy'` (Leahy+83), `'frac'` (fractional rms), `'abs'`\n (absolute rms)\n\n power_type : str\n One of `'real'` (real part), `'all'` (all complex powers), `'abs'`\n (absolute value)\n\n Returns\n -------\n power: numpy.nd.array\n The normalized co-spectrum (real part of the cross spectrum). For\n 'none' normalization, imaginary part is returned as well.\n\n Examples\n --------\n >>> lc_c = np.random.poisson(10000, 10000)\n >>> lc_c_var = 10000\n >>> lc = lc_c / 17.3453\n >>> lc_var = (100 / 17.3453)**2\n >>> pds_c = np.absolute(np.fft.fft(lc_c))**2\n >>> pds = np.absolute(np.fft.fft(lc))**2\n >>> norm_c = normalize_crossspectrum_gauss(pds_c, np.mean(lc_c), lc_c_var, 0.1, len(lc_c), norm='leahy')\n >>> norm = normalize_crossspectrum_gauss(pds, np.mean(lc), lc_var, 0.1, len(lc), norm='leahy')\n >>> np.allclose(norm, norm_c)\n True\n >>> np.isclose(np.mean(norm[1:]), 2, atol=0.1)\n True\n >>> norm_c = normalize_crossspectrum_gauss(pds_c, np.mean(lc_c), np.mean(lc_c), 0.1, len(lc_c), norm='frac')\n >>> norm = normalize_crossspectrum_gauss(pds, np.mean(lc), lc_var, 0.1, len(lc), norm='frac')\n >>> np.allclose(norm, norm_c)\n True\n >>> norm_c = normalize_crossspectrum_gauss(pds_c, np.mean(lc_c), np.mean(lc_c), 0.1, len(lc_c), norm='abs')\n >>> norm = normalize_crossspectrum_gauss(pds, np.mean(lc), lc_var, 0.1, len(lc), norm='abs')\n >>> np.allclose(norm / np.mean(lc)**2, norm_c / np.mean(lc_c)**2)\n True\n >>> np.isclose(np.mean(norm_c[2:]), 2 * np.mean(lc_c * 0.1), rtol=0.1)\n True\n \"\"\"\n\n # The \"effective\" counts/bin is the geometrical mean of the counts/bin\n # of the two light curves. 
Same goes for counts/second in meanrate.\n if power_type == \"all\":\n c_num = unnorm_power\n elif power_type == \"real\":\n c_num = unnorm_power.real\n elif power_type == \"absolute\":\n c_num = np.absolute(unnorm_power)\n else:\n raise ValueError(\"`power_type` not recognized!\")\n\n common_factor = 2 * dt / N\n rate_mean = mean_flux * dt\n if norm.lower() == 'leahy':\n norm = 2 / var / N\n\n elif norm.lower() == 'frac':\n norm = common_factor / rate_mean**2\n\n elif norm.lower() == 'abs':\n norm = common_factor\n\n elif norm.lower() == 'none':\n norm = 1\n\n else:\n raise ValueError(\"Value for `norm` not recognized.\")\n\n return norm * c_num\n\n\ndef _averaged_cospectra_cdf(xcoord, n):\n \"\"\"\n Function calculating the cumulative distribution function for\n averaged cospectra, Equation 19 of Huppenkothen & Bachetti (2018).\n\n Parameters\n ----------\n xcoord : float or iterable\n The cospectral power for which to calculate the CDF.\n\n n : int\n The number of averaged cospectra\n\n Returns\n -------\n cdf : float\n The value of the CDF at `xcoord` for `n` averaged cospectra\n \"\"\"\n if np.size(xcoord) == 1:\n xcoord = [xcoord]\n\n cdf = np.zeros_like(xcoord)\n\n for i, x in enumerate(xcoord):\n prefac_bottom1 = factorial(n - 1)\n for j in range(n):\n prefac_top = factorial(n - 1 + j)\n prefac_bottom2 = factorial(\n n - 1 - j) * factorial(j)\n prefac_bottom3 = 2.0 ** (n + j)\n\n prefac = prefac_top / (prefac_bottom1 * prefac_bottom2 *\n prefac_bottom3)\n\n gf = -j + n\n\n first_fac = scipy.special.gamma(gf)\n if x >= 0:\n second_fac = scipy.special.gammaincc(gf, n * x) * first_fac\n fac = 2.0 * first_fac - second_fac\n else:\n fac = scipy.special.gammaincc(gf, -n * x) * first_fac\n\n cdf[i] += (prefac * fac)\n if np.size(xcoord) == 1:\n return cdf[i]\n else:\n continue\n return cdf\n\n\ndef cospectra_pvalue(power, nspec):\n \"\"\"\n This function computes the single-trial p-value that the power was\n observed under the null hypothesis that there is no signal in\n the data.\n\n Important: the underlying assumption that make this calculation valid\n is that the powers in the power spectrum follow a Laplace distribution,\n and this requires that:\n\n 1. the co-spectrum is normalized according to [Leahy 1983]_\n 2. there is only white noise in the light curve. That is, there is no\n aperiodic variability that would change the overall shape of the power\n spectrum.\n\n Also note that the p-value is for a *single trial*, i.e. the power\n currently being tested. If more than one power or more than one power\n spectrum are being tested, the resulting p-value must be corrected for the\n number of trials (Bonferroni correction).\n\n Mathematical formulation in [Huppenkothen 2017]_.\n\n Parameters\n ----------\n power : float\n The squared Fourier amplitude of a spectrum to be evaluated\n\n nspec : int\n The number of spectra or frequency bins averaged in ``power``.\n This matters because averaging spectra or frequency bins increases\n the signal-to-noise ratio, i.e. makes the statistical distributions\n of the noise narrower, such that a smaller power might be very\n significant in averaged spectra even though it would not be in a single\n power spectrum.\n\n Returns\n -------\n pval : float\n The classical p-value of the observed power being consistent with\n the null hypothesis of white noise\n\n References\n ----------\n\n * .. [Leahy 1983] https://ui.adsabs.harvard.edu/#abs/1983ApJ...266..160L/abstract\n * .. 
[Huppenkothen 2017] http://adsabs.harvard.edu/abs/2018ApJS..236...13H\n\n \"\"\"\n if not np.all(np.isfinite(power)):\n raise ValueError(\"power must be a finite floating point number!\")\n\n # if power < 0:\n # raise ValueError(\"power must be a positive real number!\")\n\n if not np.isfinite(nspec):\n raise ValueError(\"nspec must be a finite integer number\")\n\n if not np.isclose(nspec % 1, 0):\n raise ValueError(\"nspec must be an integer number!\")\n\n if nspec < 1:\n raise ValueError(\"nspec must be larger or equal to 1\")\n\n elif nspec == 1:\n lapl = scipy.stats.laplace(0, 1)\n pval = lapl.sf(power)\n\n elif nspec > 50:\n exp_sigma = np.sqrt(2) / np.sqrt(nspec)\n gauss = scipy.stats.norm(0, exp_sigma)\n pval = gauss.sf(power)\n\n else:\n pval = 1. - _averaged_cospectra_cdf(power, nspec)\n\n return pval\n\n\ndef coherence(lc1, lc2):\n \"\"\"\n Estimate coherence function of two light curves.\n For details on the definition of the coherence, see Vaughan and Nowak,\n 1996 [#]_.\n\n Parameters\n ----------\n lc1: :class:`stingray.Lightcurve` object\n The first light curve data for the channel of interest.\n\n lc2: :class:`stingray.Lightcurve` object\n The light curve data for reference band\n\n Returns\n -------\n coh : ``np.ndarray``\n The array of coherence versus frequency\n\n References\n ----------\n .. [#] http://iopscience.iop.org/article/10.1086/310430/pdf\n \"\"\"\n\n if not isinstance(lc1, Lightcurve):\n raise TypeError(\"lc1 must be a lightcurve.Lightcurve object\")\n\n if not isinstance(lc2, Lightcurve):\n raise TypeError(\"lc2 must be a lightcurve.Lightcurve object\")\n\n cs = Crossspectrum(lc1, lc2, norm='none')\n\n return cs.coherence()\n\n\ndef time_lag(lc1, lc2):\n \"\"\"\n Estimate the time lag of two light curves.\n Calculate time lag and uncertainty.\n\n Equation from Bendat & Piersol, 2011 [bendat-2011]_.\n\n Returns\n -------\n lag : np.ndarray\n The time lag\n\n lag_err : np.ndarray\n The uncertainty in the time lag\n\n References\n ----------\n\n .. [bendat-2011] https://www.wiley.com/en-us/Random+Data%3A+Analysis+and+Measurement+Procedures%2C+4th+Edition-p-9780470248775\n\n \"\"\"\n\n if not isinstance(lc1, Lightcurve):\n raise TypeError(\"lc1 must be a lightcurve.Lightcurve object\")\n\n if not isinstance(lc2, Lightcurve):\n raise TypeError(\"lc2 must be a lightcurve.Lightcurve object\")\n\n cs = Crossspectrum(lc1, lc2, norm='none')\n lag = cs.time_lag()\n\n return lag\n\n\nclass Crossspectrum(object):\n \"\"\"\n Make a cross spectrum from a (binned) light curve.\n You can also make an empty :class:`Crossspectrum` object to populate with your\n own Fourier-transformed data (this can sometimes be useful when making\n binned power spectra). 
Stingray uses the scipy.fft standards for the sign\n of the Nyquist frequency.\n\n Parameters\n ----------\n data1: :class:`stingray.Lightcurve` or :class:`stingray.events.EventList`, optional, default ``None``\n The first light curve data for the channel/band of interest.\n\n data2: :class:`stingray.Lightcurve` or :class:`stingray.events.EventList`, optional, default ``None``\n The light curve data for the reference band.\n\n norm: {``frac``, ``abs``, ``leahy``, ``none``}, default ``none``\n The normalization of the (real part of the) cross spectrum.\n\n power_type: string, optional, default ``real``\n Parameter to choose among complete, real part and magnitude of the cross spectrum.\n\n fullspec: boolean, optional, default ``False``\n If False, keep only the positive frequencies, or if True, keep all of them .\n\n Other Parameters\n ----------------\n gti: 2-d float array\n ``[[gti0_0, gti0_1], [gti1_0, gti1_1], ...]`` -- Good Time intervals.\n This choice overrides the GTIs in the single light curves. Use with\n care!\n\n lc1: :class:`stingray.Lightcurve`object OR iterable of :class:`stingray.Lightcurve` objects\n For backwards compatibility only. Like ``data1``, but no\n :class:`stingray.events.EventList` objects allowed\n\n lc2: :class:`stingray.Lightcurve`object OR iterable of :class:`stingray.Lightcurve` objects\n For backwards compatibility only. Like ``data2``, but no\n :class:`stingray.events.EventList` objects allowed\n\n dt: float\n The time resolution of the light curve. Only needed when constructing\n light curves in the case where ``data1``, ``data2`` are\n :class:`EventList` objects\n\n\n Attributes\n ----------\n freq: numpy.ndarray\n The array of mid-bin frequencies that the Fourier transform samples\n\n power: numpy.ndarray\n The array of cross spectra (complex numbers)\n\n power_err: numpy.ndarray\n The uncertainties of ``power``.\n An approximation for each bin given by ``power_err= power/sqrt(m)``.\n Where ``m`` is the number of power averaged in each bin (by frequency\n binning, or averaging more than one spectra). Note that for a single\n realization (``m=1``) the error is equal to the power.\n\n df: float\n The frequency resolution\n\n m: int\n The number of averaged cross-spectra amplitudes in each bin.\n\n n: int\n The number of data points/time bins in one segment of the light\n curves.\n\n nphots1: float\n The total number of photons in light curve 1\n\n nphots2: float\n The total number of photons in light curve 2\n \"\"\"\n\n def __init__(self, data1=None, data2=None, norm='none', gti=None,\n lc1=None, lc2=None, power_type=\"real\", dt=None, fullspec=False):\n\n if isinstance(norm, str) is False:\n raise TypeError(\"norm must be a string\")\n\n if norm.lower() not in [\"frac\", \"abs\", \"leahy\", \"none\"]:\n raise ValueError(\"norm must be 'frac', 'abs', 'leahy', or 'none'!\")\n\n self.norm = norm.lower()\n\n # check if input data is a Lightcurve object, if not make one or\n # make an empty Crossspectrum object if lc1 == ``None`` or lc2 == ``None``\n\n if lc1 is not None or lc2 is not None:\n warnings.warn(\"The lcN keywords are now deprecated. 
Use dataN \"\n \"instead\", DeprecationWarning)\n # for backwards compatibility\n if data1 is None:\n data1 = lc1\n if data2 is None:\n data2 = lc2\n\n if data1 is None or data2 is None:\n if data1 is not None or data2 is not None:\n raise TypeError(\"You can't do a cross spectrum with just one \"\n \"light curve!\")\n else:\n self.freq = None\n self.power = None\n self.power_err = None\n self.df = None\n self.nphots1 = None\n self.nphots2 = None\n self.m = 1\n self.n = None\n return\n\n if (isinstance(data1, EventList) or isinstance(data2, EventList)) and \\\n dt is None:\n raise ValueError(\"If using event lists, please specify the bin \"\n \"time to generate lightcurves.\")\n\n if not isinstance(data1, EventList):\n lc1 = data1\n else:\n lc1 = data1.to_lc(dt)\n\n if not isinstance(data2, EventList):\n lc2 = data2\n elif isinstance(data2, EventList) and data2 is not data1:\n lc2 = data2.to_lc(dt)\n elif data2 is data1:\n lc2 = lc1\n\n self.gti = gti\n self.lc1 = lc1\n self.lc2 = lc2\n self.power_type = power_type\n self.fullspec = fullspec\n\n self._make_crossspectrum(lc1, lc2, fullspec)\n\n # These are needed to calculate coherence\n self._make_auxil_pds(lc1, lc2)\n\n def _make_auxil_pds(self, lc1, lc2):\n \"\"\"\n Helper method to create the power spectrum of both light curves\n independently.\n\n Parameters\n ----------\n lc1, lc2 : :class:`stingray.Lightcurve` objects\n Two light curves used for computing the cross spectrum.\n \"\"\"\n if lc1 is not lc2 and isinstance(lc1, Lightcurve):\n self.pds1 = Crossspectrum(lc1, lc1, norm='none')\n self.pds2 = Crossspectrum(lc2, lc2, norm='none')\n\n def _make_crossspectrum(self, lc1, lc2, fullspec=False):\n \"\"\"\n Auxiliary method computing the normalized cross spectrum from two\n light curves. This includes checking for the presence of and\n applying Good Time Intervals, computing the unnormalized Fourier\n cross-amplitude, and then renormalizing using the required\n normalization. 
Also computes an uncertainty estimate on the cross\n spectral powers.\n\n Parameters\n ----------\n lc1, lc2 : :class:`stingray.Lightcurve` objects\n Two light curves used for computing the cross spectrum.\n\n fullspec: boolean, default ``False``\n Return full frequency array (True) or just positive frequencies (False)\n\n \"\"\"\n\n # make sure the inputs work!\n if not isinstance(lc1, Lightcurve):\n raise TypeError(\"lc1 must be a lightcurve.Lightcurve object\")\n\n if not isinstance(lc2, Lightcurve):\n raise TypeError(\"lc2 must be a lightcurve.Lightcurve object\")\n\n if self.lc2.mjdref != self.lc1.mjdref:\n raise ValueError(\"MJDref is different in the two light curves\")\n\n # Then check that GTIs make sense\n if self.gti is None:\n self.gti = cross_two_gtis(lc1.gti, lc2.gti)\n\n check_gtis(self.gti)\n\n if self.gti.shape[0] != 1:\n raise TypeError(\"Non-averaged Cross Spectra need \"\n \"a single Good Time Interval\")\n\n lc1 = lc1.split_by_gti()[0]\n lc2 = lc2.split_by_gti()[0]\n\n # total number of photons is the sum of the\n # counts in the light curve\n self.meancounts1 = lc1.meancounts\n self.meancounts2 = lc2.meancounts\n self.nphots1 = np.float64(np.sum(lc1.counts))\n self.nphots2 = np.float64(np.sum(lc2.counts))\n\n self.err_dist = 'poisson'\n if lc1.err_dist == 'poisson':\n self.var1 = lc1.meancounts\n else:\n self.var1 = np.mean(lc1.counts_err) ** 2\n self.err_dist = 'gauss'\n\n if lc2.err_dist == 'poisson':\n self.var2 = lc2.meancounts\n else:\n self.var2 = np.mean(lc2.counts_err) ** 2\n self.err_dist = 'gauss'\n\n if lc1.n != lc2.n:\n raise StingrayError(\"Light curves do not have same number \"\n \"of time bins per segment.\")\n\n # If dt differs slightly, its propagated error must not be more than\n # 1/100th of the bin\n if not np.isclose(lc1.dt, lc2.dt, rtol=0.01 * lc1.dt / lc1.tseg):\n raise StingrayError(\"Light curves do not have same time binning \"\n \"dt.\")\n\n # In case a small difference exists, ignore it\n lc1.dt = lc2.dt\n\n self.dt = lc1.dt\n self.n = lc1.n\n\n # the frequency resolution\n self.df = 1.0 / lc1.tseg\n\n # the number of averaged periodograms in the final output\n # This should *always* be 1 here\n self.m = 1\n\n # make the actual Fourier transform and compute cross spectrum\n self.freq, self.unnorm_power = self._fourier_cross(lc1, lc2, fullspec)\n\n # If co-spectrum is desired, normalize here. Otherwise, get raw back\n # with the imaginary part still intact.\n self.power = self._normalize_crossspectrum(self.unnorm_power, lc1.tseg)\n\n if lc1.err_dist.lower() != lc2.err_dist.lower():\n simon(\"Your lightcurves have different statistics.\"\n \"The errors in the Crossspectrum will be incorrect.\")\n elif lc1.err_dist.lower() != \"poisson\":\n simon(\"Looks like your lightcurve statistic is not poisson.\"\n \"The errors in the Powerspectrum will be incorrect.\")\n\n if self.__class__.__name__ in ['Powerspectrum',\n 'AveragedPowerspectrum']:\n self.power_err = self.power / np.sqrt(self.m)\n elif self.__class__.__name__ in ['Crossspectrum',\n 'AveragedCrossspectrum']:\n # This is clearly a wild approximation.\n simon(\"Errorbars on cross spectra are not thoroughly tested. 
\"\n \"Please report any inconsistencies.\")\n unnorm_power_err = np.sqrt(2) / np.sqrt(self.m) # Leahy-like\n unnorm_power_err /= (2 / np.sqrt(self.nphots1 * self.nphots2))\n unnorm_power_err += np.zeros_like(self.power)\n\n self.power_err = \\\n self._normalize_crossspectrum(unnorm_power_err, lc1.tseg)\n else:\n self.power_err = np.zeros(len(self.power))\n\n def _fourier_cross(self, lc1, lc2, fullspec=False):\n \"\"\"\n Fourier transform the two light curves, then compute the cross spectrum.\n Computed as CS = lc1 x lc2* (where lc2 is the one that gets\n complex-conjugated). The user has the option to either get just the\n positive frequencies or the full spectrum.\n\n Parameters\n ----------\n lc1: :class:`stingray.Lightcurve` object\n One light curve to be Fourier transformed. Ths is the band of\n interest or channel of interest.\n\n lc2: :class:`stingray.Lightcurve` object\n Another light curve to be Fourier transformed.\n This is the reference band.\n\n fullspec: boolean. Default is False.\n If True, return the whole array of frequencies, or only positive frequencies (False).\n\n Returns\n -------\n fr: numpy.ndarray\n The squared absolute value of the Fourier amplitudes\n\n \"\"\"\n fourier_1 = fft(lc1.counts) # do Fourier transform 1\n fourier_2 = fft(lc2.counts) # do Fourier transform 2\n\n freqs = scipy.fft.fftfreq(lc1.n, lc1.dt)\n cross = np.multiply(fourier_1, np.conj(fourier_2))\n\n if fullspec is True:\n return freqs, cross\n else:\n return freqs[freqs > 0], cross[freqs > 0]\n\n def rebin(self, df=None, f=None, method=\"mean\"):\n \"\"\"\n Rebin the cross spectrum to a new frequency resolution ``df``.\n\n Parameters\n ----------\n df: float\n The new frequency resolution\n\n Other Parameters\n ----------------\n f: float\n the rebin factor. If specified, it substitutes df with ``f*self.df``\n\n Returns\n -------\n bin_cs = :class:`Crossspectrum` (or one of its subclasses) object\n The newly binned cross spectrum or power spectrum.\n Note: this object will be of the same type as the object\n that called this method. 
For example, if this method is called\n from :class:`AveragedPowerspectrum`, it will return an object of class\n :class:`AveragedPowerspectrum`, too.\n \"\"\"\n\n if f is None and df is None:\n raise ValueError('You need to specify at least one between f and '\n 'df')\n elif f is not None:\n df = f * self.df\n\n # rebin cross spectrum to new resolution\n binfreq, bincs, binerr, step_size = \\\n rebin_data(self.freq, self.power, df, self.power_err,\n method=method, dx=self.df)\n # make an empty cross spectrum object\n # note: syntax deliberate to work with subclass Powerspectrum\n bin_cs = copy.copy(self)\n\n # store the binned periodogram in the new object\n bin_cs.freq = binfreq\n bin_cs.power = bincs\n bin_cs.df = df\n bin_cs.n = self.n\n bin_cs.norm = self.norm\n bin_cs.nphots1 = self.nphots1\n bin_cs.power_err = binerr\n\n if hasattr(self, 'unnorm_power'):\n _, binpower_unnorm, _, _ = \\\n rebin_data(self.freq, self.unnorm_power, df,\n method=method, dx=self.df)\n\n bin_cs.unnorm_power = binpower_unnorm\n\n if hasattr(self, 'cs_all'):\n cs_all = []\n for c in self.cs_all:\n cs_all.append(c.rebin(df=df, f=f, method=method))\n bin_cs.cs_all = cs_all\n if hasattr(self, 'pds1'):\n bin_cs.pds1 = self.pds1.rebin(df=df, f=f, method=method)\n if hasattr(self, 'pds2'):\n bin_cs.pds2 = self.pds2.rebin(df=df, f=f, method=method)\n\n try:\n bin_cs.nphots2 = self.nphots2\n except AttributeError:\n if self.type == 'powerspectrum':\n pass\n else:\n raise AttributeError(\n 'Spectrum has no attribute named nphots2.')\n\n bin_cs.m = np.rint(step_size * self.m)\n\n return bin_cs\n\n def _normalize_crossspectrum(self, unnorm_power, tseg):\n \"\"\"\n Normalize the real part of the cross spectrum to Leahy, absolute rms^2,\n fractional rms^2 normalization, or not at all.\n\n Parameters\n ----------\n unnorm_power: numpy.ndarray\n The unnormalized cross spectrum.\n\n tseg: int\n The length of the Fourier segment, in seconds.\n\n Returns\n -------\n power: numpy.nd.array\n The normalized co-spectrum (real part of the cross spectrum). For\n 'none' normalization, imaginary part is returned as well.\n \"\"\"\n\n if self.err_dist == 'poisson':\n return normalize_crossspectrum(\n unnorm_power, tseg, self.n, self.nphots1, self.nphots2, self.norm,\n self.power_type)\n\n return normalize_crossspectrum_gauss(\n unnorm_power, np.sqrt(self.meancounts1 * self.meancounts2),\n np.sqrt(self.var1 * self.var2),\n dt=self.dt,\n N=self.n,\n norm=self.norm,\n power_type=self.power_type)\n\n def rebin_log(self, f=0.01):\n \"\"\"\n Logarithmic rebin of the periodogram.\n The new frequency depends on the previous frequency\n modified by a factor f:\n\n .. math::\n\n d\\\\nu_j = d\\\\nu_{j-1} (1+f)\n\n Parameters\n ----------\n f: float, optional, default ``0.01``\n parameter that steers the frequency resolution\n\n\n Returns\n -------\n new_spec : :class:`Crossspectrum` (or one of its subclasses) object\n The newly binned cross spectrum or power spectrum.\n Note: this object will be of the same type as the object\n that called this method. 
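# `rebin` delegates the binning to stingray's `rebin_data` helper. A rough
# standalone sketch of the essential operation -- average the powers that fall
# in each coarser bin and combine their errors in quadrature -- assuming the new
# resolution is an integer multiple of the old one. This is not the actual
# `rebin_data` implementation.
import numpy as np

def rebin_linear(freq, power, power_err, df_new, df_old):
    """Average powers into coarser bins of width df_new (>= df_old)."""
    step = int(np.rint(df_new / df_old))         # old bins per new bin
    nbins = len(freq) // step                    # leftover points are dropped
    f = freq[:nbins * step].reshape(nbins, step).mean(axis=1)
    p = power[:nbins * step].reshape(nbins, step).mean(axis=1)
    e = np.sqrt((power_err[:nbins * step].reshape(nbins, step) ** 2
                 ).sum(axis=1)) / step
    # `step` is also what scales the number of averaged spectra, as in
    # `bin_cs.m = np.rint(step_size * self.m)` above.
    return f, p, e, step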
For example, if this method is called\n from :class:`AveragedPowerspectrum`, it will return an object of class\n \"\"\"\n\n binfreq, binpower, binpower_err, nsamples = \\\n rebin_data_log(self.freq, self.power, f,\n y_err=self.power_err, dx=self.df)\n\n # the frequency resolution\n df = np.diff(binfreq)\n\n # shift the lower bin edges to the middle of the bin and drop the\n # last right bin edge\n binfreq = binfreq[:-1] + df / 2\n\n new_spec = copy.copy(self)\n new_spec.freq = binfreq\n new_spec.power = binpower\n new_spec.power_err = binpower_err\n new_spec.m = nsamples * self.m\n\n if hasattr(self, 'unnorm_power'):\n _, binpower_unnorm, _, _ = \\\n rebin_data_log(self.freq, self.unnorm_power, f, dx=self.df)\n\n new_spec.unnorm_power = binpower_unnorm\n\n if hasattr(self, 'pds1'):\n new_spec.pds1 = self.pds1.rebin_log(f)\n if hasattr(self, 'pds2'):\n new_spec.pds2 = self.pds2.rebin_log(f)\n\n if hasattr(self, 'cs_all'):\n cs_all = []\n for c in self.cs_all:\n cs_all.append(c.rebin_log(f))\n new_spec.cs_all = cs_all\n\n return new_spec\n\n def coherence(self):\n \"\"\" Compute Coherence function of the cross spectrum.\n\n Coherence is defined in Vaughan and Nowak, 1996 [#]_.\n It is a Fourier frequency dependent measure of the linear correlation\n between time series measured simultaneously in two energy channels.\n\n Returns\n -------\n coh : numpy.ndarray\n Coherence function\n\n References\n ----------\n .. [#] http://iopscience.iop.org/article/10.1086/310430/pdf\n \"\"\"\n # this computes the averaged power spectrum, but using the\n # cross spectrum code to avoid circular imports\n\n return self.unnorm_power.real / (self.pds1.power.real *\n self.pds2.power.real)\n\n def _phase_lag(self):\n \"\"\"Return the fourier phase lag of the cross spectrum.\"\"\"\n return np.angle(self.unnorm_power)\n\n def time_lag(self):\n \"\"\"\n Calculate the fourier time lag of the cross spectrum. The time lag is\n calculate using the center of the frequency bins.\n \"\"\"\n if self.__class__ in [Crossspectrum, AveragedCrossspectrum]:\n ph_lag = self._phase_lag()\n\n return ph_lag / (2 * np.pi * self.freq)\n else:\n raise AttributeError(\"Object has no attribute named 'time_lag' !\")\n\n def plot(self, labels=None, axis=None, title=None, marker='-', save=False,\n filename=None):\n \"\"\"\n Plot the amplitude of the cross spectrum vs. the frequency using ``matplotlib``.\n\n Parameters\n ----------\n labels : iterable, default ``None``\n A list of tuple with ``xlabel`` and ``ylabel`` as strings.\n\n axis : list, tuple, string, default ``None``\n Parameter to set axis properties of the ``matplotlib`` figure. For example\n it can be a list like ``[xmin, xmax, ymin, ymax]`` or any other\n acceptable argument for the``matplotlib.pyplot.axis()`` method.\n\n title : str, default ``None``\n The title of the plot.\n\n marker : str, default '-'\n Line style and color of the plot. Line styles and colors are\n combined in a single format string, as in ``'bo'`` for blue\n circles. See ``matplotlib.pyplot.plot`` for more options.\n\n save : boolean, optional, default ``False``\n If ``True``, save the figure with specified filename.\n\n filename : str\n File name of the image to save. 
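# In `rebin_log` the bin width grows geometrically, d_nu_j = d_nu_{j-1} (1 + f).
# A minimal sketch of how such bin edges can be built; this is only an
# approximation of what `rebin_data_log` does, with illustrative argument names.
import numpy as np

def log_bin_edges(fmin, fmax, df0, f=0.01):
    """Frequency bin edges where each bin is (1 + f) times wider than the last."""
    edges = [fmin]
    df = df0
    while edges[-1] + df < fmax:
        edges.append(edges[-1] + df)
        df *= 1.0 + f
    edges.append(fmax)
    return np.asarray(edges)

# Powers are then averaged inside each bin (e.g. with np.histogram weights),
# and the edges are shifted to bin centers exactly as done above with
# `binfreq[:-1] + df / 2`.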
Depends on the boolean ``save``.\n \"\"\"\n try:\n import matplotlib.pyplot as plt\n except ImportError:\n raise ImportError(\"Matplotlib required for plot()\")\n\n plt.figure('crossspectrum')\n plt.plot(self.freq,\n np.abs(self.power),\n marker,\n color='b',\n label='Amplitude')\n plt.plot(self.freq,\n np.abs(self.power.real),\n marker,\n color='r',\n alpha=0.5,\n label='Real Part')\n plt.plot(self.freq,\n np.abs(self.power.imag),\n marker,\n color='g',\n alpha=0.5,\n label='Imaginary Part')\n\n if labels is not None:\n try:\n plt.xlabel(labels[0])\n plt.ylabel(labels[1])\n except TypeError:\n simon(\"``labels`` must be either a list or tuple with \"\n \"x and y labels.\")\n raise\n except IndexError:\n simon(\"``labels`` must have two labels for x and y \"\n \"axes.\")\n # Not raising here because in case of len(labels)==1, only\n # x-axis will be labelled.\n plt.legend(loc='best')\n if axis is not None:\n plt.axis(axis)\n\n if title is not None:\n plt.title(title)\n\n if save:\n if filename is None:\n plt.savefig('spec.png')\n else:\n plt.savefig(filename)\n else:\n plt.show(block=False)\n\n def classical_significances(self, threshold=1, trial_correction=False):\n \"\"\"\n Compute the classical significances for the powers in the power\n spectrum, assuming an underlying noise distribution that follows a\n chi-square distributions with 2M degrees of freedom, where M is the\n number of powers averaged in each bin.\n\n Note that this function will *only* produce correct results when the\n following underlying assumptions are fulfilled:\n\n 1. The power spectrum is Leahy-normalized\n 2. There is no source of variability in the data other than the\n periodic signal to be determined with this method. This is important!\n If there are other sources of (aperiodic) variability in the data, this\n method will *not* produce correct results, but instead produce a large\n number of spurious false positive detections!\n 3. There are no significant instrumental effects changing the\n statistical distribution of the powers (e.g. pile-up or dead time)\n\n By default, the method produces ``(index,p-values)`` for all powers in\n the power spectrum, where index is the numerical index of the power in\n question. If a ``threshold`` is set, then only powers with p-values\n *below* that threshold with their respective indices. If\n ``trial_correction`` is set to ``True``, then the threshold will be corrected\n for the number of trials (frequencies) in the power spectrum before\n being used.\n\n Parameters\n ----------\n threshold : float, optional, default ``1``\n The threshold to be used when reporting p-values of potentially\n significant powers. Must be between 0 and 1.\n Default is ``1`` (all p-values will be reported).\n\n trial_correction : bool, optional, default ``False``\n A Boolean flag that sets whether the ``threshold`` will be corrected\n by the number of frequencies before being applied. 
This decreases\n the ``threshold`` (p-values need to be lower to count as significant).\n Default is ``False`` (report all powers) though for any application\n where `threshold`` is set to something meaningful, this should also\n be applied!\n\n Returns\n -------\n pvals : iterable\n A list of ``(index, p-value)`` tuples for all powers that have p-values\n lower than the threshold specified in ``threshold``.\n\n \"\"\"\n if not self.norm == \"leahy\":\n raise ValueError(\"This method only works on \"\n \"Leahy-normalized power spectra!\")\n\n if np.size(self.m) == 1:\n # calculate p-values for all powers\n # leave out zeroth power since it just encodes the number of photons!\n pv = np.array([cospectra_pvalue(power, self.m)\n for power in self.power])\n else:\n pv = np.array([cospectra_pvalue(power, m)\n for power, m in zip(self.power, self.m)])\n\n # if trial correction is used, then correct the threshold for\n # the number of powers in the power spectrum\n if trial_correction:\n threshold /= self.power.shape[0]\n\n # need to add 1 to the indices to make up for the fact that\n # we left out the first power above!\n indices = np.where(pv < threshold)[0]\n\n pvals = np.vstack([pv[indices], indices])\n\n return pvals\n\n\nclass AveragedCrossspectrum(Crossspectrum):\n \"\"\"\n Make an averaged cross spectrum from a light curve by segmenting two\n light curves, Fourier-transforming each segment and then averaging the\n resulting cross spectra.\n\n Parameters\n ----------\n data1: :class:`stingray.Lightcurve`object OR iterable of :class:`stingray.Lightcurve` objects OR :class:`stingray.EventList` object\n A light curve from which to compute the cross spectrum. In some cases, this would\n be the light curve of the wavelength/energy/frequency band of interest.\n\n data2: :class:`stingray.Lightcurve`object OR iterable of :class:`stingray.Lightcurve` objects OR :class:`stingray.EventList` object\n A second light curve to use in the cross spectrum. In some cases, this would be\n the wavelength/energy/frequency reference band to compare the band of interest with.\n\n segment_size: float\n The size of each segment to average. Note that if the total\n duration of each :class:`Lightcurve` object in ``lc1`` or ``lc2`` is not an\n integer multiple of the ``segment_size``, then any fraction left-over\n at the end of the time series will be lost. Otherwise you introduce\n artifacts.\n\n norm: {``frac``, ``abs``, ``leahy``, ``none``}, default ``none``\n The normalization of the (real part of the) cross spectrum.\n\n Other Parameters\n ----------------\n gti: 2-d float array\n ``[[gti0_0, gti0_1], [gti1_0, gti1_1], ...]`` -- Good Time intervals.\n This choice overrides the GTIs in the single light curves. Use with\n care!\n\n dt : float\n The time resolution of the light curve. Only needed when constructing\n light curves in the case where data1 or data2 are of :class:EventList\n\n power_type: string, optional, default ``real``\n Parameter to choose among complete, real part and magnitude of\n the cross spectrum.\n\n silent : bool, default False\n Do not show a progress bar when generating an averaged cross spectrum.\n Useful for the batch execution of many spectra\n\n lc1: :class:`stingray.Lightcurve`object OR iterable of :class:`stingray.Lightcurve` objects\n For backwards compatibility only. Like ``data1``, but no\n :class:`stingray.events.EventList` objects allowed\n\n lc2: :class:`stingray.Lightcurve`object OR iterable of :class:`stingray.Lightcurve` objects\n For backwards compatibility only. 
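# The p-value calculation above relies on `cospectra_pvalue`, which accounts for
# the statistics of averaged cospectra. For an ordinary Leahy-normalized power
# spectrum the classical result is that m * P of a pure-noise power follows a
# chi-square distribution with 2m degrees of freedom; the sketch below covers
# only that simpler case, plus the Bonferroni-style trial correction. The real
# method additionally skips the zeroth (DC) power.
import numpy as np
from scipy.stats import chi2

def leahy_pvalues(powers, m, threshold=1.0, trial_correction=False):
    """Single-trial p-values for Leahy powers averaged over m spectra."""
    powers = np.asarray(powers, dtype=float)
    pvals = chi2.sf(m * powers, 2 * m)            # P(noise power >= observed)
    if trial_correction:
        threshold = threshold / powers.size       # correct for number of trials
    idx = np.where(pvals < threshold)[0]
    return np.vstack([pvals[idx], idx])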
Like ``data2``, but no\n :class:`stingray.events.EventList` objects allowed\n\n fullspec: boolean, optional, default ``False``\n If True, return the full array of frequencies, otherwise return just the\n positive frequencies.\n\n large_data : bool, default False\n Use only for data larger than 10**7 data points!! Uses zarr and dask for computation.\n\n save_all : bool, default False\n Save all intermediate PDSs used for the final average. Use with care.\n This is likely to fill up your RAM on medium-sized datasets, and to\n slow down the computation when rebinning.\n\n Attributes\n ----------\n freq: numpy.ndarray\n The array of mid-bin frequencies that the Fourier transform samples\n\n power: numpy.ndarray\n The array of cross spectra\n\n power_err: numpy.ndarray\n The uncertainties of ``power``.\n An approximation for each bin given by ``power_err= power/sqrt(m)``.\n Where ``m`` is the number of power averaged in each bin (by frequency\n binning, or averaging powerspectrum). Note that for a single\n realization (``m=1``) the error is equal to the power.\n\n df: float\n The frequency resolution\n\n m: int\n The number of averaged cross spectra\n\n n: int\n The number of time bins per segment of light curve\n\n nphots1: float\n The total number of photons in the first (interest) light curve\n\n nphots2: float\n The total number of photons in the second (reference) light curve\n\n gti: 2-d float array\n ``[[gti0_0, gti0_1], [gti1_0, gti1_1], ...]`` -- Good Time intervals.\n They are calculated by taking the common GTI between the\n two light curves\n \"\"\"\n\n def __init__(self, data1=None, data2=None, segment_size=None, norm='none',\n gti=None, power_type=\"real\", silent=False, lc1=None, lc2=None,\n dt=None, fullspec=False, large_data=False, save_all=False):\n\n\n if lc1 is not None or lc2 is not None:\n warnings.warn(\"The lcN keywords are now deprecated. 
Use dataN \"\n \"instead\", DeprecationWarning)\n # for backwards compatibility\n if data1 is None:\n data1 = lc1\n if data2 is None:\n data2 = lc2\n\n if segment_size is None and data1 is not None:\n raise ValueError(\"segment_size must be specified\")\n if segment_size is not None and not np.isfinite(segment_size):\n raise ValueError(\"segment_size must be finite!\")\n\n if large_data and data1 is not None and data2 is not None:\n if isinstance(data1, EventList):\n input_data = 'EventList'\n elif isinstance(data1, Lightcurve):\n input_data = 'Lightcurve'\n chunks = int(np.rint(segment_size // data1.dt))\n segment_size = chunks * data1.dt\n else:\n raise ValueError(\n f'Invalid input data type: {type(data1).__name__}')\n\n dir_path1 = saveData(data1, persist=False, chunks=chunks)\n dir_path2 = saveData(data2, persist=False, chunks=chunks)\n\n data_path1 = genDataPath(dir_path1)\n data_path2 = genDataPath(dir_path2)\n\n spec = createChunkedSpectra(input_data,\n 'AveragedCrossspectrum',\n data_path=list(data_path1 +\n data_path2),\n segment_size=segment_size,\n norm=norm,\n gti=gti,\n power_type=power_type,\n silent=silent,\n dt=dt)\n\n for key, val in spec.__dict__.items():\n setattr(self, key, val)\n\n return\n\n self.type = \"crossspectrum\"\n\n\n self.segment_size = segment_size\n self.power_type = power_type\n self.fullspec = fullspec\n\n self.show_progress = not silent\n self.dt = dt\n self.save_all = save_all\n\n if isinstance(data1, EventList):\n lengths = data1.gti[:, 1] - data1.gti[:, 0]\n good = lengths >= segment_size\n data1.gti = data1.gti[good]\n data1 = list(data1.to_lc_list(dt))\n\n if isinstance(data2, EventList):\n lengths = data2.gti[:, 1] - data2.gti[:, 0]\n good = lengths >= segment_size\n data2.gti = data2.gti[good]\n data2 = list(data2.to_lc_list(dt))\n\n Crossspectrum.__init__(self, data1, data2, norm, gti=gti,\n power_type=power_type, dt=dt, fullspec=fullspec)\n\n return\n\n def _make_auxil_pds(self, lc1, lc2):\n \"\"\"\n Helper method to create the power spectrum of both light curves\n independently.\n\n Parameters\n ----------\n lc1, lc2 : :class:`stingray.Lightcurve` objects\n Two light curves used for computing the cross spectrum.\n \"\"\"\n is_event = isinstance(lc1, EventList)\n is_lc = isinstance(lc1, Lightcurve)\n is_lc_iter = isinstance(lc1, Iterator)\n is_lc_list = isinstance(lc1, Iterable) and not is_lc_iter\n # A way to say that this is actually not a power spectrum\n if self.type != \"powerspectrum\" and \\\n (lc1 is not lc2) and (is_event or is_lc or is_lc_list):\n self.pds1 = AveragedCrossspectrum(lc1, lc1,\n segment_size=self.segment_size,\n norm='none', gti=self.gti,\n power_type=self.power_type,\n dt=self.dt, fullspec=self.fullspec,\n save_all=self.save_all)\n\n self.pds2 = AveragedCrossspectrum(lc2, lc2,\n segment_size=self.segment_size,\n norm='none', gti=self.gti,\n power_type=self.power_type,\n dt=self.dt, fullspec=self.fullspec,\n save_all=self.save_all)\n\n def _make_segment_spectrum(self, lc1, lc2, segment_size, silent=False):\n \"\"\"\n Split the light curves into segments of size ``segment_size``, and calculate a cross spectrum for\n each.\n\n Parameters\n ----------\n lc1, lc2 : :class:`stingray.Lightcurve` objects\n Two light curves used for computing the cross spectrum.\n\n segment_size : ``numpy.float``\n Size of each light curve segment to use for averaging.\n\n Other parameters\n ----------------\n silent : bool, default False\n Suppress progress bars\n\n Returns\n -------\n cs_all : list of :class:`Crossspectrum`` objects\n A list 
of cross spectra calculated independently from each light curve segment\n\n nphots1_all, nphots2_all : ``numpy.ndarray` for each of ``lc1`` and ``lc2``\n Two lists containing the number of photons for all segments calculated from ``lc1`` and ``lc2``.\n\n \"\"\"\n\n assert isinstance(lc1, Lightcurve)\n assert isinstance(lc2, Lightcurve)\n\n if lc1.tseg != lc2.tseg:\n simon(\"Lightcurves do not have same tseg. This means that the data\"\n \"from the two channels are not completely in sync. This \"\n \"might or might not be an issue. Keep an eye on it.\")\n\n # If dt differs slightly, its propagated error must not be more than\n # 1/100th of the bin\n if not np.isclose(lc1.dt, lc2.dt, rtol=0.01 * lc1.dt / lc1.tseg):\n raise ValueError(\"Light curves do not have same time binning dt.\")\n\n # In case a small difference exists, ignore it\n lc1.dt = lc2.dt\n\n current_gtis = cross_two_gtis(lc1.gti, lc2.gti)\n lc1.gti = lc2.gti = current_gtis\n lc1.apply_gtis()\n lc2.apply_gtis()\n\n if self.gti is None:\n self.gti = current_gtis\n else:\n if not np.allclose(self.gti, current_gtis):\n self.gti = np.vstack([self.gti, current_gtis])\n\n check_gtis(current_gtis)\n\n cs_all = []\n nphots1_all = []\n nphots2_all = []\n\n start_inds, end_inds = \\\n bin_intervals_from_gtis(current_gtis, segment_size, lc1.time,\n dt=lc1.dt)\n simon(\"Errorbars on cross spectra are not thoroughly tested. \"\n \"Please report any inconsistencies.\")\n\n local_show_progress = show_progress\n if not self.show_progress or silent:\n local_show_progress = lambda a: a\n\n for start_ind, end_ind in \\\n local_show_progress(zip(start_inds, end_inds)):\n time_1 = copy.deepcopy(lc1.time[start_ind:end_ind])\n counts_1 = copy.deepcopy(lc1.counts[start_ind:end_ind])\n counts_1_err = copy.deepcopy(lc1.counts_err[start_ind:end_ind])\n time_2 = copy.deepcopy(lc2.time[start_ind:end_ind])\n counts_2 = copy.deepcopy(lc2.counts[start_ind:end_ind])\n counts_2_err = copy.deepcopy(lc2.counts_err[start_ind:end_ind])\n if np.sum(counts_1) == 0 or np.sum(counts_2) == 0:\n warnings.warn(\n \"No counts in interval {}--{}s\".format(time_1[0],\n time_1[-1]))\n continue\n\n gti1 = np.array([[time_1[0] - lc1.dt / 2,\n time_1[-1] + lc1.dt / 2]])\n gti2 = np.array([[time_2[0] - lc2.dt / 2,\n time_2[-1] + lc2.dt / 2]])\n lc1_seg = Lightcurve(time_1, counts_1, err=counts_1_err,\n err_dist=lc1.err_dist,\n gti=gti1,\n dt=lc1.dt, skip_checks=True)\n lc2_seg = Lightcurve(time_2, counts_2, err=counts_2_err,\n err_dist=lc2.err_dist,\n gti=gti2,\n dt=lc2.dt, skip_checks=True)\n with warnings.catch_warnings(record=True) as w:\n cs_seg = Crossspectrum(lc1_seg, lc2_seg, norm=self.norm,\n power_type=self.power_type, fullspec=self.fullspec)\n\n cs_all.append(cs_seg)\n nphots1_all.append(np.sum(lc1_seg.counts))\n nphots2_all.append(np.sum(lc2_seg.counts))\n\n return cs_all, nphots1_all, nphots2_all\n\n def _make_crossspectrum(self, lc1, lc2, fullspec=False):\n \"\"\"\n Auxiliary method computing the normalized cross spectrum from two light curves.\n This includes checking for the presence of and applying Good Time Intervals, computing the\n unnormalized Fourier cross-amplitude, and then renormalizing using the required normalization.\n Also computes an uncertainty estimate on the cross spectral powers. 
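# `_make_segment_spectrum` walks over (start, end) index pairs supplied by
# `bin_intervals_from_gtis` and builds one Crossspectrum per segment. A
# stripped-down sketch that ignores GTIs and simply chops two evenly binned
# series into contiguous segments of `segment_size` seconds (illustrative names,
# not the stingray implementation):
import numpy as np
from scipy.fft import fft, fftfreq

def segment_cross_spectra(counts1, counts2, dt, segment_size):
    """One unnormalized cross spectrum per contiguous segment."""
    nbin = int(np.rint(segment_size / dt))   # samples per segment
    nseg = len(counts1) // nbin              # trailing leftover samples are lost
    freqs = fftfreq(nbin, dt)
    keep = freqs > 0
    spectra = []
    for i in range(nseg):
        sl = slice(i * nbin, (i + 1) * nbin)
        spectra.append((fft(counts1[sl]) * np.conj(fft(counts2[sl])))[keep])
    return freqs[keep], spectra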
Stingray uses the\n scipy.fft standards for the sign of the Nyquist frequency.\n\n Parameters\n ----------\n lc1, lc2 : :class:`stingray.Lightcurve` objects\n Two light curves used for computing the cross spectrum.\n\n fullspec: boolean, default ``False``,\n If True, return all frequencies otherwise return only positive frequencies\n \"\"\"\n local_show_progress = show_progress\n if not self.show_progress:\n local_show_progress = lambda a: a\n\n # chop light curves into segments\n if isinstance(lc1, Lightcurve) and \\\n isinstance(lc2, Lightcurve):\n\n if self.type == \"crossspectrum\":\n cs_all, nphots1_all, nphots2_all = \\\n self._make_segment_spectrum(lc1, lc2, self.segment_size)\n\n elif self.type == \"powerspectrum\":\n cs_all, nphots1_all = \\\n self._make_segment_spectrum(lc1, self.segment_size)\n\n else:\n raise ValueError(\"Type of spectrum not recognized!\")\n\n else:\n cs_all, nphots1_all, nphots2_all = [], [], []\n\n for lc1_seg, lc2_seg in local_show_progress(zip(lc1, lc2)):\n if self.type == \"crossspectrum\":\n cs_sep, nphots1_sep, nphots2_sep = \\\n self._make_segment_spectrum(lc1_seg, lc2_seg,\n self.segment_size,\n silent=True)\n nphots2_all.append(nphots2_sep)\n elif self.type == \"powerspectrum\":\n cs_sep, nphots1_sep = \\\n self._make_segment_spectrum(lc1_seg, self.segment_size,\n silent=True)\n\n else:\n raise ValueError(\"Type of spectrum not recognized!\")\n cs_all.append(cs_sep)\n nphots1_all.append(nphots1_sep)\n\n cs_all = np.hstack(cs_all)\n nphots1_all = np.hstack(nphots1_all)\n\n if self.type == \"crossspectrum\":\n nphots2_all = np.hstack(nphots2_all)\n\n m = len(cs_all)\n nphots1 = np.mean(nphots1_all)\n\n power_avg = np.zeros_like(cs_all[0].power)\n power_err_avg = np.zeros_like(cs_all[0].power_err)\n unnorm_power_avg = np.zeros_like(cs_all[0].unnorm_power)\n for cs in cs_all:\n power_avg += cs.power\n unnorm_power_avg += cs.unnorm_power\n power_err_avg += (cs.power_err) ** 2\n\n power_avg /= float(m)\n power_err_avg = np.sqrt(power_err_avg) / m\n unnorm_power_avg /= float(m)\n\n self.freq = cs_all[0].freq\n self.power = power_avg\n self.unnorm_power = unnorm_power_avg\n self.m = m\n self.power_err = power_err_avg\n self.df = cs_all[0].df\n self.n = cs_all[0].n\n self.nphots1 = nphots1\n if self.save_all:\n self.cs_all = cs_all\n\n if self.type == \"crossspectrum\":\n self.nphots1 = nphots1\n nphots2 = np.mean(nphots2_all)\n\n self.nphots2 = nphots2\n\n def coherence(self):\n \"\"\"Averaged Coherence function.\n\n\n Coherence is defined in Vaughan and Nowak, 1996 [#]_.\n It is a Fourier frequency dependent measure of the linear correlation\n between time series measured simultaneously in two energy channels.\n\n Compute an averaged Coherence function of cross spectrum by computing\n coherence function of each segment and averaging them. The return type\n is a tuple with first element as the coherence function and the second\n element as the corresponding uncertainty associated with it.\n\n Note : The uncertainty in coherence function is strictly valid for Gaussian \\\n statistics only.\n\n Returns\n -------\n (coh, uncertainty) : tuple of np.ndarray\n Tuple comprising the coherence function and uncertainty.\n\n References\n ----------\n .. [#] http://iopscience.iop.org/article/10.1086/310430/pdf\n \"\"\"\n if np.any(self.m < 50):\n simon(\"Number of segments used in averaging is \"\n \"significantly low. 
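# The averaging loop above adds up the per-segment powers and the squared
# errors, then divides by the number of segments m. An equivalent vectorized
# sketch (illustrative helper, not the stingray code path):
import numpy as np

def average_spectra(seg_powers, seg_errors):
    """Average per-segment powers; combine errors in quadrature over m segments."""
    seg_powers = np.asarray(seg_powers)
    seg_errors = np.asarray(seg_errors)
    m = seg_powers.shape[0]
    power_avg = seg_powers.mean(axis=0)
    power_err_avg = np.sqrt((seg_errors ** 2).sum(axis=0)) / m
    return power_avg, power_err_avg, m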
The result might not follow the \"\n \"expected statistical distributions.\")\n\n # Calculate average coherence\n unnorm_power_avg = self.unnorm_power\n\n num = np.absolute(unnorm_power_avg) ** 2\n\n # The normalization was 'none'!\n unnorm_powers_avg_1 = self.pds1.power.real\n unnorm_powers_avg_2 = self.pds2.power.real\n\n coh = num / (unnorm_powers_avg_1 * unnorm_powers_avg_2)\n coh[~np.isfinite(coh)] = 0.0\n\n # Calculate uncertainty\n uncertainty = \\\n (2 ** 0.5 * coh * (1 - coh)) / (np.sqrt(coh) * self.m ** 0.5)\n\n uncertainty[coh == 0] = 0.0\n\n return (coh, uncertainty)\n\n def time_lag(self):\n \"\"\"Calculate time lag and uncertainty.\n\n Equation from Bendat & Piersol, 2011 [bendat-2011]__.\n\n Returns\n -------\n lag : np.ndarray\n The time lag\n\n lag_err : np.ndarray\n The uncertainty in the time lag\n \"\"\"\n lag = super(AveragedCrossspectrum, self).time_lag()\n coh, uncert = self.coherence()\n\n dum = (1. - coh) / (2. * coh)\n\n dum[coh == 0] = 0.0\n\n lag_err = np.sqrt(dum / self.m) / (2 * np.pi * self.freq)\n\n return lag, lag_err\n",
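# Pulling together the formulas used by `coherence` and `time_lag` above:
# raw coherence from segment-averaged quantities, its Gaussian-statistics
# uncertainty, and the phase lag converted to a time lag with its error.
# A standalone sketch with illustrative argument names; like the class methods,
# it is only meaningful for a reasonably large number of segments m.
import numpy as np

def coherence_and_lag(avg_cross, avg_p1, avg_p2, freq, m):
    """Coherence, its uncertainty, and the time lag with its error."""
    coh = np.abs(avg_cross) ** 2 / (avg_p1 * avg_p2)
    coh_err = np.sqrt(2) * coh * (1 - coh) / (np.sqrt(coh) * np.sqrt(m))
    lag = np.angle(avg_cross) / (2 * np.pi * freq)          # phase -> time lag
    lag_err = np.sqrt((1 - coh) / (2 * coh * m)) / (2 * np.pi * freq)
    # The class methods additionally zero out bins with vanishing coherence or
    # non-finite values; that bookkeeping is omitted here.
    return coh, coh_err, lag, lag_err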
"import logging\nimport math\nimport copy\nimport os\nimport pickle\nimport warnings\nfrom collections.abc import Iterable\n\nimport numpy as np\nfrom astropy.io import fits\nfrom astropy.table import Table\nfrom astropy.logger import AstropyUserWarning\n\nimport stingray.utils as utils\n\nfrom .utils import assign_value_if_none, is_string, order_list_of_arrays\nfrom .gti import get_gti_from_all_extensions, load_gtis\n\n# Python 3\nimport pickle\n\n_H5PY_INSTALLED = True\n\ntry:\n import h5py\nexcept ImportError:\n _H5PY_INSTALLED = False\n\n\ndef rough_calibration(pis, mission):\n \"\"\"Make a rough conversion betwenn PI channel and energy.\n\n Only works for NICER, NuSTAR, and XMM.\n\n Parameters\n ----------\n pis: float or array of floats\n PI channels in data\n mission: str\n Mission name\n\n Returns\n -------\n energies : float or array of floats\n Energy values\n\n Examples\n --------\n >>> rough_calibration(0, 'nustar')\n 1.6\n >>> # It's case-insensitive\n >>> rough_calibration(1200, 'XMm')\n 1.2\n >>> rough_calibration(10, 'asDf')\n Traceback (most recent call last):\n ...\n ValueError: Mission asdf not recognized\n >>> rough_calibration(100, 'nicer')\n 1.0\n \"\"\"\n if mission.lower() == \"nustar\":\n return pis * 0.04 + 1.6\n elif mission.lower() == \"xmm\":\n return pis * 0.001\n elif mission.lower() == \"nicer\":\n return pis * 0.01\n raise ValueError(f\"Mission {mission.lower()} not recognized\")\n\n\ndef get_file_extension(fname):\n \"\"\"Get the extension from the file name.\n\n If g-zipped, add '.gz' to extension.\n\n Examples\n --------\n >>> get_file_extension('ciao.tar')\n '.tar'\n >>> get_file_extension('ciao.tar.gz')\n '.tar.gz'\n >>> get_file_extension('ciao.evt.gz')\n '.evt.gz'\n >>> get_file_extension('ciao.a.tutti.evt.gz')\n '.evt.gz'\n \"\"\"\n fname_root = fname.replace('.gz', '')\n fname_root = os.path.splitext(fname_root)[0]\n\n return fname.replace(fname_root, '')\n\n\ndef high_precision_keyword_read(hdr, keyword):\n \"\"\"Read FITS header keywords, also if split in two.\n\n In the case where the keyword is split in two, like\n\n MJDREF = MJDREFI + MJDREFF\n\n in some missions, this function returns the summed value. 
Otherwise, the\n content of the single keyword\n\n Parameters\n ----------\n hdr : dict_like\n The FITS header structure, or a dictionary\n\n keyword : str\n The key to read in the header\n\n Returns\n -------\n value : long double\n The value of the key, or ``None`` if something went wrong\n\n \"\"\"\n try:\n value = np.longdouble(hdr[keyword])\n return value\n except KeyError:\n pass\n try:\n if len(keyword) == 8:\n keyword = keyword[:7]\n value = np.longdouble(hdr[keyword + 'I'])\n value += np.longdouble(hdr[keyword + 'F'])\n return value\n except KeyError:\n return None\n\n\ndef _patch_mission_info(info, mission=None):\n \"\"\"Add some information that is surely missing in xselect.mdb.\n\n Examples\n --------\n >>> info = {'gti': 'STDGTI'}\n >>> new_info = _patch_mission_info(info, mission=None)\n >>> new_info['gti'] == info['gti']\n True\n >>> new_info = _patch_mission_info(info, mission=\"xmm\")\n >>> new_info['gti']\n 'STDGTI,GTI0'\n \"\"\"\n if mission is None:\n return info\n if mission.lower() == \"xmm\" and \"gti\" in info:\n info[\"gti\"] += \",GTI0\"\n return info\n\n\ndef read_mission_info(mission=None):\n \"\"\"Search the relevant information about a mission in xselect.mdb.\"\"\"\n curdir = os.path.abspath(os.path.dirname(__file__))\n fname = os.path.join(curdir, \"datasets\", \"xselect.mdb\")\n\n # If HEADAS is defined, search for the most up-to-date version of the\n # mission database\n if os.getenv(\"HEADAS\"):\n hea_fname = os.path.join(os.getenv(\"HEADAS\"), \"bin\", \"xselect.mdb\")\n if os.path.exists(hea_fname):\n fname = hea_fname\n if mission is not None:\n mission = mission.lower()\n\n db = {}\n with open(fname) as fobj:\n for line in fobj.readlines():\n line = line.strip()\n if mission is not None and not line.lower().startswith(mission):\n continue\n if line.startswith(\"!\") or line == \"\":\n continue\n allvals = line.split()\n string = allvals[0]\n value = allvals[1:]\n if len(value) == 1:\n value = value[0]\n\n data = string.split(\":\")[:]\n if mission is None:\n if data[0] not in db:\n db[data[0]] = {}\n previous_db_step = db[data[0]]\n else:\n previous_db_step = db\n data = data[1:]\n for key in data[:-1]:\n if key not in previous_db_step:\n previous_db_step[key] = {}\n previous_db_step = previous_db_step[key]\n previous_db_step[data[-1]] = value\n return _patch_mission_info(db, mission)\n\n\ndef _case_insensitive_search_in_list(string, list_of_strings):\n \"\"\"Search for a string in a list of strings, in a case-insensitive way.\n\n Example\n -------\n >>> _case_insensitive_search_in_list(\"a\", [\"A\", \"b\"])\n 'A'\n >>> _case_insensitive_search_in_list(\"a\", [\"c\", \"b\"]) is None\n True\n \"\"\"\n for s in list_of_strings:\n if string.lower() == s.lower():\n return s\n return None\n\n\ndef _get_additional_data(lctable, additional_columns):\n \"\"\"Get additional data from a FITS data table.\n\n Parameters\n ----------\n lctable: `astropy.io.fits.fitsrec.FITS_rec`\n Data table\n additional_columns: list of str\n List of column names to retrieve from the table\n\n Returns\n -------\n additional_data: dict\n Dictionary associating to each additional column the content of the\n table.\n \"\"\"\n additional_data = {}\n if additional_columns is not None:\n for a in additional_columns:\n key = _case_insensitive_search_in_list(a, lctable._coldefs.names)\n if key is not None:\n additional_data[a] = np.array(lctable.field(key))\n else:\n warnings.warn('Column ' + a + ' not found')\n additional_data[a] = np.zeros(len(lctable))\n\n return 
additional_data\n\n\ndef get_key_from_mission_info(info, key, default, inst=None, mode=None):\n \"\"\"Get the name of a header key or table column from the mission database.\n\n Many entries in the mission database have default values that can be\n altered for specific instruments or observing modes. Here, if there is a\n definition for a given instrument and mode, we take that, otherwise we use\n the default).\n\n Parameters\n ----------\n info : dict\n Nested dictionary containing all the information for a given mission.\n It can be nested, e.g. contain some info for a given instrument, and\n for each observing mode of that instrument.\n key : str\n The key to read from the info dictionary\n default : object\n The default value. It can be of any type, depending on the expected\n type for the entry.\n\n Other parameters\n ----------------\n inst : str\n Instrument\n mode : str\n Observing mode\n\n Returns\n -------\n retval : object\n The wanted entry from the info dictionary\n\n Examples\n --------\n >>> info = {'ecol': 'PI', \"A\": {\"ecol\": \"BLA\"}, \"C\": {\"M1\": {\"ecol\": \"X\"}}}\n >>> get_key_from_mission_info(info, \"ecol\", \"BU\", inst=\"A\", mode=None)\n 'BLA'\n >>> get_key_from_mission_info(info, \"ecol\", \"BU\", inst=\"B\", mode=None)\n 'PI'\n >>> get_key_from_mission_info(info, \"ecol\", \"BU\", inst=\"A\", mode=\"M1\")\n 'BLA'\n >>> get_key_from_mission_info(info, \"ecol\", \"BU\", inst=\"C\", mode=\"M1\")\n 'X'\n >>> get_key_from_mission_info(info, \"ghghg\", \"BU\", inst=\"C\", mode=\"M1\")\n 'BU'\n \"\"\"\n filt_info = copy.deepcopy(info)\n if inst is not None and inst in filt_info:\n filt_info.update(info[inst])\n filt_info.pop(inst)\n if mode is not None and mode in filt_info:\n filt_info.update(info[inst][mode])\n filt_info.pop(mode)\n\n if key in filt_info:\n return filt_info[key]\n return default\n\n\ndef lcurve_from_fits(\n fits_file,\n gtistring=\"GTI\",\n timecolumn=\"TIME\",\n ratecolumn=None,\n ratehdu=1,\n fracexp_limit=0.9,\n outfile=None,\n noclobber=False,\n outdir=None,\n):\n \"\"\"Load a lightcurve from a fits file.\n\n .. note ::\n FITS light curve handling is still under testing.\n Absolute times might be incorrect depending on the light curve format.\n\n Parameters\n ----------\n fits_file : str\n File name of the input light curve in FITS format\n\n Returns\n -------\n data : dict\n Dictionary containing all information needed to create a\n :class:`stingray.Lightcurve` object\n\n Other Parameters\n ----------------\n gtistring : str\n Name of the GTI extension in the FITS file\n timecolumn : str\n Name of the column containing times in the FITS file\n ratecolumn : str\n Name of the column containing rates in the FITS file\n ratehdu : str or int\n Name or index of the FITS extension containing the light curve\n fracexp_limit : float\n Minimum exposure fraction allowed\n noclobber : bool\n If True, do not overwrite existing files\n \"\"\"\n warnings.warn(\n \"\"\"WARNING! FITS light curve handling is still under testing.\n Absolute times might be incorrect.\"\"\"\n )\n # TODO:\n # treat consistently TDB, UTC, TAI, etc. This requires some documentation\n # reading. 
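# `get_key_from_mission_info` layers instrument- and mode-specific overrides on
# top of mission-wide defaults. A slightly simplified variant of that lookup
# pattern, with the doctest cases from the docstring above reused as a sanity
# check:
import copy

def lookup_with_overrides(info, key, default, inst=None, mode=None):
    """Mission defaults, optionally overridden per instrument and per mode."""
    flat = copy.deepcopy(info)
    if inst is not None and inst in flat:
        flat.update(flat.pop(inst))      # instrument-specific entries win
    if mode is not None and mode in flat:
        flat.update(flat.pop(mode))      # mode-specific entries win over both
    return flat.get(key, default)

info = {'ecol': 'PI', "A": {"ecol": "BLA"}, "C": {"M1": {"ecol": "X"}}}
assert lookup_with_overrides(info, "ecol", "BU", inst="A") == "BLA"
assert lookup_with_overrides(info, "ecol", "BU", inst="B") == "PI"
assert lookup_with_overrides(info, "ecol", "BU", inst="C", mode="M1") == "X"
assert lookup_with_overrides(info, "ghghg", "BU", inst="C", mode="M1") == "BU"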
For now, we assume TDB\n from astropy.io import fits as pf\n from astropy.time import Time\n import numpy as np\n from stingray.gti import create_gti_from_condition\n\n lchdulist = pf.open(fits_file)\n lctable = lchdulist[ratehdu].data\n\n # Units of header keywords\n tunit = lchdulist[ratehdu].header[\"TIMEUNIT\"]\n\n try:\n mjdref = high_precision_keyword_read(\n lchdulist[ratehdu].header, \"MJDREF\"\n )\n mjdref = Time(mjdref, scale=\"tdb\", format=\"mjd\")\n except Exception:\n mjdref = None\n\n try:\n instr = lchdulist[ratehdu].header[\"INSTRUME\"]\n except Exception:\n instr = \"EXTERN\"\n\n # ----------------------------------------------------------------\n # Trying to comply with all different formats of fits light curves.\n # It's a madness...\n try:\n tstart = high_precision_keyword_read(\n lchdulist[ratehdu].header, \"TSTART\"\n )\n tstop = high_precision_keyword_read(lchdulist[ratehdu].header, \"TSTOP\")\n except Exception: # pragma: no cover\n raise (Exception(\"TSTART and TSTOP need to be specified\"))\n\n # For nulccorr lcs this whould work\n\n timezero = high_precision_keyword_read(\n lchdulist[ratehdu].header, \"TIMEZERO\"\n )\n # Sometimes timezero is \"from tstart\", sometimes it's an absolute time.\n # This tries to detect which case is this, and always consider it\n # referred to tstart\n timezero = assign_value_if_none(timezero, 0)\n\n # for lcurve light curves this should instead work\n if tunit == \"d\":\n # TODO:\n # Check this. For now, I assume TD (JD - 2440000.5).\n # This is likely wrong\n timezero = Time(2440000.5 + timezero, scale=\"tdb\", format=\"jd\")\n tstart = Time(2440000.5 + tstart, scale=\"tdb\", format=\"jd\")\n tstop = Time(2440000.5 + tstop, scale=\"tdb\", format=\"jd\")\n # if None, use NuSTAR defaulf MJDREF\n mjdref = assign_value_if_none(\n mjdref,\n Time(\n np.longdouble(\"55197.00076601852\"), scale=\"tdb\", format=\"mjd\"\n ),\n )\n\n timezero = (timezero - mjdref).to(\"s\").value\n tstart = (tstart - mjdref).to(\"s\").value\n tstop = (tstop - mjdref).to(\"s\").value\n\n if timezero > tstart:\n timezero -= tstart\n\n time = np.array(lctable.field(timecolumn), dtype=np.longdouble)\n if time[-1] < tstart:\n time += timezero + tstart\n else:\n time += timezero\n\n try:\n dt = high_precision_keyword_read(lchdulist[ratehdu].header, \"TIMEDEL\")\n if tunit == \"d\":\n dt *= 86400\n except Exception:\n warnings.warn(\n \"Assuming that TIMEDEL is the median difference between the\"\n \" light curve times\",\n AstropyUserWarning,\n )\n # Avoid NaNs\n good = time == time\n dt = np.median(np.diff(time[good]))\n\n # ----------------------------------------------------------------\n if ratecolumn is None:\n for name in [\"RATE\", \"RATE1\", \"COUNTS\"]:\n if name in lctable.names:\n ratecolumn = name\n break\n else: # pragma: no cover\n raise ValueError(\n \"None of the accepted rate columns were found in the file\")\n\n rate = np.array(lctable.field(ratecolumn), dtype=float)\n\n errorcolumn = \"ERROR\"\n if ratecolumn == \"RATE1\":\n errorcolumn = \"ERROR1\"\n\n try:\n rate_e = np.array(lctable.field(errorcolumn), dtype=np.longdouble)\n except Exception:\n rate_e = np.zeros_like(rate)\n\n if \"RATE\" in ratecolumn:\n rate *= dt\n rate_e *= dt\n\n try:\n fracexp = np.array(lctable.field(\"FRACEXP\"), dtype=np.longdouble)\n except Exception:\n fracexp = np.ones_like(rate)\n\n good_intervals = (\n (rate == rate) * (fracexp >= fracexp_limit) * (fracexp <= 1)\n )\n\n rate[good_intervals] /= fracexp[good_intervals]\n rate_e[good_intervals] /= 
fracexp[good_intervals]\n\n rate[~good_intervals] = 0\n\n try:\n gtitable = lchdulist[gtistring].data\n gti_list = np.array(\n [\n [a, b]\n for a, b in zip(\n gtitable.field(\"START\"), gtitable.field(\"STOP\")\n )\n ],\n dtype=np.longdouble,\n )\n except Exception:\n gti_list = create_gti_from_condition(time, good_intervals)\n\n lchdulist.close()\n\n res = {\"time\": time,\n \"counts\": rate,\n \"err\": rate_e,\n \"gti\": gti_list,\n \"mjdref\": mjdref.mjd,\n \"dt\": dt,\n \"instr\": instr,\n \"header\": lchdulist[ratehdu].header.tostring()}\n return res\n\n\ndef load_events_and_gtis(\n fits_file,\n additional_columns=None,\n gtistring=None,\n gti_file=None,\n hduname=None,\n column=None,\n):\n \"\"\"Load event lists and GTIs from one or more files.\n\n Loads event list from HDU EVENTS of file fits_file, with Good Time\n intervals. Optionally, returns additional columns of data from the same\n HDU of the events.\n\n Parameters\n ----------\n fits_file : str\n\n Other parameters\n ----------------\n additional_columns: list of str, optional\n A list of keys corresponding to the additional columns to extract from\n the event HDU (ex.: ['PI', 'X'])\n gtistring : str\n Comma-separated list of accepted GTI extensions (default GTI,STDGTI),\n with or without appended integer number denoting the detector\n gti_file : str, default None\n External GTI file\n hduname : str or int, default 1\n Name of the HDU containing the event list\n column : str, default None\n The column containing the time values. If None, we use the name\n specified in the mission database, and if there is nothing there,\n \"TIME\"\n return_limits: bool, optional\n Return the TSTART and TSTOP keyword values\n\n Returns\n -------\n retvals : Object with the following attributes:\n ev_list : array-like\n Event times in Mission Epoch Time\n gti_list: [[gti0_0, gti0_1], [gti1_0, gti1_1], ...]\n GTIs in Mission Epoch Time\n additional_data: dict\n A dictionary, where each key is the one specified in additional_colums.\n The data are an array with the values of the specified column in the\n fits file.\n t_start : float\n Start time in Mission Epoch Time\n t_stop : float\n Stop time in Mission Epoch Time\n pi_list : array-like\n Raw Instrument energy channels\n cal_pi_list : array-like\n Calibrated PI channels (those that can be easily converted to energy\n values, regardless of the instrument setup.)\n energy_list : array-like\n Energy of each photon in keV (only for NuSTAR, NICER, XMM)\n instr : str\n Name of the instrument (e.g. EPIC-pn or FPMA)\n mission : str\n Name of the instrument (e.g. XMM or NuSTAR)\n mjdref : float\n MJD reference time for the mission\n header : str\n Full header of the FITS file, for debugging purposes\n detector_id : array-like, int\n Detector id for each photon (e.g. each of the CCDs composing XMM's or\n Chandra's instruments)\n \"\"\"\n from astropy.io import fits as pf\n\n hdulist = pf.open(fits_file)\n probe_header = hdulist[0].header\n # Let's look for TELESCOP here. This is the most common keyword to be\n # found in well-behaved headers. 
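# The light-curve branch above converts RATE columns to counts per bin
# (rate * dt) and divides by the fractional exposure where it is acceptable,
# zeroing everything else. A simplified standalone version of that step
# (illustrative names; the original also builds GTIs from the good bins):
import numpy as np

def rates_to_counts(rate, rate_err, dt, fracexp, fracexp_limit=0.9):
    """Convert count rates to counts per bin and correct for exposure fraction."""
    counts = np.asarray(rate, dtype=float) * dt
    counts_err = np.asarray(rate_err, dtype=float) * dt
    fracexp = np.asarray(fracexp, dtype=float)
    good = np.isfinite(counts) & (fracexp >= fracexp_limit) & (fracexp <= 1)
    counts[good] /= fracexp[good]
    counts_err[good] /= fracexp[good]
    counts[~good] = 0.0            # bins with too little exposure are zeroed
    return counts, counts_err, good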
If it is not in header 0, I take this key\n # and the remaining information from header 1.\n if \"TELESCOP\" not in probe_header:\n probe_header = hdulist[1].header\n mission_key = \"MISSION\"\n if mission_key not in probe_header:\n mission_key = \"TELESCOP\"\n mission = probe_header[mission_key].lower()\n\n db = read_mission_info(mission)\n instkey = get_key_from_mission_info(db, \"instkey\", \"INSTRUME\")\n instr = mode = None\n if instkey in probe_header:\n instr = probe_header[instkey].strip()\n\n modekey = get_key_from_mission_info(db, \"dmodekey\", None, instr)\n if modekey is not None and modekey in probe_header:\n mode = probe_header[modekey].strip()\n\n gtistring = get_key_from_mission_info(db, \"gti\", \"GTI,STDGTI\", instr, mode)\n if hduname is None:\n hduname = get_key_from_mission_info(db, \"events\", \"EVENTS\", instr, mode)\n\n if hduname not in hdulist:\n warnings.warn(f'HDU {hduname} not found. Trying first extension')\n hduname = 1\n\n datatable = hdulist[hduname].data\n header = hdulist[hduname].header\n\n ephem = timeref = timesys = None\n\n if \"PLEPHEM\" in header:\n ephem = header[\"PLEPHEM\"].strip().lstrip('JPL-').lower()\n if \"TIMEREF\" in header:\n timeref = header[\"TIMEREF\"].strip().lower()\n if \"TIMESYS\" in header:\n timesys = header[\"TIMESYS\"].strip().lower()\n\n if column is None:\n column = get_key_from_mission_info(db, \"time\", \"TIME\", instr, mode)\n ev_list = np.array(datatable.field(column), dtype=np.longdouble)\n\n detector_id = None\n ckey = get_key_from_mission_info(db, \"ccol\", \"NONE\", instr, mode)\n if ckey != \"NONE\" and ckey in datatable.columns.names:\n detector_id = datatable.field(ckey)\n\n det_number = None if detector_id is None else list(set(detector_id))\n\n timezero = np.longdouble(0.)\n if \"TIMEZERO\" in header:\n timezero = np.longdouble(header[\"TIMEZERO\"])\n\n ev_list += timezero\n\n t_start = ev_list[0]\n t_stop = ev_list[-1]\n if \"TSTART\" in header:\n t_start = np.longdouble(header[\"TSTART\"])\n if \"TSTOP\" in header:\n t_stop = np.longdouble(header[\"TSTOP\"])\n\n mjdref = np.longdouble(high_precision_keyword_read(header, \"MJDREF\"))\n\n # Read and handle GTI extension\n accepted_gtistrings = gtistring.split(\",\")\n\n if gti_file is None:\n # Select first GTI with accepted name\n try:\n gti_list = get_gti_from_all_extensions(\n hdulist,\n accepted_gtistrings=accepted_gtistrings,\n det_numbers=det_number,\n )\n except Exception: # pragma: no cover\n warnings.warn(\n \"No extensions found with a valid name. \"\n \"Please check the `accepted_gtistrings` values.\",\n AstropyUserWarning,\n )\n gti_list = np.array([[t_start, t_stop]], dtype=np.longdouble)\n else:\n gti_list = load_gtis(gti_file, gtistring)\n\n pi_col = get_key_from_mission_info(db, \"ecol\", \"PI\", instr, mode)\n if additional_columns is None:\n additional_columns = [pi_col]\n if pi_col not in additional_columns:\n additional_columns.append(pi_col)\n\n additional_data = _get_additional_data(datatable, additional_columns)\n hdulist.close()\n # Sort event list\n order = np.argsort(ev_list)\n ev_list = ev_list[order]\n if detector_id is not None:\n detector_id = detector_id[order]\n\n additional_data = order_list_of_arrays(additional_data, order)\n\n pi = additional_data[pi_col].astype(np.float32)\n cal_pi = pi\n\n # EventReadOutput() is an empty class. 
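# After reading the TIME column, the loader sorts the events and reorders every
# auxiliary column with the same index array (`np.argsort` followed by
# `order_list_of_arrays`). A tiny self-contained sketch of that pattern:
import numpy as np

def sort_events(times, **columns):
    """Sort event times and keep auxiliary columns (PI, detector id, ...) aligned."""
    order = np.argsort(times)
    return np.asarray(times)[order], {
        name: np.asarray(col)[order] for name, col in columns.items()
    }

t_sorted, cols = sort_events(
    np.array([3.0, 1.0, 2.0]),
    pi=np.array([10, 20, 30]),
    ccd=np.array([1, 2, 1]),
)
# t_sorted -> [1., 2., 3.];  cols["pi"] -> [20, 30, 10]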
We will assign a number of attributes to\n # it, like the arrival times of photons, the energies, and some information\n # from the header.\n returns = EventReadOutput()\n\n returns.ev_list = ev_list\n returns.gti_list = gti_list\n returns.pi_list = pi\n returns.cal_pi_list = cal_pi\n if \"energy\" in additional_data:\n returns.energy_list = additional_data[\"energy\"]\n else:\n try:\n returns.energy_list = rough_calibration(cal_pi, mission)\n except ValueError:\n returns.energy_list = None\n returns.instr = instr.lower()\n returns.mission = mission.lower()\n returns.mjdref = mjdref\n returns.header = header.tostring()\n returns.additional_data = additional_data\n returns.t_start = t_start\n returns.t_stop = t_stop\n returns.detector_id = detector_id\n returns.ephem = ephem\n returns.timeref = timeref\n returns.timesys = timesys\n\n return returns\n\n\nclass EventReadOutput():\n def __init__(self):\n pass\n\n\ndef mkdir_p(path): # pragma: no cover\n \"\"\"Safe ``mkdir`` function, found at [so-mkdir]_.\n\n Parameters\n ----------\n path : str\n The absolute path to the directory to be created\n\n Notes\n -----\n .. [so-mkdir] http://stackoverflow.com/questions/600268/mkdir-p-functionality-in-python\n \"\"\"\n import os\n import errno\n try:\n os.makedirs(path)\n except OSError as exc: # Python >2.5\n if exc.errno == errno.EEXIST and os.path.isdir(path):\n pass\n else:\n raise\n\n\ndef read_header_key(fits_file, key, hdu=1):\n \"\"\"Read the header key key from HDU hdu of the file ``fits_file``.\n\n Parameters\n ----------\n fits_file: str\n The file name and absolute path to the event file.\n\n key: str\n The keyword to be read\n\n Other Parameters\n ----------------\n hdu : int\n Index of the HDU extension from which the header key to be read.\n\n Returns\n -------\n value : object\n The value stored under ``key`` in ``fits_file``\n \"\"\"\n\n hdulist = fits.open(fits_file, ignore_missing_end=True)\n try:\n value = hdulist[hdu].header[key]\n except KeyError: # pragma: no cover\n value = ''\n hdulist.close()\n return value\n\n\ndef ref_mjd(fits_file, hdu=1):\n \"\"\"Read ``MJDREFF``, ``MJDREFI`` or, if failed, ``MJDREF``, from the FITS header.\n\n Parameters\n ----------\n fits_file : str\n The file name and absolute path to the event file.\n\n Other Parameters\n ----------------\n hdu : int\n Index of the HDU extension from which the header key to be read.\n\n Returns\n -------\n mjdref : numpy.longdouble\n the reference MJD\n \"\"\"\n\n if isinstance(fits_file, Iterable) and\\\n not is_string(fits_file): # pragma: no cover\n fits_file = fits_file[0]\n logging.info(\"opening %s\" % fits_file)\n\n hdulist = fits.open(fits_file, ignore_missing_end=True)\n\n ref_mjd_val = high_precision_keyword_read(hdulist[hdu].header, \"MJDREF\")\n\n hdulist.close()\n return ref_mjd_val\n\n\ndef common_name(str1, str2, default='common'):\n \"\"\"Strip two strings of the letters not in common.\n\n Filenames must be of same length and only differ by a few letters.\n\n Parameters\n ----------\n str1 : str\n str2 : str\n\n Other Parameters\n ----------------\n default : str\n The string to return if ``common_str`` is empty\n\n Returns\n -------\n common_str : str\n A string containing the parts of the two names in common\n\n \"\"\"\n if not len(str1) == len(str2):\n return default\n common_str = ''\n # Extract the MP root of the name (in case they're event files)\n\n for i, letter in enumerate(str1):\n if str2[i] == letter:\n common_str += letter\n # Remove leading and trailing underscores and dashes\n 
common_str = common_str.rstrip('_').rstrip('-')\n common_str = common_str.lstrip('_').lstrip('-')\n if common_str == '':\n common_str = default\n logging.debug('common_name: %s %s -> %s' % (str1, str2, common_str))\n return common_str\n\n\ndef split_numbers(number, shift=0):\n \"\"\"\n Split high precision number(s) into doubles.\n\n You can specify the number of shifts to move the decimal point.\n\n Parameters\n ----------\n number: long double\n The input high precision number which is to be split\n\n Other parameters\n ----------------\n shift: integer\n Move the cut by `shift` decimal points to the right (left if negative)\n\n Returns\n -------\n number_I: double\n First part of high precision number\n\n number_F: double\n Second part of high precision number\n\n Examples\n --------\n >>> n = 12.34\n >>> i, f = split_numbers(n)\n >>> i == 12\n True\n >>> np.isclose(f, 0.34)\n True\n >>> split_numbers(n, 2)\n (12.34, 0.0)\n >>> split_numbers(n, -1)\n (10.0, 2.34)\n \"\"\"\n if isinstance(number, Iterable):\n number = np.asarray(number)\n number *= 10**shift\n mods = [math.modf(n) for n in number]\n number_F = [f for f, _ in mods]\n number_I = [i for _, i in mods]\n else:\n number *= 10**shift\n number_F, number_I = math.modf(number)\n\n return np.double(number_I) / 10**shift, np.double(number_F) / 10**shift\n\n\ndef _save_pickle_object(object, filename):\n \"\"\"\n Save a class object in pickle format.\n\n Parameters\n ----------\n object: class instance\n A class object whose attributes are saved in a\n dictionary format\n\n filename: str\n Name of the file in which object is saved\n \"\"\"\n\n with open(filename, \"wb\") as f:\n pickle.dump(object, f)\n\n\ndef _retrieve_pickle_object(filename):\n \"\"\"\n Retrieves a pickled class object.\n\n Parameters\n ----------\n filename: str\n Name of the file in which object is saved\n\n Returns\n -------\n data: class object\n \"\"\"\n\n with open(filename, \"rb\") as f:\n return pickle.load(f)\n\n\ndef _save_hdf5_object(object, filename):\n \"\"\"\n Save a class object in hdf5 format.\n\n Parameters\n ----------\n object: class instance\n A class object whose attributes are saved in a\n dictionary format\n\n filename: str\n Name of the file in which object is saved\n \"\"\"\n\n items = vars(object)\n attrs = [name for name in items if items[name] is not None]\n\n with h5py.File(filename, 'w') as hf:\n for attr in attrs:\n data = items[attr]\n\n # If data is a single number, store as an attribute.\n if _isattribute(data):\n if isinstance(data, np.longdouble):\n data_I, data_F = split_numbers(data)\n names = [attr + '_I', attr + '_F']\n hf.attrs[names[0]] = data_I\n hf.attrs[names[1]] = data_F\n else:\n hf.attrs[attr] = data\n\n # If data is an array or list, create a dataset.\n else:\n try:\n if isinstance(data[0], np.longdouble):\n data_I, data_F = split_numbers(data)\n names = [attr + '_I', attr + '_F']\n hf.create_dataset(names[0], data=data_I)\n hf.create_dataset(names[1], data=data_F)\n else:\n hf.create_dataset(attr, data=data)\n except IndexError:\n # To account for numpy arrays of type 'None' (0-d)\n pass\n\n\ndef _retrieve_hdf5_object(filename):\n \"\"\"\n Retrieves an hdf5 format class object.\n\n Parameters\n ----------\n filename: str\n The name of file with which object was saved\n\n Returns\n -------\n data: dictionary\n Loads the data from an hdf5 object file and returns\n in dictionary format.\n \"\"\"\n\n with h5py.File(filename, 'r') as hf:\n dset_keys = hf.keys()\n attr_keys = hf.attrs.keys()\n data = {}\n\n dset_copy = 
list(dset_keys)[:]\n for key in dset_keys:\n\n # Make sure key hasn't been removed\n if key in dset_copy:\n # Longdouble case\n if key[-2:] in ['_I', '_F']:\n m_key = key[:-2]\n # Add integer and float parts\n data[m_key] = np.longdouble(hf[m_key + '_I'][()])\n data[m_key] += np.longdouble(hf[m_key + '_F'][()])\n # Remove integer and float parts from attributes\n dset_copy.remove(m_key + '_I')\n dset_copy.remove(m_key + '_F')\n else:\n data[key] = hf[key][()]\n\n attr_copy = list(attr_keys)[:]\n for key in attr_keys:\n\n # Make sure key hasn't been removed\n if key in attr_copy:\n # Longdouble case\n if key[-2:] in ['_I', '_F']:\n m_key = key[:-2]\n # Add integer and float parts\n data[m_key] = np.longdouble(hf.attrs[m_key + '_I'])\n data[m_key] += np.longdouble(hf.attrs[m_key + '_F'])\n # Remove integer and float parts from attributes\n attr_copy.remove(m_key + '_I')\n attr_copy.remove(m_key + '_F')\n else:\n data[key] = hf.attrs[key]\n\n return data\n\n\ndef _save_ascii_object(object, filename, fmt=\"%.18e\", **kwargs):\n \"\"\"\n Save an array to a text file.\n\n Parameters\n ----------\n object : numpy.ndarray\n An array with the data to be saved\n\n filename : str\n The file name to save to\n\n fmt : str or sequence of strs, optional\n Use for formatting of columns. See `numpy.savetxt` documentation\n for details.\n\n Other Parameters\n ----------------\n kwargs : any keyword argument taken by `numpy.savetxt`\n\n \"\"\"\n\n try:\n np.savetxt(filename, object, fmt=fmt, **kwargs)\n except TypeError:\n raise Exception(\"Formatting of columns not recognized! Use 'fmt' \"\n \"to format columns including strings or mixed types!\")\n\n pass\n\n\ndef _retrieve_ascii_object(filename, **kwargs):\n \"\"\"\n Helper function to retrieve ascii objects from file.\n Uses astropy.Table for reading and storing the data.\n\n Parameters\n ----------\n filename : str\n The name of the file with the data to be retrieved.\n\n Other Parameters\n -----------------------------\n usecols : {int | iterable}\n The indices of the columns in the file to be returned.\n By default, all columns will be returned\n\n skiprows : int\n The number of rows at the beginning to skip\n By default, no rows will be skipped.\n\n names : iterable\n A list of column names to be attached to the columns.\n By default, no column names are added, unless they are specified\n in the file header and can be read by astropy.Table.read\n automatically.\n\n Returns\n -------\n data : astropy.Table object\n An astropy.Table object with the data from the file\n \"\"\"\n if not isinstance(filename, str):\n raise TypeError(\"filename must be string!\")\n\n if 'usecols' in list(kwargs.keys()):\n if np.size(kwargs['usecols']) != 2:\n raise ValueError(\"Need to define two columns\")\n usecols = kwargs[\"usecols\"]\n else:\n usecols = None\n\n if 'skiprows' in list(kwargs.keys()):\n assert isinstance(kwargs[\"skiprows\"], int)\n skiprows = kwargs[\"skiprows\"]\n else:\n skiprows = 0\n\n if \"names\" in list(kwargs.keys()):\n names = kwargs[\"names\"]\n else:\n names = None\n\n data = Table.read(filename, data_start=skiprows,\n names=names, format=\"ascii\")\n\n if usecols is None:\n return data\n else:\n colnames = np.array(data.colnames)\n cols = colnames[usecols]\n\n return data[cols]\n\n\ndef _save_fits_object(object, filename, **kwargs):\n \"\"\"\n Save a class object in fits format.\n\n Parameters\n ----------\n object: class instance\n A class object whose attributes would be saved in a dictionary format.\n\n filename: str\n The file name to 
save to\n\n Additional Keyword Parameters\n -----------------------------\n tnames: str iterable\n The names of HDU tables. For instance, in case of eventlist,\n tnames could be ['EVENTS', 'GTI']\n\n colsassign: dictionary iterable\n This indicates the correct tables to which to assign columns\n to. If this is None or if a column is not provided, it/they will\n be assigned to the first table.\n\n For example, [{'gti':'GTI'}] indicates that gti values should be\n stored in GTI table.\n \"\"\"\n\n tables = []\n\n if 'colsassign' in list(kwargs.keys()):\n colsassign = kwargs['colsassign']\n iscolsassigned = True\n else:\n iscolsassigned = False\n\n if 'tnames' in list(kwargs.keys()):\n tables = kwargs['tnames']\n else:\n tables = ['MAIN']\n\n items = vars(object)\n attrs = [name for name in items if items[name] is not None]\n\n cols = []\n hdrs = []\n\n for t in tables:\n cols.append([])\n hdrs.append(fits.Header())\n\n for attr in attrs:\n data = items[attr]\n\n # Get the index of table to which column belongs\n if iscolsassigned and attr in colsassign.keys():\n index = tables.index(colsassign[attr])\n else:\n index = 0\n\n # If data is a single number, store as metadata\n if _isattribute(data):\n if isinstance(data, np.longdouble):\n # Longdouble case. Split and save integer and float parts\n data_I, data_F = split_numbers(data)\n names = [attr + '_I', attr + '_F']\n hdrs[index][names[0]] = data_I\n hdrs[index][names[1]] = data_F\n else:\n # Normal case. Save as it is\n hdrs[index][attr] = data\n\n # If data is an array or list, insert as table column\n else:\n try:\n if isinstance(data[0], np.longdouble):\n # Longdouble case. Split and save integer and float parts\n data_I, data_F = split_numbers(data)\n names = [attr + '_I', attr + '_F']\n cols[index].append(\n fits.Column(name=names[0],\n format='D',\n array=data_I))\n cols[index].append(\n fits.Column(name=names[1],\n format='D',\n array=data_F))\n else:\n # Normal case. Save as it is\n cols[index].append(\n fits.Column(name=attr,\n format=_lookup_format(data[0]),\n array=data))\n except IndexError:\n # To account for numpy arrays of type 'None' (0-d)\n pass\n\n tbhdu = fits.HDUList()\n\n # Create binary tables\n for i in range(0, len(tables)):\n if len(cols[i]) > 0:\n tbhdu.append(fits.BinTableHDU.from_columns(cols[i],\n header=hdrs[i],\n name=tables[i]))\n\n tbhdu.writeto(filename)\n\n\ndef _retrieve_fits_object(filename, **kwargs):\n \"\"\"\n Retrieves a fits format class object.\n\n Parameters\n ----------\n filename: str\n The name of file with which object was saved\n\n Other Parameters\n ----------------\n cols: str iterable\n The names of columns to extract from fits tables.\n\n Returns\n -------\n data: dictionary\n Loads the data from a fits object file and returns\n in dictionary format.\n \"\"\"\n\n data = {}\n\n if 'cols' in list(kwargs.keys()):\n cols = [col.upper() for col in kwargs['cols']]\n else:\n cols = []\n\n with fits.open(filename, memmap=False, ignore_missing_end=True) as hdulist:\n fits_cols = []\n\n # Get columns from all tables\n for i in range(1, len(hdulist)):\n fits_cols.append([h.upper() for h in hdulist[i].data.names])\n\n for c in cols:\n for i in range(0, len(fits_cols)):\n # .upper() is used because `fits` stores values in upper case\n hdr_keys = [h.upper() for h in hdulist[i + 1].header.keys()]\n\n # Longdouble case. 
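# Both the HDF5 and the FITS writers store `np.longdouble` values as a pair of
# ordinary doubles ('<name>_I' and '<name>_F') produced by `split_numbers`, and
# the readers add the two parts back together. A minimal sketch of that round
# trip (not the exact `split_numbers` implementation, which also supports a
# decimal-point shift):
import numpy as np

def split_longdouble(x):
    """Split a high-precision number into integer and fractional double parts."""
    x = np.longdouble(x)
    integer = np.floor(x)
    frac = x - integer                       # still long double here
    return np.double(integer), np.double(frac)

def join_longdouble(integer, frac):
    """Recover the high-precision value by summing the parts as long double."""
    return np.longdouble(integer) + np.longdouble(frac)

mjdref = np.longdouble("55197.00076601852")
i_part, f_part = split_longdouble(mjdref)    # stored e.g. as MJDREF_I / MJDREF_F
restored = join_longdouble(i_part, f_part)   # matches mjdref to long-double precision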
Check for columns\n if c + '_I' in fits_cols[i] or c + '_F' in fits_cols[i]:\n if c not in data.keys():\n data[c] = np.longdouble(hdulist[i + 1].data[c + '_I'])\n data[c] += np.longdouble(hdulist[i + 1].data[c + '_F'])\n\n # Longdouble case. Check for header keys\n if c + '_I' in hdr_keys or c + '_F' in hdr_keys:\n if c not in data.keys():\n data[c] = \\\n np.longdouble(hdulist[i + 1].header[c + '_I'])\n data[c] += \\\n np.longdouble(hdulist[i + 1].header[c + '_F'])\n\n # Normal case. Check for columns\n elif c in fits_cols[i]:\n data[c] = hdulist[i + 1].data[c]\n\n # Normal case. Check for header keys\n elif c in hdr_keys:\n data[c] = hdulist[i + 1].header[c]\n hdulist.close()\n return data\n\n\ndef _lookup_format(var):\n \"\"\"\n Looks up relevant format in fits.\n\n Parameters\n ----------\n var : object\n An object to look up in the table\n\n Returns\n -------\n lookup : str\n The str describing the type of ``var``\n \"\"\"\n\n lookup = {\"<type 'int'>\": \"J\", \"<type 'float'>\": \"E\",\n \"<type 'numpy.int64'>\": \"K\", \"<type 'numpy.float64'>\": \"D\",\n \"<type 'numpy.float128'>\": \"D\", \"<type 'str'>\": \"30A\",\n \"<type 'bool'\": \"L\"}\n\n form = type(var)\n\n try:\n return lookup[str(form)]\n except KeyError:\n # If an entry is not contained in lookup dictionary\n return \"D\"\n\n\ndef _isattribute(data):\n \"\"\"\n Check if data is a single number or an array.\n\n Parameters\n ----------\n data : object\n The object to be checked.\n\n Returns:\n bool\n True if the data is a single number, False if it is an iterable.\n \"\"\"\n\n if isinstance(data, Iterable) and not isinstance(data, (str, bytes)):\n return False\n else:\n return True\n\n\ndef write(input_, filename, format_='pickle', **kwargs):\n \"\"\"\n Pickle a class instance. For parameters depending on\n ``format_``, see individual function definitions.\n\n Parameters\n ----------\n object: a class instance\n The object to be stored\n\n filename: str\n The name of the file to be created\n\n format_: str\n The format in which to store file. Formats supported\n are ``pickle``, ``hdf5``, ``ascii`` or ``fits``\n \"\"\"\n\n if format_ == 'pickle':\n _save_pickle_object(input_, filename)\n\n elif format_ == 'hdf5':\n if _H5PY_INSTALLED:\n _save_hdf5_object(input_, filename)\n else:\n utils.simon('h5py not installed, using pickle instead'\n 'to save object.')\n _save_pickle_object(input_, filename.split('.')[0] +\n '.pickle')\n\n elif format_ == 'ascii':\n _save_ascii_object(input_, filename, **kwargs)\n\n elif format_ == 'fits':\n _save_fits_object(input_, filename, **kwargs)\n\n else:\n utils.simon('Format not understood.')\n\n\ndef read(filename, format_='pickle', **kwargs):\n \"\"\"\n Return a saved class instance.\n\n Parameters\n ----------\n filename: str\n The name of the file to be retrieved.\n\n format_: str\n The format used to store file. 
Supported formats are\n pickle, hdf5, ascii or fits.\n\n Returns\n -------\n data : {``object`` | ``astropy.table`` | ``dict``}\n\n * If ``format_`` is ``pickle``, an object is returned.\n * If ``format_`` is ``ascii``, `astropy.table` object is returned.\n * If ``format_`` is ``hdf5`` or 'fits``, a dictionary object is returned.\n \"\"\"\n\n if format_ == 'pickle':\n return _retrieve_pickle_object(filename)\n\n elif format_ == 'hdf5':\n if _H5PY_INSTALLED:\n return _retrieve_hdf5_object(filename)\n else:\n utils.simon('h5py not installed, cannot read an'\n 'hdf5 object.')\n\n elif format_ == 'ascii':\n return _retrieve_ascii_object(filename, **kwargs)\n\n elif format_ == 'fits':\n return _retrieve_fits_object(filename, **kwargs)\n\n else:\n utils.simon('Format not understood.')\n\n\ndef savefig(filename, **kwargs):\n \"\"\"\n Save a figure plotted by ``matplotlib``.\n\n Note : This function is supposed to be used after the ``plot``\n function. Otherwise it will save a blank image with no plot.\n\n Parameters\n ----------\n filename : str\n The name of the image file. Extension must be specified in the\n file name. For example filename with `.png` extension will give a\n rasterized image while ``.pdf`` extension will give a vectorized\n output.\n\n kwargs : keyword arguments\n Keyword arguments to be passed to ``savefig`` function of\n ``matplotlib.pyplot``. For example use `bbox_inches='tight'` to\n remove the undesirable whitepace around the image.\n \"\"\"\n\n try:\n import matplotlib.pyplot as plt\n except ImportError:\n raise ImportError(\"Matplotlib required for savefig()\")\n\n if not plt.fignum_exists(1):\n utils.simon(\"use ``plot`` function to plot the image first and \"\n \"then use ``savefig`` to save the figure.\")\n\n plt.savefig(filename, **kwargs)\n"
] | [
[
"matplotlib.pyplot.legend",
"scipy.stats.laplace",
"numpy.sqrt",
"numpy.zeros_like",
"numpy.mean",
"numpy.any",
"scipy.fft.fftfreq",
"numpy.exp",
"numpy.where",
"numpy.hstack",
"scipy.special.gammaincc",
"numpy.allclose",
"numpy.size",
"numpy.diff",
"scipy.special.factorial",
"matplotlib.pyplot.axis",
"matplotlib.pyplot.figure",
"numpy.isclose",
"numpy.log",
"scipy.special.gamma",
"matplotlib.pyplot.title",
"numpy.rint",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.show",
"numpy.array",
"numpy.sum",
"matplotlib.pyplot.ylabel",
"scipy.fft.fft",
"numpy.absolute",
"numpy.conj",
"numpy.isfinite",
"numpy.abs",
"scipy.stats.norm",
"matplotlib.pyplot.xlabel",
"numpy.angle",
"numpy.vstack"
],
[
"matplotlib.pyplot.fignum_exists",
"numpy.ones_like",
"numpy.asarray",
"matplotlib.pyplot.savefig",
"numpy.longdouble",
"numpy.size",
"numpy.zeros_like",
"numpy.diff",
"numpy.savetxt",
"numpy.argsort",
"numpy.array",
"numpy.double"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
SanjibSarkarU/EDRC | [
"c2408fad8b007b4709ee91caf173f98612afadb1"
] | [
"iver.py"
] | [
"import datetime\nimport re\nimport threading\nimport time\nimport tkinter as tk\nfrom collections import deque\nfrom queue import Queue\nfrom time import monotonic\n\nimport pandas as pd\nimport rasterio\nimport serial\nfrom geographiclib.geodesic import Geodesic\nfrom matplotlib import pyplot as plt, animation\nfrom matplotlib.backends.backend_tkagg import FigureCanvasTkAgg\nfrom rasterio.plot import show\n\nimport functions\n# just checking git\nrf, ac = 'COM11', 'COM13'\n# rf, ac = 'COM5', 'COM7'\n\nser_rf = serial.Serial(rf, baudrate=9600, bytesize=8, parity='N', stopbits=1, timeout=1, xonxoff=0)\nser_ac = serial.Serial(ac, baudrate=9600, bytesize=8, parity='N', stopbits=1, timeout=1, xonxoff=0)\n\n\nclass App(tk.Frame):\n def __init__(self, master=None, **kwargs):\n tk.Frame.__init__(self, master, **kwargs)\n self.event_plot = threading.Event()\n self.q_plot = Queue()\n self.current_position_iver = {}\n self.disnc_remaining = 0\n self.wp_nxt = '1'\n self.auv = '3089'\n self.omw_clear = False\n self.q_wp_omw = Queue()\n self.send_through_rf = True\n self.send_through_ac = False\n\n self.running = False\n self.ani = None\n btns = tk.Frame(self)\n btns.pack()\n\n lbl = tk.Label(btns, text=\"update interval (ms)\")\n lbl.pack(side=tk.LEFT)\n\n self.interval = tk.Entry(btns, width=5)\n self.intervl = 20\n self.interval.insert(0, str(self.intervl))\n self.interval.pack(side=tk.LEFT)\n\n self.btn = tk.Button(btns, text='Start', command=self.on_click)\n self.btn.pack(side=tk.LEFT)\n\n self.btn_rf = tk.Button(btns, text='RF', command=self.rf)\n self.btn_rf.pack(side=tk.LEFT)\n\n self.btn_ac = tk.Button(btns, text='AC', command=self.ac)\n self.btn_ac.pack(side=tk.LEFT)\n\n self.btn_exit = tk.Button(btns, text='Exit', command=quit)\n self.btn_exit.pack(side=tk.LEFT)\n\n self.fig = plt.Figure()\n self.ax1 = self.fig.add_subplot(111)\n self.line_iver, = self.ax1.plot([], [], 'r-', linewidth=1.5)\n self.canvas = FigureCanvasTkAgg(self.fig, master=master)\n img = rasterio.open('Stennis_QW.tif') # 'Cat_Island_Low.tif' , 'Stennis_QW.tif'\n show(img, ax=self.ax1)\n self.canvas.get_tk_widget().pack(expand=True)\n self.canvas.figure.tight_layout()\n self.geod = Geodesic(6378388, 1 / 297.0)\n # self.waypoints_iver = [[30.35099, -89.63138, 3], [30.35125, -89.63079, 3.5]]\n # self.waypoints_iver = [[30.3603, -89.0942, 10.5], [30.3546, -89.0734, 14.5],\n # [30.3151, -89.0589, 5.5], [30.2833, -89.0693, 3.0]]\n self.waypoints_iver = [[30.35099, -89.63138, 3], [30.35125, -89.63079, 3.5],\n [30.35173, -89.63064, 3], [30.35203, -89.62992, 3],\n [30.35247, -89.62979, 4], [30.35270, -89.62917, 4],\n [30.35322, -89.62920, 3.5], [30.35345, -89.62827, 4],\n [30.35099, -89.63138, 3.5]]\n # self.waypoints_iver = [[30.3612, -89.1002, 9], [30.3569, -89.1003, 9.5],\n # [30.3666, -89.1004, 5]]\n self.total_WPs = len(self.waypoints_iver)\n df = pd.DataFrame(self.waypoints_iver, columns=['lat', 'lon', 'speed'])\n self.ax1.scatter(df['lon'], df['lat'], color='red', marker='.', s=250,\n linewidths=0.05) # facecolors='none', edgecolors='r',\n for i in range(len(df)):\n self.ax1.scatter(df.lon[i], df.lat[i], marker=\"$\" + str(i + 1) + \"$\", color='black', linewidths=.09)\n\n HISTORY_LEN = 2000000\n self.xdata = deque([], maxlen=HISTORY_LEN)\n self.ydata = deque([], maxlen=HISTORY_LEN)\n\n def on_click(self):\n if self.ani is None:\n return self.start()\n if self.running:\n self.ani.event_source.stop()\n self.btn.config(text='Un-Pause')\n else:\n self.ani.event_source.start()\n self.btn.config(text='Pause')\n self.running = not 
self.running\n\n def start(self):\n threading.Thread(target=self.iver, daemon=True).start()\n threading.Thread(target=self.read_comports, daemon=True).start()\n self.ani = animation.FuncAnimation(\n self.fig,\n self.update_graph,\n # frames=self.lat_w.size - 1,\n interval=int(self.interval.get()),\n repeat=False,\n blit=True)\n self.running = True\n self.btn.config(text='Pause')\n self.ani._start()\n\n def iver_status(self):\n # print (nxt_wp)\n # 1 m/s = 1.94384 Knot\n iver_sta = '$OSI,8080808080,S,' + self.wp_nxt + ',' + \\\n str(self.current_position_iver['Latitude']) + ',' + str(self.current_position_iver['Longitude']) \\\n + ',' + str(self.current_position_iver['speed'] * 1.94384) + ',' + str(self.disnc_remaining) \\\n + ',N,0.000,P0,-1.4743,,0,292.5,0.0,94.3,False,IVER3-3089,2.5,True,False ' + '*'\n return '$AC;IVER3-' + self.auv + ';' + iver_sta + functions.check_sum(iver_sta) + '\\r\\n'\n\n def osd_ACK(self):\n return '$AC;IVER3-' + self.auv + ';$ACK,8,0,0*5D' + '\\r\\n'\n\n def omw_Ack(self):\n ack = '$ACK,16,0,0*'\n return '$AC;IVER3-' + self.auv + ';' + ack + functions.check_sum(ack) + '\\r\\n'\n\n def rf(self):\n if self.send_through_rf:\n self.send_through_rf = False\n self.send_through_ac = True\n self.btn_rf.config(text='RF-stop')\n self.btn_ac.config(text='AC-on')\n else:\n self.send_through_rf = True\n self.send_through_ac = False\n self.btn_rf.config(text='RF-on')\n self.btn_ac.config(text='AC-stop')\n\n def ac(self):\n if self.send_through_ac:\n self.send_through_ac = False\n self.send_through_rf = True\n self.btn_ac.config(text='AC-stop')\n self.btn_ac.config(text='AC-on')\n else:\n self.send_through_ac = True\n self.send_through_rf = False\n self.btn_ac.config(text='AC-on')\n self.btn_rf.config(text='RF-stop')\n\n def iver(self):\n print(datetime.datetime.now(), ': started')\n lat_i_past, lng_i_past, _ = self.waypoints_iver[0]\n while self.waypoints_iver:\n t_start = monotonic()\n lat_i_nxt, lng_i_nxt, speed_i = self.waypoints_iver[0]\n # speed_i *= 0.51 # * 1 knot = 0.514 m/s\n l = self.geod.InverseLine(lat_i_past, lng_i_past, lat_i_nxt, lng_i_nxt)\n nxt_wp_disnc = l.s13\n distance_travelled = 0\n while distance_travelled <= nxt_wp_disnc:\n g = l.Position(distance_travelled, Geodesic.STANDARD | Geodesic.LONG_UNROLL)\n lat_i, lng_i = g['lat2'], g['lon2']\n self.current_position_iver = {'Latitude': lat_i, 'Longitude': lng_i, 'speed': speed_i}\n # self.q_plot.put(self.current_position_iver)\n self.event_plot.set()\n # t_elapsed = monotonic() - t_start\n # distance_travelled = speed_i * t_elapsed\n # self.disnc_remaining = nxt_wp_disnc - distance_travelled\n # time.sleep(self.intervl * 0.009)\n while not self.q_wp_omw.empty():\n wp_omw = self.q_wp_omw.get()\n lat_i_r, lng_i_r, speed_i_r = wp_omw['lat'], wp_omw['lon'], wp_omw['speed']\n # speed_i_r *= 0.51 # 1 knot = 0.514 m/s\n self.wp_nxt = 'WP1'\n l_i_r = self.geod.InverseLine(self.current_position_iver['Latitude'],\n self.current_position_iver['Longitude'],\n lat_i_r, lng_i_r)\n omw_distance = l_i_r.s13\n omw_dstnce_travld = 0\n t_start_r = monotonic()\n while omw_dstnce_travld < omw_distance:\n if self.omw_clear:\n self.omw_clear = False\n print('OMW_CLEAR')\n break\n g_i_r = l_i_r.Position(omw_dstnce_travld, Geodesic.STANDARD | Geodesic.LONG_UNROLL)\n lat_i_r, lng_i_r = g_i_r['lat2'], g_i_r['lon2']\n self.current_position_iver = {'Latitude': lat_i_r, 'Longitude': lng_i_r, 'speed': speed_i_r}\n # self.q_plot.put(self.current_position_iver)\n t_elapsed_r = monotonic() - t_start_r\n omw_dstnce_travld = speed_i_r * 
t_elapsed_r\n omw_distance_remaining = omw_distance - omw_dstnce_travld\n self.disnc_remaining = omw_distance_remaining\n time.sleep(self.intervl * 0.009)\n if self.q_wp_omw.qsize() == 0:\n self.waypoints_iver.insert(0, self.waypoints_iver[0])\n t_elapsed = monotonic() - t_start\n distance_travelled = speed_i * t_elapsed\n self.disnc_remaining = nxt_wp_disnc - distance_travelled\n time.sleep(self.intervl * 0.009)\n lat_i_past, lng_i_past = self.current_position_iver['Latitude'], self.current_position_iver['Longitude']\n self.waypoints_iver.pop(0)\n remaining_WPs = self.total_WPs - len(self.waypoints_iver)\n print(datetime.datetime.now(),\n ': Total WPs: {}, remaining WPs: {}/{}'.format(self.total_WPs, len(self.waypoints_iver),\n remaining_WPs))\n self.wp_nxt = str(remaining_WPs)\n print(datetime.datetime.now(), ': nxt_WP: ', self.wp_nxt)\n\n def read_comports(self):\n while True:\n # print('Status: RF: {}, AC {}'.format(self.send_through_rf, self.send_through_ac))\n try:\n if (self.send_through_rf and ser_rf.inWaiting() > 0) or (\n self.send_through_ac and ser_ac.inWaiting() > 0):\n received_data_through = 'RF' if ser_rf.inWaiting() > 0 else 'AC'\n read_com = ser_rf.readline().decode().strip() if received_data_through == 'RF' else ser_ac.readline().decode().strip()\n print(datetime.datetime.now(), ':received through: ', received_data_through, read_com)\n # print('Status: RF: {}, AC {}', self.send_through_rf, self.send_through_ac)\n if functions.received_stream(read_com) == 'osd' and functions.osd_req_recvd(read_com) == 0:\n print(datetime.datetime.now(), \": Sending current Status through : \", received_data_through)\n ser_rf.write(self.iver_status().encode()) if received_data_through == 'RF' else ser_ac.write(\n self.iver_status().encode())\n ser_rf.write(self.osd_ACK().encode()) if received_data_through == 'RF' else ser_ac.write(\n self.osd_ACK().encode())\n # print(\"Time write:{} sec\".format(time.perf_counter() - toc_CS))\n elif functions.received_stream(read_com) == 'omw' and functions.omw_req_recvd(read_com) == 0:\n omw_rec = read_com.split(\";\")[2].split(',')\n ser_rf.write(self.omw_Ack().encode()) if received_data_through == 'RF' else ser_ac.write(\n self.omw_Ack().encode())\n print(datetime.datetime.now(), ': Sending OMW acknowledgement through :', received_data_through,\n self.omw_Ack())\n if re.search('CLEAR', read_com):\n self.q_wp_omw.queue.clear()\n self.omw_clear = True\n self.q_wp_omw.put({'lat': float(omw_rec[2]), 'lon': float(omw_rec[3]),\n 'speed': float(omw_rec[7])})\n else:\n self.q_wp_omw.put({'lat': float(omw_rec[2]), 'lon': float(omw_rec[3]),\n 'speed': float(omw_rec[7])})\n else:\n time.sleep(0.5)\n except Exception as e:\n print(\" Exception raised\", e)\n continue\n\n def update_graph(self, i):\n self.event_plot.wait()\n self.xdata.append(self.current_position_iver['Longitude'])\n self.ydata.append(self.current_position_iver['Latitude'])\n # plot_inbox = self.q_plot.get()\n # self.xdata.append(plot_inbox['Longitude'])\n # self.ydata.append(plot_inbox['Latitude'])\n # ax.plot([lng_i_p, current_position_iver['Longitude']], [lat_i_p, current_position_iver['Latitude']], 'r') if \\\n # lat_i_p != 0.0 else ax.plot(plot_inbox['Longitude'], plot_inbox['Latitude'], 'r')\n # lat_i_p, lng_i_p = current_position_iver['Latitude'], current_position_iver['Longitude']\n\n self.line_iver.set_data(self.xdata, self.ydata)\n return self.line_iver,\n\n\ndef main():\n root = tk.Tk()\n root.title('Iver_v2')\n root.iconbitmap('usm.ico')\n app = App(root)\n app.pack()\n 
root.mainloop()\n\n\nif __name__ == '__main__':\n main()\n"
] | [
[
"matplotlib.pyplot.Figure",
"pandas.DataFrame",
"matplotlib.backends.backend_tkagg.FigureCanvasTkAgg"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
jiye-ML/CoCosNet | [
"c4b3f44393462c8353c6c6952d7b05496298df1c"
] | [
"models/networks/base_network.py"
] | [
"\"\"\"\nCopyright (C) 2019 NVIDIA Corporation. All rights reserved.\nLicensed under the CC BY-NC-SA 4.0 license (https://creativecommons.org/licenses/by-nc-sa/4.0/legalcode).\n\"\"\"\n\nimport torch.nn as nn\nfrom torch.nn import init\n\n\nclass BaseNetwork(nn.Module):\n def __init__(self):\n super(BaseNetwork, self).__init__()\n\n @staticmethod\n def modify_commandline_options(parser, is_train):\n return parser\n\n def print_network(self):\n if isinstance(self, list):\n self = self[0]\n num_params = 0\n for param in self.parameters():\n num_params += param.numel()\n print('Network [%s] was created. Total number of parameters: %.1f million. '\n 'To see the architecture, do print(network).'\n % (type(self).__name__, num_params / 1000000))\n\n def init_weights(self, init_type='normal', gain=0.02):\n def init_func(m):\n classname = m.__class__.__name__\n if classname.find('BatchNorm2d') != -1:\n if hasattr(m, 'weight') and m.weight is not None:\n init.normal_(m.weight.data, 1.0, gain)\n if hasattr(m, 'bias') and m.bias is not None:\n init.constant_(m.bias.data, 0.0)\n elif hasattr(m, 'weight') and (classname.find('Conv') != -1 or classname.find('Linear') != -1):\n if init_type == 'normal':\n init.normal_(m.weight.data, 0.0, gain)\n elif init_type == 'xavier':\n init.xavier_normal_(m.weight.data, gain=gain)\n elif init_type == 'xavier_uniform':\n init.xavier_uniform_(m.weight.data, gain=1.0)\n elif init_type == 'kaiming':\n init.kaiming_normal_(m.weight.data, a=0, mode='fan_in')\n elif init_type == 'orthogonal':\n init.orthogonal_(m.weight.data, gain=gain)\n elif init_type == 'none': # uses pytorch's default init method\n m.reset_parameters()\n else:\n raise NotImplementedError('initialization method [%s] is not implemented' % init_type)\n if hasattr(m, 'bias') and m.bias is not None:\n init.constant_(m.bias.data, 0.0)\n\n self.apply(init_func)\n\n # propagate to children\n for m in self.children():\n if hasattr(m, 'init_weights'):\n m.init_weights(init_type, gain)\n"
] | [
[
"torch.nn.init.constant_",
"torch.nn.init.xavier_normal_",
"torch.nn.init.normal_",
"torch.nn.init.orthogonal_",
"torch.nn.init.xavier_uniform_",
"torch.nn.init.kaiming_normal_"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
am-ivanov/dace | [
"4d65e0951c112160fe783766404a806b6043b521",
"4d65e0951c112160fe783766404a806b6043b521"
] | [
"tests/state_transition_array_test.py",
"tests/codegen/mpi_axpy.py"
] | [
"import dace as dp\nimport numpy as np\n\nsdfg = dp.SDFG('sta_test')\ns0 = sdfg.add_state()\ns1 = sdfg.add_state()\ns2 = sdfg.add_state()\n\n# Arrays\ninp = s0.add_array('inp', [1], dp.float32)\nA = s0.add_array('A', [1], dp.float32)\nt = s0.add_tasklet('seta', {'a'}, {'b'}, 'b = a')\ns0.add_edge(inp, None, t, 'a', dp.Memlet.from_array(inp.data, inp.desc(sdfg)))\ns0.add_edge(t, 'b', A, None, dp.Memlet.from_array(A.data, A.desc(sdfg)))\n\nA = s1.add_array('A', [1], dp.float32)\nt = s1.add_tasklet('geta', {'a'}, {}, 'printf(\"ok %f\\\\n\", a + 1)')\ns1.add_edge(A, None, t, 'a', dp.Memlet.from_array(A.data, A.desc(sdfg)))\n\nA = s2.add_array('A', [1], dp.float32)\nt = s2.add_tasklet('geta', {'a'}, {}, 'printf(\"BAD %f\\\\n\", a - 1)')\ns2.add_edge(A, None, t, 'a', dp.Memlet.from_array(A.data, A.desc(sdfg)))\n\nsdfg.add_edge(s0, s1, dp.InterstateEdge('A[0] > 3'))\nsdfg.add_edge(s0, s2, dp.InterstateEdge('A[0] <= 3'))\n\nif __name__ == '__main__':\n print('Toplevel array usage in interstate edge')\n input = np.ndarray([1], np.float32)\n input[0] = 10\n output = np.ndarray([1], np.float32)\n output[0] = 10\n\n sdfg(inp=input, A=output)\n\n exit(0)\n",
"#!/usr/bin/env python\n\nimport argparse\nimport dace\nimport numpy as np\nimport scipy as sp\nfrom mpi4py import MPI\nfrom dace.transformation.dataflow import MPITransformMap\n\nN = dace.symbol('N')\n\n\[email protected](dace.float64, dace.float64[N], dace.float64[N])\ndef axpy(A, X, Y):\n @dace.map(_[0:N])\n def multiplication(i):\n in_A << A\n in_X << X[i]\n in_Y << Y[i]\n out >> Y[i]\n\n out = in_A * in_X + in_Y\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\"N\", type=int, nargs=\"?\", default=24)\n args = vars(parser.parse_args())\n\n N.set(args[\"N\"])\n\n comm = MPI.COMM_WORLD\n rank = comm.Get_rank()\n ranks = comm.Get_size()\n\n if rank == 0:\n print('Scalar-vector multiplication %d (MPI, ranks = %d)' %\n (N.get(), ranks))\n else:\n dace.Config.set('debugprint', value=False)\n\n # Initialize arrays: Randomize A and X, zero Y\n a = dace.float64(np.random.rand())\n x = np.random.rand(N.get()).astype(np.float64)\n y = np.random.rand(N.get()).astype(np.float64)\n regression = (a * x + y)\n\n sdfg = axpy.to_sdfg()\n\n # Transform program to run with MPI\n sdfg.apply_transformations(MPITransformMap)\n\n # Compile MPI program once\n if ranks == 1:\n csdfg = sdfg.compile()\n print('Compiled, exiting')\n exit(0)\n else:\n # Use cached compiled file\n dace.Config.set('compiler', 'use_cache', value=True)\n csdfg = sdfg.compile()\n\n csdfg(A=a, X=x, Y=y, N=N)\n\n # Get range handled by this rank\n partition = N.get() // ranks\n reg = regression[partition * rank:partition * (rank + 1)]\n res = y[partition * rank:partition * (rank + 1)]\n\n diff = np.linalg.norm(reg - res)\n print(\"== Rank %d == Difference:\" % rank, diff)\n if rank == 0:\n print(\"==== Program end ====\")\n exit(0 if diff <= 1e-5 else 1)\n"
] | [
[
"numpy.ndarray"
],
[
"numpy.random.rand",
"numpy.linalg.norm"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [
"1.10",
"1.12",
"1.11",
"1.19",
"1.24",
"1.13",
"1.16",
"1.9",
"1.18",
"1.23",
"1.21",
"1.22",
"1.20",
"1.7",
"1.15",
"1.14",
"1.17",
"1.8"
],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
sixin-zh/kymatio_wph | [
"237c0d2009766cf83b2145420a14d3c6e90dc983",
"237c0d2009766cf83b2145420a14d3c6e90dc983",
"237c0d2009766cf83b2145420a14d3c6e90dc983"
] | [
"kymatio/phaseexp1d/pyscatwave/scatwave/scattering1d/examples/real_signal.py",
"kymatio/phaseharmonics2d/phase_harmonics_k_bump_chunkid_scaleinter.py",
"kymatio/phaseexp1d/phaseexp/solver_hack_phase.py"
] | [
"import torch\nfrom torch.autograd import Variable\nfrom scatwave import Scattering1D\nfrom scatwave import fetch_fsdd\nimport matplotlib.pyplot as plt\nfrom scipy.io import wavfile\nimport numpy as np\nimport os\n\n\ndef loadfile(path_file):\n sr, x = wavfile.read(path_file)\n x = np.asarray(x, dtype='float')\n # make it mono\n if x.ndim > 1:\n smallest_axis = np.argmin(x.shape)\n x = x.mean(axis=smallest_axis)\n x = np.asarray(x, dtype='float')\n x /= np.max(np.abs(x))\n return sr, x\n\n\ndef show_signal(x, s, order0, order1, order2):\n fig, axarr = plt.subplots(4, 1, figsize=(8, 16))\n axarr[0].plot(x.data[0, 0])\n axarr[0].set_title('Original signal')\n axarr[1].plot(s[order0][0])\n axarr[1].set_title('Scattering Order 0')\n axarr[2].imshow(s[order1], aspect='auto')\n axarr[2].set_title('Scattering Order 1')\n axarr[3].imshow(s[order2], aspect='auto')\n axarr[3].set_title('Scattering Order 2')\n plt.show()\n\n\nif __name__ == '__main__':\n # fetch the dataset and get the signal\n info_dataset = fetch_fsdd(base_dir='fsdd', verbose=True)\n filepath = os.path.join(info_dataset['path_dataset'],\n sorted(info_dataset['files'])[0])\n\n # Load the signal\n sr, x = loadfile(filepath)\n x_th = Variable(torch.from_numpy(x).float().unsqueeze(0).unsqueeze(0))\n\n # Prepare the scattering\n T = x_th.shape[-1]\n J = 6\n Q = 16\n scattering = Scattering1D(T, J, Q)\n\n # Get the metadata\n coords = Scattering1D.compute_meta_scattering(J, Q, order2=True)\n order0 = torch.LongTensor([0])\n order1 = torch.LongTensor(\n sorted([cc for cc in coords.keys() if coords[cc]['order'] == '1']))\n order2 = torch.LongTensor(\n sorted([cc for cc in coords.keys() if coords[cc]['order'] == '2']))\n\n # Compute the scattering\n s = scattering.forward(x_th).data.numpy()[0]\n\n # show it\n show_signal(x_th, s, order0, order1, order2)\n",
"# scale interaction cov coefficients\n\n__all__ = ['PhaseHarmonics2d']\n\nimport warnings\nimport math\nimport torch\nimport numpy as np\nimport scipy.io as sio\n#import torch.nn.functional as F\nfrom .backend import cdgmm, Modulus, fft, \\\n Pad, SubInitSpatialMeanC, PhaseHarmonics2, mulcu\nfrom .filter_bank import filter_bank\nfrom .utils import fft2_c2c, ifft2_c2c, periodic_dis\n\nclass PhkScaleInter2d(object):\n def __init__(self, M, N, J, L, delta_j, delta_l, delta_k, nb_chunks, chunk_id, devid=0, filname='bumpsteerableg', filid=1):\n self.M, self.N, self.J, self.L = M, N, J, L # size of image, max scale, number of angles [0,pi]\n self.dj = delta_j # max scale interactions\n self.dl = delta_l # max angular interactions\n self.dk = delta_k #\n self.nb_chunks = nb_chunks # number of chunks to cut whp cov\n self.chunk_id = chunk_id\n self.devid = devid # gpu id\n self.filname = filname\n self.filid = filid\n self.haspsi0 = False\n \n assert( self.chunk_id <= self.nb_chunks ) # chunk_id = 0..nb_chunks-1, are the wph cov\n if self.dl > self.L:\n raise (ValueError('delta_l must be <= L'))\n \n self.pre_pad = False # no padding\n self.cache = False # cache filter bank\n self.build()\n\n def build(self):\n check_for_nan = False # True\n self.modulus = Modulus()\n self.pad = Pad(0, pre_pad = self.pre_pad)\n self.phase_harmonics = PhaseHarmonics2.apply\n self.M_padded, self.N_padded = self.M, self.N\n self.filters_tensor()\n if self.chunk_id < self.nb_chunks:\n self.idx_wph = self.compute_idx()\n self.this_wph = self.get_this_chunk(self.nb_chunks, self.chunk_id)\n self.preselect_filters()\n #print('la1',self.this_wph['la1'])\n #print(self.this_wph['la2'])\n #print(self.this_wph['k1'])\n #print(self.this_wph['k2'])\n self.subinitmean1 = SubInitSpatialMeanC()\n self.subinitmean2 = SubInitSpatialMeanC()\n else:\n self.subinitmeanJ = SubInitSpatialMeanC()\n \n def preselect_filters(self):\n # only use thoses filters in the this_wph list\n M = self.M\n N = self.N\n J = self.J\n L = self.L\n L2 = L*2\n min_la1 = self.this_wph['la1'].min()\n max_la1 = self.this_wph['la1'].max()\n min_la2 = self.this_wph['la2'].min()\n max_la2 = self.this_wph['la2'].max()\n min_la = min(min_la1,min_la2)\n max_la = max(max_la1,max_la2)\n print('this la range',min_la,max_la)\n hatpsi_la = self.hatpsi.view(1,J*L2,M,N,2) # (J,L2,M,N,2) -> (1,J*L2,M,N,2)\n self.hatpsi_pre = hatpsi_la[:,min_la:max_la+1,:,:,:] # Pa = max_la-min_la+1, (1,Pa,M,N,2)\n self.this_wph['la1_pre'] = self.this_wph['la1'] - min_la\n self.this_wph['la2_pre'] = self.this_wph['la2'] - min_la\n \n def filters_tensor(self):\n J = self.J\n L = self.L\n L2 = L*2\n \n assert(self.M == self.N)\n filpath = './matlab/filters/' + self.filname + str(self.filid) + '_fft2d_N'\\\n + str(self.N) + '_J' + str(self.J) + '_L' + str(self.L) + '.mat'\n matfilters = sio.loadmat(filpath)\n print('filter loaded:', filpath)\n \n fftphi = matfilters['filt_fftphi'].astype(np.complex_)\n hatphi = np.stack((np.real(fftphi), np.imag(fftphi)), axis=-1)\n\n if 'filt_fftpsi0' in matfilters:\n fftpsi0 = matfilters['filt_fftpsi0'].astype(np.complex_)\n hatpsi0 = np.stack((np.real(fftpsi0), np.imag(fftpsi0)), axis=-1)\n self.hatpsi0 = torch.FloatTensor(hatpsi0) # (M,N,2)\n self.haspsi0 = True\n \n fftpsi = matfilters['filt_fftpsi'].astype(np.complex_)\n #print(self.hatpsi.dtype)\n hatpsi = np.stack((np.real(fftpsi), np.imag(fftpsi)), axis=-1)\n \n self.hatpsi = torch.FloatTensor(hatpsi) # (J,L2,M,N,2)\n self.hatphi = torch.FloatTensor(hatphi) # (M,N,2)\n \n #print('filter shapes')\n 
#print(self.hatpsi.shape)\n #print(self.hatphi.shape)\n\n def get_this_chunk(self, nb_chunks, chunk_id):\n # cut self.idx_wph into smaller pieces\n #print('la1 shape',self.idx_wph['la1'].shape)\n\n nb_cov = len(self.idx_wph['la1'])\n #print('nb cov is', nb_cov)\n max_chunk = nb_cov // nb_chunks\n nb_cov_chunk = np.zeros(nb_chunks,dtype=np.int32)\n for idxc in range(nb_chunks):\n if idxc < nb_chunks-1:\n nb_cov_chunk[idxc] = int(max_chunk)\n else:\n nb_cov_chunk[idxc] = int(nb_cov - max_chunk*(nb_chunks-1))\n assert(nb_cov_chunk[idxc] > 0)\n\n this_wph = dict()\n offset = int(0)\n for idxc in range(nb_chunks):\n if idxc == chunk_id:\n this_wph['la1'] = self.idx_wph['la1'][offset:offset+nb_cov_chunk[idxc]]\n this_wph['la2'] = self.idx_wph['la2'][offset:offset+nb_cov_chunk[idxc]]\n this_wph['k1'] = self.idx_wph['k1'][:,offset:offset+nb_cov_chunk[idxc],:,:]\n this_wph['k2'] = self.idx_wph['k2'][:,offset:offset+nb_cov_chunk[idxc],:,:]\n offset = offset + nb_cov_chunk[idxc]\n\n print('this chunk', chunk_id, ' size is ', len(this_wph['la1']), ' among ', nb_cov)\n\n return this_wph\n\n def compute_ncoeff(self):\n # return number of mean (nb1) and cov (nb2) of all idx\n L = self.L\n L2 = L*2\n J = self.J\n dj = self.dj\n dl = self.dl\n dk = self.dk\n \n hit_nb1 = dict() # hash table\n hit_nb2 = dict() # value counts either real or complex numbers\n \n # k1 = 0\n # k2 = 0,1,2\n # j1+1 <= j2 <= min(j1+dj,J-1)\n # skip nb1 counted in pershift\n for j1 in range(J):\n for ell1 in range(L2):\n k1 = 0\n for j2 in range(j1+1,min(j1+dj+1,J)):\n for ell2 in range(L2):\n if periodic_dis(ell1, ell2, L2) <= dl:\n #hit_nb1[(j1,k1,ell1)]=1\n for k2 in range(3):\n if k2==0:\n #hit_nb1[(j2,k2,ell2)]=1\n hit_nb2[(j1,k1,ell1,j2,k2,ell2)]=1\n elif k2==1:\n #hit_nb1[(j2,k2,ell2)]=0\n hit_nb2[(j1,k1,ell1,j2,k2,ell2)]=2\n else:\n hit_nb1[(j2,k2,ell2)]=2\n hit_nb2[(j1,k1,ell1,j2,k2,ell2)]=2\n\n # k1 = 1\n # k2 = 2^(j2-j1)±dk\n # j1+1 <= j2 <= min(j1+dj,J-1)\n # skip nb1 counted in pershift\n for j1 in range(J):\n for ell1 in range(L2):\n k1 = 1\n for j2 in range(j1+1,min(j1+dj+1,J)):\n for ell2 in range(L2):\n if periodic_dis(ell1, ell2, L2) <= dl:\n #hit_nb1[(j1,k1,ell1)]=0\n for k2 in range(max(0,2**(j2-j1)-dk),2**(j2-j1)+dk+1):\n if k2==0:\n #hit_nb1[(j2,k2,ell2)]=1\n hit_nb2[(j1,k1,ell1,j2,k2,ell2)]=1\n elif k2==1:\n #hit_nb1[(j2,k2,ell2)]=0\n hit_nb2[(j1,k1,ell1,j2,k2,ell2)]=2\n else:\n hit_nb1[(j2,k2,ell2)]=2\n hit_nb2[(j1,k1,ell1,j2,k2,ell2)]=2\n\n #print('hit nb1 values',list(hit_nb1.values()))\n nb1 = np.array(list(hit_nb1.values()), dtype=int).sum()\n nb2 = np.array(list(hit_nb2.values()), dtype=int).sum()\n\n # plus last phiJ channel\n nb1 += 1\n nb2 += 1\n\n if self.haspsi0:\n nb2 += 1 # plus psi0 channel\n \n return nb1, nb2\n \n def compute_idx(self):\n L = self.L\n L2 = L*2\n J = self.J\n dj = self.dj\n dl = self.dl\n dk = self.dk\n\n idx_la1 = []\n idx_la2 = []\n idx_k1 = []\n idx_k2 = []\n\n # k1 = 0\n # k2 = 0,1,2\n # j1+1 <= j2 <= min(j1+dj,J-1)\n for j1 in range(J):\n for ell1 in range(L2):\n k1 = 0\n for j2 in range(j1+1,min(j1+dj+1,J)):\n for ell2 in range(L2):\n if periodic_dis(ell1, ell2, L2) <= dl:\n for k2 in range(3):\n idx_la1.append(L2*j1+ell1)\n idx_la2.append(L2*j2+ell2)\n idx_k1.append(k1)\n idx_k2.append(k2)\n\n # k1 = 1\n # k2 = 2^(j2-j1)±dk\n # j1+1 <= j2 <= min(j1+dj,J-1)\n for j1 in range(J):\n for ell1 in range(L2):\n k1 = 1\n for j2 in range(j1+1,min(j1+dj+1,J)):\n for ell2 in range(L2):\n if periodic_dis(ell1, ell2, L2) <= dl:\n for k2 in 
range(max(0,2**(j2-j1)-dk),2**(j2-j1)+dk+1):\n idx_la1.append(L2*j1+ell1)\n idx_la2.append(L2*j2+ell2)\n idx_k1.append(k1)\n idx_k2.append(k2)\n\n idx_wph = dict()\n idx_wph['la1'] = torch.tensor(idx_la1).type(torch.long)\n idx_wph['k1'] = torch.tensor(idx_k1).type(torch.long).float().unsqueeze(0).unsqueeze(-1).unsqueeze(-1)\n idx_wph['la2'] = torch.tensor(idx_la2).type(torch.long)\n idx_wph['k2'] = torch.tensor(idx_k2).type(torch.long).float().unsqueeze(0).unsqueeze(-1).unsqueeze(-1)\n\n return idx_wph\n\n def _type(self, _type, devid=None):\n if devid is not None:\n if self.chunk_id < self.nb_chunks:\n self.hatpsi_pre = self.hatpsi_pre.to(devid)\n else:\n self.hatphi = self.hatphi.to(devid)\n if self.haspsi0:\n self.hatpsi0 = self.hatpsi0.to(devid)\n else:\n if self.chunk_id < self.nb_chunks:\n self.hatpsi_pre = self.hatpsi_pre.type(_type)\n else:\n self.hatphi = self.hatphi.type(_type)\n if self.haspsi0:\n self.hatpsi0 = self.hatpsi0.type(_type)\n #print('in _type',type(self.hatpsi))\n self.pad.padding_module.type(_type)\n return self\n\n def cuda(self):\n \"\"\"\n Moves tensors to the GPU\n \"\"\"\n devid = self.devid\n print('call cuda with devid=', devid)\n assert(devid>=0)\n if self.chunk_id < self.nb_chunks:\n self.this_wph['k1'] = self.this_wph['k1'].type(torch.cuda.FloatTensor).to(devid)\n self.this_wph['k2'] = self.this_wph['k2'].type(torch.cuda.FloatTensor).to(devid)\n self.this_wph['la1_pre'] = self.this_wph['la1_pre'].type(torch.cuda.LongTensor).to(devid)\n self.this_wph['la2_pre'] = self.this_wph['la2_pre'].type(torch.cuda.LongTensor).to(devid)\n return self._type(torch.cuda.FloatTensor, devid)\n\n def cpu(self):\n \"\"\"\n Moves tensors to the CPU\n \"\"\"\n print('call cpu')\n return self._type(torch.FloatTensor)\n\n def forward(self, input):\n J = self.J\n M = self.M\n N = self.N\n L2 = self.L*2\n dj = self.dj\n dl = self.dl\n pad = self.pad\n\n # denote\n # nb=batch number\n # nc=number of color channels\n # input: (nb,nc,M,N)\n x_c = pad(input) # add zeros to imag part -> (nb,nc,M,N,2)\n hatx_c = fft2_c2c(x_c) # fft2 -> (nb,nc,M,N,2)\n #print('nbchannels',nb_channels)\n if self.chunk_id < self.nb_chunks:\n nb = hatx_c.shape[0]\n nc = hatx_c.shape[1]\n hatpsi_pre = self.hatpsi_pre # hatpsi_la[:,self.min_la:self.max_la+1,:,:,:] # Pa = max_la-min_la+1, (1,Pa,M,N,2)\n assert(nb==1 and nc==1) # for submeanC\n nb_channels = self.this_wph['la1_pre'].shape[0]\n Sout = input.new(nb, nc, nb_channels, \\\n 1, 1, 2) # (nb,nc,nb_channels,1,1,2)\n for idxb in range(nb):\n for idxc in range(nc):\n hatx_bc = hatx_c[idxb,idxc,:,:,:] # (M,N,2)\n # print('hatpsi_la is cuda?',hatpsi_la.is_cuda)\n # print('hatx_bc is cuda?',hatx_bc.is_cuda)\n hatxpsi_bc = cdgmm(hatpsi_pre, hatx_bc) # (1,Pa,M,N,2)\n # print( 'hatxpsi_bc shape', hatxpsi_bc.shape )\n xpsi_bc = ifft2_c2c(hatxpsi_bc)\n # reshape to (1,J*L,M,N,2)\n # select la1, et la2, P_c = number of |la1| in this chunk\n xpsi_bc_la1 = torch.index_select(xpsi_bc, 1, self.this_wph['la1_pre']) # - self.min_la) # (1,P_c,M,N,2)\n xpsi_bc_la2 = torch.index_select(xpsi_bc, 1, self.this_wph['la2_pre']) #- self.min_la) # (1,P_c,M,N,2)\n # print('xpsi la1 shape', xpsi_bc_la1.shape)\n # print('xpsi la2 shape', xpsi_bc_la2.shape)\n k1 = self.this_wph['k1']\n k2 = self.this_wph['k2']\n xpsi_bc_la1k1 = self.phase_harmonics(xpsi_bc_la1, k1) # (1,P_c,M,N,2)\n xpsi_bc_la2k2 = self.phase_harmonics(xpsi_bc_la2, -k2) # (1,P_c,M,N,2)\n # sub spatial mean along M and N\n xpsi0_bc_la1k1 = self.subinitmean1(xpsi_bc_la1k1) # (1,P_c,M,N,2)\n xpsi0_bc_la2k2 = 
self.subinitmean2(xpsi_bc_la2k2) # (1,P_c,M,N,2)\n # compute mean spatial\n corr_xpsi_bc = mulcu(xpsi0_bc_la1k1,xpsi0_bc_la2k2) # (1,P_c,M,N,2)\n corr_bc = torch.mean(torch.mean(corr_xpsi_bc,-2,True),-3,True) # (1,P_c,1,1,2), better numerical presision?!\n Sout[idxb,idxc,:,:,:,:] = corr_bc[0,:,:,:,:]\n\n else:\n # ADD 1 chennel for spatial phiJ\n # add l2 phiJ to last channel\n hatxphi_c = cdgmm(hatx_c, self.hatphi) # (nb,nc,M,N,2)\n xphi_c = ifft2_c2c(hatxphi_c)\n # submean from spatial M N\n xphi0_c = self.subinitmeanJ(xphi_c)\n xphi0_mod = self.modulus(xphi0_c) # (nb,nc,M,N,2)\n xphi0_mod2 = mulcu(xphi0_mod,xphi0_mod) # (nb,nc,M,N,2)\n nb = hatx_c.shape[0]\n nc = hatx_c.shape[1]\n if self.haspsi0:\n #print('compute psi0')\n Sout = input.new(nb, nc, 2, 1, 1, 2)\n Sout[:,:,0,:,:,:] = torch.mean(torch.mean(xphi0_mod2,-2,True),-3,True)\n hatxpsi00_c = cdgmm(hatx_c, self.hatpsi0)\n xpsi00_c = ifft2_c2c(hatxpsi00_c)\n xpsi00_mod = self.modulus(xpsi00_c) # (nb,nc,M,N,2)\n xpsi00_mod2 = mulcu(xpsi00_mod,xpsi00_mod) # (nb,nc,M,N,2)\n Sout[:,:,1,:,:,:] = torch.mean(torch.mean(xpsi00_mod2,-2,True),-3,True)\n else:\n Sout = torch.mean(torch.mean(xphi0_mod2,-2,True),-3,True)\n\n return Sout\n \n def compute_mean(self,input):\n J = self.J\n M = self.M\n N = self.N\n L2 = self.L*2\n dj = self.dj\n dl = self.dl\n pad = self.pad\n\n x_c = pad(input) # add zeros to imag part -> (nb,nc,M,N,2)\n hatx_c = fft2_c2c(x_c) # fft2 -> (nb,nc,M,N,2)\n #print('nbchannels',nb_channels)\n if self.chunk_id < self.nb_chunks:\n nb = hatx_c.shape[0]\n nc = hatx_c.shape[1]\n hatpsi_la = self.hatpsi # (J,L2,M,N,2)\n assert(nb==1 and nc==1) # for submeanC\n nb_channels = self.this_wph['la1'].shape[0]\n Sout1 = input.new(nb, nc, nb_channels, \\\n 1, 1, 2) # (nb,nc,nb_channels,1,1,2)\n Sout2 = input.new(nb, nc, nb_channels, \\\n 1, 1, 2) # (nb,nc,nb_channels,1,1,2)\n for idxb in range(nb):\n for idxc in range(nc):\n hatx_bc = hatx_c[idxb,idxc,:,:,:] # (M,N,2)\n # print('hatx_bc is cuda?',hatx_bc.is_cuda)\n hatxpsi_bc = cdgmm(hatpsi_la, hatx_bc) # (J,L2,M,N,2)\n # print( 'hatxpsi_bc shape', hatxpsi_bc.shape )\n xpsi_bc = ifft2_c2c(hatxpsi_bc)\n # reshape to (1,J*L,M,N,2)\n xpsi_bc = xpsi_bc.view(1,J*L2,M,N,2)\n\n # select la1, et la2, P_c = number of |la1| in this chunk\n xpsi_bc_la1 = torch.index_select(xpsi_bc, 1, self.this_wph['la1']) # (1,P_c,M,N,2)\n xpsi_bc_la2 = torch.index_select(xpsi_bc, 1, self.this_wph['la2']) # (1,P_c,M,N,2)\n #print('xpsi la1 shape', xpsi_bc_la1.shape)\n #print('xpsi la2 shape', xpsi_bc_la2.shape)\n k1 = self.this_wph['k1']\n k2 = self.this_wph['k2']\n xpsi_bc_la1k1 = self.phase_harmonics(xpsi_bc_la1, k1) # (1,P_c,M,N,2)\n xpsi_bc_la2k2 = self.phase_harmonics(xpsi_bc_la2, -k2) # (1,P_c,M,N,2)\n mean1_bc = torch.mean(torch.mean(xpsi_bc_la1k1,-2,True),-3,True) # (1,P_c,1,1,2)\n mean2_bc = torch.mean(torch.mean(xpsi_bc_la2k2,-2,True),-3,True) # (1,P_c,1,1,2)\n Sout1[idxb,idxc,:,:,:,:] = mean1_bc[0,:,:,:,:]\n Sout2[idxb,idxc,:,:,:,:] = mean2_bc[0,:,:,:,:]\n Sout = torch.stack((Sout1,Sout2),dim=0)\n else:\n hatxphi_c = cdgmm(hatx_c, self.hatphi) # (nb,nc,M,N,2)\n xpsi_c = ifft2_c2c(hatxphi_c) # (nb,nc,M,N,2)\n Sout = torch.mean(torch.mean(xpsi_c,-2,True),-3,True) # (nb,nc,1,1,2)\n\n return Sout\n\n def __call__(self, input):\n return self.forward(input)\n",
"import sys\nif __name__ == \"__main__\":\n sys.path.append(\"../pyscatwave\")\nfrom os.path import join\nfrom math import sqrt\nimport numpy as np\nimport scipy as sp\nimport scipy.optimize as opt\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.autograd import Variable, grad\nfrom termcolor import colored\nimport complex_utils as cplx\nfrom metric import PhaseHarmonicCov, PhaseHarmonicCovNonLinCoeff, PhaseHarmonicPruned\nimport make_figs as signal\nfrom time import time\nfrom utils import cuda_available, make_dir_if_not_there\nfrom itertools import product\nfrom global_const import RESPATH, DATAPATH\nfrom tqdm import tqdm\nfrom loss import PSNR\nimport librosa.core\n\n\nclass SolverHack(nn.Module):\n def __init__(self, embedding, x0, loss_fn, cuda=False):\n super(SolverHack, self).__init__()\n\n self.embedding = embedding\n self.x0 = x0\n self.loss = loss_fn\n self.is_cuda = False\n self.res = None, None\n\n if cuda:\n self.cuda()\n\n # compute embedding and loss at initial guess\n x0_torch = self.format(self.x0, requires_grad=False)\n self.emb0 = self.embedding(x0_torch)\n # print(\"emb0 shape:\", self.emb0[0].shape, self.emb0[1].shape)\n self.err0 = self.loss(self.emb0, None)\n\n def cuda(self):\n if not self.is_cuda:\n self.is_cuda = True\n self.embedding = self.embedding.cuda()\n if 'emb0' in self.__dict__:\n self.emb0 = self.emb0.cuda()\n return self\n\n def cpu(self):\n if self.is_cuda:\n self.is_cuda = False\n self.embedding = self.embedding.cpu()\n if 'emb0' in self.__dict__:\n self.emb0 = self.emb0.cpu()\n return self\n\n def format(self, x, requires_grad=True):\n \"\"\"Transforms x into a compatible format for the embedding.\"\"\"\n\n x = cplx.from_numpy(x[None, None], tensor=torch.DoubleTensor)\n if self.is_cuda:\n x = x.cuda()\n x = Variable(x, requires_grad=requires_grad)\n return x\n\n def joint(self, x):\n # format x and set gradient to 0\n x_torch = self.format(x)\n if x_torch.grad is not None:\n x_torch.grad.data.zero_()\n\n # compute embedding\n emb = self.embedding(x_torch)\n\n # compute loss function\n loss = self.loss(emb, self.emb0) / self.err0\n\n # compute gradient\n grad_x, = grad([loss], [x_torch], retain_graph=True)\n\n # only get the real part\n grad_x = grad_x[0, 0, ..., 0]\n\n # move to numpy\n grad_x = grad_x.contiguous().detach().data.cpu().numpy()\n loss = loss.detach().data.cpu().numpy()\n\n self.res = loss, grad_x\n\n return loss, grad_x\n\n\nclass MSELoss(nn.Module):\n def __init__(self, phi):\n super(MSELoss, self).__init__()\n self.phi = phi\n\n def forward(self, input, target):\n i_f, i_s = input\n if target is None:\n t_f = torch.zeros_like(i_f)\n t_s = torch.zeros_like(i_s)\n else:\n t_f, t_s = target\n\n s_gap = i_s - t_s\n f_gap = i_f - t_f\n\n sel = torch.index_select\n\n gap = []\n start = 0\n for xi_idx, ks in zip(self.phi.xi_idx, self.phi.ks):\n f_gap0 = sel(f_gap, 2, xi_idx[:, 0])\n f_gap1 = sel(f_gap, 2, xi_idx[:, 1])\n t_f0 = sel(t_f, 2, xi_idx[:, 0])\n t_f1 = sel(t_f, 2, xi_idx[:, 1])\n\n # set to zero first order coefficients with harmonic k > 0\n idx_null0 = (ks[..., 0] > 0).unsqueeze(0).unsqueeze(0).unsqueeze(-1)\n idx_null1 = (ks[..., 1] > 0).unsqueeze(0).unsqueeze(0).unsqueeze(-1)\n f_gap0 = f_gap0.masked_fill(idx_null0, 0)\n f_gap1 = f_gap1.masked_fill(idx_null1, 0)\n t_f0 = t_f0.masked_fill(idx_null0, 0)\n t_f1 = t_f1.masked_fill(idx_null1, 0)\n\n err_fst0 = cplx.mul(f_gap0, cplx.conjugate(t_f1))\n err_fst1 = cplx.mul(t_f0, cplx.conjugate(f_gap1))\n\n l = xi_idx.size(0)\n s_gap_l = s_gap[:, :, 
start:start + l]\n start += l\n\n g = s_gap_l - err_fst0 - err_fst1\n gap.append(g)\n gap = torch.cat(gap, dim=2)\n return self.mse_norm(gap)\n\n @staticmethod\n def mse_norm(x):\n sq_err = (x ** 2).sum(-1)\n return torch.mean(sq_err.view(-1))\n\n\nclass SmallEnoughException(Exception):\n pass\n\n\nclass CheckConvCriterion:\n def __init__(self, phi, tol):\n super(CheckConvCriterion, self).__init__()\n self.phi = phi\n self.tol = tol\n self.result = None\n self.next_milestone = None\n self.counter = 0\n self.err = None\n self.gerr = None\n self.tic = time()\n self.logs = []\n self.glogs = []\n\n def __call__(self, xk):\n # err, grad_xk = self.phi.joint(xk)\n err, grad_xk = self.phi.res\n gerr = np.linalg.norm(grad_xk, ord=float('inf'))\n self.logs.append(float(err))\n self.glogs.append(float(gerr))\n self.err = err\n self.gerr = gerr\n self.counter += 1\n\n if self.next_milestone is None:\n # self.next_milestone = 10 ** (np.floor(np.log10(gerr)))\n self.next_milestone = gerr\n\n if err <= self.tol:\n self.result = xk\n raise SmallEnoughException()\n elif gerr <= self.next_milestone:\n delta_t = time() - self.tic\n tqdm.write(colored(\"{:6}it in {} ( {:.2f}it/s ) ........ {:.3E} -- {:.3E}\".format(\n self.counter, hms_string(delta_t), self.counter / delta_t,\n err, gerr\n ), 'blue'))\n self.next_milestone /= 2.\n # self.next_milestone /= 10\n\n\ndef hms_string(sec_elapsed):\n h = int(sec_elapsed / (60 * 60))\n m = int((sec_elapsed % (60 * 60)) / 60)\n s = sec_elapsed % 60.\n return \"{}:{:>02}:{:>05.2f}\".format(h, m, s)\n\n\ndef offset_greed_search_psnr(x, x0):\n T = np.size(x)\n max_psnr, best_offset = float('-inf'), None\n for offset in range(T):\n x1 = np.roll(x, offset)\n psnr = PSNR(x1, x0, 2.)\n if psnr > max_psnr:\n max_psnr, best_offset = psnr, offset\n\n return best_offset\n\n\nif __name__ == \"__main__\":\n import matplotlib.pyplot as plt\n\n n_exp = 1\n n_reconstr = 8\n seeds = [np.random.randint(2 ** 20) for _ in range(n_reconstr)]\n maxiter = 10000\n max_chunk = None # 2000\n # data = 'cantor'\n data = 'lena'\n # data = 'local_smooth'\n # data = 'single_freq_modulated_bis'\n\n compact = \"smooth\"\n\n overwrite_save_dir = 'trash_error'\n # overwrite_save_dir = None\n\n do_cuda = True # if True, will check if CUDA is available et use the GPU accordingly\n find_offset_x = True # set to False if there is no point translating x to match x0\n\n # facts = [0.1]\n facts = [1.]\n # facts = [1., .1, .01, .001]\n\n print()\n cuda = cuda_available()\n if do_cuda or not cuda:\n print(\"CUDA available: {}\\n\".format(cuda))\n else:\n print(\"CUDA denied\\n\".format(cuda))\n cuda = False\n\n set_to_zero = True\n\n nscales_l = [9]\n \n # Qs = [2]\n Qs = [1, 2]\n\n # nocts = [8]\n nocts = [1, 2, 3, 4, 5, 6, 7, 8]\n\n delta_k = [0]\n num_k_modulus = 3\n\n # wavelet_types = [\"bump_steerable\", \"battle_lemarie\"]\n # wavelet_types = [\"battle_lemarie\"]\n wavelet_types = [\"bump_steerable\"]\n # high_freqs = list(np.linspace(0.35, 0.5, 16))\n high_freqs = [0.425]\n\n exp_desc = colored('Experiments', 'yellow')\n for exp in range(n_exp):\n T = 1024\n if data == 'lena':\n # line_idx = int(np.random.randint(512))\n line_idx = 448\n x0_raw, line_idx = signal.lena_line(line_idx=line_idx, compact=compact)\n extra_info = {'line_idx': line_idx, 'border': compact}\n save_dir = data + '{}{}_pruned'.format(line_idx, compact)\n elif data == 'data':\n # filename = 'applause_2.0s_8192.wav'\n filename = 'flute_2.0s_8192.wav'\n load_path = join(DATAPATH, \"gen_phaseexp_inv\", filename)\n x0_raw, _ = 
librosa.core.load(load_path)\n rate = int(filename.split('.')[-2].split('_')[-1])\n extra_info = {'sr': rate}\n print(colored(\"\\nSignal info: size {} at rate {}\".format(x0_raw.shape, rate), 'red'))\n x0_raw = torch.Tensor(x0_raw[None, None, :])\n save_dir = '.'.join(filename.split('.')[:-1])\n elif data == 'local_smooth':\n n_set = 10\n x0_raw = signal.locally_smooth(T, n_set, compact=compact)\n extra_info = {'n_set': n_set, 'border': compact}\n save_dir = 'locallysmooth{}{}_pruned_{}'.format(n_set, compact, int(np.random.randint(100000)))\n elif data == 'single_freq_modulated':\n per0, per1 = 11., 127.\n x0_raw = signal.single_freq_modulated(T, per0=per0, per1=per1)\n extra_info = {'per0': per0, 'per1': per1}\n save_dir = 'single_freq_modulated'\n elif data == 'single_freq_modulated_bis':\n per0, per1 = 5., 127.\n x0_raw = signal.single_freq_modulated_bis(T, per0=per0, per1=per1, compact=\"padd\")\n extra_info = {'per0': per0, 'per1': per1, 'border': compact}\n save_dir = 'single_freq_modulated_bis_{}'.format(compact)\n elif data == 'cantor':\n a1, a2 = 1/3, 2/3 # size factors for left / right cantors\n b1, b2 = 0.5, 0.5 # weight factors for left / right cantors\n x0_raw = signal.make_cantor(T, a1, a2, b1, b2, zero_mean=False)\n extra_info = {'a1': a1, 'a2': a2, 'b1': b1, 'b2': b2}\n save_dir = 'cantor_pruned_a1{}a2{}b1{}b2{}'.format(\n np.round(a1, 2), np.round(a2, 2), np.round(b1, 2), np.round(b2, 2)\n )\n else:\n raise ValueError(\"Unknown data: '{}'\".format(data))\n\n if overwrite_save_dir is not None:\n save_dir = overwrite_save_dir\n\n # plt.figure()\n # plt.plot(x0_raw.data.cpu().numpy()[0, 0])\n # plt.show()\n\n exp_desc = colored('Experiment {}/{}'.format(exp + 1, n_exp), 'yellow')\n params_order = ('J', 'Q', 'Jmax', 'wav_type', 'fact', 'high_freq')\n for num_exp, seed in enumerate(tqdm(seeds, desc=exp_desc)):\n args = list(product(nscales_l, Qs, nocts, wavelet_types, facts, high_freqs))\n init_desc = 'Parameter set'.format(num_exp + 1, len(seeds))\n with tqdm(args, desc=colored(init_desc, 'yellow'), leave=False) as t:\n for params in t:\n\n nscales, Q, noct, wavelet_type, fact, high_freq = params\n emb_descr = \"Embedding: \" + ' '.join(\n ['{}={}'.format(name, val) for name, val in zip(params_order, params)])\n\n # set random seed\n tqdm.write('Random seed used : {}'.format(seed))\n np.random.seed(seed)\n torch.manual_seed(seed + 1)\n torch.cuda.manual_seed(seed + 2)\n\n # generate data\n x0 = x0_raw.clone()\n\n signal_info = data\n x0 = x0.cpu().numpy()[0, 0]\n x = fact * np.random.randn(*x0.shape) / np.sqrt(x0.size)\n T = x0.shape[-1]\n\n ndiag = noct * Q + 1\n\n phi = PhaseHarmonicPruned(\n nscales, Q, T, wav_type=wavelet_type,\n delta_j=noct, high_freq=high_freq,\n delta_k=delta_k, num_k_modulus=num_k_modulus,\n check_for_nan=False, max_chunk=max_chunk)\n\n # loss_fn = mse_loss\n loss_fn = MSELoss(phi)\n\n function_obj = SolverHack(phi, x0, loss_fn, cuda=cuda)\n fst_order_dim, scd_order_dim = phi.shape() # size of the embedding in complex numbers\n num_coeff = phi.num_coeff() # number of coefficients\n\n check_conv_criterion = CheckConvCriterion(function_obj, 1e-24)\n\n tqdm.write(colored(emb_descr, 'red'))\n tqdm.write(colored(\"Using embedding \" + function_obj.embedding.__class__.__name__, 'red'))\n tqdm.write(colored(\"Embedding using {} coefficients.\".format(num_coeff), 'red'))\n\n wavelet_info = wavelet_type.replace(\"_\", \"-\") + '{:.3f}'.format(high_freq)\n save_name = signal_info + \"{}coeff_{}_{}_N{}_Q{}_init{}_seed{}\".format(\n num_coeff, wavelet_info, 
'MSE', nscales, Q, fact, seed)\n tqdm.write(save_name)\n\n\n tic = time()\n\n method = 'L-BFGS-B'\n # method = 'CG'\n\n func = function_obj.joint\n res = opt.minimize(\n func, x, method=method, jac=True, tol=1e-16,\n callback=check_conv_criterion, options={'maxiter': maxiter}\n )\n final_loss, x_opt, niter, msg = res['fun'], res['x'], res['nit'], res['message']\n\n toc = time()\n final_loss, final_grad = function_obj.joint(x_opt)\n final_gloss = np.linalg.norm(final_grad, ord=float('inf'))\n err_logs = check_conv_criterion.logs\n gerr_logs = check_conv_criterion.glogs\n\n tqdm.write(colored(\" ---- \", 'blue'))\n\n if not isinstance(msg, str):\n msg = msg.decode(\"ASCII\")\n tqdm.write(colored('Optimization Exit Message : ' + msg, 'blue'))\n tqdm.write(colored(\"found parameters in {}s, {} iterations -- {}it/s\".format(\n round(toc - tic, 4), niter, round(niter / (toc - tic), 2)), 'blue'))\n tqdm.write(colored(\" relative error {:.3E}\".format(final_loss), 'blue'))\n tqdm.write(colored(\" relative gradient error {:.3E}\".format(final_gloss), 'blue'))\n tqdm.write(colored(\" x0 norm {:.3E}\".format(float(function_obj.err0.data.cpu().numpy())), 'blue'))\n\n if find_offset_x:\n offset = offset_greed_search_psnr(x_opt, x0)\n x_opt = np.roll(x_opt, offset)\n\n psnr = PSNR(x_opt, x0, 2.) # signal values are always in [-1, 1]\n tqdm.write(colored(\"PSNR : {}\".format(psnr), 'green'))\n else:\n psnr = None\n\n save_path = join(RESPATH, \"gen_phaseexp_inv/\", save_dir)\n make_dir_if_not_there(save_path)\n save_var = {\n 'x0': x0, 'x': x_opt, 'psnr': psnr, 'seed': seed,\n 'N': nscales, 'Q': Q, 'Jmax': noct, 'T': T, 'wav_type': wavelet_type,\n 'final_loss': final_loss, 'num_coeff': num_coeff,\n 'fst_order_dim': fst_order_dim, 'scd_order_dim': scd_order_dim,\n 'high_freq': high_freq, 'data_name': data,\n 'err_logs': err_logs, 'gerr_logs': gerr_logs,\n **extra_info\n }\n npz_path = join(save_path, save_name + \".npz\")\n np.savez(npz_path, **save_var)\n tqdm.write(\"save as '{}'\\n\\n\".format(npz_path))\n\n # plt.figure(figsize=(12, 12))\n # ax = plt.subplot2grid((4, 1), (0, 0), rowspan=3)\n # ax.plot(x0, 'r')\n # ax.plot(x_opt, 'b')\n # ax.set_xticklabels([])\n # ax = plt.subplot2grid((4, 1), (3, 0), rowspan=1)\n # ax.plot(x_opt - x0, 'r')\n # # plt.show()\n # plt.savefig(join(save_path, save_name + '.pdf'))\n\n tqdm.write(\"\\n ----------------------------------------\\n\")\n"
] | [
[
"torch.LongTensor",
"numpy.abs",
"numpy.asarray",
"matplotlib.pyplot.subplots",
"torch.from_numpy",
"numpy.argmin",
"matplotlib.pyplot.show",
"scipy.io.wavfile.read"
],
[
"torch.mean",
"numpy.imag",
"scipy.io.loadmat",
"torch.tensor",
"numpy.real",
"torch.FloatTensor",
"torch.index_select",
"torch.stack",
"numpy.zeros"
],
[
"numpy.savez",
"numpy.sqrt",
"torch.Tensor",
"torch.cat",
"numpy.random.seed",
"torch.manual_seed",
"torch.cuda.manual_seed",
"torch.zeros_like",
"numpy.round",
"numpy.size",
"numpy.random.randint",
"scipy.optimize.minimize",
"numpy.random.randn",
"torch.autograd.grad",
"numpy.roll",
"torch.autograd.Variable"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.7",
"1.0",
"0.10",
"1.2",
"0.14",
"0.19",
"1.5",
"0.12",
"0.17",
"0.13",
"1.6",
"1.4",
"1.9",
"1.3",
"1.10",
"0.15",
"0.18",
"0.16",
"1.8"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.7",
"1.0",
"0.10",
"1.2",
"0.14",
"0.19",
"1.5",
"0.12",
"0.17",
"0.13",
"1.6",
"1.4",
"1.9",
"1.3",
"1.10",
"0.15",
"0.18",
"0.16",
"1.8"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"1.6",
"0.14",
"1.10",
"0.15",
"1.4",
"0.16",
"1.9",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"0.12",
"1.0",
"0.17",
"1.3",
"1.8"
],
"tensorflow": []
}
] |
Fdl1989/TimingofOneShotInterventions | [
"cfd7a5238c06baf77ee465b22392367197969a27",
"cfd7a5238c06baf77ee465b22392367197969a27"
] | [
"Fig4.py",
"Fig1.py"
] | [
"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Feb 29 15:19:36 2020\n@author: Francesco Di Lauro\n@mail: [email protected]\nCopyright 2020 Francesco Di Lauro. All Rights Reserved.\nSee LICENSE file for details\n\"\"\"\nfrom Eulerclasssir import SIR_model\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib import cm\nfrom mpl_toolkits.mplot3d import Axes3D # noqa: F401 unused import\n\nfrom matplotlib import rc\n## for Palatino and other serif fonts use:\n#rc('font',**{'family':'serif','serif':['Palatino']})\nrc('text', usetex=True)\n\n\n\nngroups = 9\ngamma = [1.0]*9\ntauf = 35\nbetaij = np.loadtxt('mixing_baseline.txt', delimiter=',')\n\nc =[0.5]*9\n\ninterventiontime = [1.1]*9\ninterventionduration = [4]*9\n\nSIR = SIR_model(ngroups, gamma, tauf,betaij, betain=0, betaoff=0, seed=1)\n\ny=SIR.sir_intervention( c, [1], interventiontime, interventionduration, nt = 3000, epsilon=0.01, intervention='subgroup_threshold')\n\n#y[:ngroups] is the S_1(t)... S_n(t) susceptible populations evolution,\n#y[ngroups:2*ngroups] \"I(t)\"\n#y[2*ngroups:] \"R(t)\"\n\nt = np.linspace(0,tauf,3000)\nplt.close()\nfig,ax = plt.subplots(3,3, figsize=(5.5,5.5), sharex=True,sharey = True)\nax = ax.ravel()\nplt.subplots_adjust(left=0.1, bottom=0.1, right=0.96, top=0.96, wspace=0.2, hspace=0.2)\n\n#plot I(t)\nfor i,sub in enumerate(ax):\n #S(t)\n #sub.plot(t, y[:,i], color='b')\n #I(t)\n sub.plot(t, y[:,i+ngroups], color='r')\n #R(t)\n #sub.plot(t, y[:,i+2*ngroups], color='g')\n #intervention\n #sub.vlines(SIR.intervention_time[i], 0,np.max(y[:,i+ngroups]))\n sub.set_title(\"sub-population %d\" %(i+1))\n\nfinalsize = np.sum(y[-1:,2*ngroups:])\nax[7].set_xlabel(r\"$t$\", size=11)\nax[3].set_ylabel(r\"$I(t)$\",size=11,labelpad=-2)\nplt.savefig(\"fig4.tiff\",dpi=600)\nplt.savefig(\"fig4.eps\")\n",
"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Mar 11 09:31:01 2021\n\n@author: fra\n\"\"\"\n\n#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Feb 29 20:08:19 2020\n@author: Francesco Di Lauro\n@mail: [email protected]\nCopyright 2020 Francesco Di Lauro. All Rights Reserved.\nSee LICENSE file for details\n\"\"\"\n\nfrom Eulerclasssir import SIR_model\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib import cm\nfrom mpl_toolkits.mplot3d import Axes3D # noqa: F401 unused import\nfrom scipy.interpolate import interp1d\nfrom scipy.integrate import quad\n\nfrom matplotlib import rc\n## for Palatino and other serif fonts use:\n#rc('font',**{'family':'serif','serif':['Palatino']})\nrc('text', usetex=True)\n\n\n\n\n'''\ncode for figure 1\n'''\ndef computetime(sol,tauf,nt, beta):\n t = np.linspace(0,tauf,nt)\n dSdt = - beta[0]*sol[:,0]*sol[:,1] \n dSdtfun = interp1d(t,dSdt)\n t_infection = quad(lambda x: -x*dSdtfun(x),0,t[-1])\n R_infty = sol[-1,-1]\n #print(t_infection[0]/R_infty)\n return t_infection[0]/R_infty\n \n\n\n\n\n\ntauf = 15\nngroups = 1\nbeta =[2]*1\ngamma = [1]\n\nt = np.linspace(0,tauf,3000)\nc =[0.5]\n\ninterventiontime = [1.1]\ninterventionduration = [4]\n\nfig,ax = plt.subplots(1,2, figsize=(6,3.5), sharey = True)\nax = ax.ravel()\nplt.subplots_adjust(wspace=0.1, hspace=0)\n\nSIR = SIR_model(ngroups, gamma, tauf,beta, betain=0, betaoff=0, seed=1)\n\ny=SIR.sir_intervention( c, [0], interventiontime, interventionduration, nt = 3000, epsilon=0.01, intervention='subgroup_threshold')\n\ninfectiontime=computetime(y,tauf,3000,beta)\nax[0].plot(t,y[:,0], color='b')\nax[0].plot(t,y[:,1], color='r')\nax[0].plot(t,y[:,2], color='g')\nax[0].hlines(np.max(y[:,1]),0,15, linestyle='--', color='k')\nax[0].vlines(infectiontime, 0, np.max(y[:,0]), linestyle='--', color='k')\n\nax[0].text(13, 0.5, r'$\\mathbf{A}$', size = 12)\n\nax[0].set_ylabel(r\"$S(t), I(t), R(t)$\", size=10)\nax[0].set_xlabel(r\"$t$\", size=10)\nax[0].set_xlim(0,tauf)\nax[0].set_ylim(0,1)\n\n\n#find attack rate:\nimport scipy.optimize as optimize\n\ndef func(x):\n return 1 - (1-0.01)*np.exp(-2*x)\n\nR_0 = optimize.fixed_point(func,0.5)\nax[0].hlines(R_0,0,15, linestyle='--', color='g')\n#ax[0].text(7,R_0+0.05, r\"$R(\\infty)$\", size =11)\n\n\n\ndef func(x):\n return 1 - (1-0.01)*np.exp(-4*x)\n\nR_0 = optimize.fixed_point(func,0.5)\nax[1].hlines(R_0,0,15, linestyle='--', color='g')\n#ax[1].text(5,R_0+0.05, r\"$R(\\infty)$\", size=11)\n\n\n\n\ntauf = 10\nt = np.linspace(0,tauf,3000)\n\nbeta =[4]*1\nSIR = SIR_model(ngroups, gamma, tauf,beta, betain=0, betaoff=0, seed=1)\n\ny=SIR.sir_intervention( c, [0], interventiontime, interventionduration, nt = 3000, epsilon=0.01, intervention='subgroup_threshold')\ninfectiontime=computetime(y,tauf,3000,beta)\n\nax[1].plot(t,y[:,0], color='b')\nax[1].plot(t,y[:,1], color='r')\nax[1].plot(t,y[:,2], color='g')\nax[1].set_xlabel(r\"$t$\", size=10)\nax[1].text(0.9, 0.5, r'$\\mathbf{B}$', transform=plt.gca().transAxes, size = 12)\nax[1].hlines(np.max(y[:,1]),0,10, linestyle='--', color='k')\nax[1].vlines(infectiontime, 0, np.max(y[:,0]), linestyle='--', color='k')\nax[1].set_xlim(0,tauf)\nax[1].set_ylim(0,1)\n\nplt.savefig(\"fig1.tiff\",dpi=600)\nplt.savefig(\"fig1.eps\")"
] | [
[
"numpy.linspace",
"matplotlib.pyplot.subplots",
"matplotlib.rc",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.close",
"matplotlib.pyplot.subplots_adjust",
"numpy.sum",
"numpy.loadtxt"
],
[
"matplotlib.pyplot.gca",
"numpy.linspace",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.savefig",
"numpy.max",
"scipy.interpolate.interp1d",
"matplotlib.pyplot.subplots_adjust",
"scipy.optimize.fixed_point",
"numpy.exp",
"matplotlib.rc"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"1.6",
"0.14",
"1.10",
"0.15",
"1.4",
"1.3",
"1.9",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"0.12",
"1.0",
"0.17",
"0.16",
"1.8"
],
"tensorflow": []
}
] |
shelleyHLX/bilm_EMLo | [
"7e3f94c80716665a16bfbc2efc2b8f2f32aad553"
] | [
"usage_token.py"
] | [
"'''\nELMo usage example with pre-computed and cached context independent\ntoken representations\n\nBelow, we show usage for SQuAD where each input example consists of both\na question and a paragraph of context.\n'''\n\nimport tensorflow as tf\nimport os\nfrom bilm_model import TokenBatcher, BidirectionalLanguageModel, weight_layers, \\\n dump_token_embeddings\n\n# Our small dataset.\nraw_context = [\n '同日 , 被告人 陈某 被 传唤 归案',\n '被告人 赵某 于 2013 年 4 月 28 日 事发 后 , 向 其 所在单位 投案'\n]\ntokenized_context = [sentence.split() for sentence in raw_context]\ntokenized_question = [\n ['案件', '审理', '过程', '中', ',', '双方', '已', '就', '民事', '赔偿', '部分', '达成', '了', '调解', '协议'],\n ['打', '了', '一', '、', '二', '分钟', ',', '吉某', '指挥', '被', '纠集', '人员', '逃离现场'],\n]\n\n# Create the vocabulary file with all unique tokens and\n# the special <S>, </S> tokens (case sensitive).\nall_tokens = set(['<S>', '</S>'] + tokenized_question[0])\nfor context_sentence in tokenized_context:\n for token in context_sentence:\n all_tokens.add(token)\n\n# vocab_file = './corpus_me/vocab_elmo.txt/'\nvocab_file = '/home/lxp3/PycharmProjects/bilm-tf-master/corpus_me/vocab_elmo.txt'\n\n# with open(vocab_file, 'w') as fout:\n# fout.write('\\n'.join(all_tokens))\n\n# Location of pretrained LM. Here we use the test fixtures.\n\noptions_file = '/home/lxp3/PycharmProjects/bilm-tf-master/try4/options.json' # try/options.json\nweight_file = '/home/lxp3/PycharmProjects/bilm-tf-master/try4/weights.hdf5'\n\n# Dump the token embeddings to a file. Run this once for your dataset.\ntoken_embedding_file = '/home/lxp3/PycharmProjects/bilm-tf-master/bin/8000_vocab_embedding.hdf5'\n# dump_token_embeddings(\n# vocab_file, options_file, weight_file, token_embedding_file\n# )\ntf.reset_default_graph()\n\n# Now we can do inference.\n# Create a TokenBatcher to map text to token ids.\nbatcher = TokenBatcher(vocab_file)\n\n# Input placeholders to the biLM.\ncontext_token_ids = tf.placeholder('int32', shape=(None, None))\nquestion_token_ids = tf.placeholder('int32', shape=(None, None))\n\n# Build the biLM graph.\nbilm = BidirectionalLanguageModel(options_file, weight_file, use_character_inputs=False,\n embedding_weight_file=token_embedding_file)\n\n# Get ops to compute the LM embeddings.\ncontext_embeddings_op = bilm(context_token_ids)\nquestion_embeddings_op = bilm(question_token_ids)\n\n# Get an op to compute ELMo (weighted average of the internal biLM layers)\n# Our SQuAD model includes ELMo at both the input and output layers\n# of the task GRU, so we need 4x ELMo representations for the question\n# and context at each of the input and output.\n# We use the same ELMo weights for both the question and context\n# at each of the input and output.\nelmo_context_input = weight_layers('input', context_embeddings_op, l2_coef=0.0)\nwith tf.variable_scope('', reuse=True):\n # the reuse=True scope reuses weights from the context for the question\n elmo_question_input = weight_layers(\n 'input', question_embeddings_op, l2_coef=0.0\n )\n\n# elmo_context_output = weight_layers(\n# 'output', context_embeddings_op, l2_coef=0.0\n# )\n# with tf.variable_scope('', reuse=True):\n# # the reuse=True scope reuses weights from the context for the question\n# elmo_question_output = weight_layers(\n# 'output', question_embeddings_op, l2_coef=0.0\n# )\n\n\nwith tf.Session() as sess:\n # It is necessary to initialize variables once before running inference.\n sess.run(tf.global_variables_initializer())\n\n # Create batches of data.\n context_ids = batcher.batch_sentences(tokenized_context)\n 
question_ids = batcher.batch_sentences(tokenized_question)\n print(context_ids)\n print(question_ids)\n\n # Compute ELMo representations (here for the input only, for simplicity).\n elmo_context_input_, elmo_question_input_ = sess.run(\n [elmo_context_input['weighted_op'], elmo_question_input['weighted_op']],\n feed_dict={ context_token_ids: context_ids,\n question_token_ids: question_ids}\n )\n\nprint('*'*20, type(elmo_context_input_))\nprint(elmo_context_input_.shape) # (2, 16, 1024)\nprint(elmo_context_input_)\nprint('*'*20, type(elmo_question_input_)) # <class 'numpy.ndarray'>\nprint(elmo_question_input_.shape) # (2, 15, 1024)\nprint(elmo_question_input_)\n"
] | [
[
"tensorflow.placeholder",
"tensorflow.global_variables_initializer",
"tensorflow.reset_default_graph",
"tensorflow.Session",
"tensorflow.variable_scope"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"1.12",
"1.4",
"1.13",
"1.5",
"1.7",
"0.12",
"1.0",
"1.2"
]
}
] |
laurent90git/scipy | [
"bc111c2f56e854b1bf95b208078da525d267ceff"
] | [
"scipy/integrate/_ivp/tests/test_ivp.py"
] | [
"from itertools import product\nfrom numpy.testing import (assert_, assert_allclose,\n assert_equal, assert_no_warnings, suppress_warnings)\nimport pytest\nfrom pytest import raises as assert_raises\nimport numpy as np\nfrom scipy.optimize._numdiff import group_columns\nfrom scipy.integrate import solve_ivp, RK23, RK45, DOP853, Radau, BDF, LSODA\nfrom scipy.integrate import OdeSolution\nfrom scipy.integrate._ivp.common import num_jac\nfrom scipy.integrate._ivp.base import ConstantDenseOutput\nfrom scipy.sparse import coo_matrix, csc_matrix, diags\n\n\ndef fun_zero(t, y):\n return np.zeros_like(y)\n\n\ndef fun_linear(t, y):\n return np.array([-y[0] - 5 * y[1], y[0] + y[1]])\n\n\ndef jac_linear():\n return np.array([[-1, -5], [1, 1]])\n\n\ndef sol_linear(t):\n return np.vstack((-5 * np.sin(2 * t),\n 2 * np.cos(2 * t) + np.sin(2 * t)))\n\n\ndef fun_rational(t, y):\n return np.array([y[1] / t,\n y[1] * (y[0] + 2 * y[1] - 1) / (t * (y[0] - 1))])\n\n\ndef fun_rational_vectorized(t, y):\n return np.vstack((y[1] / t,\n y[1] * (y[0] + 2 * y[1] - 1) / (t * (y[0] - 1))))\n\n\ndef jac_rational(t, y):\n return np.array([\n [0, 1 / t],\n [-2 * y[1] ** 2 / (t * (y[0] - 1) ** 2),\n (y[0] + 4 * y[1] - 1) / (t * (y[0] - 1))]\n ])\n\n\ndef jac_rational_sparse(t, y):\n return csc_matrix([\n [0, 1 / t],\n [-2 * y[1] ** 2 / (t * (y[0] - 1) ** 2),\n (y[0] + 4 * y[1] - 1) / (t * (y[0] - 1))]\n ])\n\n\ndef sol_rational(t):\n return np.asarray((t / (t + 10), 10 * t / (t + 10) ** 2))\n\n\ndef fun_medazko(t, y):\n n = y.shape[0] // 2\n k = 100\n c = 4\n\n phi = 2 if t <= 5 else 0\n y = np.hstack((phi, 0, y, y[-2]))\n\n d = 1 / n\n j = np.arange(n) + 1\n alpha = 2 * (j * d - 1) ** 3 / c ** 2\n beta = (j * d - 1) ** 4 / c ** 2\n\n j_2_p1 = 2 * j + 2\n j_2_m3 = 2 * j - 2\n j_2_m1 = 2 * j\n j_2 = 2 * j + 1\n\n f = np.empty(2 * n)\n f[::2] = (alpha * (y[j_2_p1] - y[j_2_m3]) / (2 * d) +\n beta * (y[j_2_m3] - 2 * y[j_2_m1] + y[j_2_p1]) / d ** 2 -\n k * y[j_2_m1] * y[j_2])\n f[1::2] = -k * y[j_2] * y[j_2_m1]\n\n return f\n\n\ndef medazko_sparsity(n):\n cols = []\n rows = []\n\n i = np.arange(n) * 2\n\n cols.append(i[1:])\n rows.append(i[1:] - 2)\n\n cols.append(i)\n rows.append(i)\n\n cols.append(i)\n rows.append(i + 1)\n\n cols.append(i[:-1])\n rows.append(i[:-1] + 2)\n\n i = np.arange(n) * 2 + 1\n\n cols.append(i)\n rows.append(i)\n\n cols.append(i)\n rows.append(i - 1)\n\n cols = np.hstack(cols)\n rows = np.hstack(rows)\n\n return coo_matrix((np.ones_like(cols), (cols, rows)))\n\n\ndef fun_complex(t, y):\n return -y\n\n\ndef jac_complex(t, y):\n return -np.eye(y.shape[0])\n\n\ndef jac_complex_sparse(t, y):\n return csc_matrix(jac_complex(t, y))\n\n\ndef sol_complex(t):\n y = (0.5 + 1j) * np.exp(-t)\n return y.reshape((1, -1))\n\n\ndef compute_error(y, y_true, rtol, atol):\n e = (y - y_true) / (atol + rtol * np.abs(y_true))\n return np.linalg.norm(e, axis=0) / np.sqrt(e.shape[0])\n\n\ndef test_integration():\n rtol = 1e-3\n atol = 1e-6\n y0 = [1/3, 2/9]\n\n for vectorized, method, t_span, jac in product(\n [False, True],\n ['RK23', 'RK45', 'DOP853', 'Radau', 'BDF', 'LSODA'],\n [[5, 9], [5, 1]],\n [None, jac_rational, jac_rational_sparse]):\n\n if vectorized:\n fun = fun_rational_vectorized\n else:\n fun = fun_rational\n\n with suppress_warnings() as sup:\n sup.filter(UserWarning,\n \"The following arguments have no effect for a chosen \"\n \"solver: `jac`\")\n res = solve_ivp(fun, t_span, y0, rtol=rtol,\n atol=atol, method=method, dense_output=True,\n jac=jac, vectorized=vectorized)\n assert_equal(res.t[0], t_span[0])\n 
assert_(res.t_events is None)\n assert_(res.y_events is None)\n assert_(res.success)\n assert_equal(res.status, 0)\n\n if method == 'DOP853':\n # DOP853 spends more functions evaluation because it doesn't\n # have enough time to develop big enough step size.\n assert_(res.nfev < 50)\n else:\n assert_(res.nfev < 40)\n\n if method in ['RK23', 'RK45', 'DOP853', 'LSODA']:\n assert_equal(res.njev, 0)\n assert_equal(res.nlu, 0)\n else:\n assert_(0 < res.njev < 3)\n assert_(0 < res.nlu < 10)\n\n y_true = sol_rational(res.t)\n e = compute_error(res.y, y_true, rtol, atol)\n assert_(np.all(e < 5))\n\n tc = np.linspace(*t_span)\n yc_true = sol_rational(tc)\n yc = res.sol(tc)\n\n e = compute_error(yc, yc_true, rtol, atol)\n assert_(np.all(e < 5))\n\n tc = (t_span[0] + t_span[-1]) / 2\n yc_true = sol_rational(tc)\n yc = res.sol(tc)\n\n e = compute_error(yc, yc_true, rtol, atol)\n assert_(np.all(e < 5))\n\n # LSODA for some reasons doesn't pass the polynomial through the\n # previous points exactly after the order change. It might be some\n # bug in LSOSA implementation or maybe we missing something.\n if method != 'LSODA':\n assert_allclose(res.sol(res.t), res.y, rtol=1e-15, atol=1e-15)\n\n\ndef test_integration_complex():\n rtol = 1e-3\n atol = 1e-6\n y0 = [0.5 + 1j]\n t_span = [0, 1]\n tc = np.linspace(t_span[0], t_span[1])\n for method, jac in product(['RK23', 'RK45', 'DOP853', 'BDF'],\n [None, jac_complex, jac_complex_sparse]):\n with suppress_warnings() as sup:\n sup.filter(UserWarning,\n \"The following arguments have no effect for a chosen \"\n \"solver: `jac`\")\n res = solve_ivp(fun_complex, t_span, y0, method=method,\n dense_output=True, rtol=rtol, atol=atol, jac=jac)\n\n assert_equal(res.t[0], t_span[0])\n assert_(res.t_events is None)\n assert_(res.y_events is None)\n assert_(res.success)\n assert_equal(res.status, 0)\n\n if method == 'DOP853':\n assert res.nfev < 35\n else:\n assert res.nfev < 25\n\n if method == 'BDF':\n assert_equal(res.njev, 1)\n assert res.nlu < 6\n else:\n assert res.njev == 0\n assert res.nlu == 0\n\n y_true = sol_complex(res.t)\n e = compute_error(res.y, y_true, rtol, atol)\n assert np.all(e < 5)\n\n yc_true = sol_complex(tc)\n yc = res.sol(tc)\n e = compute_error(yc, yc_true, rtol, atol)\n\n assert np.all(e < 5)\n\n\ndef test_integration_sparse_difference():\n n = 200\n t_span = [0, 20]\n y0 = np.zeros(2 * n)\n y0[1::2] = 1\n sparsity = medazko_sparsity(n)\n\n for method in ['BDF', 'Radau']:\n res = solve_ivp(fun_medazko, t_span, y0, method=method,\n jac_sparsity=sparsity)\n\n assert_equal(res.t[0], t_span[0])\n assert_(res.t_events is None)\n assert_(res.y_events is None)\n assert_(res.success)\n assert_equal(res.status, 0)\n\n assert_allclose(res.y[78, -1], 0.233994e-3, rtol=1e-2)\n assert_allclose(res.y[79, -1], 0, atol=1e-3)\n assert_allclose(res.y[148, -1], 0.359561e-3, rtol=1e-2)\n assert_allclose(res.y[149, -1], 0, atol=1e-3)\n assert_allclose(res.y[198, -1], 0.117374129e-3, rtol=1e-2)\n assert_allclose(res.y[199, -1], 0.6190807e-5, atol=1e-3)\n assert_allclose(res.y[238, -1], 0, atol=1e-3)\n assert_allclose(res.y[239, -1], 0.9999997, rtol=1e-2)\n\n\ndef test_integration_const_jac():\n rtol = 1e-3\n atol = 1e-6\n y0 = [0, 2]\n t_span = [0, 2]\n J = jac_linear()\n J_sparse = csc_matrix(J)\n\n for method, jac in product(['Radau', 'BDF'], [J, J_sparse]):\n res = solve_ivp(fun_linear, t_span, y0, rtol=rtol, atol=atol,\n method=method, dense_output=True, jac=jac)\n assert_equal(res.t[0], t_span[0])\n assert_(res.t_events is None)\n assert_(res.y_events is None)\n 
assert_(res.success)\n assert_equal(res.status, 0)\n\n assert_(res.nfev < 100)\n assert_equal(res.njev, 0)\n assert_(0 < res.nlu < 15)\n\n y_true = sol_linear(res.t)\n e = compute_error(res.y, y_true, rtol, atol)\n assert_(np.all(e < 10))\n\n tc = np.linspace(*t_span)\n yc_true = sol_linear(tc)\n yc = res.sol(tc)\n\n e = compute_error(yc, yc_true, rtol, atol)\n assert_(np.all(e < 15))\n\n assert_allclose(res.sol(res.t), res.y, rtol=1e-14, atol=1e-14)\n\n\[email protected]\[email protected]('method', ['Radau', 'BDF', 'LSODA'])\ndef test_integration_stiff(method):\n rtol = 1e-6\n atol = 1e-6\n y0 = [1e4, 0, 0]\n tspan = [0, 1e8]\n\n def fun_robertson(t, state):\n x, y, z = state\n return [\n -0.04 * x + 1e4 * y * z,\n 0.04 * x - 1e4 * y * z - 3e7 * y * y,\n 3e7 * y * y,\n ]\n\n res = solve_ivp(fun_robertson, tspan, y0, rtol=rtol,\n atol=atol, method=method)\n\n # If the stiff mode is not activated correctly, these numbers will be much bigger\n assert res.nfev < 5000\n assert res.njev < 200\n\n\ndef test_events():\n def event_rational_1(t, y):\n return y[0] - y[1] ** 0.7\n\n def event_rational_2(t, y):\n return y[1] ** 0.6 - y[0]\n\n def event_rational_3(t, y):\n return t - 7.4\n\n event_rational_3.terminal = True\n\n for method in ['RK23', 'RK45', 'DOP853', 'Radau', 'BDF', 'LSODA']:\n res = solve_ivp(fun_rational, [5, 8], [1/3, 2/9], method=method,\n events=(event_rational_1, event_rational_2))\n assert_equal(res.status, 0)\n assert_equal(res.t_events[0].size, 1)\n assert_equal(res.t_events[1].size, 1)\n assert_(5.3 < res.t_events[0][0] < 5.7)\n assert_(7.3 < res.t_events[1][0] < 7.7)\n\n assert_equal(res.y_events[0].shape, (1, 2))\n assert_equal(res.y_events[1].shape, (1, 2))\n assert np.isclose(\n event_rational_1(res.t_events[0][0], res.y_events[0][0]), 0)\n assert np.isclose(\n event_rational_2(res.t_events[1][0], res.y_events[1][0]), 0)\n\n event_rational_1.direction = 1\n event_rational_2.direction = 1\n res = solve_ivp(fun_rational, [5, 8], [1 / 3, 2 / 9], method=method,\n events=(event_rational_1, event_rational_2))\n assert_equal(res.status, 0)\n assert_equal(res.t_events[0].size, 1)\n assert_equal(res.t_events[1].size, 0)\n assert_(5.3 < res.t_events[0][0] < 5.7)\n assert_equal(res.y_events[0].shape, (1, 2))\n assert_equal(res.y_events[1].shape, (0,))\n assert np.isclose(\n event_rational_1(res.t_events[0][0], res.y_events[0][0]), 0)\n\n event_rational_1.direction = -1\n event_rational_2.direction = -1\n res = solve_ivp(fun_rational, [5, 8], [1 / 3, 2 / 9], method=method,\n events=(event_rational_1, event_rational_2))\n assert_equal(res.status, 0)\n assert_equal(res.t_events[0].size, 0)\n assert_equal(res.t_events[1].size, 1)\n assert_(7.3 < res.t_events[1][0] < 7.7)\n assert_equal(res.y_events[0].shape, (0,))\n assert_equal(res.y_events[1].shape, (1, 2))\n assert np.isclose(\n event_rational_2(res.t_events[1][0], res.y_events[1][0]), 0)\n\n event_rational_1.direction = 0\n event_rational_2.direction = 0\n\n res = solve_ivp(fun_rational, [5, 8], [1 / 3, 2 / 9], method=method,\n events=(event_rational_1, event_rational_2,\n event_rational_3), dense_output=True)\n assert_equal(res.status, 1)\n assert_equal(res.t_events[0].size, 1)\n assert_equal(res.t_events[1].size, 0)\n assert_equal(res.t_events[2].size, 1)\n assert_(5.3 < res.t_events[0][0] < 5.7)\n assert_(7.3 < res.t_events[2][0] < 7.5)\n assert_equal(res.y_events[0].shape, (1, 2))\n assert_equal(res.y_events[1].shape, (0,))\n assert_equal(res.y_events[2].shape, (1, 2))\n assert np.isclose(\n event_rational_1(res.t_events[0][0], 
res.y_events[0][0]), 0)\n assert np.isclose(\n event_rational_3(res.t_events[2][0], res.y_events[2][0]), 0)\n\n res = solve_ivp(fun_rational, [5, 8], [1 / 3, 2 / 9], method=method,\n events=event_rational_1, dense_output=True)\n assert_equal(res.status, 0)\n assert_equal(res.t_events[0].size, 1)\n assert_(5.3 < res.t_events[0][0] < 5.7)\n\n assert_equal(res.y_events[0].shape, (1, 2))\n assert np.isclose(\n event_rational_1(res.t_events[0][0], res.y_events[0][0]), 0)\n\n # Also test that termination by event doesn't break interpolants.\n tc = np.linspace(res.t[0], res.t[-1])\n yc_true = sol_rational(tc)\n yc = res.sol(tc)\n e = compute_error(yc, yc_true, 1e-3, 1e-6)\n assert_(np.all(e < 5))\n\n # Test that the y_event matches solution\n assert np.allclose(sol_rational(res.t_events[0][0]), res.y_events[0][0], rtol=1e-3, atol=1e-6)\n\n # Test in backward direction.\n event_rational_1.direction = 0\n event_rational_2.direction = 0\n for method in ['RK23', 'RK45', 'DOP853', 'Radau', 'BDF', 'LSODA']:\n res = solve_ivp(fun_rational, [8, 5], [4/9, 20/81], method=method,\n events=(event_rational_1, event_rational_2))\n assert_equal(res.status, 0)\n assert_equal(res.t_events[0].size, 1)\n assert_equal(res.t_events[1].size, 1)\n assert_(5.3 < res.t_events[0][0] < 5.7)\n assert_(7.3 < res.t_events[1][0] < 7.7)\n\n assert_equal(res.y_events[0].shape, (1, 2))\n assert_equal(res.y_events[1].shape, (1, 2))\n assert np.isclose(\n event_rational_1(res.t_events[0][0], res.y_events[0][0]), 0)\n assert np.isclose(\n event_rational_2(res.t_events[1][0], res.y_events[1][0]), 0)\n\n event_rational_1.direction = -1\n event_rational_2.direction = -1\n res = solve_ivp(fun_rational, [8, 5], [4/9, 20/81], method=method,\n events=(event_rational_1, event_rational_2))\n assert_equal(res.status, 0)\n assert_equal(res.t_events[0].size, 1)\n assert_equal(res.t_events[1].size, 0)\n assert_(5.3 < res.t_events[0][0] < 5.7)\n\n assert_equal(res.y_events[0].shape, (1, 2))\n assert_equal(res.y_events[1].shape, (0,))\n assert np.isclose(\n event_rational_1(res.t_events[0][0], res.y_events[0][0]), 0)\n\n event_rational_1.direction = 1\n event_rational_2.direction = 1\n res = solve_ivp(fun_rational, [8, 5], [4/9, 20/81], method=method,\n events=(event_rational_1, event_rational_2))\n assert_equal(res.status, 0)\n assert_equal(res.t_events[0].size, 0)\n assert_equal(res.t_events[1].size, 1)\n assert_(7.3 < res.t_events[1][0] < 7.7)\n\n assert_equal(res.y_events[0].shape, (0,))\n assert_equal(res.y_events[1].shape, (1, 2))\n assert np.isclose(\n event_rational_2(res.t_events[1][0], res.y_events[1][0]), 0)\n\n event_rational_1.direction = 0\n event_rational_2.direction = 0\n\n res = solve_ivp(fun_rational, [8, 5], [4/9, 20/81], method=method,\n events=(event_rational_1, event_rational_2,\n event_rational_3), dense_output=True)\n assert_equal(res.status, 1)\n assert_equal(res.t_events[0].size, 0)\n assert_equal(res.t_events[1].size, 1)\n assert_equal(res.t_events[2].size, 1)\n assert_(7.3 < res.t_events[1][0] < 7.7)\n assert_(7.3 < res.t_events[2][0] < 7.5)\n\n assert_equal(res.y_events[0].shape, (0,))\n assert_equal(res.y_events[1].shape, (1, 2))\n assert_equal(res.y_events[2].shape, (1, 2))\n assert np.isclose(\n event_rational_2(res.t_events[1][0], res.y_events[1][0]), 0)\n assert np.isclose(\n event_rational_3(res.t_events[2][0], res.y_events[2][0]), 0)\n\n # Also test that termination by event doesn't break interpolants.\n tc = np.linspace(res.t[-1], res.t[0])\n yc_true = sol_rational(tc)\n yc = res.sol(tc)\n e = compute_error(yc, 
yc_true, 1e-3, 1e-6)\n assert_(np.all(e < 5))\n\n assert np.allclose(sol_rational(res.t_events[1][0]), res.y_events[1][0], rtol=1e-3, atol=1e-6)\n assert np.allclose(sol_rational(res.t_events[2][0]), res.y_events[2][0], rtol=1e-3, atol=1e-6)\n\n\ndef test_max_step():\n rtol = 1e-3\n atol = 1e-6\n y0 = [1/3, 2/9]\n for method in [RK23, RK45, DOP853, Radau, BDF, LSODA]:\n for t_span in ([5, 9], [5, 1]):\n res = solve_ivp(fun_rational, t_span, y0, rtol=rtol,\n max_step=0.5, atol=atol, method=method,\n dense_output=True)\n assert_equal(res.t[0], t_span[0])\n assert_equal(res.t[-1], t_span[-1])\n assert_(np.all(np.abs(np.diff(res.t)) <= 0.5 + 1e-15))\n assert_(res.t_events is None)\n assert_(res.success)\n assert_equal(res.status, 0)\n\n y_true = sol_rational(res.t)\n e = compute_error(res.y, y_true, rtol, atol)\n assert_(np.all(e < 5))\n\n tc = np.linspace(*t_span)\n yc_true = sol_rational(tc)\n yc = res.sol(tc)\n\n e = compute_error(yc, yc_true, rtol, atol)\n assert_(np.all(e < 5))\n\n # See comment in test_integration.\n if method is not LSODA:\n assert_allclose(res.sol(res.t), res.y, rtol=1e-15, atol=1e-15)\n\n assert_raises(ValueError, method, fun_rational, t_span[0], y0,\n t_span[1], max_step=-1)\n\n if method is not LSODA:\n solver = method(fun_rational, t_span[0], y0, t_span[1],\n rtol=rtol, atol=atol, max_step=1e-20)\n message = solver.step()\n\n assert_equal(solver.status, 'failed')\n assert_(\"step size is less\" in message)\n assert_raises(RuntimeError, solver.step)\n\n\ndef test_first_step():\n rtol = 1e-3\n atol = 1e-6\n y0 = [1/3, 2/9]\n first_step = 0.1\n for method in [RK23, RK45, DOP853, Radau, BDF, LSODA]:\n for t_span in ([5, 9], [5, 1]):\n res = solve_ivp(fun_rational, t_span, y0, rtol=rtol,\n max_step=0.5, atol=atol, method=method,\n dense_output=True, first_step=first_step)\n\n assert_equal(res.t[0], t_span[0])\n assert_equal(res.t[-1], t_span[-1])\n assert_allclose(first_step, np.abs(res.t[1] - 5))\n assert_(res.t_events is None)\n assert_(res.success)\n assert_equal(res.status, 0)\n\n y_true = sol_rational(res.t)\n e = compute_error(res.y, y_true, rtol, atol)\n assert_(np.all(e < 5))\n\n tc = np.linspace(*t_span)\n yc_true = sol_rational(tc)\n yc = res.sol(tc)\n\n e = compute_error(yc, yc_true, rtol, atol)\n assert_(np.all(e < 5))\n\n # See comment in test_integration.\n if method is not LSODA:\n assert_allclose(res.sol(res.t), res.y, rtol=1e-15, atol=1e-15)\n\n assert_raises(ValueError, method, fun_rational, t_span[0], y0,\n t_span[1], first_step=-1)\n assert_raises(ValueError, method, fun_rational, t_span[0], y0,\n t_span[1], first_step=5)\n\n\ndef test_t_eval():\n rtol = 1e-3\n atol = 1e-6\n y0 = [1/3, 2/9]\n for t_span in ([5, 9], [5, 1]):\n t_eval = np.linspace(t_span[0], t_span[1], 10)\n res = solve_ivp(fun_rational, t_span, y0, rtol=rtol, atol=atol,\n t_eval=t_eval)\n assert_equal(res.t, t_eval)\n assert_(res.t_events is None)\n assert_(res.success)\n assert_equal(res.status, 0)\n\n y_true = sol_rational(res.t)\n e = compute_error(res.y, y_true, rtol, atol)\n assert_(np.all(e < 5))\n\n t_eval = [5, 5.01, 7, 8, 8.01, 9]\n res = solve_ivp(fun_rational, [5, 9], y0, rtol=rtol, atol=atol,\n t_eval=t_eval)\n assert_equal(res.t, t_eval)\n assert_(res.t_events is None)\n assert_(res.success)\n assert_equal(res.status, 0)\n\n y_true = sol_rational(res.t)\n e = compute_error(res.y, y_true, rtol, atol)\n assert_(np.all(e < 5))\n\n t_eval = [5, 4.99, 3, 1.5, 1.1, 1.01, 1]\n res = solve_ivp(fun_rational, [5, 1], y0, rtol=rtol, atol=atol,\n t_eval=t_eval)\n assert_equal(res.t, 
t_eval)\n assert_(res.t_events is None)\n assert_(res.success)\n assert_equal(res.status, 0)\n\n t_eval = [5.01, 7, 8, 8.01]\n res = solve_ivp(fun_rational, [5, 9], y0, rtol=rtol, atol=atol,\n t_eval=t_eval)\n assert_equal(res.t, t_eval)\n assert_(res.t_events is None)\n assert_(res.success)\n assert_equal(res.status, 0)\n\n y_true = sol_rational(res.t)\n e = compute_error(res.y, y_true, rtol, atol)\n assert_(np.all(e < 5))\n\n t_eval = [4.99, 3, 1.5, 1.1, 1.01]\n res = solve_ivp(fun_rational, [5, 1], y0, rtol=rtol, atol=atol,\n t_eval=t_eval)\n assert_equal(res.t, t_eval)\n assert_(res.t_events is None)\n assert_(res.success)\n assert_equal(res.status, 0)\n\n t_eval = [4, 6]\n assert_raises(ValueError, solve_ivp, fun_rational, [5, 9], y0,\n rtol=rtol, atol=atol, t_eval=t_eval)\n\n\ndef test_t_eval_dense_output():\n rtol = 1e-3\n atol = 1e-6\n y0 = [1/3, 2/9]\n t_span = [5, 9]\n t_eval = np.linspace(t_span[0], t_span[1], 10)\n res = solve_ivp(fun_rational, t_span, y0, rtol=rtol, atol=atol,\n t_eval=t_eval)\n res_d = solve_ivp(fun_rational, t_span, y0, rtol=rtol, atol=atol,\n t_eval=t_eval, dense_output=True)\n assert_equal(res.t, t_eval)\n assert_(res.t_events is None)\n assert_(res.success)\n assert_equal(res.status, 0)\n\n assert_equal(res.t, res_d.t)\n assert_equal(res.y, res_d.y)\n assert_(res_d.t_events is None)\n assert_(res_d.success)\n assert_equal(res_d.status, 0)\n\n # if t and y are equal only test values for one case\n y_true = sol_rational(res.t)\n e = compute_error(res.y, y_true, rtol, atol)\n assert_(np.all(e < 5))\n\n\ndef test_no_integration():\n for method in ['RK23', 'RK45', 'DOP853', 'Radau', 'BDF', 'LSODA']:\n sol = solve_ivp(lambda t, y: -y, [4, 4], [2, 3],\n method=method, dense_output=True)\n assert_equal(sol.sol(4), [2, 3])\n assert_equal(sol.sol([4, 5, 6]), [[2, 2, 2], [3, 3, 3]])\n\n\ndef test_no_integration_class():\n for method in [RK23, RK45, DOP853, Radau, BDF, LSODA]:\n solver = method(lambda t, y: -y, 0.0, [10.0, 0.0], 0.0)\n solver.step()\n assert_equal(solver.status, 'finished')\n sol = solver.dense_output()\n assert_equal(sol(0.0), [10.0, 0.0])\n assert_equal(sol([0, 1, 2]), [[10, 10, 10], [0, 0, 0]])\n\n solver = method(lambda t, y: -y, 0.0, [], np.inf)\n solver.step()\n assert_equal(solver.status, 'finished')\n sol = solver.dense_output()\n assert_equal(sol(100.0), [])\n assert_equal(sol([0, 1, 2]), np.empty((0, 3)))\n\n\ndef test_empty():\n def fun(t, y):\n return np.zeros((0,))\n\n y0 = np.zeros((0,))\n\n for method in ['RK23', 'RK45', 'DOP853', 'Radau', 'BDF', 'LSODA']:\n sol = assert_no_warnings(solve_ivp, fun, [0, 10], y0,\n method=method, dense_output=True)\n assert_equal(sol.sol(10), np.zeros((0,)))\n assert_equal(sol.sol([1, 2, 3]), np.zeros((0, 3)))\n\n for method in ['RK23', 'RK45', 'DOP853', 'Radau', 'BDF', 'LSODA']:\n sol = assert_no_warnings(solve_ivp, fun, [0, np.inf], y0,\n method=method, dense_output=True)\n assert_equal(sol.sol(10), np.zeros((0,)))\n assert_equal(sol.sol([1, 2, 3]), np.zeros((0, 3)))\n\n\ndef test_ConstantDenseOutput():\n sol = ConstantDenseOutput(0, 1, np.array([1, 2]))\n assert_allclose(sol(1.5), [1, 2])\n assert_allclose(sol([1, 1.5, 2]), [[1, 1, 1], [2, 2, 2]])\n\n sol = ConstantDenseOutput(0, 1, np.array([]))\n assert_allclose(sol(1.5), np.empty(0))\n assert_allclose(sol([1, 1.5, 2]), np.empty((0, 3)))\n\n\ndef test_classes():\n y0 = [1 / 3, 2 / 9]\n for cls in [RK23, RK45, DOP853, Radau, BDF, LSODA]:\n solver = cls(fun_rational, 5, y0, np.inf)\n assert_equal(solver.n, 2)\n assert_equal(solver.status, 'running')\n 
assert_equal(solver.t_bound, np.inf)\n assert_equal(solver.direction, 1)\n assert_equal(solver.t, 5)\n assert_equal(solver.y, y0)\n assert_(solver.step_size is None)\n if cls is not LSODA:\n assert_(solver.nfev > 0)\n assert_(solver.njev >= 0)\n assert_equal(solver.nlu, 0)\n else:\n assert_equal(solver.nfev, 0)\n assert_equal(solver.njev, 0)\n assert_equal(solver.nlu, 0)\n\n assert_raises(RuntimeError, solver.dense_output)\n\n message = solver.step()\n assert_equal(solver.status, 'running')\n assert_equal(message, None)\n assert_equal(solver.n, 2)\n assert_equal(solver.t_bound, np.inf)\n assert_equal(solver.direction, 1)\n assert_(solver.t > 5)\n assert_(not np.all(np.equal(solver.y, y0)))\n assert_(solver.step_size > 0)\n assert_(solver.nfev > 0)\n assert_(solver.njev >= 0)\n assert_(solver.nlu >= 0)\n sol = solver.dense_output()\n assert_allclose(sol(5), y0, rtol=1e-15, atol=0)\n\n\ndef test_OdeSolution():\n ts = np.array([0, 2, 5], dtype=float)\n s1 = ConstantDenseOutput(ts[0], ts[1], np.array([-1]))\n s2 = ConstantDenseOutput(ts[1], ts[2], np.array([1]))\n\n sol = OdeSolution(ts, [s1, s2])\n\n assert_equal(sol(-1), [-1])\n assert_equal(sol(1), [-1])\n assert_equal(sol(2), [-1])\n assert_equal(sol(3), [1])\n assert_equal(sol(5), [1])\n assert_equal(sol(6), [1])\n\n assert_equal(sol([0, 6, -2, 1.5, 4.5, 2.5, 5, 5.5, 2]),\n np.array([[-1, 1, -1, -1, 1, 1, 1, 1, -1]]))\n\n ts = np.array([10, 4, -3])\n s1 = ConstantDenseOutput(ts[0], ts[1], np.array([-1]))\n s2 = ConstantDenseOutput(ts[1], ts[2], np.array([1]))\n\n sol = OdeSolution(ts, [s1, s2])\n assert_equal(sol(11), [-1])\n assert_equal(sol(10), [-1])\n assert_equal(sol(5), [-1])\n assert_equal(sol(4), [-1])\n assert_equal(sol(0), [1])\n assert_equal(sol(-3), [1])\n assert_equal(sol(-4), [1])\n\n assert_equal(sol([12, -5, 10, -3, 6, 1, 4]),\n np.array([[-1, 1, -1, 1, -1, 1, -1]]))\n\n ts = np.array([1, 1])\n s = ConstantDenseOutput(1, 1, np.array([10]))\n sol = OdeSolution(ts, [s])\n assert_equal(sol(0), [10])\n assert_equal(sol(1), [10])\n assert_equal(sol(2), [10])\n\n assert_equal(sol([2, 1, 0]), np.array([[10, 10, 10]]))\n\n\ndef test_num_jac():\n def fun(t, y):\n return np.vstack([\n -0.04 * y[0] + 1e4 * y[1] * y[2],\n 0.04 * y[0] - 1e4 * y[1] * y[2] - 3e7 * y[1] ** 2,\n 3e7 * y[1] ** 2\n ])\n\n def jac(t, y):\n return np.array([\n [-0.04, 1e4 * y[2], 1e4 * y[1]],\n [0.04, -1e4 * y[2] - 6e7 * y[1], -1e4 * y[1]],\n [0, 6e7 * y[1], 0]\n ])\n\n t = 1\n y = np.array([1, 0, 0])\n J_true = jac(t, y)\n threshold = 1e-5\n f = fun(t, y).ravel()\n\n J_num, factor = num_jac(fun, t, y, f, threshold, None)\n assert_allclose(J_num, J_true, rtol=1e-5, atol=1e-5)\n\n J_num, factor = num_jac(fun, t, y, f, threshold, factor)\n assert_allclose(J_num, J_true, rtol=1e-5, atol=1e-5)\n\n\ndef test_num_jac_sparse():\n def fun(t, y):\n e = y[1:]**3 - y[:-1]**2\n z = np.zeros(y.shape[1])\n return np.vstack((z, 3 * e)) + np.vstack((2 * e, z))\n\n def structure(n):\n A = np.zeros((n, n), dtype=int)\n A[0, 0] = 1\n A[0, 1] = 1\n for i in range(1, n - 1):\n A[i, i - 1: i + 2] = 1\n A[-1, -1] = 1\n A[-1, -2] = 1\n\n return A\n\n np.random.seed(0)\n n = 20\n y = np.random.randn(n)\n A = structure(n)\n groups = group_columns(A)\n\n f = fun(0, y[:, None]).ravel()\n\n # Compare dense and sparse results, assuming that dense implementation\n # is correct (as it is straightforward).\n J_num_sparse, factor_sparse = num_jac(fun, 0, y.ravel(), f, 1e-8, None,\n sparsity=(A, groups))\n J_num_dense, factor_dense = num_jac(fun, 0, y.ravel(), f, 1e-8, None)\n 
assert_allclose(J_num_dense, J_num_sparse.toarray(),\n rtol=1e-12, atol=1e-14)\n assert_allclose(factor_dense, factor_sparse, rtol=1e-12, atol=1e-14)\n\n # Take small factors to trigger their recomputing inside.\n factor = np.random.uniform(0, 1e-12, size=n)\n J_num_sparse, factor_sparse = num_jac(fun, 0, y.ravel(), f, 1e-8, factor,\n sparsity=(A, groups))\n J_num_dense, factor_dense = num_jac(fun, 0, y.ravel(), f, 1e-8, factor)\n\n assert_allclose(J_num_dense, J_num_sparse.toarray(),\n rtol=1e-12, atol=1e-14)\n assert_allclose(factor_dense, factor_sparse, rtol=1e-12, atol=1e-14)\n\n\ndef test_args():\n\n # sys3 is actually two decoupled systems. (x, y) form a\n # linear oscillator, while z is a nonlinear first order\n # system with equilibria at z=0 and z=1. If k > 0, z=1\n # is stable and z=0 is unstable.\n\n def sys3(t, w, omega, k, zfinal):\n x, y, z = w\n return [-omega*y, omega*x, k*z*(1 - z)]\n\n def sys3_jac(t, w, omega, k, zfinal):\n x, y, z = w\n J = np.array([[0, -omega, 0],\n [omega, 0, 0],\n [0, 0, k*(1 - 2*z)]])\n return J\n\n def sys3_x0decreasing(t, w, omega, k, zfinal):\n x, y, z = w\n return x\n\n def sys3_y0increasing(t, w, omega, k, zfinal):\n x, y, z = w\n return y\n\n def sys3_zfinal(t, w, omega, k, zfinal):\n x, y, z = w\n return z - zfinal\n\n # Set the event flags for the event functions.\n sys3_x0decreasing.direction = -1\n sys3_y0increasing.direction = 1\n sys3_zfinal.terminal = True\n\n omega = 2\n k = 4\n\n tfinal = 5\n zfinal = 0.99\n # Find z0 such that when z(0) = z0, z(tfinal) = zfinal.\n # The condition z(tfinal) = zfinal is the terminal event.\n z0 = np.exp(-k*tfinal)/((1 - zfinal)/zfinal + np.exp(-k*tfinal))\n\n w0 = [0, -1, z0]\n\n # Provide the jac argument and use the Radau method to ensure that the use\n # of the Jacobian function is exercised.\n # If event handling is working, the solution will stop at tfinal, not tend.\n tend = 2*tfinal\n sol = solve_ivp(sys3, [0, tend], w0,\n events=[sys3_x0decreasing, sys3_y0increasing, sys3_zfinal],\n dense_output=True, args=(omega, k, zfinal),\n method='Radau', jac=sys3_jac,\n rtol=1e-10, atol=1e-13)\n\n # Check that we got the expected events at the expected times.\n x0events_t = sol.t_events[0]\n y0events_t = sol.t_events[1]\n zfinalevents_t = sol.t_events[2]\n assert_allclose(x0events_t, [0.5*np.pi, 1.5*np.pi])\n assert_allclose(y0events_t, [0.25*np.pi, 1.25*np.pi])\n assert_allclose(zfinalevents_t, [tfinal])\n\n # Check that the solution agrees with the known exact solution.\n t = np.linspace(0, zfinalevents_t[0], 250)\n w = sol.sol(t)\n assert_allclose(w[0], np.sin(omega*t), rtol=1e-9, atol=1e-12)\n assert_allclose(w[1], -np.cos(omega*t), rtol=1e-9, atol=1e-12)\n assert_allclose(w[2], 1/(((1 - z0)/z0)*np.exp(-k*t) + 1),\n rtol=1e-9, atol=1e-12)\n\n # Check that the state variables have the expected values at the events.\n x0events = sol.sol(x0events_t)\n y0events = sol.sol(y0events_t)\n zfinalevents = sol.sol(zfinalevents_t)\n assert_allclose(x0events[0], np.zeros_like(x0events[0]), atol=5e-14)\n assert_allclose(x0events[1], np.ones_like(x0events[1]))\n assert_allclose(y0events[0], np.ones_like(y0events[0]))\n assert_allclose(y0events[1], np.zeros_like(y0events[1]), atol=5e-14)\n assert_allclose(zfinalevents[2], [zfinal])\n\n\[email protected]('method', ['RK23', 'RK45', 'DOP853', 'Radau', 'BDF', 'LSODA'])\ndef test_integration_zero_rhs(method):\n result = solve_ivp(fun_zero, [0, 10], np.ones(3), method=method)\n assert_(result.success)\n assert_equal(result.status, 0)\n assert_allclose(result.y, 1.0, 
rtol=1e-15)\n\n\[email protected]('method', ['Radau'])\ndef test_mass_matrix_ODE(method):\n \"\"\" The idea is to test the \"mass\" option with a simple vector ODE\n on the array y=(y0,y1,y2,...,yn), which reads:\n d(y[k])/dt = eigvals[k]*y, with eigvals[k] the k-th system eigenvalue\n <=> dy/dt = diag(eigvals)*y\n This simple ODE (no mass matrix) can also be reformulated as a:\n M*dy/dt = y, with M=diag(1/eigvals).\n This allows to verify that the mass matrix implementation is coherent.\"\"\"\n\n rtol = atol = 1e-10\n n = 7 # number of variables\n eigvals = np.array([float(i)*(-1)**(i) for i in range(1, n+1)])\n mass = diags(diagonals=(1/eigvals,), offsets=(0,), format='csc')\n\n # solve both ODEs\n tf = 1.0 # final time\n y0 = np.ones((n,))\n true_solution = y0*np.exp(eigvals*tf)\n sol_with_mass = solve_ivp(fun=lambda t, x: x, t_span=(0., tf),\n y0=y0, max_step=np.inf, rtol=rtol, atol=atol,\n jac=None, jac_sparsity=None, method=method,\n vectorized=False, first_step=None, mass=mass)\n sol_without_mass = solve_ivp(fun=lambda t, x: eigvals*x, t_span=(0., tf),\n y0=y0, max_step=np.inf, rtol=rtol, atol=atol,\n jac=None, jac_sparsity=None, method=method,\n vectorized=False, first_step=None, mass=None)\n\n assert_(sol_without_mass.success,\n msg=f'solver {method} failed without mass matrix')\n assert_(sol_with_mass.success,\n msg=f'solver {method} failed with mass matrix')\n assert_allclose(sol_without_mass.y[:, -1], true_solution,\n rtol=10*max((atol, rtol)),\n err_msg='result without option \"mass\" is wrong')\n assert_allclose(sol_with_mass.y[:, -1], true_solution,\n rtol=10*max((atol, rtol)),\n err_msg='result with option \"mass\" is wrong')\n assert_allclose(sol_without_mass.t.size, sol_with_mass.t.size, rtol=0.05,\n err_msg='option \"mass\" affect number of steps too much')\n\n\[email protected]('method', ['Radau'])\ndef test_DAE_pendulum(method):\n \"\"\" Test the pendulum system, formulated as a DAE of index 0 to 3.\n COmpare with the true solution.\n \"\"\"\n import scipy.optimize._numdiff\n # 1 - setup the model\n m, r0, g = 1., 1., 9.81 # rod length, mass, gravity\n theta_0 = np.pi/4 # initial angle\n theta_dot0 = 0. 
# initial angular speed\n rtol = atol = 1e-6\n tf = 0.5 # final time\n\n # 2 - compute true solution (ODE on the angle in polar coordinates)\n def fun_ode(t, X):\n theta, theta_dot = X[0], X[1]\n return np.array([theta_dot,\n -g / r0 * np.sin(theta)])\n\n Xini_ode = np.array([theta_0, theta_dot0])\n sol_ode = solve_ivp(fun=fun_ode, t_span=(0., tf), y0=Xini_ode,\n rtol=1e-12, atol=1e-12, method='DOP853')\n theta_ode = sol_ode.y[0, :]\n theta_dot = sol_ode.y[1, :]\n x_ode = r0 * np.sin(theta_ode)\n y_ode = -r0 * np.cos(theta_ode)\n vx_ode = r0 * theta_dot * np.cos(theta_ode)\n vy_ode = r0 * theta_dot * np.sin(theta_ode)\n lbda_ode = m * r0 * theta_dot ** 2 + m * g * np.cos(theta_ode)\n yfinal_ode = np.array([x_ode[-1], y_ode[-1], vx_ode[-1],\n vy_ode[-1], lbda_ode[-1]])\n\n assert_(sol_ode.success,\n msg='the pendulum ODE solution failed')\n # 3 - compute DAE solutions\n x0 = r0 * np.sin(theta_0)\n y0 = -r0 * np.cos(theta_0)\n vx0 = r0 * theta_dot0 * np.cos(theta_0)\n vy0 = r0 * theta_dot0 * np.sin(theta_0)\n lbda_0 = (m * r0 * theta_dot0 ** 2 + m * g * np.cos(theta_0)) / r0\n Xini = np.array([x0, y0, vx0, vy0, lbda_0])\n for chosen_index in range(4):\n def dae_fun(t, X):\n x, y, vx, vy, lbda = X[0], X[1], X[2], X[3], X[4]\n if chosen_index == 3:\n constraint = x ** 2 + y ** 2 - r0 ** 2\n elif chosen_index == 2:\n constraint = x * vx + y * vy\n elif chosen_index == 1:\n constraint = lbda * (x ** 2 + y ** 2) / m \\\n + g * y - (vx ** 2 + vy ** 2)\n elif chosen_index == 0:\n rsq = x ** 2 + y ** 2\n dvx = -lbda * x / m\n dvy = -lbda * y / m - g\n constraint = (1 / m) * (- g * vy / rsq\n + 2 * (vx * dvx + vy * dvy) / rsq\n + (vx ** 2 + vy ** 2 - g * y) *\n (2 * x * vx + 2 * y * vy) / (rsq ** 2))\n return np.array([vx,\n vy,\n -x*lbda/m,\n -g-(y*lbda)/m,\n constraint])\n mass = np.eye(5) # mass matrix M\n if chosen_index > 0:\n mass[-1, -1] = 0\n if chosen_index == 3:\n # test sparse mass matrix\n mass = csc_matrix(mass)\n\n # the jacobian is computed via finite-differences\n def jac_dae(t, x):\n return scipy.optimize._numdiff.approx_derivative(\n fun=lambda x: dae_fun(t, x),\n x0=x, method='cs',\n rel_step=1e-50)\n\n sol = solve_ivp(fun=dae_fun, t_span=(0., tf), y0=Xini, max_step=tf/10,\n rtol=rtol, atol=atol, jac=jac_dae, jac_sparsity=None,\n method=method, vectorized=False, first_step=1e-3,\n dense_output=False, mass=mass)\n assert_(sol.success,\n msg=f'solver {method} failed for the index-{chosen_index} DAE')\n assert_(np.linalg.norm((yfinal_ode[:-1] - sol.y[:-1, -1]) /\n yfinal_ode[:-1]) < 100 * max((rtol, atol)),\n msg='The index-{} DAE does not yield correct results'.format(\n chosen_index))\n"
] | [
[
"scipy.optimize._numdiff.group_columns",
"numpy.linspace",
"numpy.sqrt",
"numpy.asarray",
"numpy.testing.assert_no_warnings",
"numpy.vstack",
"numpy.all",
"numpy.zeros_like",
"numpy.random.randn",
"numpy.exp",
"numpy.hstack",
"numpy.testing.assert_equal",
"numpy.ones_like",
"numpy.testing.suppress_warnings",
"numpy.arange",
"numpy.eye",
"scipy.integrate.solve_ivp",
"scipy.sparse.diags",
"numpy.sin",
"numpy.diff",
"numpy.zeros",
"scipy.integrate.OdeSolution",
"scipy.sparse.csc_matrix",
"scipy.integrate._ivp.common.num_jac",
"numpy.equal",
"numpy.testing.assert_",
"numpy.testing.assert_allclose",
"numpy.array",
"numpy.abs",
"numpy.random.seed",
"numpy.linalg.norm",
"numpy.cos",
"numpy.ones",
"numpy.random.uniform",
"numpy.empty"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
mseeger/autogluon-1 | [
"e8d82363ce07fd8e3087bcdd2d71c6f6bd8fd7a0",
"e8d82363ce07fd8e3087bcdd2d71c6f6bd8fd7a0",
"e8d82363ce07fd8e3087bcdd2d71c6f6bd8fd7a0"
] | [
"core/tests/unittests/bayesopt/gpmxnet/test_warping.py",
"text/src/autogluon/text/text_prediction/models/basic_v1.py",
"text/src/autogluon/text/text_classification/predictor.py"
] | [
"import numpy as np\nimport mxnet as mx\n\nfrom autogluon.core.searcher import OneDimensionalWarping, \\\n Warping, WarpedKernel\nfrom autogluon.core.searcher.bayesopt.gpmxnet.constants import DATA_TYPE, \\\n NUMERICAL_JITTER\nfrom autogluon.core.searcher import Matern52\nfrom autogluon.core.searcher import \\\n GaussianProcessRegression\nfrom autogluon.core.searcher import \\\n LogarithmScalarEncoding, PositiveScalarEncoding\n\n\ndef test_warping_encoding():\n input_range = (0., 2.)\n warping = OneDimensionalWarping(input_range)\n assert isinstance(warping.encoding, LogarithmScalarEncoding)\n assert warping.encoding.dimension == 2\n warping = OneDimensionalWarping(input_range, encoding_type=\"positive\")\n assert isinstance(warping.encoding, PositiveScalarEncoding)\n\n\ndef test_warping_default_parameters():\n x = mx.nd.array([0., 1., 2.], dtype=DATA_TYPE)\n input_range = (0., 2.)\n warping = OneDimensionalWarping(input_range)\n warping.collect_params().initialize()\n\n warping_parameters = warping.encoding.get(mx.nd, warping.warping_internal.data())\n\n np.testing.assert_almost_equal(warping_parameters.asnumpy(), np.ones(2))\n np.testing.assert_almost_equal(warping(x).asnumpy(), np.array([NUMERICAL_JITTER, 0.5, 1.-NUMERICAL_JITTER]))\n\n\ndef test_warping_with_arbitrary_parameters():\n x = mx.nd.array([0., 1., 2.], dtype=DATA_TYPE)\n input_range = (0., 2.)\n\n warping = OneDimensionalWarping(input_range)\n warping.collect_params().initialize()\n\n warping.encoding.set(warping.warping_internal, [2., 0.5])\n warping_parameters = warping.encoding.get(mx.nd, warping.warping_internal.data())\n\n np.testing.assert_almost_equal(warping_parameters.asnumpy(), [2., 0.5])\n\n # In that case (with parameters [2., 0.5]), the warping is given by x => 1. - sqrt(1. - x^2)\n def expected_warping(x):\n return 1. - np.sqrt(1. - x*x)\n\n np.testing.assert_almost_equal(warping(x).asnumpy(), expected_warping(np.array([NUMERICAL_JITTER, 0.5, 1.-NUMERICAL_JITTER])))\n\n\ndef test_warping_with_multidimension_and_arbitrary_parameters():\n X = mx.nd.array([[0., 1., 0.], [1.,2.,1.], [2., 0., 2.]], dtype=DATA_TYPE)\n\n dimension=3\n\n # We transform only the columns {0,2} of the 3-dimensional data X\n input_range = (0., 2.)\n warping = Warping(index_to_range={0:input_range, 2:input_range}, dimension=dimension)\n\n assert len(warping.transformations) == dimension\n\n warping.collect_params().initialize()\n\n # We change the warping parameters of the first dimension only\n w0 = warping.transformations[0]\n w0.encoding.set(w0.warping_internal, [2., 0.5])\n\n w2 = warping.transformations[2]\n w2_parameters = w2.encoding.get(mx.nd, w2.warping_internal.data())\n\n # The parameters of w2 should be the default ones (as there was no set operations)\n np.testing.assert_almost_equal(w2_parameters.asnumpy(), np.ones(2))\n\n # print(warping(X).asnumpy())\n # for name, p in warping.collect_params().items():\n # print(name, p.data().asnumpy())\n\n # With parameters [2., 0.5], the warping is given by x => 1. - sqrt(1. - x^2)\n def expected_warping(x):\n return 1. - np.sqrt(1. 
- x*x)\n\n expected_column0 = expected_warping(np.array([NUMERICAL_JITTER, 0.5, 1.-NUMERICAL_JITTER])).reshape((-1,1))\n expected_column1 = np.array([1., 2., 0.]).reshape((-1,1))\n expected_column2 = np.array([NUMERICAL_JITTER, 0.5, 1.-NUMERICAL_JITTER]).reshape((-1,1))\n\n np.testing.assert_almost_equal(warping(X).asnumpy(), np.hstack([expected_column0, expected_column1, expected_column2]))\n\n\ndef test_gp_regression_with_warping():\n\n def f(x):\n return np.sin(3*np.log(x))\n\n np.random.seed(7)\n\n L, U = -5., 12.\n input_range = (2.**L, 2.**U)\n\n x_train = np.sort(2.**np.random.uniform(L, U, 250))\n x_test = np.sort(2.**np.random.uniform(L, U, 500))\n y_train = f(x_train)\n y_test = f(x_test)\n\n # to mx.nd\n y_train_mx_nd = mx.nd.array(y_train)\n x_train_mx_nd = mx.nd.array(x_train)\n x_test_mx_nd = mx.nd.array(x_test)\n\n kernels = [\n Matern52(dimension=1),\n WarpedKernel(\n kernel=Matern52(dimension=1),\n warping=Warping(dimension=1, index_to_range={0: input_range})\n )\n ]\n\n models = [GaussianProcessRegression(kernel=k, random_seed=0) for k in kernels]\n train_errors, test_errors = [], []\n\n for model in models:\n\n model.fit(x_train_mx_nd, y_train_mx_nd)\n\n mu_train, var_train = model.predict(x_train_mx_nd)[0]\n mu_test, var_test = model.predict(x_test_mx_nd)[0]\n\n # back to np.array\n mu_train = mu_train.asnumpy()\n mu_test = mu_test.asnumpy()\n # var_train = var_train.asnumpy()\n # var_test = var_test.asnumpy()\n\n train_errors.append(np.mean(np.abs((mu_train - y_train))))\n test_errors.append(np.mean(np.abs((mu_test - y_test))))\n\n # The two models have similar performance on training points\n np.testing.assert_almost_equal(train_errors[0], train_errors[1], decimal=4)\n\n # As expected, the model with warping largely outperforms the model without\n assert test_errors[1] < 0.1 * test_errors[0]\n\n # If we wish to plot things\n # import matplotlib.pyplot as plt\n # plt.plot(x_train, y_train, \"r-\")\n # plt.plot(x_train, mu_train, \"b--\")\n #\n # plt.plot(x_test, y_test, \"y-\")\n # plt.plot(x_test, mu_test, \"m--\")\n\n # plt.fill_between(x_train,\n # mu_train - np.sqrt(var_train),\n # mu_train + np.sqrt(var_train),\n # alpha=0.5, edgecolor='#3F7F4C', facecolor='#7EFF99', linewidth=0)\n #\n # plt.fill_between(x_test,\n # mu_test - np.sqrt(var_test),\n # mu_test + np.sqrt(var_test),\n # alpha=0.5, edgecolor='#3F7F4C', facecolor='#7EFF99', linewidth=0)\n #\n # plt.show()\n",
"import numpy as np\nimport os\nimport math\nimport logging\nimport pandas as pd\nimport collections\nimport time\nimport json\nimport functools\nimport tqdm\nimport mxnet as mx\nfrom mxnet.util import use_np\nfrom mxnet.lr_scheduler import PolyScheduler, CosineScheduler\nfrom mxnet.gluon.data import DataLoader\nfrom sklearn.metrics import accuracy_score, f1_score, matthews_corrcoef, roc_auc_score\nfrom scipy.stats import pearsonr, spearmanr\nfrom autogluon_contrib_nlp.models import get_backbone\nfrom autogluon_contrib_nlp.lr_scheduler import InverseSquareRootScheduler\nfrom autogluon_contrib_nlp.utils.config import CfgNode\nfrom autogluon_contrib_nlp.utils.misc import logging_config, grouper,\\\n count_parameters, repeat, get_mxnet_available_ctx\nfrom autogluon_contrib_nlp.utils.parameter import move_to_ctx, clip_grad_global_norm\nfrom ..metrics import calculate_metric_by_expr\nfrom .. import constants as _C\nfrom autogluon.core import args, space\nfrom autogluon.core.scheduler import FIFOScheduler, HyperbandScheduler\nfrom ..column_property import get_column_property_metadata, get_column_properties_from_metadata\nfrom ..preprocessing import TabularBasicBERTPreprocessor\nfrom ..modules.basic_prediction import BERTForTabularBasicV1\nfrom ..dataset import TabularDataset\n\n\n@use_np\ndef get_optimizer(cfg, updates_per_epoch):\n max_update = int(updates_per_epoch * cfg.num_train_epochs)\n warmup_steps = int(updates_per_epoch * cfg.num_train_epochs * cfg.warmup_portion)\n if cfg.lr_scheduler == 'triangular':\n lr_scheduler = PolyScheduler(max_update=max_update,\n base_lr=cfg.lr,\n warmup_begin_lr=cfg.begin_lr,\n pwr=1,\n final_lr=cfg.final_lr,\n warmup_steps=warmup_steps,\n warmup_mode='linear')\n elif cfg.lr_scheduler == 'inv_sqrt':\n warmup_steps = int(updates_per_epoch * cfg.num_train_epochs\n * cfg.warmup_portion)\n lr_scheduler = InverseSquareRootScheduler(warmup_steps=warmup_steps,\n base_lr=cfg.lr,\n warmup_init_lr=cfg.begin_lr)\n elif cfg.lr_scheduler == 'constant':\n lr_scheduler = None\n elif cfg.lr_scheduler == 'cosine':\n max_update = int(updates_per_epoch * cfg.num_train_epochs)\n warmup_steps = int(updates_per_epoch * cfg.num_train_epochs\n * cfg.warmup_portion)\n lr_scheduler = CosineScheduler(max_update=max_update,\n base_lr=cfg.lr,\n final_lr=cfg.final_lr,\n warmup_steps=warmup_steps,\n warmup_begin_lr=cfg.begin_lr)\n else:\n raise ValueError('Unsupported lr_scheduler=\"{}\"'\n .format(cfg.lr_scheduler))\n optimizer_params = {'learning_rate': cfg.lr,\n 'wd': cfg.wd,\n 'lr_scheduler': lr_scheduler}\n optimizer = cfg.optimizer\n additional_params = {key: value for key, value in cfg.optimizer_params}\n optimizer_params.update(additional_params)\n return optimizer, optimizer_params, max_update\n\n\n@use_np\ndef apply_layerwise_decay(model, layerwise_decay, backbone_name, not_included=None):\n \"\"\"Apply the layer-wise gradient decay\n .. 
math::\n lr = lr * layerwise_decay^(max_depth - layer_depth)\n\n Parameters:\n ----------\n model\n qa_net\n layerwise_decay: int\n layer-wise decay power\n not_included: list of str\n A list or parameter names that not included in the layer-wise decay\n \"\"\"\n if not_included is None:\n not_included = []\n # consider the task specific fine-tuning layer as the last layer, following with pooler\n # In addition, the embedding parameters have the smaller learning rate based on this setting.\n if 'electra' in backbone_name:\n all_layers = model.encoder.all_encoder_layers\n else:\n all_layers = model.encoder.all_layers\n max_depth = len(all_layers) + 2\n for key, value in model.collect_params().items():\n if 'scores' in key:\n value.lr_mult = layerwise_decay ** 0\n if 'pool' in key:\n value.lr_mult = layerwise_decay ** 1\n if 'embed' in key:\n value.lr_mult = layerwise_decay ** max_depth\n\n for (layer_depth, layer) in enumerate(all_layers):\n layer_params = layer.collect_params()\n for key, value in layer_params.items():\n for pn in not_included:\n if pn in key:\n continue\n value.lr_mult = layerwise_decay**(max_depth - (layer_depth + 1))\n\n\ndef base_optimization_config():\n \"\"\"The basic optimization phase\"\"\"\n cfg = CfgNode()\n cfg.lr_scheduler = 'triangular'\n cfg.optimizer = 'adamw'\n cfg.optimizer_params = [('beta1', 0.9),\n ('beta2', 0.999),\n ('epsilon', 1e-6),\n ('correct_bias', False)]\n cfg.begin_lr = 0.0\n cfg.batch_size = 32\n cfg.model_average = 5\n cfg.per_device_batch_size = 16 # Per-device batch-size\n cfg.val_batch_size_mult = 2 # By default, we double the batch size for validation\n cfg.lr = 1E-4\n cfg.final_lr = 0.0\n cfg.num_train_epochs = 3\n cfg.warmup_portion = 0.1\n cfg.layerwise_lr_decay = 0.8 # The layer_wise decay\n cfg.wd = 0.01 # Weight Decay\n cfg.max_grad_norm = 1.0 # Maximum Gradient Norm\n # The validation frequency = validation frequency * num_updates_in_an_epoch\n cfg.valid_frequency = 0.1\n # Logging frequency = log frequency * num_updates_in_an_epoch\n cfg.log_frequency = 0.1\n return cfg\n\n\ndef base_model_config():\n cfg = CfgNode()\n cfg.preprocess = CfgNode()\n cfg.preprocess.merge_text = True\n cfg.preprocess.max_length = 128\n cfg.backbone = CfgNode()\n cfg.backbone.name = 'google_electra_base'\n cfg.network = BERTForTabularBasicV1.get_cfg()\n return cfg\n\n\ndef base_learning_config():\n cfg = CfgNode()\n cfg.early_stopping_patience = 10 # Stop if we cannot find a better checkpoint\n cfg.valid_ratio = 0.15 # The ratio of dataset to split for validation\n cfg.stop_metric = 'auto' # Automatically define the stopping metric\n cfg.log_metrics = 'auto' # Automatically determine the metrics used in logging\n return cfg\n\n\ndef base_misc_config():\n cfg = CfgNode()\n cfg.seed = 123\n cfg.exp_dir = './autonlp'\n return cfg\n\n\ndef base_cfg():\n cfg = CfgNode()\n cfg.version = 1\n cfg.optimization = base_optimization_config()\n cfg.learning = base_learning_config()\n cfg.model = base_model_config()\n cfg.misc = base_misc_config()\n cfg.freeze()\n return cfg\n\n\ndef electra_base():\n \"\"\"The search space of Electra Base\"\"\"\n cfg = base_cfg()\n cfg.defrost()\n cfg.optimization.layerwise_lr_decay = 0.8\n cfg.freeze()\n return cfg\n\n\ndef mobile_bert():\n \"\"\"The search space of MobileBERT\"\"\"\n cfg = base_cfg()\n cfg.defrost()\n cfg.optimization.layerwise_lr_decay = -1\n cfg.model.backbone.name = 'google_uncased_mobilebert'\n cfg.optimization.lr = 1E-5\n cfg.optimization.num_train_epochs = 5.0\n cfg.freeze()\n return cfg\n\n\ndef 
calculate_metric_scores(metrics, predictions, gt_labels,\n pos_label=1):\n \"\"\"\n\n Parameters\n ----------\n metrics\n A list of metric names\n predictions\n gt_labels\n pos_label\n\n Returns\n -------\n metric_scores\n A dictionary contains key --> metric scores\n \"\"\"\n if isinstance(metrics, str):\n metrics = [metrics]\n metric_scores = collections.OrderedDict()\n for metric_name in metrics:\n if metric_name == 'acc':\n metric_scores[metric_name] = float(accuracy_score(gt_labels,\n predictions.argmax(axis=-1)))\n elif metric_name == 'f1':\n metric_scores[metric_name] = float(f1_score(gt_labels,\n predictions.argmax(axis=-1),\n pos_label=pos_label))\n elif metric_name == 'mcc':\n metric_scores[metric_name] = float(matthews_corrcoef(gt_labels,\n predictions.argmax(axis=-1)))\n elif metric_name == 'auc':\n metric_scores[metric_name] = float(roc_auc_score(gt_labels, predictions[:, pos_label]))\n elif metric_name == 'nll':\n metric_scores[metric_name]\\\n = float(- np.log(predictions[np.arange(gt_labels.shape[0]), gt_labels]).mean())\n elif metric_name == 'pearsonr':\n metric_scores[metric_name] = float(pearsonr(gt_labels, predictions)[0])\n elif metric_name == 'spearmanr':\n metric_scores[metric_name] = float(spearmanr(gt_labels, predictions)[0])\n elif metric_name == 'mse':\n metric_scores[metric_name] = float(np.square(predictions - gt_labels).mean())\n elif metric_name == 'rmse':\n metric_scores[metric_name] = float(np.sqrt(np.square(predictions - gt_labels).mean()))\n elif metric_name == 'mae':\n metric_scores[metric_name] = float(np.abs(predictions - gt_labels).mean())\n else:\n raise ValueError('Unknown metric = {}'.format(metric_name))\n metric_scores = collections.OrderedDict(\n [(k, v.item() if isinstance(v, np.ndarray) else v)\n for k, v in metric_scores.items()])\n return metric_scores\n\n\ndef is_better_score(metric_name, baseline, new_score):\n \"\"\"Whether the new score is better than the baseline\n\n Parameters\n ----------\n metric_name\n Name of the metric\n baseline\n The baseline score\n new_score\n The new score\n\n Returns\n -------\n ret\n Whether the new score is better than the baseline\n \"\"\"\n if metric_name in ['acc', 'f1', 'mcc', 'auc', 'pearsonr', 'spearmanr']:\n return new_score > baseline\n elif metric_name in ['mse', 'rmse', 'mae']:\n return new_score < baseline\n else:\n raise NotImplementedError\n\n\n@use_np\ndef _classification_regression_predict(net, dataloader, problem_type, has_label=True):\n \"\"\"\n\n Parameters\n ----------\n net\n The network\n dataloader\n The dataloader\n problem_type\n Types of the labels\n has_label\n\n\n Returns\n -------\n predictions\n \"\"\"\n predictions = []\n ctx_l = net.collect_params().list_ctx()\n for sample_l in grouper(dataloader, len(ctx_l)):\n iter_pred_l = []\n for sample, ctx in zip(sample_l, ctx_l):\n if sample is None:\n continue\n if has_label:\n batch_feature, batch_label = sample\n else:\n batch_feature = sample\n batch_feature = move_to_ctx(batch_feature, ctx)\n pred = net(batch_feature)\n if problem_type == _C.CLASSIFICATION:\n pred = mx.npx.softmax(pred, axis=-1)\n iter_pred_l.append(pred)\n for pred in iter_pred_l:\n predictions.append(pred.asnumpy())\n predictions = np.concatenate(predictions, axis=0)\n return predictions\n\n\n@use_np\ndef train_function(args, reporter, train_data, tuning_data,\n time_limits, base_config, problem_types,\n column_properties, label_columns, label_shapes,\n log_metrics, stopping_metric, console_log,\n ignore_warning=False):\n import os\n 
os.environ['MKL_NUM_THREADS'] = '1'\n os.environ['OMP_NUM_THREADS'] = '1'\n os.environ['MKL_DYNAMIC'] = 'FALSE'\n if ignore_warning:\n import warnings\n warnings.filterwarnings(\"ignore\")\n search_space = args['search_space']\n cfg = base_config.clone()\n specified_values = []\n for key in search_space:\n specified_values.append(key)\n specified_values.append(search_space[key])\n cfg.merge_from_list(specified_values)\n exp_dir = cfg.misc.exp_dir\n if reporter is not None:\n # When the reporter is not None,\n # we create the saved directory based on the task_id + time\n task_id = args.task_id\n exp_dir = os.path.join(exp_dir, 'task{}'.format(task_id))\n os.makedirs(exp_dir, exist_ok=True)\n cfg.defrost()\n cfg.misc.exp_dir = exp_dir\n cfg.freeze()\n logger = logging.getLogger()\n logging_config(folder=exp_dir, name='training', logger=logger, console=console_log)\n logger.info(cfg)\n # Load backbone model\n backbone_model_cls, backbone_cfg, tokenizer, backbone_params_path, _ \\\n = get_backbone(cfg.model.backbone.name)\n with open(os.path.join(exp_dir, 'cfg.yml'), 'w') as f:\n f.write(str(cfg))\n text_backbone = backbone_model_cls.from_cfg(backbone_cfg)\n # Build Preprocessor + Preprocess the training dataset + Inference problem type\n # TODO Move preprocessor + Dataloader to outer loop to better cache the dataloader\n preprocessor = TabularBasicBERTPreprocessor(tokenizer=tokenizer,\n column_properties=column_properties,\n label_columns=label_columns,\n max_length=cfg.model.preprocess.max_length,\n merge_text=cfg.model.preprocess.merge_text)\n logger.info('Process training set...')\n processed_train = preprocessor.process_train(train_data.table)\n logger.info('Done!')\n logger.info('Process dev set...')\n processed_dev = preprocessor.process_test(tuning_data.table)\n logger.info('Done!')\n label = label_columns[0]\n # Get the ground-truth dev labels\n gt_dev_labels = np.array(tuning_data.table[label].apply(column_properties[label].transform))\n ctx_l = get_mxnet_available_ctx()\n base_batch_size = cfg.optimization.per_device_batch_size\n num_accumulated = int(np.ceil(cfg.optimization.batch_size / base_batch_size))\n inference_base_batch_size = base_batch_size * cfg.optimization.val_batch_size_mult\n train_dataloader = DataLoader(processed_train,\n batch_size=base_batch_size,\n shuffle=True,\n batchify_fn=preprocessor.batchify(is_test=False))\n dev_dataloader = DataLoader(processed_dev,\n batch_size=inference_base_batch_size,\n shuffle=False,\n batchify_fn=preprocessor.batchify(is_test=True))\n net = BERTForTabularBasicV1(text_backbone=text_backbone,\n feature_field_info=preprocessor.feature_field_info(),\n label_shape=label_shapes[0],\n cfg=cfg.model.network)\n net.initialize_with_pretrained_backbone(backbone_params_path, ctx=ctx_l)\n net.hybridize()\n num_total_params, num_total_fixed_params = count_parameters(net.collect_params())\n logger.info('#Total Params/Fixed Params={}/{}'.format(num_total_params,\n num_total_fixed_params))\n # Initialize the optimizer\n updates_per_epoch = int(len(train_dataloader) / (num_accumulated * len(ctx_l)))\n optimizer, optimizer_params, max_update \\\n = get_optimizer(cfg.optimization,\n updates_per_epoch=updates_per_epoch)\n valid_interval = math.ceil(cfg.optimization.valid_frequency * updates_per_epoch)\n train_log_interval = math.ceil(cfg.optimization.log_frequency * updates_per_epoch)\n trainer = mx.gluon.Trainer(net.collect_params(),\n optimizer, optimizer_params,\n update_on_kvstore=False)\n if 0 < cfg.optimization.layerwise_lr_decay < 1:\n 
apply_layerwise_decay(net.text_backbone,\n cfg.optimization.layerwise_lr_decay,\n backbone_name=cfg.model.backbone.name)\n # Do not apply weight decay to all the LayerNorm and bias\n for _, v in net.collect_params('.*beta|.*gamma|.*bias').items():\n v.wd_mult = 0.0\n params = [p for p in net.collect_params().values() if p.grad_req != 'null']\n\n # Set grad_req if gradient accumulation is required\n if num_accumulated > 1:\n logger.info('Using gradient accumulation.'\n ' Global batch size = {}'.format(cfg.optimization.batch_size))\n for p in params:\n p.grad_req = 'add'\n net.collect_params().zero_grad()\n train_loop_dataloader = grouper(repeat(train_dataloader), len(ctx_l))\n log_loss_l = [mx.np.array(0.0, dtype=np.float32, ctx=ctx) for ctx in ctx_l]\n log_num_samples_l = [0 for _ in ctx_l]\n logging_start_tick = time.time()\n best_performance_score = None\n mx.npx.waitall()\n no_better_rounds = 0\n report_idx = 0\n start_tick = time.time()\n for update_idx in tqdm.tqdm(range(max_update)):\n num_samples_per_update_l = [0 for _ in ctx_l]\n for accum_idx in range(num_accumulated):\n sample_l = next(train_loop_dataloader)\n loss_l = []\n num_samples_l = [0 for _ in ctx_l]\n for i, (sample, ctx) in enumerate(zip(sample_l, ctx_l)):\n feature_batch, label_batch = sample\n feature_batch = move_to_ctx(feature_batch, ctx)\n label_batch = move_to_ctx(label_batch, ctx)\n with mx.autograd.record():\n pred = net(feature_batch)\n if problem_types[0] == _C.CLASSIFICATION:\n logits = mx.npx.log_softmax(pred, axis=-1)\n loss = - mx.npx.pick(logits, label_batch[0])\n elif problem_types[0] == _C.REGRESSION:\n loss = mx.np.square(pred - label_batch[0])\n loss_l.append(loss.mean() / len(ctx_l))\n num_samples_l[i] = loss.shape[0]\n num_samples_per_update_l[i] += loss.shape[0]\n for loss in loss_l:\n loss.backward()\n for i in range(len(ctx_l)):\n log_loss_l[i] += loss_l[i] * len(ctx_l) * num_samples_l[i]\n log_num_samples_l[i] += num_samples_per_update_l[i]\n # Begin to update\n trainer.allreduce_grads()\n num_samples_per_update = sum(num_samples_per_update_l)\n total_norm, ratio, is_finite = \\\n clip_grad_global_norm(params, cfg.optimization.max_grad_norm * num_accumulated)\n total_norm = total_norm / num_accumulated\n trainer.update(num_samples_per_update)\n\n # Clear after update\n if num_accumulated > 1:\n net.collect_params().zero_grad()\n if (update_idx + 1) % train_log_interval == 0:\n log_loss = sum([ele.as_in_ctx(ctx_l[0]) for ele in log_loss_l]).asnumpy()\n log_num_samples = sum(log_num_samples_l)\n logger.info(\n '[Iter {}/{}, Epoch {}] train loss={}, gnorm={}, lr={}, #samples processed={},'\n ' #sample per second={}'\n .format(update_idx + 1, max_update,\n int(update_idx / updates_per_epoch),\n log_loss / log_num_samples, total_norm, trainer.learning_rate,\n log_num_samples,\n log_num_samples / (time.time() - logging_start_tick)))\n logging_start_tick = time.time()\n log_loss_l = [mx.np.array(0.0, dtype=np.float32, ctx=ctx) for ctx in ctx_l]\n log_num_samples_l = [0 for _ in ctx_l]\n if (update_idx + 1) % valid_interval == 0 or (update_idx + 1) == max_update:\n valid_start_tick = time.time()\n dev_predictions = \\\n _classification_regression_predict(net, dataloader=dev_dataloader,\n problem_type=problem_types[0],\n has_label=False)\n metric_scores = calculate_metric_scores(log_metrics,\n predictions=dev_predictions,\n gt_labels=gt_dev_labels)\n performance_score = calculate_metric_by_expr(\n {label_columns[0]: metric_scores},\n [label_columns[0]],\n stopping_metric\n )\n valid_time_spent = 
time.time() - valid_start_tick\n if best_performance_score is None or is_better_score(stopping_metric,\n best_performance_score,\n performance_score):\n find_better = True\n no_better_rounds = 0\n best_performance_score = performance_score\n else:\n find_better = False\n no_better_rounds += 1\n if find_better:\n net.save_parameters(os.path.join(exp_dir, 'best_model.params'))\n mx.npx.waitall()\n loss_string = ', '.join(['{}={}'.format(key, metric_scores[key])\n for key in log_metrics])\n logger.info('[Iter {}/{}, Epoch {}] valid {}, time spent={},'\n ' total_time={:.2f}min'.format(\n update_idx + 1, max_update, int(update_idx / updates_per_epoch),\n loss_string, valid_time_spent, (time.time() - start_tick) / 60))\n report_items = [('iteration', update_idx + 1),\n ('report_idx', report_idx),\n ('epoch', int(update_idx / updates_per_epoch))] +\\\n list(metric_scores.items()) + \\\n [('fine_better', find_better),\n ('time_spent', int(time.time() - start_tick))]\n total_time_spent = time.time() - start_tick\n if time_limits is not None and total_time_spent > time_limits:\n break\n report_idx += 1\n if stopping_metric in ['mse', 'mae', 'rmse']:\n report_items.append(('reward', -performance_score))\n else:\n report_items.append(('reward', performance_score))\n report_items.append(('exp_dir', exp_dir))\n reporter(**dict(report_items))\n if no_better_rounds >= cfg.learning.early_stopping_patience:\n logger.info('Early stopping patience reached!')\n break\n\n\n@use_np\nclass BertForTextPredictionBasic:\n \"\"\"A model object returned by `fit()` in TextPrediction tasks. \n Use for making predictions on new data and viewing information about models trained during `fit()`.\n \"\"\"\n \n def __init__(self, column_properties, label_columns, feature_columns,\n label_shapes, problem_types, stopping_metric, log_metrics,\n output_directory=None, logger=None, base_config=None, search_space=None):\n \"\"\"Creates model object.\n\n Parameters\n ----------\n column_properties\n The column properties.\n label_columns\n Label columns.\n feature_columns\n label_shapes\n problem_types\n stopping_metric\n log_metrics\n output_directory\n logger\n base_config\n The basic configuration that the search space will be based upon.\n search_space\n The hyperparameter search space.\n \"\"\"\n super(BertForTextPredictionBasic, self).__init__()\n if base_config is None:\n self._base_config = base_cfg()\n else:\n self._base_config = base_cfg().clone_merge(base_config)\n self._base_config.defrost()\n if output_directory is not None:\n self._base_config.misc.exp_dir = output_directory\n self._base_config.misc.exp_dir = os.path.abspath(self._base_config.misc.exp_dir)\n self._base_config.freeze()\n if search_space is None:\n self._search_space = dict()\n else:\n assert isinstance(search_space, dict)\n self._search_space = search_space\n self._column_properties = column_properties\n self._stopping_metric = stopping_metric\n self._log_metrics = log_metrics\n self._logger = logger\n self._output_directory = output_directory\n\n self._label_columns = label_columns\n self._feature_columns = feature_columns\n self._label_shapes = label_shapes\n self._problem_types = problem_types\n\n # Need to be set in the fit call\n self._net = None\n self._preprocessor = None\n self._config = None\n self._results = None\n\n @property\n def label_columns(self):\n return self._label_columns\n\n @property\n def label_shapes(self):\n return self._label_shapes\n\n @property\n def problem_types(self):\n return self._problem_types\n\n @property\n def 
feature_columns(self):\n return self._feature_columns\n\n @property\n def search_space(self):\n return self._search_space\n\n @property\n def base_config(self):\n return self._base_config\n\n @property\n def results(self):\n return self._results\n\n @property\n def config(self):\n return self._config\n\n @property\n def net(self):\n return self._net\n\n @staticmethod\n def default_config():\n \"\"\"Get the default configuration\n\n Returns\n -------\n cfg\n The configuration specified by the key\n \"\"\"\n return base_cfg()\n\n def train(self, train_data, tuning_data, resource,\n time_limits=None,\n scheduler='fifo',\n searcher=None,\n num_trials=10,\n grace_period=None,\n max_t=None,\n reduction_factor=4,\n brackets=1,\n plot_results=False,\n console_log=True,\n ignore_warning=True):\n start_tick = time.time()\n logging_config(folder=self._output_directory, name='main',\n console=console_log,\n logger=self._logger)\n assert len(self._label_columns) == 1\n # TODO(sxjscience) Try to support S3\n os.makedirs(self._output_directory, exist_ok=True)\n search_space_reg = args(search_space=space.Dict(**self.search_space))\n if scheduler == 'hyperband' and time_limits is None:\n time_limits = 5 * 60 * 60 # 5 hour\n train_fn = search_space_reg(functools.partial(train_function,\n train_data=train_data,\n time_limits=time_limits,\n tuning_data=tuning_data,\n base_config=self.base_config,\n problem_types=self.problem_types,\n column_properties=self._column_properties,\n label_columns=self._label_columns,\n label_shapes=self._label_shapes,\n log_metrics=self._log_metrics,\n stopping_metric=self._stopping_metric,\n console_log=console_log,\n ignore_warning=ignore_warning))\n if scheduler == 'fifo':\n if searcher is None:\n searcher = 'random'\n scheduler = FIFOScheduler(train_fn,\n time_out=time_limits,\n num_trials=num_trials,\n resource=resource,\n searcher=searcher,\n checkpoint=None,\n reward_attr='reward',\n time_attr='time_spent')\n elif scheduler == 'hyperband':\n if searcher is None:\n searcher = 'random'\n if grace_period is None:\n grace_period = 1\n if max_t is None:\n max_t = 5\n scheduler = HyperbandScheduler(train_fn,\n time_out=time_limits,\n max_t=max_t,\n resource=resource,\n searcher=searcher,\n grace_period=grace_period,\n reduction_factor=reduction_factor,\n brackets=brackets,\n checkpoint=None,\n reward_attr='reward',\n time_attr='report_idx')\n else:\n raise NotImplementedError\n scheduler.run()\n scheduler.join_jobs()\n if len(scheduler.config_history) == 0:\n raise RuntimeError('No training job has been completed! '\n 'There are two possibilities: '\n '1) The time_limits is too small, '\n 'or 2) There are some internal errors in AutoGluon. '\n 'For the first case, you can increase the time_limits or set it to '\n 'None, e.g., setting \"TextPrediction.fit(..., time_limits=None). 
To '\n 'further investigate the root cause, you can also try to train with '\n '\"verbosity=3\", i.e., TextPrediction.fit(..., verbosity=3).')\n best_config = scheduler.get_best_config()\n self._logger.info('Best_config={}'.format(best_config))\n best_task_id = scheduler.get_best_task_id()\n best_model_saved_dir_path = os.path.join(self._output_directory,\n 'task{}'.format(best_task_id))\n best_cfg_path = os.path.join(best_model_saved_dir_path, 'cfg.yml')\n cfg = self.base_config.clone_merge(best_cfg_path)\n self._results = dict()\n self._results.update(best_reward=scheduler.get_best_reward(),\n best_config=scheduler.get_best_config(),\n total_time=time.time() - start_tick,\n metadata=scheduler.metadata,\n training_history=scheduler.training_history,\n config_history=scheduler.config_history,\n reward_attr=scheduler._reward_attr,\n config=cfg)\n if plot_results:\n plot_training_curves = os.path.join(self._output_directory, 'plot_training_curves.png')\n scheduler.get_training_curves(filename=plot_training_curves, plot=plot_results,\n use_legend=True)\n # Consider to move this to a separate predictor\n self._config = cfg\n backbone_model_cls, backbone_cfg, tokenizer, backbone_params_path, _ \\\n = get_backbone(cfg.model.backbone.name)\n text_backbone = backbone_model_cls.from_cfg(backbone_cfg)\n preprocessor = TabularBasicBERTPreprocessor(tokenizer=tokenizer,\n column_properties=self._column_properties,\n label_columns=self._label_columns,\n max_length=cfg.model.preprocess.max_length,\n merge_text=cfg.model.preprocess.merge_text)\n self._preprocessor = preprocessor\n net = BERTForTabularBasicV1(text_backbone=text_backbone,\n feature_field_info=preprocessor.feature_field_info(),\n label_shape=self._label_shapes[0],\n cfg=cfg.model.network)\n # Here, we cannot use GPU due to https://github.com/awslabs/autogluon/issues/602\n net.load_parameters(os.path.join(best_model_saved_dir_path, 'best_model.params'),\n ctx=mx.cpu())\n self._net = net\n mx.npx.waitall()\n\n def evaluate(self, valid_data, metrics):\n \"\"\" Report the predictive performance evaluated for a given dataset.\n \n Parameters\n ----------\n valid_data : str or :class:`TabularDataset` or `pandas.DataFrame`\n This Dataset must also contain the label-column with the same column-name as specified during `fit()`.\n If str is passed, `valid_data` will be loaded using the str value as the file path.\n metrics : List[str]\n A list of names of metrics to report.\n \n Returns\n -------\n Dict mapping metric -> score calculated over the given dataset.\n \"\"\"\n assert self.net is not None\n if not isinstance(valid_data, TabularDataset):\n valid_data = TabularDataset(valid_data,\n columns=self._feature_columns + self._label_columns,\n column_properties=self._column_properties)\n ground_truth = np.array(valid_data.table[self._label_columns[0]].apply(\n self._column_properties[self._label_columns[0]].transform))\n if self._problem_types[0] == _C.CLASSIFICATION:\n predictions = self.predict_proba(valid_data)\n else:\n predictions = self.predict(valid_data)\n metric_scores = calculate_metric_scores(metrics=metrics,\n predictions=predictions,\n gt_labels=ground_truth)\n return metric_scores\n\n def _internal_predict(self, test_data, get_original_labels=True, get_probabilities=False):\n assert self.net is not None\n assert self.config is not None\n if not isinstance(test_data, TabularDataset):\n if isinstance(test_data, (list, dict)):\n test_data = pd.DataFrame(test_data)\n test_data = TabularDataset(test_data,\n columns=self._feature_columns,\n 
column_properties=self._column_properties)\n processed_test = self._preprocessor.process_test(test_data)\n inference_batch_size = self.config.optimization.per_device_batch_size\\\n * self.config.optimization.val_batch_size_mult\n test_dataloader = DataLoader(processed_test,\n batch_size=inference_batch_size,\n shuffle=False,\n batchify_fn=self._preprocessor.batchify(is_test=True))\n test_predictions = _classification_regression_predict(self._net,\n dataloader=test_dataloader,\n problem_type=self._problem_types[0],\n has_label=False)\n if self._problem_types[0] == _C.CLASSIFICATION:\n if get_probabilities:\n return test_predictions\n else:\n test_predictions = test_predictions.argmax(axis=-1)\n if get_original_labels:\n test_predictions = np.array(\n list(map(self._column_properties[self._label_columns[0]].inv_transform,\n test_predictions)))\n return test_predictions\n\n def predict_proba(self, test_data):\n \"\"\"Predict class probabilities instead of class labels (for classification tasks).\n\n Parameters\n ----------\n test_data : `pandas.DataFrame`, `TabularPrediction.Dataset`, or str\n The test data to get predictions for. Can be DataFrame/Dataset or a file that can be loaded into DataFrame/Dataset.\n\n Returns\n -------\n probabilities : array\n The predicted class probabilities for each sample. Shape of this array is (#Samples, num_class).\n \"\"\"\n assert self.problem_types[0] == _C.CLASSIFICATION\n return self._internal_predict(test_data,\n get_original_labels=False,\n get_probabilities=True)\n\n def predict(self, test_data, get_original_labels=True):\n \"\"\"Make predictions on new data.\n\n Parameters\n ----------\n test_data : `pandas.DataFrame`, `TabularPrediction.Dataset`, or str\n The test data to get predictions for. Can be DataFrame/Dataset or a file that can be loaded into DataFrame/Dataset.\n get_original_labels : bool, default = True\n Whether or not predictions should be formatted in terms of the original labels.\n For example, the labels might be \"entailment\" or \"not_entailment\" and predictions could either be of this form (if `True`) or integer-indices corresponding to these classes (if `False`).\n\n Returns\n -------\n predictions : array\n The predictions for each sample. 
Shape of this array is (#Samples,).\n \"\"\"\n return self._internal_predict(test_data,\n get_original_labels=get_original_labels,\n get_probabilities=False)\n\n def save(self, dir_path):\n \"\"\"Save this model to disk.\n\n Parameters\n ----------\n dir_path : str\n Directory where the model should be saved.\n \"\"\"\n os.makedirs(dir_path, exist_ok=True)\n self.net.save_parameters(os.path.join(dir_path, 'net.params'))\n with open(os.path.join(dir_path, 'cfg.yml'), 'w') as of:\n of.write(self.config.dump())\n with open(os.path.join(dir_path, 'column_metadata.json'), 'w') as of:\n json.dump(get_column_property_metadata(self._column_properties),\n of, ensure_ascii=True)\n with open(os.path.join(dir_path, 'assets.json'), 'w') as of:\n json.dump(\n {\n 'label_columns': self._label_columns,\n 'label_shapes': self._label_shapes,\n 'problem_types': self._problem_types,\n 'feature_columns': self._feature_columns\n }, of, ensure_ascii=True)\n\n @classmethod\n def load(cls, dir_path):\n \"\"\"Load a model object previously produced by `fit()` from disk and return this object.\n It is highly recommended the predictor be loaded with the exact AutoGluon version it was fit with.\n\n\n Parameters\n ----------\n dir_path : str\n Path to directory where this model was previously saved.\n \n Returns\n -------\n model\n A `BertForTextPredictionBasic` object that can be used for making predictions on new data.\n \"\"\"\n loaded_config = cls.default_config().clone_merge(os.path.join(dir_path, 'cfg.yml'))\n with open(os.path.join(dir_path, 'assets.json'), 'r') as f:\n assets = json.load(f)\n label_columns = assets['label_columns']\n feature_columns = assets['feature_columns']\n label_shapes = assets['label_shapes']\n problem_types = assets['problem_types']\n column_properties = get_column_properties_from_metadata(\n os.path.join(dir_path, 'column_metadata.json'))\n backbone_model_cls, backbone_cfg, tokenizer, backbone_params_path, _ \\\n = get_backbone(loaded_config.model.backbone.name)\n # Initialize the preprocessor\n preprocessor = TabularBasicBERTPreprocessor(\n tokenizer=tokenizer,\n column_properties=column_properties,\n label_columns=label_columns,\n max_length=loaded_config.model.preprocess.max_length,\n merge_text=loaded_config.model.preprocess.merge_text)\n text_backbone = backbone_model_cls.from_cfg(backbone_cfg)\n net = BERTForTabularBasicV1(text_backbone=text_backbone,\n feature_field_info=preprocessor.feature_field_info(),\n label_shape=label_shapes[0],\n cfg=loaded_config.model.network)\n net.load_parameters(os.path.join(dir_path, 'net.params'),\n ctx=mx.cpu())\n model = cls(column_properties=column_properties,\n label_columns=label_columns,\n feature_columns=feature_columns,\n label_shapes=label_shapes,\n problem_types=problem_types,\n stopping_metric=None,\n log_metrics=None,\n base_config=loaded_config)\n model._net = net\n model._preprocessor = preprocessor\n model._config = loaded_config\n return model\n",
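A brief, hedged usage sketch of the two metric helpers defined at the top of the file above (`calculate_metric_scores` and `is_better_score`), mirroring the validation-time early-stopping comparison inside `train_function`. The module name `text_prediction_utils` and the dummy arrays are assumptions made purely for illustration; this is not part of the stored file.

import numpy as np
# Assumed: the file above is importable as `text_prediction_utils` (hypothetical name).
from text_prediction_utils import calculate_metric_scores, is_better_score

gt_labels = np.array([0, 1, 1, 0])                   # dummy ground-truth class ids
predictions = np.array([[0.9, 0.1],                  # dummy class probabilities, shape (N, num_class)
                        [0.2, 0.8],
                        [0.4, 0.6],
                        [0.7, 0.3]])

# Returns an OrderedDict of metric name -> score; for these toy inputs
# the predictions match the labels, so acc, f1 and auc all come out as 1.0.
scores = calculate_metric_scores(['acc', 'f1', 'auc'], predictions, gt_labels)

stopping_metric = 'acc'
best_score = None
new_score = scores[stopping_metric]
if best_score is None or is_better_score(stopping_metric, best_score, new_score):
    best_score = new_score   # a training loop would save a checkpoint and reset its patience counter here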
"import numpy as np\nimport mxnet as mx\nfrom autogluon.core.utils import *\nfrom .dataset import *\nfrom autogluon.mxnet.task.classifier import Classifier\nfrom autogluon.core import AutoGluonObject\n\n__all__ = ['TextClassificationPredictor']\n\nfrom ..utils import try_import_gluonnlp\n\n\nclass TextClassificationPredictor(Classifier):\n \"\"\"Trained Text Classifier returned by `fit()` that can be used to make predictions on new text data.\n \"\"\"\n def __init__(self, model, transform, test_transform,\n results, scheduler_checkpoint, args):\n self.model = model\n self.use_roberta = 'roberta' in args.net\n self.transform = transform\n self.test_transform = test_transform\n self.results = self._format_results(results)\n self.scheduler_checkpoint = scheduler_checkpoint\n self.args = args\n\n def predict(self, X):\n \"\"\"Predict class-index of a given sentence / text-snippet.\n \n Parameters\n ----------\n X : str\n The input sentence we should classify.\n \n Examples\n --------\n >>> class_index = predictor.predict('this is cool')\n \n Returns\n -------\n Int corresponding to index of the predicted class.\n \"\"\"\n proba = self.predict_proba(X)\n ind = mx.nd.argmax(proba, axis=1).astype('int')\n return ind\n\n def predict_proba(self, X):\n \"\"\"Predict class-probabilities of a given sentence / text-snippet.\n \n Parameters\n ----------\n X : str\n The input sentence we should classify.\n \n Examples\n --------\n >>> class_probs = predictor.predict_proba('this is cool')\n \n Returns\n -------\n `mxnet.NDArray` containing predicted probabilities of each class.\n \"\"\"\n inputs = self.test_transform(X)\n X, valid_length, segment_id = [mx.nd.array(np.expand_dims(x, 0)) for x in inputs]\n if self.use_roberta:\n pred = self.model(X, valid_length)\n else:\n pred = self.model(X, segment_id, valid_length)\n return mx.nd.softmax(pred)\n\n def evaluate(self, dataset, ctx=[mx.cpu()]):\n \"\"\"Evaluate predictive performance of trained text classifier using given test data.\n \n Parameters\n ----------\n dataset : :class:`autogluon.task.TextClassification.Dataset`\n The dataset containing test sentences (must be in same format as the training dataset provided to fit).\n ctx : List of `mxnet.context` elements.\n Determines whether to use CPU or GPU(s), options include: `[mx.cpu()]` or `[mx.gpu()]`.\n \n Examples\n --------\n >>> from autogluon.text import TextClassification as task\n >>> dataset = task.Dataset(test_path='~/data/test')\n >>> test_performance = predictor.evaluate(dataset)\n \"\"\"\n args = self.args\n net = self.model\n if isinstance(dataset, AutoGluonObject):\n dataset = dataset.init()\n if isinstance(dataset, AbstractGlueTask) or isinstance(dataset, AbstractCustomTask):\n dataset = dataset.get_dataset('dev')\n if isinstance(ctx, list):\n ctx = ctx[0]\n\n metric = mx.metric.Accuracy()\n dataset = dataset.transform(self.transform)\n vocab = self.transform.vocab\n pad_val = vocab[vocab.padding_token]\n nlp = try_import_gluonnlp()\n batchify_fn = nlp.data.batchify.Tuple(\n nlp.data.batchify.Pad(axis=0, pad_val=pad_val), # input\n nlp.data.batchify.Stack(), # length\n nlp.data.batchify.Pad(axis=0, pad_val=0), # segment\n nlp.data.batchify.Stack('int32')) # label\n loader_dev = mx.gluon.data.DataLoader(\n dataset,\n batch_size=args.dev_batch_size,\n num_workers=args.num_workers,\n shuffle=False,\n batchify_fn=batchify_fn)\n\n eval_func(net, loader_dev, metric, ctx, self.use_roberta)\n _, test_reward = metric.get()\n return test_reward\n\ndef eval_func(model, loader_dev, metric, ctx, 
use_roberta):\n \"\"\"Evaluate the model on validation dataset.\"\"\"\n metric.reset()\n for batch_id, seqs in enumerate(loader_dev):\n input_ids, valid_length, segment_ids, label = seqs\n input_ids = input_ids.as_in_context(ctx)\n valid_length = valid_length.as_in_context(ctx).astype('float32')\n label = label.as_in_context(ctx)\n if use_roberta:\n out = model(input_ids, valid_length)\n else:\n out = model(input_ids, segment_ids.as_in_context(ctx), valid_length)\n metric.update([label], [out])\n\n metric_nm, metric_val = metric.get()\n if not isinstance(metric_nm, list):\n metric_nm, metric_val = [metric_nm], [metric_val]\n mx.nd.waitall()\n return metric_nm, metric_val\n"
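A minimal, self-contained illustration of the metric-update pattern that `eval_func` above relies on: `mx.metric.Accuracy` consumes `(labels, outputs)` batch by batch via `update`, and `get()` returns the aggregated score. The tensors below are dummy values chosen only to show the call signature used in the file above.

import mxnet as mx

metric = mx.metric.Accuracy()
labels = mx.nd.array([0, 1, 1])                 # dummy ground-truth labels
outputs = mx.nd.array([[0.8, 0.2],              # dummy class scores (argmax -> 0, 1, 0)
                       [0.1, 0.9],
                       [0.6, 0.4]])
metric.update([labels], [outputs])              # same call shape as metric.update([label], [out]) in eval_func
name, value = metric.get()                      # ('accuracy', 0.666...)
print(name, value)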
] | [
[
"numpy.hstack",
"numpy.log",
"numpy.sqrt",
"numpy.random.seed",
"numpy.abs",
"numpy.ones",
"numpy.testing.assert_almost_equal",
"numpy.random.uniform",
"numpy.array"
],
[
"numpy.square",
"sklearn.metrics.roc_auc_score",
"numpy.abs",
"numpy.arange",
"scipy.stats.pearsonr",
"pandas.DataFrame",
"numpy.concatenate",
"numpy.ceil",
"scipy.stats.spearmanr"
],
[
"numpy.expand_dims"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [
"0.13",
"1.6",
"0.14",
"1.10",
"0.15",
"1.4",
"1.3",
"1.9",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"0.12",
"1.0",
"0.17",
"0.16",
"1.8"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
labscript-suite-bitbucket-archive/shjohnst-runviewer--forked-from--labscript_suite-runviewer | [
"78d7be530bbfd005744b3a6b1cd3f1beb5fd7fe9"
] | [
"__main__.py"
] | [
"#####################################################################\n# #\n# /main.pyw #\n# #\n# Copyright 2014, Monash University #\n# #\n# This file is part of the program runviewer, in the labscript #\n# suite (see http://labscriptsuite.org), and is licensed under the #\n# Simplified BSD License. See the license.txt file in the root of #\n# the project for the full license. #\n# #\n#####################################################################\nfrom __future__ import division, unicode_literals, print_function, absolute_import\nfrom labscript_utils import PY2\n\nimport os\nimport sys\nimport time\nimport threading\nimport logging\nimport ctypes\nimport socket\nif PY2:\n str = unicode\n from Queue import Queue\nelse:\n from queue import Queue\nimport ast\nimport pprint\n\nimport signal\n# Quit on ctrl-c\nsignal.signal(signal.SIGINT, signal.SIG_DFL)\n\nimport labscript_utils.excepthook\n\n# Set working directory to runviewer folder, resolving symlinks\nrunviewer_dir = os.path.dirname(os.path.realpath(__file__))\nos.chdir(runviewer_dir)\n\ntry:\n from labscript_utils import check_version\nexcept ImportError:\n raise ImportError('Require labscript_utils > 2.1.0')\n\ncheck_version('labscript_utils', '2.6.1', '3')\ncheck_version('qtutils', '2.0.0', '3.0.0')\ncheck_version('zprocess', '1.1.2', '3')\n\nfrom labscript_utils.setup_logging import setup_logging\nlogger = setup_logging('runviewer')\nlabscript_utils.excepthook.set_logger(logger)\n\nfrom zprocess import zmq_get, ZMQServer\nimport zprocess.locking\nimport labscript_utils.h5_lock\nimport h5py\nzprocess.locking.set_client_process_name('runviewer')\n\n# This must be bumped until after the h5_lock import\n# This is because the check imports pyqtgraph, which imports h5py\n# h5py must be imported after h5_lock, thus we do the check here\ncheck_version('pyqtgraph', '0.9.10', '1')\n\nfrom qtutils.qt.QtCore import *\nfrom qtutils.qt.QtGui import *\nfrom qtutils.qt.QtWidgets import *\nfrom qtutils.qt.QtCore import pyqtSignal as Signal\n\nimport numpy\nfrom scipy import interpolate\n\n# must be imported after PySide/PyQt4\nimport pyqtgraph as pg\npg.setConfigOption('background', 'w')\npg.setConfigOption('foreground', 'k')\n\nfrom qtutils import *\nimport qtutils.icons\nfrom labscript_utils.connections import ConnectionTable\nimport labscript_devices\n\nfrom labscript_utils.labconfig import LabConfig, config_prefix\n\nfrom runviewer.resample import resample as _resample\n\n\ndef set_win_appusermodel(window_id):\n from labscript_utils.winshell import set_appusermodel, appids, app_descriptions\n icon_path = os.path.abspath('runviewer.ico')\n executable = sys.executable.lower()\n if not executable.endswith('w.exe'):\n executable = executable.replace('.exe', 'w.exe')\n relaunch_command = executable + ' ' + os.path.abspath(__file__.replace('.pyc', '.py'))\n relaunch_display_name = app_descriptions['runviewer']\n set_appusermodel(window_id, appids['runviewer'], icon_path, relaunch_command, relaunch_display_name)\n\n\nSHOT_MODEL__COLOUR_INDEX = 0\nSHOT_MODEL__SHUTTER_INDEX = 1\nSHOT_MODEL__CHECKBOX_INDEX = 2\nSHOT_MODEL__PATH_INDEX = 1\nCHANNEL_MODEL__CHECKBOX_INDEX = 0\nCHANNEL_MODEL__CHANNEL_INDEX = 0\n\n\ndef format_time(input_sec):\n # inout is the time in sec\n if input_sec >= 1:\n return \"{:.3g}s\".format(input_sec)\n elif input_sec >= 1e-3:\n return \"{:.3g}ms\".format(input_sec * 1e3)\n elif input_sec >= 1e-6:\n return \"{:.3g}us\".format(input_sec * 1e6)\n elif input_sec >= 1e-9:\n return \"{:.3g}ns\".format(input_sec * 1e9)\n elif 
input_sec >= 1e-12:\n return \"{:.3g}ps\".format(input_sec * 1e12)\n elif input_sec >= 1e-15:\n return \"{:.3g}fs\".format(input_sec * 1e15)\n elif input_sec >= 1e-18:\n return \"{:.3g}as\".format(input_sec * 1e18)\n else:\n return str(input_sec) + \"s\"\n\n\ndef int_to_enum(enum_list, value):\n \"\"\"stupid hack to work around the fact that PySide screws with the type of a variable when it goes into a model. Enums are converted to ints, which then\n can't be interpreted by QColor correctly (for example)\n unfortunately Qt doesn't provide a python list structure of enums, so you have to build the list yourself.\n \"\"\"\n\n for item in enum_list:\n if item == value:\n return item\n return value\n\n\nclass ScaleHandler():\n\n def __init__(self, input_times, stop_time):\n # input_times is a list (may be unsorted) of times which should be scaled evenly with target_length\n # an input list of [1,2,4,6] and target_length of 1.0 will result in:\n # get_scaled_time(1) -> 1\n # get_scaled_time(1.5) -> 1.5\n # get_scaled_time(3) -> 2.5\n # get_scaled_time(4) -> 3\n # get_scaled_time(5) -> 3.5 ...\n self.org_stop_time = float(stop_time)\n\n if 0 not in input_times:\n input_times.append(0)\n\n if self.org_stop_time not in input_times:\n input_times.append(self.org_stop_time)\n\n if not all((x >= 0) and (x <= self.org_stop_time) for x in input_times):\n raise Exception('shot contains at least one marker before t=0 and/or after the stop time. Non-linear time currently does not support this.')\n\n unscaled_times = sorted(input_times)\n target_length = self.org_stop_time / float(len(unscaled_times)-1)\n scaled_times = [target_length*i for i in range(len(input_times))]\n\n # append values for linear scaling before t=0 and after stop time\n unscaled_times = [-1e-9] + unscaled_times + [self.org_stop_time + 1e-9]\n scaled_times = [-1e-9] + scaled_times + [self.org_stop_time + 1e-9]\n\n self.get_scaled_time = interpolate.interp1d(unscaled_times, scaled_times, assume_sorted=False, bounds_error=False, fill_value='extrapolate')\n self.get_unscaled_time = interpolate.interp1d(scaled_times, unscaled_times, assume_sorted=False, bounds_error=False, fill_value='extrapolate')\n\n self.scaled_stop_time = self.get_scaled_time(self.org_stop_time)\n\n\nclass ColourDelegate(QItemDelegate):\n\n def __init__(self, view, *args, **kwargs):\n QItemDelegate.__init__(self, *args, **kwargs)\n self._view = view\n self._colours = [Qt.black, Qt.red, Qt.green, Qt.blue, Qt.cyan, Qt.magenta, Qt.yellow, Qt.gray, Qt.darkRed, Qt.darkGreen, Qt.darkBlue, Qt.darkCyan, Qt.darkMagenta, Qt.darkYellow, Qt.darkGray, Qt.lightGray]\n\n self._current_colour_index = 0\n\n def get_next_colour(self):\n colour = self._colours[self._current_colour_index]\n self._current_colour_index += 1\n if self._current_colour_index >= len(self._colours):\n self._current_colour_index = 0\n return colour\n\n def createEditor(self, parent, option, index):\n editor = QComboBox(parent)\n #colours = QColor.colorNames()\n for colour in self._colours:\n pixmap = QPixmap(20, 20)\n pixmap.fill(colour)\n editor.addItem(QIcon(pixmap), '', colour)\n\n editor.activated.connect(lambda index, editor=editor: self._view.commitData(editor))\n editor.activated.connect(lambda index, editor=editor: self._view.closeEditor(editor, QAbstractItemDelegate.NoHint))\n QTimer.singleShot(10, editor.showPopup)\n\n return editor\n\n def setEditorData(self, editor, index):\n value = index.model().data(index, Qt.UserRole)\n for i in range(editor.count()):\n if editor.itemData(i) == value():\n 
editor.setCurrentIndex(i)\n break\n\n def setModelData(self, editor, model, index):\n icon = editor.itemIcon(editor.currentIndex())\n colour = editor.itemData(editor.currentIndex())\n\n # Note, all data being written to the model must be read out of the editor PRIOR to calling model.setData()\n # This is because a call to model.setData() triggers setEditorData(), which messes up subsequent\n # calls to the editor to determine the currently selected item/data\n model.setData(index, icon, Qt.DecorationRole)\n model.setData(index, lambda clist=self._colours, colour=colour: int_to_enum(clist, colour), Qt.UserRole)\n\n def updateEditorGeometry(self, editor, option, index):\n editor.setGeometry(option.rect)\n\n\nclass RunviewerMainWindow(QMainWindow):\n # A signal for when the window manager has created a new window for this widget:\n newWindow = Signal(int)\n\n def event(self, event):\n result = QMainWindow.event(self, event)\n if event.type() == QEvent.WinIdChange:\n self.newWindow.emit(self.effectiveWinId())\n return result\n\n\nclass RunViewer(object):\n def __init__(self, exp_config):\n self.ui = UiLoader().load(os.path.join(os.path.dirname(os.path.realpath(__file__)), 'main.ui'), RunviewerMainWindow())\n\n # setup shot treeview model\n self.shot_model = QStandardItemModel()\n self.shot_model.setHorizontalHeaderLabels(['colour', 'shutters', 'path'])\n self.ui.shot_treeview.setModel(self.shot_model)\n self.ui.shot_treeview.resizeColumnToContents(1)\n self.shot_model.itemChanged.connect(self.on_shot_selection_changed)\n self.shot_colour_delegate = ColourDelegate(self.ui.shot_treeview)\n self.ui.shot_treeview.setItemDelegateForColumn(0, self.shot_colour_delegate)\n\n # setup channel treeview model\n self.channel_model = QStandardItemModel()\n self.channel_model.setHorizontalHeaderLabels(['channel'])\n self.ui.channel_treeview.setModel(self.channel_model)\n self.channel_model.itemChanged.connect(self.update_plots)\n\n # create a hidden plot widget that all plots can link their x-axis too\n hidden_plot = pg.PlotWidget(name='runviewer - time axis link')\n\n hidden_plot.setMinimumHeight(1)\n hidden_plot.setMaximumHeight(1)\n hidden_plot.setLabel('bottom', 'Time', units='s')\n hidden_plot.setLabel('left', \" \")\n hidden_plot.showAxis('right', True)\n hidden_plot_item = hidden_plot.plot([0, 1], [0, 0])\n self._hidden_plot = (hidden_plot, hidden_plot_item)\n self.ui.hidden_plot_layout.addWidget(hidden_plot)\n\n time_axis_plot = pg.PlotWidget()\n time_axis_plot.setMinimumHeight(120)\n time_axis_plot.setMaximumHeight(120)\n time_axis_plot.setLabel('bottom', 'Time', units='s')\n time_axis_plot.showAxis('right', True)\n time_axis_plot.setXLink('runviewer - time axis link')\n time_axis_plot.setMouseEnabled(y=False)\n time_axis_plot.getAxis('left').setTicks([]) # hide y ticks in the left & right side. 
only show time axis\n time_axis_plot.getAxis('right').setTicks([])\n time_axis_plot.setLabel('left', 'Slots')\n time_axis_plot.scene().sigMouseMoved.connect(lambda pos: self.mouseMovedEvent(pos, time_axis_plot, \"Slots\"))\n time_axis_plot_item = time_axis_plot.plot([0, 1], [0, 0], pen=(255, 255, 255))\n self._time_axis_plot = (time_axis_plot, time_axis_plot_item)\n\n self.all_markers = {}\n self.all_marker_items = {}\n markers_plot = pg.PlotWidget(name='runviewer - markers')\n markers_plot.setMinimumHeight(120)\n markers_plot.setMaximumHeight(120)\n markers_plot.showAxis('top', False)\n markers_plot.showAxis('bottom', False)\n markers_plot.showAxis('left', True)\n markers_plot.showAxis('right', True)\n markers_plot.getAxis('left').setTicks([])\n markers_plot.getAxis('right').setTicks([])\n markers_plot.setLabel('left', 'Markers')\n markers_plot.setXLink('runviewer - time axis link')\n markers_plot.setMouseEnabled(y=False)\n markers_plot.scene().sigMouseMoved.connect(lambda pos: self.mouseMovedEvent(pos, markers_plot, \"Markers\"))\n markers_plot_item = markers_plot.plot([])\n self._markers_plot = (markers_plot, markers_plot_item)\n\n self.ui.verticalLayout_9.insertWidget(1,markers_plot)\n self.ui.plot_layout.addWidget(time_axis_plot)\n\n # add some icons\n self.ui.add_shot.setIcon(QIcon(':/qtutils/fugue/plus'))\n self.ui.remove_shots.setIcon(QIcon(':/qtutils/fugue/minus'))\n self.ui.enable_selected_shots.setIcon(QIcon(':/qtutils/fugue/ui-check-box'))\n self.ui.disable_selected_shots.setIcon(QIcon(':/qtutils/fugue/ui-check-box-uncheck'))\n self.ui.group_channel.setIcon(QIcon(':/qtutils/fugue/layers-group'))\n self.ui.delete_group.setIcon(QIcon(':/qtutils/fugue/layers-ungroup'))\n self.ui.channel_move_to_top.setIcon(QIcon(':/qtutils/fugue/arrow-stop-090'))\n self.ui.channel_move_up.setIcon(QIcon(':/qtutils/fugue/arrow-090'))\n self.ui.channel_move_down.setIcon(QIcon(':/qtutils/fugue/arrow-270'))\n self.ui.channel_move_to_bottom.setIcon(QIcon(':/qtutils/fugue/arrow-stop-270'))\n self.ui.reset_x_axis.setIcon(QIcon(':/qtutils/fugue/clock-history'))\n self.ui.reset_y_axis.setIcon(QIcon(':/qtutils/fugue/magnifier-history'))\n self.ui.toggle_tooltip.setIcon(QIcon(':/qtutils/fugue/ui-tooltip-balloon'))\n self.ui.non_linear_time.setIcon(QIcon(':/qtutils/fugue/ui-ruler'))\n\n self.ui.actionOpen_Shot.setIcon(QIcon(':/qtutils/fugue/plus'))\n self.ui.actionQuit.setIcon(QIcon(':/qtutils/fugue/cross-button'))\n self.ui.actionLoad_channel_config.setIcon(QIcon(':/qtutils/fugue/folder-open'))\n self.ui.actionSave_channel_config.setIcon(QIcon(':/qtutils/fugue/disk'))\n\n # disable buttons that are not yet implemented to help avoid confusion!\n self.ui.group_channel.setEnabled(False)\n self.ui.delete_group.setEnabled(False)\n\n # connect signals\n self.ui.reset_x_axis.clicked.connect(self.on_x_axis_reset)\n self.ui.reset_y_axis.clicked.connect(self.on_y_axes_reset)\n self.ui.channel_move_up.clicked.connect(self._move_up)\n self.ui.channel_move_down.clicked.connect(self._move_down)\n self.ui.channel_move_to_top.clicked.connect(self._move_top)\n self.ui.channel_move_to_bottom.clicked.connect(self._move_bottom)\n self.ui.enable_selected_shots.clicked.connect(self._enable_selected_shots)\n self.ui.disable_selected_shots.clicked.connect(self._disable_selected_shots)\n self.ui.add_shot.clicked.connect(self.on_add_shot)\n self.ui.markers_comboBox.currentIndexChanged.connect(self._update_markers)\n self.ui.non_linear_time.toggled.connect(self._toggle_non_linear_time)\n 
self.ui.remove_shots.clicked.connect(self.on_remove_shots)\n\n self.ui.actionOpen_Shot.triggered.connect(self.on_add_shot)\n self.ui.actionQuit.triggered.connect(self.ui.close)\n self.ui.actionLoad_channel_config.triggered.connect(self.on_load_channel_config)\n self.ui.actionSave_channel_config.triggered.connect(self.on_save_channel_config)\n\n if os.name == 'nt':\n self.ui.newWindow.connect(set_win_appusermodel)\n\n self.ui.show()\n\n # internal variables\n #self._channels_list = {}\n self.plot_widgets = {}\n self.plot_items = {}\n self.shutter_lines = {}\n\n try:\n self.default_config_path = os.path.join(exp_config.get('DEFAULT', 'app_saved_configs'), 'runviewer')\n except LabConfig.NoOptionError:\n exp_config.set('DEFAULT', 'app_saved_configs', os.path.join('%(labscript_suite)s', 'userlib', 'app_saved_configs', '%(experiment_name)s'))\n self.default_config_path = os.path.join(exp_config.get('DEFAULT', 'app_saved_configs'), 'runviewer')\n if not os.path.exists(self.default_config_path):\n os.makedirs(self.default_config_path)\n\n self.last_opened_shots_folder = exp_config.get('paths', 'experiment_shot_storage')\n\n # start resample thread\n self._resample = False\n self._thread = threading.Thread(target=self._resample_thread)\n self._thread.daemon = True\n self._thread.start()\n\n # start shots_to_process_queue monitoring thread\n self._shots_to_process_thread = threading.Thread(target=self._process_shots)\n self._shots_to_process_thread.daemon = True\n self._shots_to_process_thread.start()\n\n self.scale_time = False\n self.scalehandler = None\n\n def _update_markers(self, index):\n for line, plot in self.all_marker_items.items():\n plot.removeItem(line)\n self.all_marker_items = {}\n\n marker_index = self.ui.markers_comboBox.currentIndex()\n shot = self.ui.markers_comboBox.itemData(marker_index)\n self.all_markers = shot.markers if index > 0 else {}\n\n self._update_non_linear_time(changed_shot=True)\n\n times = sorted(list(self.all_markers.keys()))\n for i, (t, m) in enumerate(sorted(self.all_markers.items())):\n if i < len(times)-1:\n delta_t = times[i+1] - t\n else:\n delta_t = shot.stop_time - t\n\n if self.scale_time:\n t = self.scalehandler.get_scaled_time(t)\n\n color = m['color']\n color = QColor(color[0], color[1], color[2])\n label = m['label'].decode() if isinstance( m['label'], bytes) else str(m['label'])\n\n line = self._markers_plot[0].addLine(x=t, pen=pg.mkPen(color=color, width=1.5, style=Qt.DashLine), label=label, labelOpts= {\"color\": color, \"fill\": QColor(255, 255, 255, 255), \"rotateAxis\":(1, 0), \"anchors\": [(0.5, 0),(0.5, 0)]} )\n self.all_marker_items[line] = self._markers_plot[0]\n\n line = self._time_axis_plot[0].addLine(x=t, pen=pg.mkPen(color=color, width=1.5, style=Qt.DashLine), label=format_time(delta_t), labelOpts= {\"color\": color, \"fill\": QColor(255, 255, 255, 255), \"rotateAxis\":(1, 0), \"anchors\": [(0.5, 0),(0.5, 0)]} )\n self.all_marker_items[line] = self._time_axis_plot[0]\n\n self.update_plots()\n\n def mouseMovedEvent(self, position, ui, name):\n if self.ui.toggle_tooltip.isChecked():\n v = ui.scene().views()[0]\n viewP = v.mapFromScene(position)\n glob_pos = ui.mapToGlobal(viewP) # convert to Screen x\n glob_zero = ui.mapToGlobal(QPoint(0, 0))\n self._global_start_x = glob_zero.x()\n self._global_start_y = glob_zero.y()\n self._global_width = ui.width()\n self._global_height = ui.height()\n\n coord_pos = ui.plotItem.vb.mapSceneToView(position)\n\n if len(self.get_selected_shots_and_colours()) > 0:\n if self.scale_time and 
self.scalehandler is not None:\n unscaled_t = float(self.scalehandler.get_unscaled_time(coord_pos.x()))\n else:\n unscaled_t = float(coord_pos.x())\n if unscaled_t is not None:\n pos = QPoint(glob_pos.x(), glob_pos.y())\n plot_data = ui.plotItem.listDataItems()[0].getData()\n if plot_data[0] is not None and unscaled_t is not None:\n nearest_index = numpy.abs(plot_data[0] - unscaled_t).argmin() - 1\n y_val = \"{:.2f}\".format(plot_data[1][nearest_index])\n else:\n y_val = '-'\n text = \"Plot: {} \\nTime: {:.9f}s\\nValue: {}\".format(name, unscaled_t, y_val)\n QToolTip.showText(pos, text)\n\n def _toggle_non_linear_time(self, state):\n self.scale_time = state\n self._update_non_linear_time()\n\n def _update_non_linear_time(self, changed_shot=False):\n old_scalerhandler = self.scalehandler\n marker_index = self.ui.markers_comboBox.currentIndex()\n shot = self.ui.markers_comboBox.itemData(marker_index)\n if shot is not None and self.scale_time:\n self.scalehandler = shot.scalehandler\n else:\n self.scalehandler = None\n\n # combine markers and shutter lines\n markers = list(self.all_marker_items.keys())\n for channel in self.shutter_lines:\n for shot in self.shutter_lines[channel]:\n for line in self.shutter_lines[channel][shot][0]:\n markers.append(line)\n for line in self.shutter_lines[channel][shot][1]:\n markers.append(line)\n\n # Move all Markes/Shutter Lines to new position\n for marker in markers:\n pos = marker.pos()\n\n if old_scalerhandler is None:\n unscaled_x = pos.x()\n else:\n unscaled_x = old_scalerhandler.get_unscaled_time(pos.x())\n\n if self.scale_time and self.scalehandler is not None:\n new_x = self.scalehandler.get_scaled_time(unscaled_x)\n else:\n new_x = unscaled_x\n\n pos.setX(new_x)\n marker.setPos(pos)\n\n if shot is not None and self.scale_time:\n self._time_axis_plot[0].getAxis(\"bottom\").setTicks([[[0, 0], [shot.stop_time, shot.stop_time]]])\n for plot in self.plot_widgets.values():\n plot.getAxis(\"bottom\").setTicks([[[0, 0], [shot.stop_time, shot.stop_time]]])\n else:\n self._time_axis_plot[0].getAxis(\"bottom\").setTicks(None)\n for plot in self.plot_widgets.values():\n plot.getAxis(\"bottom\").setTicks(None)\n\n for plot in self.plot_widgets.values():\n for item in plot.getPlotItem().items:\n if isinstance(item, pg.PlotDataItem):\n if old_scalerhandler is not None:\n unscaled_t = old_scalerhandler.get_unscaled_time(item.xData)\n else:\n unscaled_t = item.xData\n\n if self.scalehandler is not None:\n item.setData(self.scalehandler.get_scaled_time(unscaled_t), item.yData)\n else:\n item.setData(unscaled_t, item.yData)\n\n self._resample = True\n\n def _process_shots(self):\n while True:\n filepath = shots_to_process_queue.get()\n inmain_later(self.load_shot, filepath)\n\n def on_load_channel_config(self):\n config_file = QFileDialog.getOpenFileName(self.ui, \"Select file to load\", self.default_config_path, \"Config files (*.ini)\")\n if isinstance(config_file, tuple):\n config_file, _ = config_file\n if config_file:\n runviewer_config = LabConfig(config_file)\n try:\n channels = ast.literal_eval(runviewer_config.get('runviewer_state', 'Channels'))\n except (LabConfig.NoOptionError, LabConfig.NoSectionError):\n channels = {}\n\n for row, (channel, checked) in enumerate(channels):\n check_items = self.channel_model.findItems(channel)\n if len(check_items) == 0:\n items = []\n check_item = QStandardItem(channel)\n check_item.setEditable(False)\n check_item.setCheckable(True)\n items.append(check_item)\n check_item.setCheckState(Qt.Checked if checked else 
Qt.Unchecked)\n check_item.setEnabled(False)\n self.channel_model.insertRow(row, items)\n else:\n check_item = check_items[0]\n check_item.setCheckState(Qt.Checked if checked else Qt.Unchecked)\n self.channel_model.takeRow(check_item.row())\n self.channel_model.insertRow(row, check_item)\n\n def on_save_channel_config(self):\n save_file = QFileDialog.getSaveFileName(self.ui, 'Select file to save current channel configuration', self.default_config_path, \"config files (*.ini)\")\n if type(save_file) is tuple:\n save_file, _ = save_file\n\n if save_file:\n runviewer_config = LabConfig(save_file)\n\n channels = []\n for row in range(self.channel_model.rowCount()):\n item = self.channel_model.item(row)\n channels.append((item.text(), item.checkState() == Qt.Checked))\n\n runviewer_config.set('runviewer_state', 'Channels', pprint.pformat(channels))\n\n def on_toggle_shutter(self, checked, current_shot):\n for channel in self.shutter_lines:\n for shot in self.shutter_lines[channel]:\n if shot == current_shot:\n for line in self.shutter_lines[channel][shot][0]:\n if checked:\n line.show()\n else:\n line.hide()\n for line in self.shutter_lines[channel][shot][1]:\n if checked:\n line.show()\n else:\n line.hide()\n\n def on_add_shot(self):\n selected_files = QFileDialog.getOpenFileNames(self.ui, \"Select file to load\", self.last_opened_shots_folder, \"HDF5 files (*.h5 *.hdf5)\")\n popup_warning = False\n if isinstance(selected_files, tuple):\n selected_files, _ = selected_files\n # Convert to standard platform specific path, otherwise Qt likes forward slashes:\n selected_files = [os.path.abspath(str(shot_file)) for shot_file in selected_files]\n if len(selected_files) > 0:\n self.last_opened_shots_folder = os.path.dirname(selected_files[0])\n\n for file in selected_files:\n try:\n filepath = str(file)\n # Qt has this weird behaviour where if you type in the name of a file that exists\n # but does not have the extension you have limited the dialog to, the OK button is greyed out\n # but you can hit enter and the file will be selected.\n # So we must check the extension of each file here!\n if filepath.endswith('.h5') or filepath.endswith('.hdf5'):\n self.load_shot(filepath)\n else:\n popup_warning = True\n except:\n popup_warning = True\n raise\n if popup_warning:\n message = QMessageBox()\n message.setText(\"Warning: Some shots were not loaded because they were not valid hdf5 files\")\n message.setIcon(QMessageBox.Warning)\n message.setWindowTitle(\"Runviewer\")\n message.setStandardButtons(QMessageBox.Ok)\n message.exec_()\n\n def on_remove_shots(self):\n # Get the selection model from the treeview\n selection_model = self.ui.shot_treeview.selectionModel()\n # Create a list of select row indices\n selected_row_list = [index.row() for index in selection_model.selectedRows()]\n # sort in descending order to prevent index changes of rows to be deleted\n selected_row_list.sort(reverse=True)\n\n reply = QMessageBox.question(self.ui, 'Runviewer', 'Remove {} shots?'.format(len(selected_row_list)),\n QMessageBox.Yes | QMessageBox.No)\n if reply == QMessageBox.No:\n return\n\n for row in selected_row_list:\n item = self.shot_model.item(row, SHOT_MODEL__CHECKBOX_INDEX)\n colour_item = self.shot_model.item(row, SHOT_MODEL__COLOUR_INDEX)\n shutter_item = self.shot_model.item(row, SHOT_MODEL__SHUTTER_INDEX)\n shot = item.data()\n # unselect shot\n item.setCheckState(Qt.Unchecked)\n shutter_item.setCheckState(Qt.Unchecked)\n # remove row\n self.shot_model.removeRow(row)\n del shot\n\n def 
on_shot_selection_changed(self, item):\n if self.shot_model.indexFromItem(item).column() == SHOT_MODEL__CHECKBOX_INDEX:\n\n # add or remove a colour for this shot\n checked = item.checkState()\n row = self.shot_model.indexFromItem(item).row()\n colour_item = self.shot_model.item(row, SHOT_MODEL__COLOUR_INDEX)\n check_shutter = self.shot_model.item(row, SHOT_MODEL__SHUTTER_INDEX)\n\n if checked:\n colour = colour_item.data(Qt.UserRole)\n if colour is not None:\n colour = colour()\n else:\n colour = self.shot_colour_delegate.get_next_colour()\n\n colour_item.setEditable(True)\n pixmap = QPixmap(20, 20)\n pixmap.fill(colour)\n icon = QIcon(pixmap)\n colour_item.setData(lambda clist=self.shot_colour_delegate._colours, colour=colour: int_to_enum(clist, colour), Qt.UserRole)\n colour_item.setData(icon, Qt.DecorationRole)\n shot_combobox_index = self.ui.markers_comboBox.findText(os.path.basename(item.data().path))\n self.ui.markers_comboBox.model().item(shot_combobox_index).setEnabled(True)\n if self.ui.markers_comboBox.currentIndex() == 0:\n self.ui.markers_comboBox.setCurrentIndex(shot_combobox_index)\n if item.data().shutter_times != {}:\n check_shutter.setEnabled(True)\n else:\n check_shutter.setEnabled(False)\n check_shutter.setToolTip(\"This shot doesn't contain shutter markers\")\n else:\n # colour = None\n # icon = None\n shot_combobox_index = self.ui.markers_comboBox.findText(os.path.basename(item.data().path))\n self.ui.markers_comboBox.model().item(shot_combobox_index).setEnabled(False)\n if shot_combobox_index == self.ui.markers_comboBox.currentIndex():\n self.ui.markers_comboBox.setCurrentIndex(0)\n colour_item.setEditable(False)\n check_shutter.setEnabled(False)\n\n # model.setData(index, editor.itemIcon(editor.currentIndex()),\n # model.setData(index, editor.itemData(editor.currentIndex()), Qt.UserRole)\n\n self.update_channels_treeview()\n elif self.shot_model.indexFromItem(item).column() == SHOT_MODEL__COLOUR_INDEX:\n # update the plot colours\n\n # get reference to the changed shot\n current_shot = self.shot_model.item(self.shot_model.indexFromItem(item).row(), SHOT_MODEL__CHECKBOX_INDEX).data()\n\n # find and update the pen of the plot items\n for channel in self.plot_items.keys():\n for shot in self.plot_items[channel]:\n if shot == current_shot:\n colour = item.data(Qt.UserRole)\n self.plot_items[channel][shot].setPen(pg.mkPen(QColor(colour()), width=2))\n elif self.shot_model.indexFromItem(item).column() == SHOT_MODEL__SHUTTER_INDEX:\n current_shot = self.shot_model.item(self.shot_model.indexFromItem(item).row(), SHOT_MODEL__CHECKBOX_INDEX).data()\n self.on_toggle_shutter(item.checkState(), current_shot)\n\n def load_shot(self, filepath):\n shot = Shot(filepath)\n\n # add shot to shot list\n # Create Items\n items = []\n colour_item = QStandardItem('')\n colour_item.setEditable(False)\n colour_item.setToolTip('Double-click to change colour')\n items.append(colour_item)\n\n check_shutter = QStandardItem()\n check_shutter.setCheckable(True)\n check_shutter.setCheckState(Qt.Unchecked) # options are Qt.Checked OR Qt.Unchecked\n check_shutter.setEnabled(False)\n check_shutter.setToolTip(\"Toggle shutter markers\")\n items.append(check_shutter)\n\n check_item = QStandardItem(shot.path)\n check_item.setEditable(False)\n check_item.setCheckable(True)\n check_item.setCheckState(Qt.Unchecked) # options are Qt.Checked OR Qt.Unchecked\n check_item.setData(shot)\n check_item.setToolTip(filepath)\n items.append(check_item)\n # script name\n # path_item = QStandardItem(shot.path)\n # 
path_item.setEditable(False)\n # items.append(path_item)\n self.shot_model.appendRow(items)\n self.ui.markers_comboBox.addItem(os.path.basename(shot.path), shot)\n shot_combobox_index = self.ui.markers_comboBox.findText(os.path.basename(shot.path))\n self.ui.markers_comboBox.model().item(shot_combobox_index).setEnabled(False)\n\n # only do this if we are checking the shot we are adding\n # self.update_channels_treeview()\n\n def get_selected_shots_and_colours(self):\n # get the ticked shots\n ticked_shots = {}\n for i in range(self.shot_model.rowCount()):\n item = self.shot_model.item(i, SHOT_MODEL__CHECKBOX_INDEX)\n colour_item = self.shot_model.item(i, SHOT_MODEL__COLOUR_INDEX)\n shutter_item = self.shot_model.item(i, SHOT_MODEL__SHUTTER_INDEX)\n if item.checkState() == Qt.Checked:\n shot = item.data()\n colour_item_data = colour_item.data(Qt.UserRole)\n ticked_shots[shot] = (colour_item_data(), shutter_item.checkState())\n return ticked_shots\n\n def update_channels_treeview(self):\n ticked_shots = self.get_selected_shots_and_colours()\n\n # get set of channels\n channels = {}\n for shot in ticked_shots.keys():\n channels[shot] = set(shot.channels)\n channels_set = frozenset().union(*channels.values())\n\n # now find channels in channels_set which are not in the treeview, and add them\n # now find channels in channels set which are already in the treeview, but deactivated, and activate them\n treeview_channels_dict = {}\n deactivated_treeview_channels_dict = {}\n for i in range(self.channel_model.rowCount()):\n item = self.channel_model.item(i, CHANNEL_MODEL__CHECKBOX_INDEX)\n # Sanity check\n if str(item.text()) in treeview_channels_dict:\n raise RuntimeError(\"A duplicate channel name was detected in the treeview due to an internal error. Please lodge a bugreport detailing how the channels with the same name appeared in the channel treeview. 
Please restart the application\")\n\n treeview_channels_dict[str(item.text())] = i\n if not item.isEnabled():\n deactivated_treeview_channels_dict[str(item.text())] = i\n treeview_channels = set(treeview_channels_dict.keys())\n deactivated_treeview_channels = set(deactivated_treeview_channels_dict.keys())\n\n # speed up working with self.channel_model by blocking signals and later reenabeling them\n self.channel_model.blockSignals(True)\n\n # find list of channels to work with\n channels_to_add = channels_set.difference(treeview_channels)\n for channel in sorted(channels_to_add):\n items = []\n check_item = QStandardItem(channel)\n check_item.setEditable(False)\n check_item.setCheckable(True)\n check_item.setCheckState(Qt.Unchecked)\n items.append(check_item)\n # channel_name_item = QStandardItem(channel)\n # channel_name_item.setEditable(False)\n # items.append(channel_name_item)\n self.channel_model.appendRow(items)\n\n channels_to_reactivate = deactivated_treeview_channels.intersection(channels_set)\n for channel in channels_to_reactivate:\n for i in range(self.channel_model.columnCount()):\n item = self.channel_model.item(deactivated_treeview_channels_dict[channel], i)\n item.setEnabled(True)\n item.setSelectable(True)\n\n # now find channels in the treeview which are not in the channels_set and deactivate them\n channels_to_deactivate = treeview_channels.difference(channels_set)\n for channel in channels_to_deactivate:\n for i in range(self.channel_model.columnCount()):\n item = self.channel_model.item(treeview_channels_dict[channel], i)\n item.setEnabled(False)\n item.setSelectable(False)\n\n self.channel_model.blockSignals(False)\n self.channel_model.layoutChanged.emit()\n\n # TODO: Also update entries in groups\n\n self.update_plots()\n\n def update_plots(self):\n # get list of selected shots\n ticked_shots = self.get_selected_shots_and_colours()\n\n # SHould we rescale the x-axis?\n # if self._hidden_plot[0].getViewBox.getState()['autoRange'][0]:\n # self._hidden_plot[0].enableAutoRange(axis=pg.ViewBox.XAxis)\n # else:\n # self._hidden_plot[0].enableAutoRange(axis=pg.ViewBox.XAxis, enable=False)\n\n # find stop time of longest ticked shot\n\n largest_stop_time = 0\n stop_time_set = False\n for shot in ticked_shots.keys():\n if shot.stop_time > largest_stop_time:\n largest_stop_time = shot.stop_time\n stop_time_set = True\n if not stop_time_set:\n largest_stop_time = 1.0\n\n # Update the range of the link plot\n self._hidden_plot[1].setData([0, largest_stop_time], [0, 1e-9])\n\n # Update plots\n for i in range(self.channel_model.rowCount()):\n check_item = self.channel_model.item(i, CHANNEL_MODEL__CHECKBOX_INDEX)\n channel = str(check_item.text())\n if check_item.checkState() == Qt.Checked and check_item.isEnabled():\n # we want to show this plot\n # does a plot already exist? 
If yes, show it\n if channel in self.plot_widgets:\n self.plot_widgets[channel].show()\n # update the plot\n # are there are plot items for this channel which are shown that should not be?\n to_delete = []\n for shot in self.plot_items[channel]:\n if shot not in ticked_shots.keys():\n self.plot_widgets[channel].removeItem(self.plot_items[channel][shot])\n # Remove Shutter Markers of unticked Shots\n if shot in self.shutter_lines[channel]:\n for line in self.shutter_lines[channel][shot][0]:\n self.plot_widgets[channel].removeItem(line)\n for line in self.shutter_lines[channel][shot][1]:\n self.plot_widgets[channel].removeItem(line)\n self.shutter_lines[channel].pop(shot)\n to_delete.append(shot)\n for shot in to_delete:\n del self.plot_items[channel][shot]\n\n # do we need to add any plot items for shots that were not previously selected?\n for shot, (colour, shutters_checked) in ticked_shots.items():\n if shot not in self.plot_items[channel]:\n # plot_item = self.plot_widgets[channel].plot(shot.traces[channel][0], shot.traces[channel][1], pen=pg.mkPen(QColor(colour), width=2))\n # Add empty plot as it the custom resampling we do will happen quicker if we don't attempt to first plot all of the data\n plot_item = self.plot_widgets[channel].plot([0, 0], [0], pen=pg.mkPen(QColor(colour), width=2), stepMode=True)\n self.plot_items[channel][shot] = plot_item\n\n # Add Shutter Markers of newly ticked Shots\n self.add_shutter_markers(shot, channel, shutters_checked)\n\n for t, m in self.all_markers.items():\n color = m['color']\n color = QColor(color[0], color[1], color[2])\n if self.scale_time and self.scalehandler is not None:\n t = self.scalehandler.get_scaled_time(t)\n line = self.plot_widgets[channel].addLine(x=t, pen=pg.mkPen(color=color, width=1.5, style=Qt.DashLine))\n self.all_marker_items[line] = self.plot_widgets[channel]\n\n # If no, create one\n else:\n self.create_plot(channel, ticked_shots)\n\n else:\n if channel not in self.plot_widgets:\n self.create_plot(channel, ticked_shots)\n self.plot_widgets[channel].hide()\n\n self._resample = True\n\n def create_plot(self, channel, ticked_shots):\n self.plot_widgets[channel] = pg.PlotWidget() # name=channel)\n self.plot_widgets[channel].setMinimumHeight(200)\n self.plot_widgets[channel].setMaximumHeight(200)\n self.plot_widgets[channel].setLabel('bottom', 'Time', units='s')\n self.plot_widgets[channel].showAxis('right', True)\n self.plot_widgets[channel].showAxis('bottom', True)\n self.plot_widgets[channel].setXLink('runviewer - time axis link')\n self.plot_widgets[channel].sigXRangeChanged.connect(self.on_x_range_changed)\n self.plot_widgets[channel].scene().sigMouseMoved.connect(lambda pos: self.mouseMovedEvent(pos, self.plot_widgets[channel], channel))\n self.ui.plot_layout.insertWidget(self.ui.plot_layout.count() - 1, self.plot_widgets[channel])\n self.shutter_lines[channel] = {} # initialize Storage for shutter lines\n self.plot_items.setdefault(channel, {})\n\n has_units = False\n units = ''\n for shot, (colour, shutters_checked) in ticked_shots.items():\n if channel in shot.traces:\n # plot_item = self.plot_widgets[channel].plot(shot.traces[channel][0], shot.traces[channel][1], pen=pg.mkPen(QColor(colour), width=2))\n # Add empty plot as it the custom resampling we do will happen quicker if we don't attempt to first plot all of the data\n plot_item = self.plot_widgets[channel].plot([0, 0], [0], pen=pg.mkPen(QColor(colour), width=2), stepMode=True)\n self.plot_items[channel][shot] = plot_item\n\n if len(shot.traces[channel]) == 3:\n 
has_units = True\n units = shot.traces[channel][2]\n\n # Add Shutter Markers of ticked Shots\n self.add_shutter_markers(shot, channel, shutters_checked)\n\n if has_units:\n self.plot_widgets[channel].setLabel('left', channel, units=units)\n else:\n self.plot_widgets[channel].setLabel('left', channel)\n\n def add_shutter_markers(self, shot, channel, shutters_checked):\n if shot not in self.shutter_lines[channel] and channel in shot.shutter_times:\n self.shutter_lines[channel][shot] = [[], []]\n\n open_color = QColor(0, 255, 0)\n close_color = QColor(255, 0, 0)\n\n for t, val in shot.shutter_times[channel].items():\n scaled_t = t\n if val: # val != 0, shutter open\n line = self.plot_widgets[channel].addLine(x=scaled_t, pen=pg.mkPen(color=open_color, width=4., style=Qt.DotLine))\n self.shutter_lines[channel][shot][1].append(line)\n if not shutters_checked:\n line.hide()\n else: # else shutter close\n line = self.plot_widgets[channel].addLine(x=scaled_t, pen=pg.mkPen(color=close_color, width=4., style=Qt.DotLine))\n self.shutter_lines[channel][shot][0].append(line)\n if not shutters_checked:\n line.hide()\n\n def on_x_range_changed(self, *args):\n # print 'x range changed'\n self._resample = True\n\n @inmain_decorator(wait_for_return=True)\n def _get_resample_params(self, channel, shot):\n rect = self.plot_items[channel][shot].getViewBox().viewRect()\n xmin, xmax = rect.left(), rect.width() + rect.left()\n dx = xmax - xmin\n view_range = self.plot_widgets[channel].viewRange()\n return view_range[0][0], view_range[0][1], dx\n\n def resample(self, data_x, data_y, xmin, xmax, stop_time, num_pixels):\n \"\"\"This is a function for downsampling the data before plotting\n it. Unlike using nearest neighbour interpolation, this method\n preserves the features of the plot. It chooses what value to\n use based on what values within a region are most different\n from the values it's already chosen. This way, spikes of a short\n duration won't just be skipped over as they would with any sort\n of interpolation.\"\"\"\n # TODO: Only finely sample the currently visible region. Coarsely sample the rest\n # x_out = numpy.float32(numpy.linspace(data_x[0], data_x[-1], 4000*(data_x[-1]-data_x[0])/(xmax-xmin)))\n x_out = numpy.float64(numpy.linspace(xmin, xmax, 3 * 2000 + 2))\n y_out = numpy.empty(len(x_out) - 1, dtype=numpy.float64)\n data_x = numpy.float64(data_x)\n data_y = numpy.float64(data_y)\n\n # TODO: investigate only resampling when necessary.\n # Currently pyqtgraph sometimes has trouble rendering things\n # if you don't resample. 
If a point is far off the graph,\n # and this point is the first that should be drawn for stepMode,\n # because there is a long gap before the next point (which is\n # visible) then there is a problem.\n # Also need to explicitly handle cases where none of the data\n # is visible (which resampling does by setting NaNs)\n #\n # x_data_slice = data_x[(data_x>=xmin)&(data_x<=xmax)]\n # print len(data_x)\n # if len(x_data_slice) < 3*2000+2:\n # x_out = x_data_slice\n # y_out = data_y[(data_x>=xmin)&(data_x<=xmax)][:-1]\n # logger.info('skipping resampling')\n # else:\n resampling = True\n\n if resampling:\n _resample(data_x, data_y, x_out, y_out, numpy.float64(stop_time))\n # self.__resample4(data_x, data_y, x_out, y_out, numpy.float32(stop_time))\n else:\n x_out, y_out = data_x, data_y\n\n return x_out, y_out\n\n def __resample4(self, x_in, y_in, x_out, y_out, stop_time):\n # we want x-out to have three times the number of points as there are pixels\n # Plus one at the end\n # y_out = numpy.empty(len(x_out)-1, dtype=numpy.float64)\n # print 'len x_out: %d'%len(x_out)\n\n # A couple of special cases that I don't want to have to put extra checks in for:\n if x_out[-1] < x_in[0] or x_out[0] > stop_time:\n # We're all the way to the left of the data or all the way to the right. Fill with NaNs:\n y_out.fill('NaN')\n elif x_out[0] > x_in[-1]:\n # We're after the final clock tick, but before stop_time\n i = 0\n while i < len(x_out) - 1:\n if x_out[i] < stop_time:\n y_out[i] = y_in[-1]\n else:\n y_out[i] = numpy.float('NaN')\n i += 1\n else:\n i = 0\n j = 1\n # Until we get to the data, fill the output array with NaNs (which\n # get ignored when plotted)\n while x_out[i] < x_in[0]:\n y_out[i] = numpy.float('NaN')\n y_out[i + 1] = numpy.float('NaN')\n y_out[i + 2] = numpy.float('NaN')\n i += 3\n # If we're some way into the data, we need to skip ahead to where\n # we want to get the first datapoint from:\n while x_in[j] < x_out[i]:\n j += 1\n\n # Get the first datapoint:\n # y_out[i] = y_in[j-1]\n # i += 1\n\n # Get values until we get to the end of the data:\n while j < len(x_in) and i < len(x_out) - 2: # Leave one spare for the final data point and one because stepMode=True requires len(y)=len(x)-1\n # This is 'nearest neighbour on the left' interpolation. 
It's\n # what we want if none of the source values checked in the\n # upcoming loop are used:\n y_out[i] = y_in[j - 1]\n i += 2\n positive_jump_value = 0\n positive_jump_index = j - 1\n negative_jump_value = 0\n negative_jump_index = j - 1\n # now find the max and min values between this x_out time point and the next x_out timepoint\n # print i\n while j < len(x_in) and x_in[j] < x_out[i]:\n jump = y_in[j] - y_out[i - 2]\n # would using this source value cause a bigger positive jump?\n if jump > 0 and jump > positive_jump_value:\n positive_jump_value = jump\n positive_jump_index = j\n # would using this source value cause a bigger negative jump?\n elif jump < 0 and jump < negative_jump_value:\n negative_jump_value = jump\n negative_jump_index = j\n\n j += 1\n\n if positive_jump_index < negative_jump_index:\n y_out[i - 1] = y_in[positive_jump_index]\n y_out[i] = y_in[negative_jump_index]\n # TODO: We could override the x_out values with x_in[jump_index]\n else:\n y_out[i - 1] = y_in[negative_jump_index]\n y_out[i] = y_in[positive_jump_index]\n\n i += 1\n\n # Get the last datapoint:\n if j < len(x_in):\n # If the sample rate of the raw data is low, then the current\n # j point could be outside the current plot view range\n # If so, decrease j so that we take a value that is within the\n # plot view range.\n if x_in[j] > x_out[-1] and j > 0:\n j -= 1\n\n y_out[i] = y_in[j]\n i += 1\n # if i < len(x_out):\n # y_out[i] = y_in[-1]\n # i += 1\n # Fill the remainder of the array with the last datapoint,\n # if t < stop_time, and then NaNs after that:\n while i < len(x_out) - 1:\n if x_out[i] < stop_time:\n y_out[i] = y_in[-1]\n else:\n y_out[i] = numpy.float('NaN')\n i += 1\n # return y_out # method changed to modify y_out array in place\n\n def __resample3(self, x_in, y_in, x_out, stop_time):\n \"\"\"This is a Python implementation of the C extension. For\n debugging and developing the C extension.\"\"\"\n y_out = numpy.empty(len(x_out))\n i = 0\n j = 1\n # A couple of special cases that I don't want to have to put extra checks in for:\n if x_out[-1] < x_in[0] or x_out[0] > stop_time:\n # We're all the way to the left of the data or all the way to the right. Fill with NaNs:\n while i < len(x_out):\n y_out[i] = numpy.float('NaN')\n i += 1\n elif x_out[0] > x_in[-1]:\n # We're after the final clock tick, but before stop_time\n while i < len(x_out):\n if x_out[i] < stop_time:\n y_out[i] = y_in[-1]\n else:\n y_out[i] = numpy.float('NaN')\n i += 1\n else:\n # Until we get to the data, fill the output array with NaNs (which\n # get ignored when plotted)\n while x_out[i] < x_in[0]:\n y_out[i] = numpy.float('NaN')\n i += 1\n # If we're some way into the data, we need to skip ahead to where\n # we want to get the first datapoint from:\n while x_in[j] < x_out[i]:\n j += 1\n # Get the first datapoint:\n y_out[i] = y_in[j - 1]\n i += 1\n # Get values until we get to the end of the data:\n while j < len(x_in) and i < len(x_out):\n # This is 'nearest neighbour on the left' interpolation. 
It's\n # what we want if none of the source values checked in the\n # upcoming loop are used:\n y_out[i] = y_in[j - 1]\n while j < len(x_in) and x_in[j] < x_out[i]:\n # Would using this source value cause the interpolated values\n # to make a bigger jump?\n if numpy.abs(y_in[j] - y_out[i - 1]) > numpy.abs(y_out[i] - y_out[i - 1]):\n # If so, use this source value:\n y_out[i] = y_in[j]\n j += 1\n i += 1\n # Get the last datapoint:\n if i < len(x_out):\n y_out[i] = y_in[-1]\n i += 1\n # Fill the remainder of the array with the last datapoint,\n # if t < stop_time, and then NaNs after that:\n while i < len(x_out):\n if x_out[i] < stop_time:\n y_out[i] = y_in[-1]\n else:\n y_out[i] = numpy.float('NaN')\n i += 1\n return y_out\n\n def _resample_thread(self):\n logger = logging.getLogger('runviewer.resample_thread')\n while True:\n if self._resample:\n self._resample = False\n # print 'resampling'\n ticked_shots = inmain(self.get_selected_shots_and_colours)\n for shot, (colour, shutters_checked) in ticked_shots.items():\n for channel in shot.traces:\n if self.channel_checked_and_enabled(channel):\n try:\n xmin, xmax, dx = self._get_resample_params(channel, shot)\n\n # We go a bit outside the visible range so that scrolling\n # doesn't immediately go off the edge of the data, and the\n # next resampling might have time to fill in more data before\n # the user sees any empty space.\n if self.scale_time:\n xnew, ynew = self.resample(shot.scaled_times(channel), shot.traces[channel][1], xmin, xmax, shot.stop_time, dx)\n else:\n xnew, ynew = self.resample(shot.traces[channel][0], shot.traces[channel][1], xmin, xmax, shot.stop_time, dx)\n inmain(self.plot_items[channel][shot].setData, xnew, ynew, pen=pg.mkPen(QColor(colour), width=2), stepMode=True)\n except Exception:\n #self._resample = True\n pass\n else:\n logger.info('ignoring channel %s' % channel)\n time.sleep(0.5)\n\n @inmain_decorator(wait_for_return=True)\n def channel_checked_and_enabled(self, channel):\n logger.info('is channel %s enabled' % channel)\n index = self.channel_model.index(0, CHANNEL_MODEL__CHANNEL_INDEX)\n indexes = self.channel_model.match(index, Qt.DisplayRole, channel, 1, Qt.MatchExactly)\n logger.info('number of matches %d' % len(indexes))\n if len(indexes) == 1:\n check_item = self.channel_model.itemFromIndex(indexes[0])\n if check_item.checkState() == Qt.Checked and check_item.isEnabled():\n return True\n return False\n\n def on_x_axis_reset(self):\n self._hidden_plot[0].enableAutoRange(axis=pg.ViewBox.XAxis)\n\n def on_y_axes_reset(self):\n for plot_widget in self.plot_widgets.values():\n plot_widget.enableAutoRange(axis=pg.ViewBox.YAxis)\n\n def _enable_selected_shots(self):\n self.update_ticks_of_selected_shots(Qt.Checked)\n\n def _disable_selected_shots(self):\n self.update_ticks_of_selected_shots(Qt.Unchecked)\n\n def update_ticks_of_selected_shots(self, state):\n # Get the selection model from the treeview\n selection_model = self.ui.shot_treeview.selectionModel()\n # Create a list of select row indices\n selected_row_list = [index.row() for index in sorted(selection_model.selectedRows())]\n # for each row selected\n for row in selected_row_list:\n check_item = self.shot_model.item(row, SHOT_MODEL__CHECKBOX_INDEX)\n check_item.setCheckState(state)\n\n def _move_up(self):\n # Get the selection model from the treeview\n selection_model = self.ui.channel_treeview.selectionModel()\n # Create a list of select row indices\n selected_row_list = [index.row() for index in sorted(selection_model.selectedRows())]\n # For each 
row selected\n for i, row in enumerate(selected_row_list):\n # only move the row if it is not element 0, and the row above it is not selected\n # (note that while a row above may have been initially selected, it should by now, be one row higher\n # since we start moving elements of the list upwards starting from the lowest index)\n if row > 0 and (row - 1) not in selected_row_list:\n # Remove the selected row\n items = self.channel_model.takeRow(row)\n # Add the selected row into a position one above\n self.channel_model.insertRow(row - 1, items)\n # Since it is now a newly inserted row, select it again\n selection_model.select(self.channel_model.indexFromItem(items[0]), QItemSelectionModel.SelectCurrent)\n # reupdate the list of selected indices to reflect this change\n selected_row_list[i] -= 1\n self.update_plot_positions()\n\n def _move_down(self):\n # Get the selection model from the treeview\n selection_model = self.ui.channel_treeview.selectionModel()\n # Create a list of select row indices\n selected_row_list = [index.row() for index in reversed(sorted(selection_model.selectedRows()))]\n # For each row selected\n for i, row in enumerate(selected_row_list):\n # only move the row if it is not the last element, and the row above it is not selected\n # (note that while a row below may have been initially selected, it should by now, be one row lower\n # since we start moving elements of the list upwards starting from the highest index)\n if row < self.channel_model.rowCount() - 1 and (row + 1) not in selected_row_list:\n # Remove the selected row\n items = self.channel_model.takeRow(row)\n # Add the selected row into a position one above\n self.channel_model.insertRow(row + 1, items)\n # Since it is now a newly inserted row, select it again\n selection_model.select(self.channel_model.indexFromItem(items[0]), QItemSelectionModel.SelectCurrent)\n # reupdate the list of selected indices to reflect this change\n selected_row_list[i] += 1\n self.update_plot_positions()\n\n def _move_top(self):\n # Get the selection model from the treeview\n selection_model = self.ui.channel_treeview.selectionModel()\n # Create a list of select row indices\n selected_row_list = [index.row() for index in sorted(selection_model.selectedRows())]\n # For each row selected\n for i, row in enumerate(selected_row_list):\n # only move the row while it is not element 0, and the row above it is not selected\n # (note that while a row above may have been initially selected, it should by now, be one row higher\n # since we start moving elements of the list upwards starting from the lowest index)\n while row > 0 and (row - 1) not in selected_row_list:\n # Remove the selected row\n items = self.channel_model.takeRow(row)\n # Add the selected row into a position one above\n self.channel_model.insertRow(row - 1, items)\n # Since it is now a newly inserted row, select it again\n selection_model.select(self.channel_model.indexFromItem(items[0]), QItemSelectionModel.SelectCurrent)\n # reupdate the list of selected indices to reflect this change\n selected_row_list[i] -= 1\n row -= 1\n self.update_plot_positions()\n\n def _move_bottom(self):\n selection_model = self.ui.channel_treeview.selectionModel()\n # Create a list of select row indices\n selected_row_list = [index.row() for index in reversed(sorted(selection_model.selectedRows()))]\n # For each row selected\n for i, row in enumerate(selected_row_list):\n # only move the row while it is not the last element, and the row above it is not selected\n # (note that while a row 
below may have been initially selected, it should by now, be one row lower\n # since we start moving elements of the list upwards starting from the highest index)\n while row < self.channel_model.rowCount() - 1 and (row + 1) not in selected_row_list:\n # Remove the selected row\n items = self.channel_model.takeRow(row)\n # Add the selected row into a position one above\n self.channel_model.insertRow(row + 1, items)\n # Since it is now a newly inserted row, select it again\n selection_model.select(self.channel_model.indexFromItem(items[0]), QItemSelectionModel.SelectCurrent)\n # reupdate the list of selected indices to reflect this change\n selected_row_list[i] += 1\n row += 1\n self.update_plot_positions()\n\n def update_plot_positions(self):\n # remove all widgets\n layout_items = {}\n for i in range(self.ui.plot_layout.count()):\n if i == 0:\n continue\n item = self.ui.plot_layout.takeAt(i)\n\n # add all widgets\n for i in range(self.channel_model.rowCount()):\n check_item = self.channel_model.item(i, CHANNEL_MODEL__CHECKBOX_INDEX)\n channel = str(check_item.text())\n if channel in self.plot_widgets:\n self.ui.plot_layout.addWidget(self.plot_widgets[channel])\n if check_item.checkState() == Qt.Checked and check_item.isEnabled():\n self.plot_widgets[channel].show()\n else:\n self.plot_widgets[channel].hide()\n self.ui.plot_layout.addWidget(self._time_axis_plot[0])\n\n\nclass Shot(object):\n def __init__(self, path):\n self.path = path\n\n # Store list of traces\n self._traces = None\n # store list of channels\n self._channels = None\n # store list of markers\n self._markers = None\n self.cached_scaler = None\n self._scalehandler = None\n self._scaled_x = {}\n\n # store list of shutter changes and callibrations\n self._shutter_times = None\n self._shutter_calibrations = {}\n\n # TODO: Get this dynamically\n device_list = ['PulseBlaster', 'NI_PCIe_6363', 'NI_PCI_6733']\n\n # Load connection table\n self.connection_table = ConnectionTable(path)\n\n # open h5 file\n with h5py.File(path, 'r') as file:\n # Get master pseudoclock\n self.master_pseudoclock_name = file['connection table'].attrs['master_pseudoclock']\n if isinstance(self.master_pseudoclock_name, bytes):\n self.master_pseudoclock_name = self.master_pseudoclock_name.decode('utf8')\n else:\n self.master_pseudoclock_name = str(self.master_pseudoclock_name)\n\n # get stop time\n self.stop_time = file['devices'][self.master_pseudoclock_name].attrs['stop_time']\n\n self.device_names = list(file['devices'].keys())\n\n # Get Shutter Calibrations\n if 'calibrations' in file and 'Shutter' in file['calibrations']:\n for name, open_delay, close_delay in numpy.array(file['calibrations']['Shutter']):\n self._shutter_calibrations[name] = [open_delay, close_delay]\n\n def delete_cache(self):\n self._channels = None\n self._traces = None\n\n def _load(self):\n if self._channels is None:\n self._channels = {}\n if self._traces is None:\n self._traces = {}\n if self._markers is None:\n self._markers = {}\n if self._shutter_times is None:\n self._shutter_times = {}\n\n self._load_markers()\n # Let's walk the connection table, starting with the master pseudoclock\n master_pseudoclock_device = self.connection_table.find_by_name(self.master_pseudoclock_name)\n\n self._load_device(master_pseudoclock_device)\n\n self._scalehandler = ScaleHandler(self._markers.keys(), self.stop_time)\n\n def _load_markers(self):\n with h5py.File(self.path, 'r') as file:\n if \"time_markers\" in file:\n for row in file[\"time_markers\"]:\n self._markers[row['time']] = 
{'color': row['color'].tolist()[0], 'label': row['label']}\n elif \"runviewer\" in file:\n for time, val in file[\"runviewer\"][\"markers\"].attrs.items():\n props = val.strip('{}}').rsplit(\",\", 1)\n color = list(map(int, props[0].split(\":\")[1].strip(\" ()\").split(\",\")))\n label = props[1].split(\":\")[1]\n self._markers[float(time)] = {'color': color, 'label': label}\n\n def add_trace(self, name, trace, parent_device_name, connection):\n name = str(name)\n self._channels[name] = {'device_name': parent_device_name, 'port': connection}\n self._traces[name] = trace\n\n # add shutter times\n try:\n con = self.connection_table.find_by_name(name)\n if con.device_class == \"Shutter\":\n self.add_shutter_times([(name, con.properties['open_state'])])\n except KeyError:\n pass\n\n\n # Temporary solution to physical shutter times\n def add_shutter_times(self, shutters):\n for name, open_state in shutters:\n x_values, y_values = self._traces[name]\n if len(x_values) > 0:\n change_indices = numpy.where(y_values[:-1] != y_values[1:])[0]\n change_indices += 1 # use the index of the value that is changed to\n change_values = zip(x_values[change_indices], y_values[change_indices])\n change_values.insert(0, (x_values[0], y_values[0])) # insert first value\n self._shutter_times[name] = {x_value + (self._shutter_calibrations[name][0] if y_value == open_state else self._shutter_calibrations[name][1]): 1 if y_value == open_state else 0 for x_value, y_value in change_values}\n\n def _load_device(self, device, clock=None):\n try:\n print('loading %s' % device.name)\n module = device.device_class\n # Load the master pseudoclock class\n # labscript_devices.import_device(module)\n device_class = labscript_devices.get_runviewer_parser(module)\n device_instance = device_class(self.path, device)\n clocklines_and_triggers = device_instance.get_traces(self.add_trace, clock)\n\n for name, trace in clocklines_and_triggers.items():\n child_device = self.connection_table.find_by_name(name)\n for grandchild_device_name, grandchild_device in child_device.child_list.items():\n self._load_device(grandchild_device, trace)\n\n except Exception:\n # TODO: print/log exception traceback\n # if device.name == 'ni_card_0' or device.name == 'pulseblaster_0' or device.name == 'pineblaster_0' or device.name == 'ni_card_1' or device.name == 'novatechdds9m_0':\n # raise\n # raise\n if hasattr(device, 'name'):\n print('Failed to load device %s' % device.name)\n else:\n print('Failed to load device (unknown name, device object does not have attribute name)')\n\n # backwards compat\n with h5py.File(self.path, 'r') as file:\n if \"runviewer\" in file:\n if \"shutter_times\" in file[\"runviewer\"]:\n for name, val in file[\"runviewer\"][\"shutter_times\"].attrs.items():\n self._shutter_times[name] = {float(key_value.split(\":\")[0]): int(key_value.split(\":\")[1]) for key_value in val.strip('{}}').split(\",\")}\n\n def scaled_times(self, channel):\n if self.cached_scaler != app.scalehandler:\n self.cached_scaler = app.scalehandler\n self._scaled_x = {}\n if channel not in self._scaled_x:\n self._scaled_x[channel] = self.cached_scaler.get_scaled_time(self._traces[channel][0])\n\n return self._scaled_x[channel]\n\n @property\n def channels(self):\n if self._channels is None:\n self._load()\n\n return self._channels.keys()\n\n def clear_cache(self):\n # clear cache variables to cut down on memory usage\n pass\n\n @property\n def markers(self):\n if self._markers is None:\n self._load()\n return self._markers\n\n @property\n def 
traces(self):\n # if traces cached:\n # return cached traces and waits\n if self._traces is None:\n self._load()\n return self._traces\n\n @property\n def shutter_times(self):\n if self._shutter_times is None:\n self._load()\n return self._shutter_times\n\n @property\n def scalehandler(self):\n if self._scalehandler is None:\n self._load()\n return self._scalehandler\n\n\nclass TempShot(Shot):\n def __init__(self, i):\n Shot.__init__(self, 'shot %d' % i)\n self._channels = ['Bx', 'By', 'Bz', 'Bq']\n\n self.stop_time = i + 1\n\n self.traces = {}\n no_x_points = 10000\n for channel in self.channels:\n # self.traces[channel] = (numpy.linspace(0,10,no_x_points), numpy.random.rand(no_x_points))\n x_points = numpy.linspace(0, self.stop_time, no_x_points)\n self.traces[channel] = (x_points, (i + 1) * numpy.sin(x_points * numpy.pi + i / 11.0 * 2 * numpy.pi))\n\n @property\n def channels(self):\n return self._channels\n\n def get_traces(self):\n return self.traces\n\n\nclass RunviewerServer(ZMQServer):\n def __init__(self, *args, **kwargs):\n ZMQServer.__init__(self, *args, **kwargs)\n self.logger = logging.getLogger('runviewer.server')\n\n def handler(self, h5_filepath):\n if h5_filepath == 'hello':\n return 'hello'\n\n self.logger.info('Received hdf5 file: %s' % h5_filepath)\n # Convert path to local slashes and shared drive prefix:\n h5_filepath = labscript_utils.shared_drive.path_to_local(h5_filepath)\n logger.info('local filepath: %s' % h5_filepath)\n # we add the shot to a queue so that we don't have to wait for the app to come up before\n # responding to runmanager\n shots_to_process_queue.put(h5_filepath)\n return 'ok'\n\n\nif __name__ == \"__main__\":\n qapplication = QApplication(sys.argv)\n\n shots_to_process_queue = Queue()\n\n exp_config = LabConfig(required_params = {\"DEFAULT\": [\"experiment_name\"], \"paths\": [\"shared_drive\", \"experiment_shot_storage\"], 'ports': ['runviewer']})\n\n port = int(exp_config.get('ports', 'runviewer'))\n myappid = 'monashbec.runviewer' # arbitrary string\n try:\n ctypes.windll.shell32.SetCurrentProcessExplicitAppUserModelID(myappid)\n except:\n logger.info('Not on a windows machine')\n # Start experiment server\n experiment_server = RunviewerServer(port)\n\n app = RunViewer(exp_config)\n\n def execute_program():\n qapplication.exec_()\n\n sys.exit(execute_program())\n"
] | [
[
"numpy.abs",
"numpy.linspace",
"numpy.sin",
"scipy.interpolate.interp1d",
"numpy.float64",
"numpy.array",
"numpy.where",
"numpy.float"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"1.6",
"0.14",
"1.10",
"0.15",
"1.4",
"0.16",
"1.9",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"0.12",
"1.0",
"0.17",
"1.3",
"1.8"
],
"tensorflow": []
}
] |
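The runviewer file embedded in the row above carries a resampling routine whose docstring explains the idea: take the "nearest neighbour on the left" for each output point, but within each output bin keep the source value that produces the largest jump, so short spikes survive downsampling instead of being interpolated away. The standalone sketch below illustrates that selection rule with plain NumPy; it is an illustrative toy under simplifying assumptions, not the project's C extension `_resample` nor its exact edge-case handling.

import numpy as np

def downsample_keep_spikes(x_in, y_in, x_out, stop_time):
    # Output is NaN outside [x_in[0], stop_time]; NaNs are simply skipped when plotted.
    y_out = np.full(len(x_out), np.nan)
    j = 1
    for i, xo in enumerate(x_out):
        if xo < x_in[0] or xo > stop_time:
            continue                      # outside the data/run: leave NaN
        if xo > x_in[-1]:
            y_out[i] = y_in[-1]           # past the last sample: hold its value until stop_time
            continue
        y_out[i] = y_in[j - 1]            # nearest neighbour on the left
        while j < len(x_in) and x_in[j] < xo:
            # Prefer the source point in this bin that jumps furthest from the previous
            # output value, so a one-sample spike is not skipped over.
            if i > 0 and abs(y_in[j] - y_out[i - 1]) > abs(y_out[i] - y_out[i - 1]):
                y_out[i] = y_in[j]
            j += 1
    return y_out

# Example: a 200k-point trace reduced to ~6000 plotted points without losing a 1-sample spike.
# x = np.linspace(0, 1, 200_000); y = np.zeros_like(x); y[123_456] = 5.0
# y_small = downsample_keep_spikes(x, y, np.linspace(0, 1, 6000), stop_time=1.0)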
maniospas/pygrank | [
"a92f6bb6d13553dd960f2e6bda4c041a8027a9d1"
] | [
"pygrank/measures/unsupervised.py"
] | [
"import warnings\nimport numpy as np\nfrom pygrank.measures.utils import Measure\nfrom pygrank.core.signals import to_signal\nfrom pygrank.core import backend, GraphSignalGraph, GraphSignalData, BackendPrimitive\n\n\nclass Unsupervised(Measure):\n pass\n\n\nclass Conductance(Unsupervised):\n \"\"\" Graph conductance (information flow) of scores.\n\n Assumes a fuzzy set of subgraphs whose nodes are included with probability proportional to their scores,\n as per the formulation of [krasanakis2019linkauc] and calculates E[outgoing edges] / E[internal edges] of\n the fuzzy rank subgraph.\n If scores assume binary values, E[.] becomes set size and this calculates the induced subgraph Conductance.\n \"\"\"\n\n def __init__(self, graph: GraphSignalGraph = None, max_rank: float = 1):\n \"\"\" Initializes the Conductance measure.\n\n Args:\n graph: Optional. The graph on which to calculate the measure. If None (default) it is automatically\n extracted from graph signals passed for evaluation.\n max_rank: Optional. The maximum value scores can assume. To maintain a probabilistic formulation of\n conductance, this can be greater but not less than the maximum rank during evaluation. Default is 1.\n\n Example:\n >>> import pygrank as pg\n >>> graph, seed_nodes, algorithm = ...\n >>> algorithm = pg.Normalize(algorithm)\n >>> scores = algorithm.rank(graph, seed_nodes)\n >>> conductance = pg.Conductance().evaluate(scores)\n \"\"\"\n self.graph = graph\n self.max_rank = max_rank\n\n def evaluate(self, scores: GraphSignalData) -> BackendPrimitive:\n scores = to_signal(self.graph, scores)\n graph = scores.graph\n if backend.max(scores.np) > self.max_rank:\n raise Exception(\"Normalize scores to be <= \" + str(self.max_rank) + \" for non-negative conductance\")\n external_edges = sum(scores.get(i, 0)*(self.max_rank-scores.get(j, 0)) for i, j in graph.edges())\n internal_edges = sum(scores.get(i, 0)*scores.get(j, 0) for i, j in graph.edges())\n if internal_edges > graph.number_of_edges()/2:\n internal_edges = graph.number_of_edges()-internal_edges # user the smallest partition as reference\n if not graph.is_directed():\n external_edges += sum(scores.get(j, 0) * (self.max_rank - scores.get(i, 0)) for i, j in graph.edges())\n internal_edges *= 2\n return external_edges / internal_edges if internal_edges != 0 else float('inf')\n\n\nclass Density(Unsupervised):\n \"\"\" Extension of graph density that accounts for node scores.\n\n Assumes a fuzzy set of subgraphs whose nodes are included with probability proportional to their scores,\n as per the formulation of [krasanakis2019linkauc] and calculates E[internal edges] / E[possible edges] of\n the fuzzy rank subgraph.\n If scores assume binary values, E[.] becomes set size and this calculates the induced subgraph Density.\n \"\"\"\n\n def __init__(self, graph: GraphSignalGraph = None):\n \"\"\" Initializes the Density measure.\n\n Args:\n graph: Optional. The graph on which to calculate the measure. 
If None (default) it is automatically\n extracted from graph signals passed for evaluation.\n\n Example:\n >>> import pygrank as pg\n >>> graph, seed_nodes, algorithm = ...\n >>> scores = algorithm.rank(graph, seed_nodes)\n >>> conductance = pg.Density().evaluate(scores)\n \"\"\"\n self.graph = graph\n\n def evaluate(self, scores: GraphSignalData) -> BackendPrimitive:\n scores = to_signal(self.graph, scores)\n graph = scores.graph\n internal_edges = sum(scores.get(i, 0) * scores.get(j, 0) for i,j in graph.edges())\n expected_edges = backend.sum(scores.np) ** 2 - backend.sum(scores.np ** 2) # without self-loops\n if internal_edges == 0:\n return 0\n return internal_edges / expected_edges\n\n\nclass Modularity(Unsupervised):\n \"\"\"\n Extension of modularity that accounts for node scores.\n \"\"\"\n \n def __init__(self,\n graph: GraphSignalGraph = None,\n max_rank: float = 1,\n max_positive_samples: int = 2000,\n seed: int = 0):\n \"\"\" Initializes the Modularity measure with a sampling strategy that speeds up normal computations.\n\n Args:\n graph: Optional. The graph on which to calculate the measure. If None (default) it is automatically\n extracted from graph signals passed for evaluation.\n max_rank: Optional. Default is 1.\n max_positive_samples: Optional. The number of nodes with which to compute modularity. These are\n sampled uniformly from all graph nodes. If this is greater than the number of graph nodes,\n all nodes are used and the measure is deterministic. However,\n calculation time is O(max_positive_samples<sup>2</sup>) and thus a trade-off needs to be determined of time\n vs approximation quality. Effectively, the value should be high enough for max_positive_samples<sup>2</sup>\n to be comparable to the number of graph edges. Default is 2000.\n seed: Optional. Makes the evaluation seeded, for example to use in tuning. Default is 0.\n\n Example:\n >>> import pygrank as pg\n >>> graph, seed_nodes, algorithm = ...\n >>> scores = algorithm.rank(graph, seed_nodes)\n >>> modularity = pg.Modularity(max_positive_samples=int(graph.number_of_edges()**0.5)).evaluate(scores)\n \"\"\"\n self.graph = graph\n self.max_positive_samples = max_positive_samples\n self.max_rank = max_rank\n self.seed = seed\n\n def evaluate(self, scores: GraphSignalData) -> BackendPrimitive:\n scores = to_signal(self.graph, scores)\n graph = scores.graph\n positive_candidates = list(graph)\n if len(positive_candidates) > self.max_positive_samples:\n np.random.seed(self.seed)\n positive_candidates = np.random.choice(positive_candidates, self.max_positive_samples)\n m = graph.number_of_edges()\n if m == 0:\n return 0\n Q = 0\n for v in positive_candidates:\n for u in positive_candidates:\n Avu = 1 if graph.has_edge(v,u) else 0\n Avu -= graph.degree[v]*graph.degree[u]/2/m\n Q += Avu*(scores[v]/self.max_rank)*(scores[u]/self.max_rank)\n return Q/2/m\n"
] | [
[
"numpy.random.seed",
"numpy.random.choice"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
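The pygrank row above defines fuzzy conductance as E[outgoing edges] / E[internal edges] of a rank-weighted subgraph. The following minimal sketch evaluates that ratio on a toy networkx graph; it assumes plain dict scores in [0, 1] and an undirected graph, and omits pygrank's GraphSignal/backend machinery and its smaller-partition correction.

import networkx as nx

def fuzzy_conductance(graph, scores, max_rank=1.0):
    # E[edges leaving the fuzzy set] over E[edges inside it], accumulated edge by edge.
    external = sum(scores.get(i, 0) * (max_rank - scores.get(j, 0)) for i, j in graph.edges())
    internal = sum(scores.get(i, 0) * scores.get(j, 0) for i, j in graph.edges())
    if not graph.is_directed():
        external += sum(scores.get(j, 0) * (max_rank - scores.get(i, 0)) for i, j in graph.edges())
        internal *= 2
    return external / internal if internal != 0 else float('inf')

graph = nx.path_graph(4)                        # 0-1-2-3
scores = {0: 1.0, 1: 1.0, 2: 0.2, 3: 0.0}       # fuzzy membership of a candidate community
print(fuzzy_conductance(graph, scores))         # lower values indicate a better-separated community

With binary scores the expectations reduce to set sizes, recovering ordinary induced-subgraph conductance, which is the special case the class docstring points out.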
SummaLabs/DLS | [
"2adba47430b456ad0f324e4c8883a896a23b3fbf",
"2adba47430b456ad0f324e4c8883a896a23b3fbf",
"2adba47430b456ad0f324e4c8883a896a23b3fbf"
] | [
"data-test/test_data_serialization/step2_h5py_test_perf/run02_s1_write_image_blob.py",
"data-test/test_data_serialization/step3_h5py_vs_lmdb/run02_read_lmdb_with_tfrecord.py",
"app/backend-test/core_datasets/run03_test_generation_preview_images.py"
] | [
"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n__author__ = 'ar'\n\nimport skimage.io as skio\nimport numpy as np\nimport h5py\nimport time\n\nfrom run00_common import ImageDirParser\n\n############################\ndef buidImageDataset(imageDirParser=None, datasetName='test-dataset.h5', numberOfSamples=1000, isRawBlob = False):\n if imgDirParser is None:\n raise Exception('Invalid imageDirParser')\n pathH5File = datasetName\n f = h5py.File(pathH5File, 'w')\n f.create_dataset('scheme', data=np.array(imgDirParser.scheme))\n grpData = f.create_group('data')\n #\n rndIndex = np.random.randint(0, imgDirParser.getNumSamples(), (numberOfSamples))\n for ii in range(len(rndIndex)):\n ridx = rndIndex[ii]\n dataRow = imgDirParser.listPathAndIdx[ridx]\n grpName = 'row_%08d' % ii\n grp = grpData.create_group(grpName)\n for vvi, vv in enumerate(dataRow):\n ttype = imgDirParser.scheme[vvi]\n tkey = 'col_%02d' % vvi\n if ttype == 'path-img2d':\n if isRawBlob:\n timgData = np.void(open(vv, 'r').read())\n dset = grp.create_dataset(tkey, data=timgData)\n else:\n timg = skio.imread(vv)\n dset = grp.create_dataset(tkey, data=timg)\n elif ttype == 'category-idx':\n dset = grp.create_dataset(tkey, data=np.array(vv))\n elif ttype == 'array-float':\n dset = grp.create_dataset(tkey, data=vv)\n elif ttype == 'category-name':\n dset = grp.create_dataset(tkey, data=np.array(vv))\n else:\n raise Exception('Unknown feature type [%s]' % ttype)\n f.close()\n\n\n############################\nif __name__ == '__main__':\n wdir = '../../dataset-image2d/simple4c_test'\n imgDirParser = ImageDirParser(wdir=wdir)\n print (imgDirParser)\n #\n numberOfSamples = 10000\n dataSetNameRaw = 'test-dataset-rawimg.h5'\n dataSetNameArr = 'test-dataset-numpy.h5'\n # (1) Raw\n t1 = time.time()\n buidImageDataset(imageDirParser=imgDirParser,\n datasetName=dataSetNameRaw,\n numberOfSamples=numberOfSamples, isRawBlob=True)\n dt = time.time() - t1\n tspeed = float(numberOfSamples) / dt\n dT1k = 1000. / tspeed\n print ('WRITE [%s] : T=%0.2fs, #Samples=%d, Speed: %0.3f (Samples/Sec), dt(#1000) = %0.3fs'\n % (dataSetNameRaw, dt, numberOfSamples, tspeed, dT1k))\n # (2) Numpy\n t1 = time.time()\n buidImageDataset(imageDirParser=imgDirParser,\n datasetName=dataSetNameArr,\n numberOfSamples=numberOfSamples, isRawBlob=False)\n dt = time.time() - t1\n tspeed = float(numberOfSamples) / dt\n dT1k = 1000. / tspeed\n print ('WRITE [%s] : T=%0.2fs, #Samples=%d, Speed: %0.3f (Samples/Sec), dt(#1000) = %0.3fs'\n % (dataSetNameArr, dt, numberOfSamples, tspeed, dT1k))\n",
"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\n__author__ = 'ar'\n\nimport os\nimport time\nimport lmdb\nimport tensorflow as tf\nimport numpy as np\nimport shutil\nimport matplotlib.pyplot as plt\n\nfrom run00_common import ImageDirParser, DataType\n\n#################################\ndef readImageDatasetLMDB(imageDirParser=None, numberOfSamples=1000, isRawBlob=False):\n if imgDirParser is None:\n raise Exception('Invalid imageDirParser')\n rndIndex = np.random.randint(0, imgDirParser.getNumSamples(), (numberOfSamples))\n dataTypeBuilder = DataType()\n # (1) check dataset type and prepare write\n tpref = 'raw%d' % isRawBlob\n dbfout = 'test-dataset-lmdb-%s' % tpref\n if not os.path.isdir(dbfout):\n raise Exception('Cant find LMDB dataset [%s]' % dbfout)\n tsizInBytes = 4 * (1024 ** 3)\n lmdbEnv = lmdb.open(dbfout, map_size=tsizInBytes)\n t0 = time.time()\n meanImage = None\n meanArray = None\n meanArra2 = None\n schemeOfFeatures = None\n with lmdbEnv.begin(write=False) as lmdbTxn:\n lstKeys = [key for key, _ in lmdbTxn.cursor()]\n rndIndex = np.random.randint(len(lstKeys), size=numberOfSamples)\n for ii, ridx in enumerate(rndIndex):\n tkey = lstKeys[ridx]\n texampleStr = lmdbTxn.get(tkey)\n texample = tf.train.Example()\n texample.ParseFromString(texampleStr)\n tfeatures = texample.features._fields.values()[0]\n # (1) Prepare scheme for dataset row-sample\n if schemeOfFeatures is None:\n d1 = {ss: ss.split('.') for ss in tfeatures.keys()}\n schemeOfFeatures = {}\n for kk,vv in d1.items():\n if not schemeOfFeatures.has_key(vv[0]):\n schemeOfFeatures[vv[0]] = {}\n tk = vv[1]\n schemeOfFeatures[vv[0]][tk] = kk\n # (2) iterate over scheme-data-types\n for ttypeStr,vv in schemeOfFeatures.items():\n tdataTypeObj = dataTypeBuilder.getDataClassByName(ttypeStr)\n cfg = {k2:tfeatures.pop(v2) for k2,v2 in vv.items()}\n tret = tdataTypeObj.blob2Data(cfg)\n #\n if ttypeStr == 'path-img2d':\n if meanImage is None:\n meanImage = tret['img'].copy().astype(np.float)\n else:\n meanImage += tret['img'].copy().astype(np.float)\n elif ttypeStr == 'array-float':\n tarr = tret['val'].copy()\n if meanArray is None:\n meanArray = tarr\n meanArra2 = tarr ** 2\n else:\n meanArray += tarr\n meanArra2 += tarr ** 2\n numData = len(lstKeys)\n meanImage /= numData\n meanArray /= numData\n stdArray = np.sqrt(meanArra2 - meanArray ** 2)\n dt = time.time() - t0\n return (dt, meanImage, meanArray, stdArray, numData)\n\n#################################\nif __name__ == '__main__':\n wdir = '../../dataset-image2d/simple4c_test'\n imgDirParser = ImageDirParser(wdir=wdir)\n print (imgDirParser)\n lstOpt_Raw = [False, True]\n opBdt = 'lmdb'\n for opRaw in lstOpt_Raw:\n (tdt, meanImage, meanArray, stdArray, numberOfSamples) = readImageDatasetLMDB(imgDirParser, isRawBlob=opRaw)\n tspeed = float(numberOfSamples) / tdt\n dT1k = 1000. / tspeed\n print ('READ [%s : isRaw = %d] : T=%0.2fs, #Samples=%d, Speed: %0.3f (Samples/Sec), dt(#1000) = %0.3fs'\n % (opBdt, opRaw, tdt, numberOfSamples, tspeed, dT1k))\n plt.imshow((255.*meanImage/meanImage.max()).astype(np.uint8))\n plt.title('#Samples = %d' % numberOfSamples)\n plt.show()\n",
"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n__author__ = 'ar'\n\nimport os\nimport glob\n\nimport matplotlib.pyplot as plt\n\nfrom app.backend.core.datasets.dbimageinfo import DatasetImage2dInfo\nfrom app.backend.core.datasets.imgproc2d import ImageTransformer2D\n\npathWithDatasets='../../../data/datasets'\n\nif __name__ == '__main__':\n lstDir=glob.glob('%s/dbset-*' % pathWithDatasets)\n numDir=len(lstDir)\n if numDir>5:\n numDir=5\n plt.figure()\n for ii in range(numDir):\n pathDB = lstDir[ii]\n dbImage2dInfo = DatasetImage2dInfo(pathDB)\n if dbImage2dInfo.checkIsAValidImage2dDir():\n dbImage2dInfo.loadDBInfo()\n dbName=dbImage2dInfo.getInfoStat()['name']\n pathLMDB = dbImage2dInfo.pathDbTrain\n imgPreview=ImageTransformer2D.generateImagePreview(pathLMDB)\n plt.subplot(1,numDir,ii+1)\n plt.imshow(imgPreview)\n plt.title(dbName)\n plt.show()\n print ('----')\n\n"
] | [
[
"numpy.array"
],
[
"matplotlib.pyplot.show",
"numpy.sqrt",
"matplotlib.pyplot.title",
"tensorflow.train.Example"
],
[
"matplotlib.pyplot.imshow",
"matplotlib.pyplot.title",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.show",
"matplotlib.pyplot.figure"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
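The first file in the SummaLabs/DLS row above benchmarks two ways of putting images into HDF5: storing the encoded file as an opaque blob via numpy.void, or decoding it first and storing the pixel array. The sketch below shows just those two write/read paths; it assumes a local example.png and h5py plus scikit-image installed, and is not the benchmark script itself.

import io
import h5py
import numpy as np
import skimage.io as skio

with h5py.File('blob_vs_array.h5', 'w') as f:
    with open('example.png', 'rb') as fimg:
        f.create_dataset('img_raw', data=np.void(fimg.read()))    # compressed bytes, decoded on read
    f.create_dataset('img_arr', data=skio.imread('example.png'))  # decoded HxWxC array, ready to use

with h5py.File('blob_vs_array.h5', 'r') as f:
    raw = skio.imread(io.BytesIO(f['img_raw'][()].tobytes()))     # decode the blob back to pixels
    arr = f['img_arr'][()]

The raw-blob route defers decoding to read time; the script in the row times how the two choices affect write throughput for a batch of randomly sampled images.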
Heng-Z/mwr | [
"28e42a3a64f46dc627333b2c6ae4b317803648ba",
"28e42a3a64f46dc627333b2c6ae4b317803648ba"
] | [
"training/data_sequence.py",
"preprocessing/prepare.py"
] | [
"from tensorflow.keras.utils import Sequence\nimport numpy as np\nimport mrcfile\nimport os\n# Here, `x_set` is list of path to the images\n# and `y_set` are the associated classes.\n\nclass dataSequence(Sequence):\n\n def __init__(self, x_set, y_set, batch_size):\n self.x, self.y = x_set, y_set\n self.batch_size = batch_size\n self.perm = np.random.permutation(len(self.x))\n\n def __len__(self):\n return int(np.ceil(len(self.x) / float(self.batch_size)))\n\n def on_epoch_end(self):\n self.perm = np.random.permutation(len(self.x))\n\n def __getitem__(self, i):\n idx = slice(i*self.batch_size,(i+1)*self.batch_size)\n idx = self.perm[idx]\n # print('*******',self.x[-1],mrcfile.open(self.x[0]).data[:,:,:,np.newaxis].shape)\n rx = np.array([mrcfile.open(self.x[j]).data[:,:,:,np.newaxis] for j in idx])\n ry = np.array([mrcfile.open(self.y[j]).data[:,:,:,np.newaxis] for j in idx])\n # for j in idx:\n # print(mrcfile.open(self.x[j]).data.shape,mrcfile.open(self.y[j]).data.shape)\n return rx,ry\n\n\ndef prepare_dataseq(data_folder, batch_size):\n\n dirs_tomake = ['train_x','train_y', 'test_x', 'test_y']\n path_all = []\n for d in dirs_tomake:\n p = '{}/{}/'.format(data_folder, d)\n path_all.append(sorted([p+f for f in os.listdir(p)]))\n # train_data = dataSequence(path_all[0], path_all[1], batch_size)\n # test_data = dataSequence(path_all[2], path_all[3], batch_size)\n train_data = get_gen(path_all[0], path_all[1], batch_size)\n test_data = get_gen(path_all[2], path_all[3], batch_size)\n # print(path_all[2],path_all[3])\n return train_data, test_data\n\ndef get_gen(x_set,y_set,batch_size,shuffle=True):\n def gen():\n while True:\n all_idx = np.arange(len(x_set))\n if shuffle:\n np.random.shuffle(all_idx)\n for i in range(len(x_set)//batch_size):\n idx = slice(i * batch_size,(i+1) * batch_size)\n idx = all_idx[idx]\n rx = np.array([mrcfile.open(x_set[j]).data[:,:,:,np.newaxis] for j in idx])\n ry = np.array([mrcfile.open(y_set[j]).data[:,:,:,np.newaxis] for j in idx])\n\n yield rx,ry\n return gen\n\ndef get_gen_single(x_set,batch_size,shuffle=True):\n def gen():\n while True:\n all_idx = np.arange(len(x_set))\n if shuffle:\n np.random.shuffle(all_idx)\n for i in range(len(x_set)//batch_size):\n idx = slice(i * batch_size,(i+1) * batch_size)\n idx = all_idx[idx]\n rx = np.array([mrcfile.open(x_set[j]).data[:,:,:,np.newaxis] for j in idx])\n yield rx\n return gen",
"import os \nimport sys\nimport logging\nimport sys\nimport mrcfile\nfrom IsoNet.preprocessing.cubes import create_cube_seeds,crop_cubes,DataCubes\nfrom IsoNet.preprocessing.img_processing import normalize\nfrom IsoNet.preprocessing.simulate import apply_wedge1 as apply_wedge\nfrom multiprocessing import Pool\nimport numpy as np\nfrom functools import partial\nfrom IsoNet.util.rotations import rotation_list\n# from difflib import get_close_matches\nfrom IsoNet.util.metadata import MetaData, Item, Label\n#Make a new folder. If exist, nenew it\n# Do not set basic config for logging here\n# logging.basicConfig(format='%(asctime)s,%(msecs)d %(levelname)-8s [%(filename)s:%(lineno)d] %(message)s',datefmt=\"%H:%M:%S\",level=logging.DEBUG)\ndef mkfolder(folder):\n import os\n try:\n os.makedirs(folder)\n except FileExistsError:\n logging.warning(\"The {0} folder already exists before the 1st iteration \\n The old {0} folder will be renamed (to {0}~)\".format(folder))\n import shutil\n if os.path.exists(folder+'~'):\n shutil.rmtree(folder+'~')\n os.system('mv {} {}'.format(folder, folder+'~'))\n os.makedirs(folder)\n\ndef generate_first_iter_mrc(mrc,settings):\n '''\n Apply mw to the mrc and save as xx_iter00.xx\n '''\n root_name = mrc.split('/')[-1].split('.')[0]\n extension = mrc.split('/')[-1].split('.')[1]\n with mrcfile.open(mrc) as mrcData:\n orig_data = normalize(mrcData.data.astype(np.float32)*-1, percentile = settings.normalize_percentile)\n orig_data = apply_wedge(orig_data, ld1=1, ld2=0)\n orig_data = normalize(orig_data, percentile = settings.normalize_percentile)\n\n with mrcfile.new('{}/{}_iter00.{}'.format(settings.result_dir,root_name, extension), overwrite=True) as output_mrc:\n output_mrc.set_data(-orig_data)\n\ndef extract_subtomos(settings):\n '''\n extract subtomo from whole tomogram based on mask\n and feed to generate_first_iter_mrc to generate xx_iter00.xx\n '''\n #mkfolder(settings.result_dir)\n #mkfolder(settings.subtomo_dir)\n md = MetaData()\n md.read(settings.star_file)\n if len(md)==0:\n sys.exit(\"No input exists. 
Please check it in input folder!\")\n\n subtomo_md = MetaData()\n subtomo_md.addLabels('rlnSubtomoIndex','rlnImageName','rlnCubeSize','rlnCropSize','rlnPixelSize')\n count=0\n for it in md:\n if settings.tomo_idx is None or str(it.rlnIndex) in settings.tomo_idx:\n pixel_size = it.rlnPixelSize\n if settings.use_deconv_tomo and \"rlnDeconvTomoName\" in md.getLabels() and os.path.isfile(it.rlnDeconvTomoName):\n logging.info(\"Extract from deconvolved tomogram {}\".format(it.rlnDeconvTomoName))\n with mrcfile.open(it.rlnDeconvTomoName) as mrcData:\n orig_data = mrcData.data.astype(np.float32)\n else: \n print(\"Extract from origional tomogram {}\".format(it.rlnMicrographName))\n with mrcfile.open(it.rlnMicrographName) as mrcData:\n orig_data = mrcData.data.astype(np.float32)\n \n\n if \"rlnMaskName\" in md.getLabels() and it.rlnMaskName not in [None, \"None\"]:\n with mrcfile.open(it.rlnMaskName) as m:\n mask_data = m.data\n else:\n mask_data = None\n logging.info(\" mask not been used for tomogram {}!\".format(it.rlnIndex))\n\n seeds=create_cube_seeds(orig_data, it.rlnNumberSubtomo, settings.crop_size,mask=mask_data)\n subtomos=crop_cubes(orig_data,seeds,settings.crop_size)\n\n # save sampled subtomo to {results_dir}/subtomos instead of subtomo_dir (as previously does)\n base_name = os.path.splitext(os.path.basename(it.rlnMicrographName))[0]\n \n for j,s in enumerate(subtomos):\n im_name = '{}/{}_{:0>6d}.mrc'.format(settings.subtomo_dir, base_name, j)\n with mrcfile.new(im_name, overwrite=True) as output_mrc:\n count+=1\n subtomo_it = Item()\n subtomo_md.addItem(subtomo_it)\n subtomo_md._setItemValue(subtomo_it,Label('rlnSubtomoIndex'), str(count))\n subtomo_md._setItemValue(subtomo_it,Label('rlnImageName'), im_name)\n subtomo_md._setItemValue(subtomo_it,Label('rlnCubeSize'),settings.cube_size)\n subtomo_md._setItemValue(subtomo_it,Label('rlnCropSize'),settings.crop_size)\n subtomo_md._setItemValue(subtomo_it,Label('rlnPixelSize'),pixel_size)\n output_mrc.set_data(s.astype(np.float32))\n subtomo_md.write(settings.subtomo_star)\n\n\n#preparation files for the first iteration\ndef prepare_first_iter(settings):\n # extract_subtomos(settings)\n mkfolder(settings.result_dir) \n # settings.mrc_list = os.listdir(settings.subtomo_dir)\n # settings.mrc_list = ['{}/{}'.format(settings.subtomo_dir,i) for i in settings.mrc_list]\n\n #need further test\n #with Pool(settings.preprocessing_ncpus) as p:\n # func = partial(generate_first_iter_mrc, settings)\n # res = p.map(func, settings.mrc_list)\n\n if settings.preprocessing_ncpus >1:\n with Pool(settings.preprocessing_ncpus) as p:\n func = partial(generate_first_iter_mrc, settings=settings)\n res = p.map(func, settings.mrc_list)\n # res = p.map(generate_first_iter_mrc, settings.mrc_list)\n else:\n for i in settings.mrc_list:\n generate_first_iter_mrc(i,settings)\n return settings\n \ndef get_cubes_one(data, settings, start = 0, mask = None, add_noise = 0):\n '''\n crop out one subtomo and missing wedge simulated one from input data,\n and save them as train set\n '''\n data_cubes = DataCubes(data, nCubesPerImg=1, cubeSideLen = settings.cube_size, cropsize = settings.crop_size, \n mask = mask, noise_folder = settings.noise_dir,noise_level = settings.noise_level_current,noise_mode = settings.noise_mode)\n for i,img in enumerate(data_cubes.cubesX):\n with mrcfile.new('{}/train_x/x_{}.mrc'.format(settings.data_dir, i+start), overwrite=True) as output_mrc:\n output_mrc.set_data(img.astype(np.float32))\n with 
mrcfile.new('{}/train_y/y_{}.mrc'.format(settings.data_dir, i+start), overwrite=True) as output_mrc:\n output_mrc.set_data(data_cubes.cubesY[i].astype(np.float32))\n return 0\n\n\ndef get_cubes(inp,settings):\n '''\n current iteration mrc(in the 'results') + infomation from orignal subtomo\n normalized predicted + normalized orig -> normalize\n rotate by rotation_list and feed to get_cubes_one\n '''\n mrc, start = inp\n root_name = mrc.split('/')[-1].split('.')[0]\n current_mrc = '{}/{}_iter{:0>2d}.mrc'.format(settings.result_dir,root_name,settings.iter_count-1)\n\n with mrcfile.open(current_mrc) as mrcData:\n ow_data = mrcData.data.astype(np.float32)*-1\n ow_data = normalize(ow_data, percentile = settings.normalize_percentile)\n with mrcfile.open('{}/{}_iter00.mrc'.format(settings.result_dir,root_name)) as mrcData:\n iw_data = mrcData.data.astype(np.float32)*-1\n iw_data = normalize(iw_data, percentile = settings.normalize_percentile)\n\n if settings.iter_count <= settings.iterations:\n orig_data = apply_wedge(ow_data, ld1=0, ld2=1) + apply_wedge(iw_data, ld1 = 1, ld2=0)\n orig_data = normalize(orig_data, percentile = settings.normalize_percentile)\n else:\n orig_data = ow_data\n\n for r in rotation_list:\n data = np.rot90(orig_data, k=r[0][1], axes=r[0][0])\n data = np.rot90(data, k=r[1][1], axes=r[1][0])\n get_cubes_one(data, settings, start = start) \n start += 1#settings.ncube\n\ndef get_cubes_list(settings):\n '''\n generate new training dataset:\n map function 'get_cubes' to mrc_list from subtomo_dir\n seperate 10% generated cubes into test set.\n '''\n import os\n dirs_tomake = ['train_x','train_y', 'test_x', 'test_y']\n if not os.path.exists(settings.data_dir):\n os.makedirs(settings.data_dir)\n for d in dirs_tomake:\n folder = '{}/{}'.format(settings.data_dir, d)\n if not os.path.exists(folder):\n os.makedirs(folder)\n inp=[]\n for i,mrc in enumerate(settings.mrc_list):\n inp.append((mrc, i*len(rotation_list)))\n \n # inp: list 0f (mrc_dir, index * rotation times)\n\n if settings.preprocessing_ncpus > 1:\n func = partial(get_cubes, settings=settings)\n with Pool(settings.preprocessing_ncpus) as p:\n res = p.map(func,inp)\n if settings.preprocessing_ncpus == 1:\n for i in inp:\n get_cubes(i, settings)\n\n all_path_x = os.listdir(settings.data_dir+'/train_x')\n num_test = int(len(all_path_x) * 0.1) \n num_test = num_test - num_test%settings.ngpus + settings.ngpus\n all_path_y = ['y_'+i.split('_')[1] for i in all_path_x ]\n ind = np.random.permutation(len(all_path_x))[0:num_test]\n for i in ind:\n os.rename('{}/train_x/{}'.format(settings.data_dir, all_path_x[i]), '{}/test_x/{}'.format(settings.data_dir, all_path_x[i]) )\n os.rename('{}/train_y/{}'.format(settings.data_dir, all_path_y[i]), '{}/test_y/{}'.format(settings.data_dir, all_path_y[i]) )\n #os.rename('data/train_y/'+all_path_y[i], 'data/test_y/'+all_path_y[i])\n\ndef get_noise_level(noise_level_tuple,noise_start_iter_tuple,iterations):\n assert len(noise_level_tuple) == len(noise_start_iter_tuple) and type(noise_level_tuple) in [tuple,list]\n noise_level = np.zeros(iterations+1)\n for i in range(len(noise_start_iter_tuple)-1):\n assert i < iterations and noise_start_iter_tuple[i]< noise_start_iter_tuple[i+1]\n noise_level[noise_start_iter_tuple[i]:noise_start_iter_tuple[i+1]] = noise_level_tuple[i]\n assert noise_level_tuple[-1] < iterations \n noise_level[noise_start_iter_tuple[-1]:] = noise_level_tuple[-1]\n return noise_level\n"
] | [
[
"numpy.random.shuffle"
],
[
"numpy.rot90",
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
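The preprocessing file in the Heng-Z/mwr row above ends with get_noise_level, which expands a tuple of noise levels and the iterations at which each level starts into a per-iteration schedule. A small standalone rewrite of that expansion with a worked example follows; the function and argument names here are illustrative, not the project's API.

import numpy as np

def noise_schedule(levels, start_iters, iterations):
    # levels[k] applies from start_iters[k] until the next start; the last level runs to the end.
    schedule = np.zeros(iterations + 1)
    for level, start, end in zip(levels[:-1], start_iters[:-1], start_iters[1:]):
        schedule[start:end] = level
    schedule[start_iters[-1]:] = levels[-1]
    return schedule

print(noise_schedule(levels=(0.05, 0.10, 0.15), start_iters=(10, 15, 20), iterations=30))
# iterations 0-9 stay at 0, 10-14 use 0.05, 15-19 use 0.10, and 20 onwards use 0.15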
dwkim565/laygo2 | [
"fea1263638fa5641ad27f2000d7562cdf910c67f",
"fea1263638fa5641ad27f2000d7562cdf910c67f",
"fea1263638fa5641ad27f2000d7562cdf910c67f"
] | [
"examples/demo/7_thshin_test.py",
"examples/quick_start/quick_start.py",
"laygo2/util/transform.py"
] | [
"#!/usr/bin/python\n########################################################################################################################\n#\n# Copyright (c) 2014, Regents of the University of California\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without modification, are permitted provided that the\n# following conditions are met:\n#\n# 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following\n# disclaimer.\n# 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the\n# following disclaimer in the documentation and/or other materials provided with the distribution.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES,\n# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\n# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,\n# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n#\n########################################################################################################################\n\nimport numpy as np\nimport pprint\nimport laygo2\nimport laygo2.interface\nimport laygo2_tech as tech\n\n# Parameter definitions ##############\n# Templates\ntpmos_name = 'pmos'\ntnmos_name = 'nmos'\n# Grids\npg_name = 'placement_basic'\nr12_name = 'routing_12_cmos'\nr23_name = 'routing_23_cmos'\n# Design hierarchy\nlibname = 'thshin_65_to_40'\ncellname = 'thshin_test'\n# Design parameters\nnf_a = 2\nnf_b = 4\n# End of parameter definitions #######\n\n# Generation start ###################\n# 1. Load templates and grids.\nprint(\"Load templates\")\ntemplates = tech.load_templates()\ntpmos, tnmos = templates[tpmos_name], templates[tnmos_name]\nprint(templates[tpmos_name], templates[tnmos_name], sep=\"\\n\")\n\nprint(\"Load grids\")\ngrids = tech.load_grids(templates=templates)\npg, r12, r23 = grids[pg_name], grids[r12_name], grids[r23_name]\nprint(grids[pg_name], grids[r12_name], grids[r23_name], sep=\"\\n\")\n\n# 2. Create a design hierarchy.\nlib = laygo2.object.database.Library(name=libname)\ndsn = laygo2.object.database.Design(name=cellname, libname=libname)\nlib.append(dsn)\n\n# 3. Create instances.\nprint(\"Create instances\")\nin0 = tnmos.generate(name='MN0', params={'nf': nf_b, 'trackswap': False, 'tie': 'D', 'gbndl': True})\nin1 = tnmos.generate(name='MN1', params={'nf': nf_b, 'trackswap': True, 'tie': 'D', 'gbndl': True})\nin2 = tnmos.generate(name='MN2', params={'nf': nf_b, 'trackswap': True, 'tie': 'D', 'gbndl': True})\nin3 = tnmos.generate(name='MN3', params={'nf': nf_b, 'trackswap': True, 'gbndl': True})\n#in1 = tnmos.generate(name='MN1', params={'nf': nf_a, 'gbndr': True})\n#ip0 = tpmos.generate(name='MP0', transform='MX', params={'nf': nf_b, 'tie': 'S',11gbndl': True})\n#ip1 = tpmos.generate(name='MP1', transform='MX', params={'nf': nf_a, 'trackswap': True, 'tie': 'D', 'gbndr': True})\n\n# 4. 
Place instances.\ndsn.place(grid=pg, inst=in0, mn=pg.mn[0, 0])\ndsn.place(grid=pg, inst=in1, mn=pg.mn.bottom_right(in0)+np.array([2, 0])) # same with pg == in0.bottom_right\ndsn.place(grid=pg, inst=in2, mn=pg.mn.top_left(in1)+np.array([0, 4])) # same with pg == in0.bottom_right\ndsn.place(grid=pg, inst=in3, mn=pg.mn.bottom_right(in2)+np.array([2, 0])) # same with pg == in0.bottom_right\n\n#dsn.place(grid=pg, inst=ip0, mn=pg.mn.top_left(in0) + pg.mn.height_vec(ip0)) # +height_vec due to MX transform\n#dsn.place(grid=pg, inst=ip1, mn=pg.mn.top_right(ip0))\n\n# 5. Create and place wires.\nprint(\"Create wires\")\n# A\n#_mn = [r23.mn(in1.pins['G'])[0], r23.mn(ip1.pins['G'])[0]]\n#va0, ra0, va1 = dsn.route(grid=r23, mn=_mn, via_tag=[True, True])\n# B\n#_mn = [r23.mn(in0.pins['G'])[0], r23.mn(ip0.pins['G'])[0]]\n#vb0, rb0, vb1 = dsn.route(grid=r23, mn=_mn, via_tag=[True, True])\n# Internal\n#_mn = [r12.mn(in0.pins['S'])[0], r12.mn(in1.pins['D'])[0]]\n#ri0 = dsn.route(grid=r23, mn=_mn)\n#_mn = [r12.mn(ip0.pins['D'])[0], r12.mn(ip1.pins['S'])[0]]\n#ri1 = dsn.route(grid=r23, mn=_mn)\n# Output\n#_mn = [r23.mn(in1.pins['S'])[1], r23.mn(ip1.pins['S'])[1]]\n#_track = [r23.mn(ip1.pins['S'])[1, 0], None]\n#_, vo0, ro0, vo1, _= dsn.route_via_track(grid=r23, mn=_mn, track=_track)\n# VSS\n#rvss0 = dsn.route(grid=r12, mn=[r12.mn(in0.pins['RAIL'])[0], r12.mn(in1.pins['RAIL'])[1]])\n# VDD\n#rvdd0 = dsn.route(grid=r12, mn=[r12.mn(ip0.pins['RAIL'])[0], r12.mn(ip1.pins['RAIL'])[1]])\n\n# 6. Create pins.\n#pa0 = dsn.pin(name='A', grid=r23, mn=r23.mn.bbox(ra0))\n#pb0 = dsn.pin(name='B', grid=r23, mn=r23.mn.bbox(rb0))\n#po0 = dsn.pin(name='O', grid=r23, mn=r23.mn.bbox(ro0))\n#pvss0 = dsn.pin(name='VSS', grid=r12, mn=r12.mn.bbox(rvss0))\n#pvdd0 = dsn.pin(name='VDD', grid=r12, mn=r12.mn.bbox(rvdd0))\n\n# 7. Export to physical database.\nprint(\"Export design\")\nprint(dsn)\n# Uncomment for GDS export\n\"\"\"\n#abstract = False # export abstract\n#laygo2.interface.gds.export(lib, filename=libname+'_'+cellname+'.gds', cellname=None, scale=1e-9,\n# layermapfile=\"../technology_example/technology_example.layermap\", physical_unit=1e-9, logical_unit=0.001,\n# pin_label_height=0.1, pin_annotate_layer=['text', 'drawing'], text_height=0.1,\n# abstract_instances=abstract)\n\"\"\"\n\n# Uncomment for SKILL export\n\"\"\"\n#skill_str = laygo2.interface.skill.export(lib, filename=libname+'_'+cellname+'.il', cellname=None, scale=1e-3)\n#print(skill_str)\n\"\"\"\n\n# Uncomment for BAG export\nlaygo2.interface.bag.export(lib, filename=libname+'_'+cellname+'.il', cellname=None, scale=1e-3, reset_library=False, tech_library=tech.name)\n\n# 7-a. Import the GDS file back and display\n#with open('nand_generate.gds', 'rb') as stream:\n# pprint.pprint(laygo2.interface.gds.readout(stream, scale=1e-9))\n\n# 8. Export to a template database file.\nnat_temp = dsn.export_to_template()\nlaygo2.interface.yaml.export_template(nat_temp, filename=libname+'_templates.yaml', mode='append')\n\n",
"#!/usr/bin/python\n########################################################################################################################\n#\n# Copyright (c) 2014, Regents of the University of California\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without modification, are permitted provided that the\n# following conditions are met:\n#\n# 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following\n# disclaimer.\n# 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the\n# following disclaimer in the documentation and/or other materials provided with the distribution.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES,\n# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\n# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,\n# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n#\n########################################################################################################################\n\nimport numpy as np\nimport pprint\nimport laygo2\nimport laygo2.interface\nimport quick_start_tech as tech\n\n# Parameter definitions ##############\n# Templates\ntpmos_name = 'pmos'\ntnmos_name = 'nmos'\n# Grids\npg_name = 'placement_cmos'\nr12_name = 'routing_12_cmos'\nr23_name = 'routing_23_cmos'\n# Design hierarchy\nlibname = 'laygo2_test'\ncellname = 'nand2'\n# Design parameters\nnf_a = 3\nnf_b = 4\n# End of parameter definitions #######\n\n# Generation start ###################\n# 1. Load templates and grids.\nprint(\"Load templates\")\ntemplates = tech.load_templates()\ntpmos, tnmos = templates[tpmos_name], templates[tnmos_name]\nprint(templates[tpmos_name], templates[tnmos_name], sep=\"\\n\")\n\nprint(\"Load grids\")\ngrids = tech.load_grids(templates=templates)\npg, r12, r23 = grids[pg_name], grids[r12_name], grids[r23_name]\nprint(grids[pg_name], grids[r12_name], grids[r23_name], sep=\"\\n\")\n\n# 2. Create a design hierarchy.\nlib = laygo2.object.database.Library(name=libname)\ndsn = laygo2.object.database.Design(name=cellname)\nlib.append(dsn)\n\n# 3. Create instances.\nprint(\"Create instances\")\nin0 = tnmos.generate(name='MN0', params={'nf': nf_b})\nsd_swap = False if nf_b % 2 == 1 else True\nin1 = tnmos.generate(name='MN1', params={'nf': nf_a, 'sd_swap': sd_swap})\nip0 = tpmos.generate(name='MP0', transform='MX', params={'nf': nf_b})\nsd_swap = True if nf_b % 2 == 1 else False\nip1 = tpmos.generate(name='MP1', transform='MX', params={'nf': nf_a, 'sd_swap': sd_swap})\n\n# 4. Place instances.\ndsn.place(grid=pg, inst=in0, mn=pg.mn[0, 0])\ndsn.place(grid=pg, inst=in1, mn=pg.mn.bottom_right(in0)) # same with pg == in0.bottom_right\ndsn.place(grid=pg, inst=ip0, mn=pg.mn.top_left(in0) + np.array([0, pg.mn.height(ip0)])) # +height due to MX transform\ndsn.place(grid=pg, inst=ip1, mn=pg.mn.top_right(ip0))\n\n# 5. 
Create and place wires.\nprint(\"Create wires\")\n# A\nra0 = dsn.route(grid=r12, mn=r12.mn.bbox(in1.pins['G']))\nva0 = dsn.via(grid=r12, mn=r12.mn.overlap(ra0, in1.pins['G'], type='array'))\nra1 = dsn.route(grid=r12, mn=r12.mn.bbox(ip1.pins['G']))\nva1 = dsn.via(grid=r12, mn=r12.mn.overlap(ra1, ip1.pins['G'], type='array'))\nva3, ra2, va4 = dsn.route(grid=r23, mn=[r23.mn.bottom_left(ra0), r23.mn.top_left(ra1)], via_tag=[True, True])\n# B\nrb0 = dsn.route(grid=r12, mn=r12.mn.bbox(in0.pins['G']))\nvb0 = dsn.via(grid=r12, mn=r12.mn.overlap(rb0, in0.pins['G'], type='array'))\nrb1 = dsn.route(grid=r12, mn=r12.mn.bbox(ip0.pins['G']))\nvb1 = dsn.via(grid=r12, mn=r12.mn.overlap(rb1, ip0.pins['G'], type='array'))\nvb3, rb2, vb4 = dsn.route(grid=r23, mn=[r23.mn.bottom_left(rb0), r23.mn.top_left(rb1)], via_tag=[True, True])\n# Internal\nif not (nf_a == 1 and nf_b == 1):\n ri0 = dsn.route(grid=r12, mn=[r12.mn.bottom_left(in0.pins['D'][0]) + np.array([0, 1]),\n r12.mn.bottom_right(in1.pins['S'][-1]) + np.array([0, 1])])\n vi0 = [dsn.via(grid=r12, mn=r12.mn.overlap(ri0, i, type='point')) for i in in0.pins['D']]\n vi1 = [dsn.via(grid=r12, mn=r12.mn.overlap(ri0, i, type='point')) for i in in1.pins['S']]\n# Output\nron0 = dsn.route(grid=r12, mn=[r12.mn.bottom_left(in1.pins['D'][0]) + np.array([0, 2]),\n r12.mn.bottom_right(in1.pins['D'][-1]) + np.array([0, 2])])\nvon0 = [dsn.via(grid=r12, mn=r12.mn.overlap(ron0, i, type='point')) for i in in1.pins['D']]\nrop0 = dsn.route(grid=r12, mn=[r12.mn.bottom_left(ip0.pins['D'][0]),\n r12.mn.bottom_right(ip1.pins['D'][-1])])\nvop0 = [dsn.via(grid=r12, mn=r12.mn.overlap(rop0, i, type='point')) for i in ip0.pins['D']]\nvop1 = [dsn.via(grid=r12, mn=r12.mn.overlap(rop0, i, type='point')) for i in ip1.pins['D']]\nm = r23.mn.bottom_right(ra2)[0] + 1\nvo0, ro0, vo1 = dsn.route(grid=r23, mn=np.array([[m, r23.mn.bottom_right(ron0)[1]], [m, r23.mn.bottom_right(rop0)[1]]]),\n via_tag=[True, True])\n# VSS\nrvss0 = dsn.route(grid=r12, mn=[r12.mn.bottom_left(in0.pins['S'][0]), r12.mn.bottom_left(in1.pins['S'][0])])\nvvss = [dsn.via(grid=r12, mn=r12.mn.overlap(rvss0, s, type='point')) for s in in0.pins['S']]\n# VDD\nrvdd0 = dsn.route(grid=r12, mn=[r12.mn.top_left(ip0.pins['S'][0]), r12.mn.top_right(ip1.pins['S'][-1])])\nvvdd = [dsn.via(grid=r12, mn=r12.mn.overlap(rvdd0, s, type='point')) for s in ip0.pins['S']]\nvvdd += [dsn.via(grid=r12, mn=r12.mn.overlap(rvdd0, s, type='point')) for s in ip1.pins['S']]\n\n# 6. Create pins.\npa0 = dsn.pin(name='A', grid=r23, mn=r23.mn.bbox(ra2))\npb0 = dsn.pin(name='B', grid=r23, mn=r23.mn.bbox(rb2))\npo0 = dsn.pin(name='O', grid=r23, mn=r23.mn.bbox(ro0))\npvss0 = dsn.pin(name='VSS', grid=r12, mn=r12.mn.bbox(rvss0))\npvdd0 = dsn.pin(name='VDD', grid=r12, mn=r12.mn.bbox(rvdd0))\n\nprint(dsn)\n\n# 7. Export to physical database.\nprint(\"Export design\")\nabstract = False # export abstract\nlaygo2.interface.gds.export(lib, filename=libname+'_'+cellname+'.gds', cellname=None, scale=1e-9,\n layermapfile=\"./quick_start_tech/technology_example.layermap\", physical_unit=1e-9, logical_unit=0.001,\n pin_label_height=0.1, pin_annotate_layer=['text', 'drawing'], text_height=0.1,\n abstract_instances=abstract)\nskill_str = laygo2.interface.skill.export(lib, filename=libname+'_'+cellname+'.il', cellname=None, scale=1e-3)\nprint(skill_str)\n\n# 7-a. Import the GDS file back and display\nwith open(libname+'_'+cellname+'.gds', 'rb') as stream:\n pprint.pprint(laygo2.interface.gds.readout(stream, scale=1e-9))\n\n# 8. 
Export to a template database file.\nnat_temp = dsn.export_to_template()\nlaygo2.interface.yaml.export_template(nat_temp, filename=libname+'_templates.yaml', mode='append')\n\n",
"#!/usr/bin/python\n########################################################################################################################\n#\n# Copyright (c) 2014, Regents of the University of California\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without modification, are permitted provided that the\n# following conditions are met:\n#\n# 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following\n# disclaimer.\n# 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the\n# following disclaimer in the documentation and/or other materials provided with the distribution.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES,\n# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\n# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,\n# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n#\n########################################################################################################################\n\n\"\"\"\nUtility functions for coordinate tranformations.\n\"\"\"\n\n__author__ = \"Jaeduk Han\"\n__maintainer__ = \"Jaeduk Han\"\n__status__ = \"Prototype\"\n\nimport numpy as np\n\ndef combine(transform1, transform2):\n \"\"\"\n Returns the resulting transform parameter of \n two consecutive transforms\n \"\"\"\n if transform1 == 'R0':\n if transform2 == 'R0': return 'R0'\n elif transform2 == 'MX': return 'MX'\n elif transform2 == 'MY': return 'MY'\n elif transform2 == 'MXY': return 'MXY'\n elif transform1 == 'MX':\n if transform2 == 'R0': return 'MX'\n elif transform2 == 'MX': return 'R0'\n elif transform2 == 'MY': return 'MXY'\n elif transform2 == 'MXY': return 'MY'\n elif transform1 == 'MY':\n if transform2 == 'R0': return 'MY'\n elif transform2 == 'MX': return 'MXY'\n elif transform2 == 'MY': return 'R0'\n elif transform2 == 'MXY': return 'MX'\n raise ValueError(\"Transformation mapping is not matched.\")\n\n\ndef Mt(transform):\n \"\"\"\n Returns the transform matrix.\n\n Parameters\n ----------\n transform : str\n The transform parameter. Possible values are 'R0', 'MX', 'MY', 'MXY', and 'R180'.\n\n Returns\n -------\n numpy.ndarray(dtype=int)\n The transform matrix corresponding to the transform parameter.\n\n \"\"\"\n transform_map = {\n 'R0': np.array([[1, 0], [0, 1]]),\n 'R90': np.array([[0, -1], [1, 0]]),\n 'R180': np.array([[-1, 0], [0, -1]]),\n 'R270': np.array([[0, 1], [-1, 0]]),\n 'MX': np.array([[1, 0], [0, -1]]),\n 'MY': np.array([[-1, 0], [0, 1]]),\n 'MXY': np.array([[0, 1], [1, 0]]), # mirror to the y=x line.\n }\n return transform_map[transform]\n\n\ndef Mtinv(transform):\n \"\"\"\n Returns the inverse of the transform matrix.\n\n Parameters\n ----------\n transform : str\n The transform parameter. 
possible values are 'R0', 'MX', 'MY', 'MXY', and 'R180'.\n\n Returns\n -------\n numpy.ndarray(dtype=int)\n The inverse of the transform matrix.\n \"\"\"\n transform_map = {\n 'R0': np.array([[1, 0], [0, 1]]), 'MX': np.array([[1, 0], [0, -1]]),\n 'MY': np.array([[-1, 0], [0, 1]]),\n 'MXY': np.array([[0, 1], [1, 0]]), # mirror to the y=x line.\n 'R180': np.array([[-1, 0], [0, -1]]),\n }\n return transform_map[transform]\n\n\ndef Md(direction):\n \"\"\"\n Returns the direction(projection) matrix. The direction matrix is used when placing an object based on relative\n information to other instance(s). For example, if an instance's center is located at xyc0=[xc0, yc0],\n the xy-coordinate of the center of the new instance xyc1 can be computed from the following equation:\n\n (1) xyc1 = xyc0 + 0.5 * Md * (xys0 + xys1)\n\n where xys0, xys1 are the size of the reference and the new instance, respectively, and Md is the direction matrix\n corresponding to the direction of the placement.\n\n Parameters\n ----------\n direction : str\n The direction parameter. Possible values are 'left', 'right', 'top', 'bottom', 'omni', 'x', 'y'.\n\n Returns\n -------\n np.array([[int, int], [int, int]])\n The direction matrix.\n\n Notes\n -----\n The following equation will be used instead of (1) in the future versions, to avoid the 0.5 scaling that increases\n the precision requirement.\n\n (2) xy1 = xy0 + 0.5 * [(Md + Mt0) * xys0 + (Md - Mt1) * xys1]\n \"\"\"\n direction_map = {\n 'left': np.array([[-1, 0], [0, 0]]),\n 'right': np.array([[1, 0], [0, 0]]),\n 'top': np.array([[0, 0], [0, 1]]),\n 'bottom': np.array([[0, 0], [0, -1]]),\n 'omni': np.array([[1, 0], [0, 1]]), # omnidirectional\n 'x': np.array([[1, 0], [0, 0]]),\n 'y': np.array([[0, 0], [0, 1]]),\n }\n return direction_map[direction]\n\n\n"
] | [
[
"numpy.array"
],
[
"numpy.array"
],
[
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
jihyunbak/spyglass | [
"780fe2c101db60d42a1b73ad8fd729db42620ba6"
] | [
"src/nwb_datajoint/spikesorting/spikesorting_artifact.py"
] | [
"import warnings\nfrom functools import reduce\n\nimport datajoint as dj\nimport numpy as np\nimport scipy.stats as stats\nimport spikeinterface as si\nfrom spikeinterface.core.segmentutils import AppendSegmentRecording\n\nfrom ..common.common_interval import IntervalList\nfrom ..common.nwb_helper_fn import get_valid_intervals\nfrom .spikesorting_recording import SpikeSortingRecording\n\nschema = dj.schema('spikesorting_artifact')\n\n@schema\nclass ArtifactDetectionParameters(dj.Manual):\n definition = \"\"\"\n # Parameters for detecting artifact times within a sort group.\n artifact_params_name: varchar(200)\n ---\n artifact_params: blob # dictionary of parameters\n \"\"\"\n\n def insert_default(self):\n \"\"\"Insert the default artifact parameters with an appropriate parameter dict.\n \"\"\"\n artifact_params = {}\n artifact_params['zscore_thresh'] = None # must be None or >= 0\n artifact_params['amplitude_thresh'] = 3000 # must be None or >= 0\n # all electrodes of sort group\n artifact_params['proportion_above_thresh'] = 1.0\n artifact_params['removal_window_ms'] = 1.0 # in milliseconds\n self.insert1(['default', artifact_params], skip_duplicates=True)\n\n artifact_params_none = {}\n artifact_params_none['zscore_thresh'] = None\n artifact_params_none['amplitude_thresh'] = None\n self.insert1(['none', artifact_params_none], skip_duplicates=True)\n\n\n@schema\nclass ArtifactDetectionSelection(dj.Manual):\n definition = \"\"\"\n # Specifies artifact detection parameters to apply to a sort group's recording.\n -> SpikeSortingRecording\n -> ArtifactDetectionParameters\n ---\n \"\"\"\n\n\n@schema\nclass ArtifactDetection(dj.Computed):\n definition = \"\"\"\n # Stores artifact times and valid no-artifact times as intervals.\n -> ArtifactDetectionSelection\n ---\n artifact_times: longblob # np array of artifact intervals\n artifact_removed_valid_times: longblob # np array of valid no-artifact intervals\n artifact_removed_interval_list_name: varchar(200) # name of the array of no-artifact valid time intervals\n \"\"\"\n\n def make(self, key):\n # get the dict of artifact params associated with this artifact_params_name\n artifact_params = (ArtifactDetectionParameters &\n key).fetch1(\"artifact_params\")\n\n recording_path = (SpikeSortingRecording & key).fetch1('recording_path')\n recording_name = SpikeSortingRecording._get_recording_name(key)\n recording = si.load_extractor(recording_path)\n\n artifact_removed_valid_times, artifact_times = _get_artifact_times(\n recording, **artifact_params)\n\n # NOTE: decided not to do this but to just create a single long segment; keep for now\n # get artifact times by segment\n # if AppendSegmentRecording, get artifact times for each segment\n # if isinstance(recording, AppendSegmentRecording):\n # artifact_removed_valid_times = []\n # artifact_times = []\n # for rec in recording.recording_list:\n # rec_valid_times, rec_artifact_times = _get_artifact_times(rec, **artifact_params)\n # for valid_times in rec_valid_times:\n # artifact_removed_valid_times.append(valid_times)\n # for artifact_times in rec_artifact_times:\n # artifact_times.append(artifact_times)\n # artifact_removed_valid_times = np.asarray(artifact_removed_valid_times)\n # artifact_times = np.asarray(artifact_times)\n # else:\n # artifact_removed_valid_times, artifact_times = _get_artifact_times(recording, **artifact_params)\n\n key['artifact_times'] = artifact_times\n key['artifact_removed_valid_times'] = artifact_removed_valid_times\n\n # set up a name for no-artifact times using recording 
id\n key['artifact_removed_interval_list_name'] = recording_name + \\\n '_' + key['artifact_params_name'] + '_artifact_removed_valid_times'\n\n ArtifactRemovedIntervalList.insert1(key, replace=True)\n\n # # insert artifact times and valid times into ArtifactRemovedIntervalList with an appropriate name\n # tmp_key = (ArtifactDetectionSelection & key).proj().fetch1()\n # tmp_key['artifact_removed_interval_list_name'] = key['artifact_removed_interval_list_name']\n # tmp_key['artifact_removed_valid_times'] = key['artifact_removed_valid_times']\n # tmp_key['artifact_times'] = key['artifact_times']\n # ArtifactRemovedIntervalList.insert1(tmp_key, skip_duplicates = True)\n\n # also insert into IntervalList\n tmp_key = {}\n tmp_key['nwb_file_name'] = key['nwb_file_name']\n tmp_key['interval_list_name'] = key['artifact_removed_interval_list_name']\n tmp_key['valid_times'] = key['artifact_removed_valid_times']\n IntervalList.insert1(tmp_key, replace=True)\n\n # insert into computed table\n self.insert1(key)\n\n\n@schema\nclass ArtifactRemovedIntervalList(dj.Manual):\n definition = \"\"\"\n # Stores intervals without detected artifacts.\n # Note that entries can come from either ArtifactDetection() or alternative artifact removal analyses.\n artifact_removed_interval_list_name: varchar(200)\n ---\n -> ArtifactDetectionSelection\n artifact_removed_valid_times: longblob\n artifact_times: longblob # np array of artifact intervals\n \"\"\"\n\n\ndef _get_artifact_times(recording, zscore_thresh=None, amplitude_thresh=None,\n proportion_above_thresh=1.0, removal_window_ms=1.0):\n \"\"\"Detects times during which artifacts do and do not occur.\n Artifacts are defined as periods where the absolute value of the recording signal exceeds one\n OR both specified amplitude or zscore thresholds on the proportion of channels specified,\n with the period extended by the removal_window_ms/2 on each side. 
Z-score and amplitude\n threshold values of None are ignored.\n\n Parameters\n ----------\n recording : si.Recording\n zscore_thresh : float, optional\n Stdev threshold for exclusion, should be >=0, defaults to None\n amplitude_thresh : float, optional\n Amplitude threshold for exclusion, should be >=0, defaults to None\n proportion_above_thresh : float, optional, should be>0 and <=1\n Proportion of electrodes that need to have threshold crossings, defaults to 1\n removal_window_ms : float, optional\n Width of the window in milliseconds to mask out per artifact (window/2 removed on each side of threshold crossing), defaults to 1 ms\n\n Returns\n ------_\n artifact_intervals : np.ndarray\n Intervals in which artifacts are detected (including removal windows), unit: seconds\n artifact_removed_valid_times : np.ndarray\n Intervals of valid times where artifacts were not detected, unit: seconds\n \"\"\"\n\n valid_timestamps = SpikeSortingRecording._get_recording_timestamps(\n recording)\n if recording.get_num_segments() > 1:\n recording = si.concatenate_recordings(recording.recording_list)\n\n # if both thresholds are None, we essentially skip artifract detection and\n # return an array with the times of the first and last samples of the recording\n if (amplitude_thresh is None) and (zscore_thresh is None):\n recording_interval = np.asarray(\n [valid_timestamps[0], valid_timestamps[-1]])\n artifact_times_empty = np.asarray([])\n print(\"Amplitude and zscore thresholds are both None, skipping artifact detection\")\n return recording_interval, artifact_times_empty\n\n # verify threshold parameters\n amplitude_thresh, zscore_thresh, proportion_above_thresh = _check_artifact_thresholds(\n amplitude_thresh, zscore_thresh, proportion_above_thresh)\n\n # turn ms to remove total into s to remove from either side of each detected artifact\n half_removal_window_s = removal_window_ms * (1 / 1000) * (1 / 2)\n\n # TODO: load by chunk to avoid memory problems\n data = recording.get_traces()\n\n # compute the number of electrodes that have to be above threshold\n nelect_above = np.ceil(proportion_above_thresh *\n len(recording.get_channel_ids()))\n\n # find the artifact occurrences using one or both thresholds, across channels\n if ((amplitude_thresh is not None) and (zscore_thresh is None)):\n above_a = np.abs(data) > amplitude_thresh\n above_thresh = np.ravel(np.argwhere(\n np.sum(above_a, axis=0) >= nelect_above))\n elif ((amplitude_thresh is None) and (zscore_thresh is not None)):\n dataz = np.abs(stats.zscore(data, axis=1))\n above_z = dataz > zscore_thresh\n above_thresh = np.ravel(np.argwhere(\n np.sum(above_z, axis=0) >= nelect_above))\n else:\n above_a = np.abs(data) > amplitude_thresh\n dataz = np.abs(stats.zscore(data, axis=1))\n above_z = dataz > zscore_thresh\n above_thresh = np.ravel(np.argwhere(\n np.sum(np.logical_or(above_z, above_a), axis=0) >= nelect_above))\n\n if len(above_thresh) == 0:\n recording_interval = np.asarray(\n [[valid_timestamps[0], valid_timestamps[-1]]])\n artifact_times_empty = np.asarray([])\n print(\"No artifacts detected.\")\n return recording_interval, artifact_times_empty\n\n # find timestamps of initial artifact threshold crossings\n above_thresh_times = valid_timestamps[above_thresh]\n\n # keep track of all the artifact timestamps within each artifact removal window and the indices of those timestamps\n artifact_times = []\n artifact_indices = []\n for a in above_thresh_times:\n a_times = np.copy(valid_timestamps[(valid_timestamps > (\n a - half_removal_window_s)) & 
(valid_timestamps <= (a + half_removal_window_s))])\n a_indices = np.argwhere((valid_timestamps > (\n a - half_removal_window_s)) & (valid_timestamps <= (a + half_removal_window_s)))\n artifact_times.append(a_times)\n artifact_indices.append(a_indices)\n all_artifact_times = reduce(np.union1d, artifact_times)\n all_artifact_indices = reduce(np.union1d, artifact_indices)\n # turn artifact detected times into intervals\n # should be faster than diffing and comparing to zero\n if not np.all(all_artifact_times[:-1] <= all_artifact_times[1:]):\n warnings.warn(\n \"Warning: sorting artifact timestamps; all_artifact_times was not strictly increasing\")\n all_artifact_times = np.sort(all_artifact_times)\n artifact_intervals = get_valid_intervals(\n all_artifact_times, recording.get_sampling_frequency(), 1.5, .000001)\n\n artifact_percent_of_times = 100 * \\\n len(all_artifact_times) / len(valid_timestamps)\n print(f\"{len(artifact_intervals)} artifact intervals detected;\\\n {artifact_percent_of_times} % of the recording's valid_timestamps removed as artifact\")\n\n # turn all artifact detected times into -1 to easily find non-artifact intervals\n valid_timestamps[all_artifact_indices] = -1\n artifact_removed_valid_times = get_valid_intervals(valid_timestamps[valid_timestamps != -1],\n recording.get_sampling_frequency(), 1.5, 0.000001)\n\n return artifact_removed_valid_times, artifact_intervals\n\n\ndef _check_artifact_thresholds(amplitude_thresh, zscore_thresh, proportion_above_thresh):\n \"\"\"Alerts user to likely unintended parameters. Not an exhaustive verification.\n\n Parameters\n ----------\n zscore_thresh: float\n amplitude_thresh: float\n proportion_above_thresh: float\n\n Return\n ------\n zscore_thresh: float\n amplitude_thresh: float\n proportion_above_thresh: float\n\n Raise\n ------\n ValueError: if signal thresholds are negative\n \"\"\"\n # amplitude or zscore thresholds should be negative, as they are applied to an absolute signal\n signal_thresholds = [t for t in [\n amplitude_thresh, zscore_thresh] if t is not None]\n for t in signal_thresholds:\n if t < 0:\n raise ValueError(\n \"Amplitude and Z-Score thresholds must be >= 0, or None\")\n\n # proportion_above_threshold should be in [0:1] inclusive\n if proportion_above_thresh < 0:\n warnings.warn(\n \"Warning: proportion_above_thresh must be a proportion >0 and <=1. Using proportion_above_thresh = 0.01 instead of \" + str(proportion_above_thresh))\n proportion_above_thresh = 0.01\n elif proportion_above_thresh > 1:\n warnings.warn(\n \"Warning: proportion_above_thresh must be a proportion >0 and <=1. Using proportion_above_thresh = 1 instead of \" + str(proportion_above_thresh))\n proportion_above_thresh = 1\n return amplitude_thresh, zscore_thresh, proportion_above_thresh\n"
] | [
[
"numpy.abs",
"numpy.asarray",
"scipy.stats.zscore",
"numpy.sort",
"numpy.argwhere",
"numpy.all",
"numpy.logical_or",
"numpy.copy",
"numpy.sum"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"1.6",
"0.14",
"1.10",
"0.15",
"1.4",
"0.16",
"1.9",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"0.12",
"1.0",
"0.17",
"1.3",
"1.8"
],
"tensorflow": []
}
] |
taylorguo/model-optimization | [
"ddd9a67c7599214a4061ae04b28387171d29d96a"
] | [
"CPD/CPDtorch/quant/quant_function.py"
] | [
"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport numpy as np\nfrom torch.utils.cpp_extension import load\nimport os\ncurrent_path = os.path.dirname(os.path.realpath(__file__))\n\nif torch.cuda.is_available():\n quant_cuda = load(\n name='quant_cuda',\n sources=[\n os.path.join(current_path, \"quant_cuda/quant_cuda.cpp\"),\n os.path.join(current_path, \"quant_cuda/float_kernel.cu\"),\n os.path.join(current_path, \"quant_cuda/quant.cu\"),\n ],\n )\nelse:\n quant_cuda = None\n\n__all__ = ['float_quantize', \"quantizer\", \"quant_gemm\"]\n\n\ndef get_module(x):\n if x.is_cuda:\n quant_module = quant_cuda\n else:\n raise NotImplementedError(\n \"Currently, we do not support customized precision for CPU\")\n return quant_module\n\n\ndef quantizer(forward_exp=8, forward_man=23, backward_exp=8, backward_man=23):\n\n class Rounding(torch.autograd.Function):\n @staticmethod\n def forward(self, x):\n if forward_exp == 8 and forward_man == 23:\n return x\n quant_module = get_module(x)\n out = quant_module.float_quantize_nearest(\n x.contiguous(), forward_man, forward_exp)\n return out\n\n @staticmethod\n def backward(self, grad_output):\n if self.needs_input_grad[0]:\n if backward_exp == 8 and backward_man == 23:\n return grad_output\n quant_module = get_module(grad_output)\n grad_input = quant_module.float_quantize_nearest(\n grad_output.contiguous(), backward_man, backward_exp)\n else:\n grad_input = None\n return grad_input\n\n return Rounding.apply\n\n\ndef float_quantize(x, exp, man):\n \"\"\"\n Quantize a single precision Floating Point into low-precision Floating Point\n\n Args:\n - :attr: `x` (torch.Tensor) : the single precision number(torch.Tensor) to be quantized\n - :attr: `exp` (int) : number of bits allocated for exponent\n - :attr: `man` (int) : number of bits allocated for mantissa, not counting the virtual bit\n\n Returns:\n - a quantized low-precision floating point number (torch.Tensor)\n \"\"\"\n assert isinstance(\n x, torch.Tensor), \"x is not a single precision Floating Point Tensor\"\n quant_module = get_module(x)\n return quant_module.float_quantize_nearest(x.contiguous(), man, exp)\n\n\ndef quant_gemm(a, b, man=23, exp=8):\n \"\"\"\n Quantize GEMM with customized precision as accumulator\n\n Args:\n - :attr: `a` (torch.Tensor) : the input of GEMM, with shape:(M, K)\n - :attr: `b` (torch.Tensor) : the input of GEMM, with shape:(K, N)\n - :attr: `exp` (int) : number of bits allocated for exponent\n - :attr: `man` (int) : number of bits allocated for mantissa, not counting the virtual bit\n\n Returns:\n - the result of GEMM (torch.Tensor)\n \"\"\"\n assert len(a.shape) == 2\n assert len(b.shape) == 2\n assert a.shape[1] == b.shape[0]\n quant_module = get_module(a)\n c = torch.zeros(a.shape[0], b.shape[1]).cuda()\n quant_module.float_quantize_gemm(a.contiguous(), b.contiguous(), c.contiguous(),\n a.shape[0], b.shape[1], a.shape[1], man, exp)\n return c\n"
] | [
[
"torch.cuda.is_available",
"torch.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
JiachenMao/TransfornerPrune | [
"084956e00807af5ce3f363d964f327405862e51b"
] | [
"tensor2tensor/models/resnet.py"
] | [
"# coding=utf-8\n# Copyright 2019 The Tensor2Tensor Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Resnets.\"\"\"\n# Copied from cloud_tpu/models/resnet/resnet_model.py and modified\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom tensor2tensor.layers import common_hparams\nfrom tensor2tensor.layers import common_layers\nfrom tensor2tensor.utils import registry\nfrom tensor2tensor.utils import t2t_model\nfrom tensor2tensor.utils.hparam import HParams\n\nimport tensorflow as tf\n\n\nBATCH_NORM_DECAY = 0.9\nBATCH_NORM_EPSILON = 1e-5\n\n\n# TODO(lukaszkaiser): remove or simplify after V2 work is done.\ndef layers():\n return common_layers.layers()\n\n\ndef batch_norm_relu(inputs,\n is_training,\n relu=True,\n init_zero=False,\n data_format=\"channels_first\"):\n \"\"\"Performs a batch normalization followed by a ReLU.\n\n Args:\n inputs: `Tensor` of shape `[batch, channels, ...]`.\n is_training: `bool` for whether the model is training.\n relu: `bool` if False, omits the ReLU operation.\n init_zero: `bool` if True, initializes scale parameter of batch\n normalization with 0 instead of 1 (default).\n data_format: `str` either \"channels_first\" for `[batch, channels, height,\n width]` or \"channels_last for `[batch, height, width, channels]`.\n\n Returns:\n A normalized `Tensor` with the same `data_format`.\n \"\"\"\n if init_zero:\n gamma_initializer = tf.zeros_initializer()\n else:\n gamma_initializer = tf.ones_initializer()\n\n if data_format == \"channels_first\":\n axis = 1\n else:\n axis = 3\n\n inputs = layers().BatchNormalization(\n axis=axis,\n momentum=BATCH_NORM_DECAY,\n epsilon=BATCH_NORM_EPSILON,\n center=True,\n scale=True,\n fused=True,\n gamma_initializer=gamma_initializer)(inputs, training=is_training)\n\n if relu:\n inputs = tf.nn.relu(inputs)\n return inputs\n\n\ndef fixed_padding(inputs, kernel_size, data_format=\"channels_first\"):\n \"\"\"Pads the input along the spatial dimensions independently of input size.\n\n Args:\n inputs: `Tensor` of size `[batch, channels, height, width]` or\n `[batch, height, width, channels]` depending on `data_format`.\n kernel_size: `int` kernel size to be used for `conv2d` or max_pool2d`\n operations. 
Should be a positive integer.\n data_format: `str` either \"channels_first\" for `[batch, channels, height,\n width]` or \"channels_last for `[batch, height, width, channels]`.\n\n Returns:\n A padded `Tensor` of the same `data_format` with size either intact\n (if `kernel_size == 1`) or padded (if `kernel_size > 1`).\n \"\"\"\n pad_total = kernel_size - 1\n pad_beg = pad_total // 2\n pad_end = pad_total - pad_beg\n if data_format == \"channels_first\":\n padded_inputs = tf.pad(\n inputs, [[0, 0], [0, 0], [pad_beg, pad_end], [pad_beg, pad_end]])\n else:\n padded_inputs = tf.pad(\n inputs, [[0, 0], [pad_beg, pad_end], [pad_beg, pad_end], [0, 0]])\n\n return padded_inputs\n\n\ndef conv2d_fixed_padding(inputs,\n filters,\n kernel_size,\n strides,\n data_format=\"channels_first\",\n use_td=False,\n targeting_rate=None,\n keep_prob=None,\n is_training=None):\n \"\"\"Strided 2-D convolution with explicit padding.\n\n The padding is consistent and is based only on `kernel_size`, not on the\n dimensions of `inputs` (as opposed to using `tf.layers.conv2d` alone).\n\n Args:\n inputs: `Tensor` of size `[batch, channels, height_in, width_in]`.\n filters: `int` number of filters in the convolution.\n kernel_size: `int` size of the kernel to be used in the convolution.\n strides: `int` strides of the convolution.\n data_format: `str` either \"channels_first\" for `[batch, channels, height,\n width]` or \"channels_last for `[batch, height, width, channels]`.\n use_td: `str` one of \"weight\" or \"unit\". Set to False or \"\" to disable\n targeted dropout.\n targeting_rate: `float` proportion of weights to target with targeted\n dropout.\n keep_prob: `float` keep probability for targeted dropout.\n is_training: `bool` for whether the model is in training.\n\n Returns:\n A `Tensor` of shape `[batch, filters, height_out, width_out]`.\n\n Raises:\n Exception: if use_td is not valid.\n \"\"\"\n if strides > 1:\n inputs = fixed_padding(inputs, kernel_size, data_format=data_format)\n\n if use_td:\n inputs_shape = common_layers.shape_list(inputs)\n if use_td == \"weight\":\n if data_format == \"channels_last\":\n size = kernel_size * kernel_size * inputs_shape[-1]\n else:\n size = kernel_size * kernel_size * inputs_shape[1]\n targeting_count = targeting_rate * tf.to_float(size)\n targeting_fn = common_layers.weight_targeting\n elif use_td == \"unit\":\n targeting_count = targeting_rate * filters\n targeting_fn = common_layers.unit_targeting\n else:\n raise Exception(\"Unrecognized targeted dropout type: %s\" % use_td)\n\n y = common_layers.td_conv(\n inputs,\n filters,\n kernel_size,\n targeting_count,\n targeting_fn,\n keep_prob,\n is_training,\n do_prune=True,\n strides=strides,\n padding=(\"SAME\" if strides == 1 else \"VALID\"),\n data_format=data_format,\n use_bias=False,\n kernel_initializer=tf.variance_scaling_initializer())\n else:\n y = layers().Conv2D(\n filters=filters,\n kernel_size=kernel_size,\n strides=strides,\n padding=(\"SAME\" if strides == 1 else \"VALID\"),\n use_bias=False,\n kernel_initializer=tf.variance_scaling_initializer(),\n data_format=data_format)(inputs)\n \n # added by mjc: to see the activation distribution\n tf.summary.histogram('activations', y)\n\n return y\n\n\ndef residual_block(inputs,\n filters,\n is_training,\n projection_shortcut,\n strides,\n final_block,\n data_format=\"channels_first\",\n use_td=False,\n targeting_rate=None,\n keep_prob=None):\n \"\"\"Standard building block for residual networks with BN before convolutions.\n\n Args:\n inputs: `Tensor` of size 
`[batch, channels, height, width]`.\n filters: `int` number of filters for the first two convolutions. Note that\n the third and final convolution will use 4 times as many filters.\n is_training: `bool` for whether the model is in training.\n projection_shortcut: `function` to use for projection shortcuts (typically\n a 1x1 convolution to match the filter dimensions). If None, no\n projection is used and the input is passed as unchanged through the\n shortcut connection.\n strides: `int` block stride. If greater than 1, this block will ultimately\n downsample the input.\n final_block: unused parameter to keep the same function signature as\n `bottleneck_block`.\n data_format: `str` either \"channels_first\" for `[batch, channels, height,\n width]` or \"channels_last for `[batch, height, width, channels]`.\n use_td: `str` one of \"weight\" or \"unit\". Set to False or \"\" to disable\n targeted dropout.\n targeting_rate: `float` proportion of weights to target with targeted\n dropout.\n keep_prob: `float` keep probability for targeted dropout.\n\n Returns:\n The output `Tensor` of the block.\n \"\"\"\n del final_block\n shortcut = inputs\n inputs = batch_norm_relu(inputs, is_training, data_format=data_format)\n\n if projection_shortcut is not None:\n shortcut = projection_shortcut(inputs)\n\n inputs = conv2d_fixed_padding(\n inputs=inputs,\n filters=filters,\n kernel_size=3,\n strides=strides,\n data_format=data_format,\n use_td=use_td,\n targeting_rate=targeting_rate,\n keep_prob=keep_prob,\n is_training=is_training)\n\n inputs = batch_norm_relu(inputs, is_training, data_format=data_format)\n # added by mjc: to see the activation distribution after relu\n tf.summary.histogram('activations_after_relu', inputs)\n # added by mjc: add activation sparsity\n # inputs = common_layers.activation_sparsity(inputs, sparsity=0.8)\n inputs = conv2d_fixed_padding(\n inputs=inputs,\n filters=filters,\n kernel_size=3,\n strides=1,\n data_format=data_format,\n use_td=use_td,\n targeting_rate=targeting_rate,\n keep_prob=keep_prob,\n is_training=is_training)\n\n return inputs + shortcut\n\n\ndef bottleneck_block(inputs,\n filters,\n is_training,\n projection_shortcut,\n strides,\n final_block,\n data_format=\"channels_first\",\n use_td=False,\n targeting_rate=None,\n keep_prob=None):\n \"\"\"Bottleneck block variant for residual networks with BN after convolutions.\n\n Args:\n inputs: `Tensor` of size `[batch, channels, height, width]`.\n filters: `int` number of filters for the first two convolutions. Note that\n the third and final convolution will use 4 times as many filters.\n is_training: `bool` for whether the model is in training.\n projection_shortcut: `function` to use for projection shortcuts (typically\n a 1x1 convolution to match the filter dimensions). If None, no\n projection is used and the input is passed as unchanged through the\n shortcut connection.\n strides: `int` block stride. If greater than 1, this block will ultimately\n downsample the input.\n final_block: `bool` set to True if it is this the final block in the group.\n This is changes the behavior of batch normalization initialization for\n the final batch norm in a block.\n data_format: `str` either \"channels_first\" for `[batch, channels, height,\n width]` or \"channels_last for `[batch, height, width, channels]`.\n use_td: `str` one of \"weight\" or \"unit\". 
Set to False or \"\" to disable\n targeted dropout.\n targeting_rate: `float` proportion of weights to target with targeted\n dropout.\n keep_prob: `float` keep probability for targeted dropout.\n\n Returns:\n The output `Tensor` of the block.\n \"\"\"\n # TODO(chrisying): this block is technically the post-activation resnet-v1\n # bottleneck unit. Test with v2 (pre-activation) and replace if there is no\n # difference for consistency.\n shortcut = inputs\n if projection_shortcut is not None:\n shortcut = projection_shortcut(inputs)\n\n inputs = conv2d_fixed_padding(\n inputs=inputs,\n filters=filters,\n kernel_size=1,\n strides=1,\n data_format=data_format,\n use_td=use_td,\n targeting_rate=targeting_rate,\n keep_prob=keep_prob,\n is_training=is_training)\n\n inputs = batch_norm_relu(inputs, is_training, data_format=data_format)\n inputs = conv2d_fixed_padding(\n inputs=inputs,\n filters=filters,\n kernel_size=3,\n strides=strides,\n data_format=data_format,\n use_td=use_td,\n targeting_rate=targeting_rate,\n keep_prob=keep_prob,\n is_training=is_training)\n\n inputs = batch_norm_relu(inputs, is_training, data_format=data_format)\n inputs = conv2d_fixed_padding(\n inputs=inputs,\n filters=4 * filters,\n kernel_size=1,\n strides=1,\n data_format=data_format,\n use_td=use_td,\n targeting_rate=targeting_rate,\n keep_prob=keep_prob,\n is_training=is_training)\n inputs = batch_norm_relu(\n inputs,\n is_training,\n relu=False,\n init_zero=final_block,\n data_format=data_format)\n\n return tf.nn.relu(inputs + shortcut)\n\n\ndef block_layer(inputs,\n filters,\n block_fn,\n blocks,\n strides,\n is_training,\n name,\n data_format=\"channels_first\",\n use_td=False,\n targeting_rate=None,\n keep_prob=None):\n \"\"\"Creates one layer of blocks for the ResNet model.\n\n Args:\n inputs: `Tensor` of size `[batch, channels, height, width]`.\n filters: `int` number of filters for the first convolution of the layer.\n block_fn: `function` for the block to use within the model\n blocks: `int` number of blocks contained in the layer.\n strides: `int` stride to use for the first convolution of the layer. If\n greater than 1, this layer will downsample the input.\n is_training: `bool` for whether the model is training.\n name: `str`name for the Tensor output of the block layer.\n data_format: `str` either \"channels_first\" for `[batch, channels, height,\n width]` or \"channels_last for `[batch, height, width, channels]`.\n use_td: `str` one of \"weight\" or \"unit\". 
Set to False or \"\" to disable\n targeted dropout.\n targeting_rate: `float` proportion of weights to target with targeted\n dropout.\n keep_prob: `float` keep probability for targeted dropout.\n\n Returns:\n The output `Tensor` of the block layer.\n \"\"\"\n # Bottleneck blocks end with 4x the number of filters as they start with\n filters_out = 4 * filters if block_fn is bottleneck_block else filters\n\n def projection_shortcut(inputs):\n \"\"\"Project identity branch.\"\"\"\n inputs = conv2d_fixed_padding(\n inputs=inputs,\n filters=filters_out,\n kernel_size=1,\n strides=strides,\n data_format=data_format,\n use_td=use_td,\n targeting_rate=targeting_rate,\n keep_prob=keep_prob,\n is_training=is_training)\n return batch_norm_relu(\n inputs, is_training, relu=False, data_format=data_format)\n\n # Only the first block per block_layer uses projection_shortcut and strides\n inputs = block_fn(\n inputs,\n filters,\n is_training,\n projection_shortcut,\n strides,\n False,\n data_format,\n use_td=use_td,\n targeting_rate=targeting_rate,\n keep_prob=keep_prob)\n\n for i in range(1, blocks):\n inputs = block_fn(\n inputs,\n filters,\n is_training,\n None,\n 1, (i + 1 == blocks),\n data_format,\n use_td=use_td,\n targeting_rate=targeting_rate,\n keep_prob=keep_prob)\n\n return tf.identity(inputs, name)\n\n\ndef resnet_v2(inputs,\n block_fn,\n layer_blocks,\n filters,\n data_format=\"channels_first\",\n is_training=False,\n is_cifar=False,\n use_td=False,\n targeting_rate=None,\n keep_prob=None):\n \"\"\"Resnet model.\n\n Args:\n inputs: `Tensor` images.\n block_fn: `function` for the block to use within the model. Either\n `residual_block` or `bottleneck_block`.\n layer_blocks: list of 3 or 4 `int`s denoting the number of blocks to include\n in each of the 3 or 4 block groups. Each group consists of blocks that\n take inputs of the same resolution.\n filters: list of 4 or 5 `int`s denoting the number of filter to include in\n block.\n data_format: `str`, \"channels_first\" `[batch, channels, height,\n width]` or \"channels_last\" `[batch, height, width, channels]`.\n is_training: bool, build in training mode or not.\n is_cifar: bool, whether the data is CIFAR or not.\n use_td: `str` one of \"weight\" or \"unit\". 
Set to False or \"\" to disable\n targeted dropout.\n targeting_rate: `float` proportion of weights to target with targeted\n dropout.\n keep_prob: `float` keep probability for targeted dropout.\n\n Returns:\n Pre-logit activations.\n \"\"\"\n inputs = block_layer(\n inputs=inputs,\n filters=filters[1],\n block_fn=block_fn,\n blocks=layer_blocks[0],\n strides=1,\n is_training=is_training,\n name=\"block_layer1\",\n data_format=data_format,\n use_td=use_td,\n targeting_rate=targeting_rate,\n keep_prob=keep_prob)\n inputs = block_layer(\n inputs=inputs,\n filters=filters[2],\n block_fn=block_fn,\n blocks=layer_blocks[1],\n strides=2,\n is_training=is_training,\n name=\"block_layer2\",\n data_format=data_format,\n use_td=use_td,\n targeting_rate=targeting_rate,\n keep_prob=keep_prob)\n inputs = block_layer(\n inputs=inputs,\n filters=filters[3],\n block_fn=block_fn,\n blocks=layer_blocks[2],\n strides=2,\n is_training=is_training,\n name=\"block_layer3\",\n data_format=data_format,\n use_td=use_td,\n targeting_rate=targeting_rate,\n keep_prob=keep_prob)\n if not is_cifar:\n inputs = block_layer(\n inputs=inputs,\n filters=filters[4],\n block_fn=block_fn,\n blocks=layer_blocks[3],\n strides=2,\n is_training=is_training,\n name=\"block_layer4\",\n data_format=data_format,\n use_td=use_td,\n targeting_rate=targeting_rate,\n keep_prob=keep_prob)\n\n return inputs\n\n\[email protected]_model\nclass Resnet(t2t_model.T2TModel):\n \"\"\"Residual Network.\"\"\"\n\n def body(self, features):\n hp = self.hparams\n block_fns = {\n \"residual\": residual_block,\n \"bottleneck\": bottleneck_block,\n }\n assert hp.block_fn in block_fns\n is_training = hp.mode == tf.estimator.ModeKeys.TRAIN\n if is_training:\n targets = features[\"targets_raw\"]\n\n inputs = features[\"inputs\"]\n\n data_format = \"channels_last\"\n if hp.use_nchw:\n # Convert from channels_last (NHWC) to channels_first (NCHW). 
This\n # provides a large performance boost on GPU.\n inputs = tf.transpose(inputs, [0, 3, 1, 2])\n data_format = \"channels_first\"\n\n inputs = conv2d_fixed_padding(\n inputs=inputs,\n filters=hp.filter_sizes[0],\n kernel_size=7,\n strides=1 if hp.is_cifar else 2,\n data_format=data_format)\n inputs = tf.identity(inputs, \"initial_conv\")\n inputs = batch_norm_relu(inputs, is_training, data_format=data_format)\n\n if not hp.is_cifar:\n inputs = layers().MaxPooling2D(\n pool_size=3,\n strides=2,\n padding=\"SAME\",\n data_format=data_format)(inputs)\n inputs = tf.identity(inputs, \"initial_max_pool\")\n\n out = resnet_v2(\n inputs,\n block_fns[hp.block_fn],\n hp.layer_sizes,\n hp.filter_sizes,\n data_format,\n is_training=is_training,\n is_cifar=hp.is_cifar,\n use_td=hp.use_td,\n targeting_rate=hp.targeting_rate,\n keep_prob=hp.keep_prob)\n\n if hp.use_nchw:\n out = tf.transpose(out, [0, 2, 3, 1])\n\n if not hp.is_cifar:\n return out\n\n out = tf.reduce_mean(out, [1, 2])\n num_classes = self._problem_hparams.vocab_size[\"targets\"]\n if hasattr(self._hparams, \"vocab_divisor\"):\n num_classes += (-num_classes) % self._hparams.vocab_divisor\n logits = layers().Dense(num_classes, name=\"logits\")(out)\n\n losses = {\"training\": 0.0}\n if is_training:\n loss = tf.losses.sparse_softmax_cross_entropy(\n labels=tf.squeeze(targets), logits=logits)\n loss = tf.reduce_mean(loss)\n\n losses = {\"training\": loss}\n\n logits = tf.reshape(logits, [-1, 1, 1, 1, logits.shape[1]])\n\n return logits, losses\n\n def infer(self,\n features=None,\n decode_length=50,\n beam_size=1,\n top_beams=1,\n alpha=0.0,\n use_tpu=False):\n \"\"\"Predict.\"\"\"\n del decode_length, beam_size, top_beams, alpha, use_tpu\n assert features is not None\n logits, _ = self(features) # pylint: disable=not-callable\n assert len(logits.get_shape()) == 5\n logits = tf.squeeze(logits, [1, 2, 3])\n log_probs = common_layers.log_prob_from_logits(logits)\n predictions, scores = common_layers.argmax_with_score(log_probs)\n return {\n \"outputs\": predictions,\n \"scores\": scores,\n }\n\n\ndef resnet_base():\n \"\"\"Set of hyperparameters.\"\"\"\n # For imagenet on TPU:\n # Set train_steps=120000\n # Set eval_steps=48\n\n # Base\n hparams = common_hparams.basic_params1()\n\n # Model-specific parameters\n hparams.add_hparam(\"layer_sizes\", [3, 4, 6, 3])\n hparams.add_hparam(\"filter_sizes\", [64, 64, 128, 256, 512])\n hparams.add_hparam(\"block_fn\", \"bottleneck\")\n hparams.add_hparam(\"use_nchw\", True)\n hparams.add_hparam(\"is_cifar\", False)\n\n # Targeted dropout\n hparams.add_hparam(\"use_td\", False)\n hparams.add_hparam(\"targeting_rate\", None)\n hparams.add_hparam(\"keep_prob\", None)\n\n # Variable init\n hparams.initializer = \"normal_unit_scaling\"\n hparams.initializer_gain = 2.\n\n # Optimization\n hparams.optimizer = \"Momentum\"\n hparams.optimizer_momentum_momentum = 0.9\n hparams.optimizer_momentum_nesterov = True\n hparams.weight_decay = 1e-4\n hparams.clip_grad_norm = 0.0\n # (base_lr=0.1) * (batch_size=128*8 (on TPU, or 8 GPUs)=1024) / (256.)\n hparams.learning_rate = 0.4\n hparams.learning_rate_decay_scheme = \"cosine\"\n # For image_imagenet224, 120k training steps, which effectively makes this a\n # cosine decay (i.e. 
no cycles).\n hparams.learning_rate_cosine_cycle_steps = 120000\n\n hparams.batch_size = 128\n return hparams\n\n\[email protected]_hparams\ndef resnet_50():\n hp = resnet_base()\n return hp\n\n\[email protected]_hparams\ndef resnet_18():\n hp = resnet_base()\n hp.block_fn = \"residual\"\n hp.layer_sizes = [2, 2, 2, 2]\n return hp\n\n\[email protected]_hparams\ndef resnet_imagenet_34():\n \"\"\"Set of hyperparameters.\"\"\"\n hp = resnet_base()\n hp.block_fn = \"residual\"\n hp.layer_sizes = [2, 4, 8, 2]\n\n return hp\n\n\[email protected]_hparams\ndef resnet_imagenet_34_td_weight_05_05():\n \"\"\"Set of hyperparameters.\"\"\"\n hp = resnet_imagenet_34()\n hp.use_td = \"weight\"\n hp.targeting_rate = 0.5\n hp.keep_prob = 0.5\n\n return hp\n\n\[email protected]_hparams\ndef resnet_imagenet_34_td_unit_05_05():\n \"\"\"Set of hyperparameters.\"\"\"\n hp = resnet_imagenet_34()\n hp.use_td = \"unit\"\n hp.targeting_rate = 0.5\n hp.keep_prob = 0.5\n\n return hp\n\n\[email protected]_hparams\ndef resnet_imagenet_34_td_unit_no_drop():\n \"\"\"Set of hyperparameters.\"\"\"\n hp = resnet_imagenet_34()\n hp.use_td = \"unit\"\n hp.targeting_rate = 0.0\n hp.keep_prob = 1.0\n\n return hp\n\n\[email protected]_hparams\ndef resnet_imagenet_102():\n hp = resnet_imagenet_34()\n hp.layer_sizes = [3, 8, 36, 3]\n return hp\n\n\[email protected]_hparams\ndef resnet_cifar_15():\n \"\"\"Set of hyperparameters.\"\"\"\n hp = resnet_base()\n hp.block_fn = \"residual\"\n hp.is_cifar = True\n hp.layer_sizes = [2, 2, 2]\n hp.filter_sizes = [16, 32, 64, 128]\n\n return hp\n\n\[email protected]_hparams\ndef resnet_cifar_32():\n hp = resnet_cifar_15()\n hp.layer_sizes = [5, 5, 5]\n return hp\n\n\[email protected]_hparams\ndef resnet_cifar_32_td_weight_05_05():\n hp = resnet_cifar_32()\n hp.use_td = \"weight\"\n hp.targeting_rate = 0.5\n hp.keep_prob = 0.5\n return hp\n\n\[email protected]_hparams\ndef resnet_cifar_32_td_unit_05_05():\n hp = resnet_cifar_32()\n hp.use_td = \"unit\"\n hp.targeting_rate = 0.5\n hp.keep_prob = 0.5\n return hp\n\n\[email protected]_hparams\ndef resnet_cifar_32_td_unit_no_drop():\n hp = resnet_cifar_32()\n hp.use_td = \"unit\"\n hp.targeting_rate = 0.0\n hp.keep_prob = 1.0\n return hp\n\n\[email protected]_hparams\ndef resnet_34():\n hp = resnet_base()\n hp.block_fn = \"residual\"\n return hp\n\n\[email protected]_hparams\ndef resnet_101():\n hp = resnet_base()\n hp.layer_sizes = [3, 4, 23, 3]\n return hp\n\n\[email protected]_hparams\ndef resnet_152():\n hp = resnet_base()\n hp.layer_sizes = [3, 8, 36, 3]\n return hp\n\n\[email protected]_hparams\ndef resnet_200():\n hp = resnet_base()\n hp.layer_sizes = [3, 24, 36, 3]\n return hp\n\n\n# Pruning parameters\[email protected]_pruning_params\ndef resnet_weight():\n hp = HParams()\n hp.add_hparam(\"strategy\", \"weight\")\n hp.add_hparam(\"black_list\", [\"logits\", \"bias\"])\n hp.add_hparam(\"white_list\", [\"td_conv\"])\n hp.add_hparam(\"sparsities\", [0.1 * i for i in range(10)])\n return hp\n\n\[email protected]_pruning_params\ndef resnet_unit():\n hp = resnet_weight()\n hp.strategy = \"unit\"\n return hp\n\n\n# Adversarial attack parameters\[email protected]_attack_params\ndef resnet_fgsm():\n aparams = HParams()\n aparams.attack = \"fgsm\"\n aparams.epsilon_name = \"eps\"\n aparams.attack_epsilons = [i * 0.8 for i in range(20)]\n aparams.add_hparam(\"clip_min\", 0.0)\n aparams.add_hparam(\"clip_max\", 255.0)\n return aparams\n\n\[email protected]_attack_params\ndef resnet_madry():\n aparams = resnet_fgsm()\n aparams.attack = \"madry\"\n 
aparams.add_hparam(\"nb_iter\", 40)\n aparams.add_hparam(\"eps_iter\", 1.0)\n return aparams\n\n\[email protected]_attack_params\ndef resnet_random():\n aparams = resnet_fgsm()\n aparams.attack = \"random\"\n aparams.epsilon_name = \"eps\"\n aparams.add_hparam(\"num_samples\", 10)\n aparams.add_hparam(\"num_batches\", 100)\n return aparams\n"
] | [
[
"tensorflow.nn.relu",
"tensorflow.transpose",
"tensorflow.reduce_mean",
"tensorflow.zeros_initializer",
"tensorflow.reshape",
"tensorflow.identity",
"tensorflow.squeeze",
"tensorflow.variance_scaling_initializer",
"tensorflow.to_float",
"tensorflow.pad",
"tensorflow.ones_initializer",
"tensorflow.summary.histogram"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
IvanaEscobar/oceanAcouPy | [
"9ea11792dfe1cc0ba7004d23521fd222b4e949eb"
] | [
"oceanAcouPy/soundSpeed.py"
] | [
"# EE348N: Ocean Acoustics \n# Sound speed profiles used in class\n\nfrom numpy import exp, sin\n\n### FUNCTIONS ###\ndef cSTD (t,s,z, lat=0, eqn='mackenzie81'):\n# Inputs:\n# t : tempurature [degC]\n# s : salinity [ppt]\n# z : column depth [m]\n# lat : latitude [deg]\n# eqn : mackenzie81, leroy08\n# Returns:\n# c : seawater sound speed [m/s] \n if (eqn=='mackenzie81'):\n return 1448.96 + 4.59*t - 0.05304*t**2 + 2.374e-4*t**3 + \\\n (1.340 - 0.01025*t)*(s - 35) + 0.01630*z + \\\n 1.675e-7*z**2 - 7.139e-13*t*z**3\n elif (eqn=='leroy08'):\n return 1402.5 +5*t - 5.44e-2*t**2 + 2.1e-4*t**3 + 1.33*s - \\\n 1.23e-2*t*s + 8.7e-5*t**2*s + 1.56e-2*z + \\\n 2.55e-7*z**2 - 7.3e-12*z**3 + 1.2e-6*z*(lat-45) - \\\n 9.5e-13*t*z**3 + 3e-7*t**2*z + 1.43e-5*s*z\n\n#-------------------------------------------------------------------------------\ndef cMunk (z):\n# Inputs:\n# z : depth [m]\n# Returns:\n# Munk sound speed profile [m/s]\n c0 = 1500.\n eps = 0.00737\n zt=2*(z-1300)/1300\n return c0*( 1 + eps*(zt - 1 + exp(-zt)) )\n\n#-------------------------------------------------------------------------------\ndef cIsoVel (z):\n# Inputs:\n# z : depth [m]\n# Returns:\n# Isovelocity downward refracting sound speed profile [m/s]\n c0 = 1520.\n return c0 - 0.5*z\n\n#-------------------------------------------------------------------------------\ndef cDD (z): \n# Inputs:\n# z : depth [m]\n# Returns:\n# double-ducted sound speed profile: \n return 10*sin(f1(z)*(5000.-z)) +\\\n 70*sin(f2(z)*(5000.-z)) +\\\n 0.014*z + 1480.\n\n#-------------------------------------------------------------------------------\nf1 = lambda z : -3e-18*(5000-z)**4\nf2 = lambda z : 3e-19*(5000-z)**4\n"
] | [
[
"numpy.exp"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
omyllymaki/shifting-peaks | [
"0dd67662daac29ce1c2db257553c3355202a46f1"
] | [
"solvers/tests/test_grid_solver.py"
] | [
"import itertools\n\nimport numpy as np\n\nfrom solvers.grid_solver import GridSolver\nfrom solvers.math import interpolate_signal, ls_fit\nfrom solvers.tests.base_test_case import BaseTestCase\nfrom solvers.tests.correction_models import linear_correction\n\n\nclass TestGridSolver(BaseTestCase):\n offset_candidates = np.arange(-3, 3, 0.1)\n slope_candidates = np.arange(-0.03, 0.03, 0.001)\n candidates = np.array(list(itertools.product(slope_candidates, offset_candidates)))\n\n def setUp(self):\n super().setUp()\n self.solver = GridSolver(x=self.x,\n pure_components=self.pure_components,\n candidates=self.candidates,\n correction_model=linear_correction,\n fit_function=ls_fit)\n\n def test_no_x_axis_errors_should_pass(self) -> None:\n self.run_test(self.mixture_signal)\n\n def test_offset_error_should_pass(self) -> None:\n x_distorted = self.x + 2\n signal = interpolate_signal(self.mixture_signal, self.x, x_distorted, 0, 0)\n self.run_test(signal)\n\n def test_slope_error_should_pass(self) -> None:\n x_distorted = 1.01 * self.x\n signal = interpolate_signal(self.mixture_signal, self.x, x_distorted, 0, 0)\n self.run_test(signal)\n\n def test_slope_and_offset_error_should_pass(self) -> None:\n x_distorted = 1.01 * self.x - 2\n signal = interpolate_signal(self.mixture_signal, self.x, x_distorted, 0, 0)\n self.run_test(signal)\n"
] | [
[
"numpy.arange"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
imaginary-person/pipeline_experiments | [
"32d20f1b9a4192e75ed6ba709c9acd2e0cf23e06",
"32d20f1b9a4192e75ed6ba709c9acd2e0cf23e06"
] | [
"BERT/bert_local_pipeline.py",
"mnist_cuda_rpc_conv_error/mnist_test2.py"
] | [
"import argparse\nimport math\nimport sys\nimport time\nimport os\nimport socket\nimport statistics\n\nimport torch\nimport torch.nn as nn\nfrom torch.utils.data import DataLoader\n\nfrom model import MLMTask, MLMTask2, MLMTaskEmbedding, MLMTaskEncoder, MLMTaskHead\nfrom cuda_local_pipeline import LocalSequential, sync_all_device\n\n\nIS_SLURM = os.getenv('SLURM_LOCALID')\nUSE_TQDM = os.getenv('USE_TQDM', True if not IS_SLURM else False)\n\n\ndef collate_batch(batch_data, args, mask_id, cls_id):\n batch_data = torch.tensor(batch_data).long().view(args.batch_size, -1).t().contiguous()\n # Generate masks with args.mask_frac\n data_len = batch_data.size(0)\n ones_num = int(data_len * args.mask_frac)\n zeros_num = data_len - ones_num\n lm_mask = torch.cat([torch.zeros(zeros_num), torch.ones(ones_num)])\n lm_mask = lm_mask[torch.randperm(data_len)]\n batch_data = torch.cat((torch.tensor([[cls_id] * batch_data.size(1)]).long(), batch_data))\n lm_mask = torch.cat((torch.tensor([0.0]), lm_mask))\n\n targets = torch.stack([batch_data[i] for i in range(lm_mask.size(0)) if lm_mask[i]]).view(-1)\n batch_data = batch_data.masked_fill(lm_mask.bool().unsqueeze(1), mask_id)\n return batch_data, lm_mask, targets\n\n\ndef process_raw_data(raw_data, args):\n _num = raw_data.size(0) // (args.batch_size * args.bptt)\n raw_data = raw_data[:(_num * args.batch_size * args.bptt)]\n return raw_data\n\n\ndef train(model, vocab, train_loss_log, train_data,\n optimizer, criterion, ntokens, epoch, args):\n model.train()\n total_loss = 0.\n start_time = time.time()\n mask_id = vocab.stoi['<MASK>']\n cls_id = vocab.stoi['<cls>']\n train_loss_log.append(0.0)\n dataloader = DataLoader(train_data, batch_size=args.batch_size * args.bptt,\n shuffle=False, collate_fn=lambda b: collate_batch(b, args, mask_id, cls_id))\n\n forward_pyth_elapsed = []\n forward_cuda_elapsed = []\n forward_comm_elapsed = []\n forward_comp_elapsed = []\n backward_pyth_elapsed = []\n backward_cuda_elapsed = []\n\n for batch, (data, lm_mask, targets) in enumerate(dataloader):\n optimizer.zero_grad()\n data = data.to(0)\n targets = targets.to(args.gpus - 1)\n data = data.transpose(0, 1)\n\n fwd_tik = torch.cuda.Event(enable_timing=True)\n fwd_tok = torch.cuda.Event(enable_timing=True)\n\n sync_all_device(args.gpus)\n forward_start_time = time.time()\n\n fwd_tik.record()\n\n output = model(data)\n output = torch.stack([output[i] for i in range(lm_mask.size(0)) if lm_mask[i]])\n loss = criterion(output.view(-1, ntokens), targets)\n total_loss += loss.item()\n\n fwd_tok.record()\n fwd_tok.synchronize()\n fwd_delay = fwd_tik.elapsed_time(fwd_tok)\n\n forward_cuda_elapsed.append(fwd_delay)\n forward_comp_elapsed.append(model.get_fwd_compute_delay())\n forward_comm_elapsed.append(model.get_fwd_communication_delay()) # forward_comm_elapsed.append(fwd_delay - model.get_fwd_compute_delay())\n\n sync_all_device(args.gpus)\n forward_pyth_elapsed.append((time.time() - forward_start_time) * 1000)\n\n bwd_tik = torch.cuda.Event(enable_timing=True)\n bwd_tok = torch.cuda.Event(enable_timing=True)\n\n backward_start_time = time.time()\n\n bwd_tik.record()\n\n loss.backward()\n\n bwd_tok.record()\n bwd_tok.synchronize()\n bwd_delay = bwd_tik.elapsed_time(bwd_tok)\n\n backward_cuda_elapsed.append(bwd_delay)\n\n sync_all_device(args.gpus)\n backward_pyth_elapsed.append((time.time() - backward_start_time) * 1000)\n\n optimizer.step()\n\n if (batch + 1) % args.log_interval == 0:\n cur_loss = total_loss / args.log_interval\n elapsed = time.time() - start_time\n 
train_loss_log[-1] = cur_loss\n\n num_of_batches = len(train_data) // (args.bptt * args.batch_size)\n\n last = 10 # len(forward_comm_elapsed) // 2\n\n f_comm_last = forward_comm_elapsed[-last:]\n f_comm_last_mean = statistics.mean(f_comm_last)\n f_comm_last_std = statistics.stdev(f_comm_last) if len(f_comm_last) > 1 else 0.0\n\n f_comp_last = forward_comp_elapsed[-last:]\n f_comp_last_mean = statistics.mean(f_comp_last)\n f_comp_last_std = statistics.stdev(f_comp_last) if len(f_comp_last) > 1 else 0.0\n\n f_last = forward_cuda_elapsed[-last:]\n f_last_mean = statistics.mean(f_last)\n f_last_std = statistics.stdev(f_last) if len(f_last) > 1 else 0.0\n\n b_last = backward_cuda_elapsed[-last:]\n b_last_mean = statistics.mean(b_last)\n b_last_std = statistics.stdev(b_last) if len(b_last) > 1 else 0.0\n\n print(\n f\"EPOCH:{epoch:2}|\"\n f\"BATCH:{(batch + 1):3}/{num_of_batches:3}|\"\n f\"LOSS:{cur_loss:5.2f}|\"\n \"\\t\"\n f\"TIME:{(elapsed * 1000 / args.log_interval):10.2f} = {forward_pyth_elapsed[-1]:10.2f} + {backward_pyth_elapsed[-1]:10.2f}|\"\n \"\\t\"\n f\"FORWARD:{forward_cuda_elapsed[-1]:10.2f}({f_last_mean:10.2f} ±{f_last_std:8.2f})=({f_comp_last_mean:10.2f} ±{f_comp_last_std:8.2f})+({f_comm_last_mean:10.2f} ±{f_comm_last_std:8.2f}) |\"\n \"\\t\"\n f\"BACKWARD:{backward_cuda_elapsed[-1]:10.2f}({b_last_mean:10.2f} ±{b_last_std:8.2f})|\"\n )\n\n total_loss = 0\n start_time = time.time()\n\n\ndef run_main(args):\n torch.manual_seed(args.seed)\n import torchtext\n if args.dataset == 'WikiText103':\n from torchtext.experimental.datasets import WikiText103 as WLMDataset\n elif args.dataset == 'WikiText2':\n from torchtext.experimental.datasets import WikiText2 as WLMDataset\n elif args.dataset == 'WMTNewsCrawl':\n from torchtext.experimental.datasets import WMTNewsCrawl as WLMDataset\n elif args.dataset == 'EnWik9':\n from torchtext.datasets import EnWik9\n elif args.dataset == 'BookCorpus':\n from data import BookCorpus\n else:\n print(\"dataset for MLM task is not supported\")\n\n try:\n vocab = torch.load(args.save_vocab)\n except:\n print(f\"WLMDataset = {WLMDataset}\")\n train_dataset, valid_dataset, test_dataset = WLMDataset()\n old_vocab = train_dataset.vocab\n print(f\"len(old_vocab) = {len(old_vocab)}\")\n vocab = torchtext.vocab.Vocab(counter=old_vocab.freqs,\n specials=['<unk>', '<pad>', '<MASK>'])\n with open(args.save_vocab, 'wb') as f:\n torch.save(vocab, f)\n\n if args.dataset == 'WikiText103' or args.dataset == 'WikiText2':\n train_dataset, valid_dataset, test_dataset = WLMDataset(vocab=vocab)\n train_dataset.data = torch.cat(tuple(filter(lambda t: t.numel() > 0, train_dataset)))\n valid_dataset.data = torch.cat(tuple(filter(lambda t: t.numel() > 0, valid_dataset)))\n test_dataset.data = torch.cat(tuple(filter(lambda t: t.numel() > 0, test_dataset)))\n elif args.dataset == 'WMTNewsCrawl':\n from torchtext.experimental.datasets import WikiText2\n test_dataset, valid_dataset = WikiText2(vocab=vocab, split=('test', 'valid'))\n valid_dataset.data = torch.cat(tuple(filter(lambda t: t.numel() > 0, valid_dataset)))\n test_dataset.data = torch.cat(tuple(filter(lambda t: t.numel() > 0, test_dataset)))\n train_dataset = WLMDataset(vocab=vocab, split='train')\n train_dataset.data = torch.cat(tuple(filter(lambda t: t.numel() > 0, train_dataset)))\n elif args.dataset == 'EnWik9':\n enwik9 = EnWik9()\n idx1, idx2 = int(len(enwik9) * 0.8), int(len(enwik9) * 0.9)\n train_data = torch.tensor([vocab.stoi[_id]\n for _id in enwik9[0:idx1]]).long()\n val_data = torch.tensor([vocab.stoi[_id]\n 
for _id in enwik9[idx1:idx2]]).long()\n test_data = torch.tensor([vocab.stoi[_id]\n for _id in enwik9[idx2:]]).long()\n from torchtext.experimental.datasets import LanguageModelingDataset\n train_dataset = LanguageModelingDataset(train_data, vocab, lambda x: x)\n valid_dataset = LanguageModelingDataset(val_data, vocab, lambda x: x)\n test_dataset = LanguageModelingDataset(test_data, vocab, lambda x: x)\n elif args.dataset == 'BookCorpus':\n train_dataset, valid_dataset, test_dataset = BookCorpus(vocab)\n\n train_data = process_raw_data(train_dataset.data, args)\n val_data = process_raw_data(valid_dataset.data, args)\n test_data = process_raw_data(test_dataset.data, args)\n\n ntokens = len(train_dataset.get_vocab())\n print(f\"Vocabulary size = {ntokens}\")\n\n if args.gpus == 1:\n model = LocalSequential(\n nn.Sequential(\n MLMTask(ntokens, args.emsize, args.nhead, args.nhid, args.nlayers, args.dropout).to(0)\n )\n )\n elif args.gpus == 2:\n assert(args.nlayers % 2 == 0)\n model = LocalSequential(\n nn.Sequential(\n MLMTaskEmbedding(ntokens, args.emsize).to(0),\n MLMTaskEncoder(args.emsize, args.nhead, args.nhid, args.nlayers // 2, args.dropout).to(0),\n ),\n nn.Sequential(\n MLMTaskEncoder(args.emsize, args.nhead, args.nhid, args.nlayers // 2, args.dropout).to(1),\n MLMTaskHead(ntokens, args.emsize).to(1),\n ),\n )\n else:\n assert(args.nlayers % (args.gpus - 2) == 0)\n model = LocalSequential(\n MLMTaskEmbedding(ntokens, args.emsize).to(0),\n *(MLMTaskEncoder(args.emsize, args.nhead, args.nhid, args.nlayers // (args.gpus - 2), args.dropout).to(i) for i in range(1, args.gpus - 1)),\n MLMTaskHead(ntokens, args.emsize).to(args.gpus - 1),\n )\n\n params = sum(p.numel() for p in model.parameters() if p.requires_grad)\n print(f'Total parameters = {params // 10**6}M')\n\n criterion = nn.CrossEntropyLoss()\n optimizer = torch.optim.SGD(model.parameters(), lr=args.lr)\n best_val_loss = None\n train_loss_log, val_loss_log = [], []\n\n for epoch in range(1, args.epochs + 1):\n epoch_start_time = time.time()\n train(model, train_dataset.vocab, train_loss_log, train_data,\n optimizer, criterion, ntokens, epoch, args)\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description='Pipeline experiments')\n parser.add_argument('--emsize', type=int, default=768,\n help='size of word embeddings')\n parser.add_argument('--nhid', type=int, default=3072,\n help='number of hidden units per layer')\n parser.add_argument('--nlayers', type=int, default=12,\n help='number of layers')\n parser.add_argument('--nhead', type=int, default=12,\n help='the number of heads in the encoder/decoder of the transformer model')\n parser.add_argument('--lr', type=float, default=0.1,\n help='initial learning rate')\n parser.add_argument('--clip', type=float, default=0.1,\n help='gradient clipping')\n parser.add_argument('--epochs', type=int, default=8,\n help='upper epoch limit')\n parser.add_argument('--batch_size', type=int, default=32, metavar='N',\n help='batch size')\n parser.add_argument('--bptt', type=int, default=128,\n help='sequence length')\n parser.add_argument('--dropout', type=float, default=0.2,\n help='dropout applied to layers (0 = no dropout)')\n parser.add_argument('--seed', type=int, default=5431916812,\n help='random seed')\n parser.add_argument('--log-interval', type=int, default=10, metavar='N',\n help='report interval')\n parser.add_argument('--save-vocab', type=str, default='torchtext_bert_vocab.pt',\n help='path to save the vocab')\n parser.add_argument('--mask_frac', type=float, 
default=0.15,\n help='the fraction of masked tokens')\n parser.add_argument('--dataset', type=str, default='WikiText2',\n help='dataset used for MLM task')\n parser.add_argument('--gpus', type=int, default=8,\n help='number of GPUs per worker node to use')\n\n args = parser.parse_args()\n run_main(args)\n",
"import torch\nimport torch.nn as nn\nimport torch.optim as optim\nfrom torchvision import datasets, transforms\nimport torch.multiprocessing as mp\nimport torch.distributed.rpc as rpc\nimport os\nimport concurrent.futures\nfrom torch.distributed.optim import DistributedOptimizer\nfrom torch.distributed.rpc import RRef\nimport torch.distributed.autograd as dist_autograd\nfrom tqdm import tqdm\nfrom rpc_framework import MyRPCPipeline, MyRPCPipelineWrapper\n\n\ndef LayerOnDevice(device):\n return nn.Sequential(\n nn.Conv2d(1, 16, 3, 1),\n nn.ReLU(),\n nn.Conv2d(16, 32, 3, 1),\n nn.ReLU(),\n nn.MaxPool2d(2),\n nn.Flatten(1),\n nn.Linear(4608, 128),\n nn.ReLU(),\n nn.Linear(128, 10),\n ).to(device)\n\n\ndef run_main():\n rref = rpc.remote(\"worker1\", LayerOnDevice, args=(\"cuda:0\",))\n for _ in range(100):\n x = torch.randn(100, 1, 28, 28).to(\"cuda:1\")\n actual = rref.remote().forward(x).to_here()\n expected = rref.rpc_sync().forward(x)\n assert((expected == actual).all())\n\n\ndef run_worker(rank, world_size):\n os.environ['MASTER_ADDR'] = 'localhost'\n os.environ['MASTER_PORT'] = '29500'\n options = rpc.TensorPipeRpcBackendOptions(num_worker_threads=256)\n\n if rank == 0:\n options.set_device_map(\"worker1\", {1:0})\n rpc.init_rpc(\n \"master\",\n rank=rank,\n world_size=world_size,\n rpc_backend_options=options\n )\n run_main()\n else:\n if rank == 1:\n options.set_device_map(\"master\", {0:1})\n rpc.init_rpc(\n f\"worker{rank}\",\n rank=rank,\n world_size=world_size,\n rpc_backend_options=options\n )\n\n rpc.shutdown()\n\n\nif __name__==\"__main__\":\n gpus = 1\n world_size = gpus + 1\n mp.spawn(run_worker, args=(world_size,), nprocs=world_size, join=True)\n"
] | [
[
"torch.nn.CrossEntropyLoss",
"torch.ones",
"torch.zeros",
"torch.load",
"torch.manual_seed",
"torch.randperm",
"torch.cuda.Event",
"torch.tensor",
"torch.save"
],
[
"torch.multiprocessing.spawn",
"torch.randn",
"torch.nn.Conv2d",
"torch.nn.Flatten",
"torch.distributed.rpc.TensorPipeRpcBackendOptions",
"torch.distributed.rpc.remote",
"torch.nn.MaxPool2d",
"torch.nn.Linear",
"torch.distributed.rpc.shutdown",
"torch.distributed.rpc.init_rpc",
"torch.nn.ReLU"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
dchenam/AnimeGAN | [
"15707a99dde000a6d7f283f4f82d5176b8313e0a"
] | [
"logger.py"
] | [
"# Code referenced from https://gist.github.com/gyglim/1f8dfb1b5c82627ae3efcfbbadb9f514\nimport tensorflow as tf\nimport numpy as np\nimport scipy.misc\nimport logging\n\ntry:\n from StringIO import StringIO # Python 2.7\nexcept ImportError:\n from io import BytesIO # Python 3.5+\n\n\nclass Logger(object):\n\n def __init__(self, config):\n \"\"\"Create a summary writer logging to log_dir.\"\"\"\n self.config = config\n self.writer = tf.summary.FileWriter(config.summary_dir)\n\n def scalar_summary(self, tag, value, step):\n \"\"\"Log a scalar variable.\"\"\"\n summary = tf.Summary(value=[tf.Summary.Value(tag=tag, simple_value=value)])\n self.writer.add_summary(summary, step)\n\n def image_summary(self, tag, images, step):\n \"\"\"Log a list of images.\"\"\"\n\n img_summaries = []\n for i, img in enumerate(images):\n # Write the image to a string\n try:\n s = StringIO()\n except:\n s = BytesIO()\n scipy.misc.toimage(img).save(s, format=\"png\")\n\n # Create an Image object\n img_sum = tf.Summary.Image(encoded_image_string=s.getvalue(),\n height=img.shape[0],\n width=img.shape[1])\n # Create a Summary value\n img_summaries.append(tf.Summary.Value(tag='%s/%d' % (tag, i), image=img_sum))\n\n # Create and write Summary\n summary = tf.Summary(value=img_summaries)\n self.writer.add_summary(summary, step)\n\n def histo_summary(self, tag, values, step, bins=1000):\n \"\"\"Log a histogram of the tensor of values.\"\"\"\n\n # Create a histogram using numpy\n counts, bin_edges = np.histogram(values, bins=bins)\n\n # Fill the fields of the histogram proto\n hist = tf.HistogramProto()\n hist.min = float(np.min(values))\n hist.max = float(np.max(values))\n hist.num = int(np.prod(values.shape))\n hist.sum = float(np.sum(values))\n hist.sum_squares = float(np.sum(values ** 2))\n\n # Drop the start of the first bin\n bin_edges = bin_edges[1:]\n\n # Add bin edges and counts\n for edge in bin_edges:\n hist.bucket_limit.append(edge)\n for c in counts:\n hist.bucket.append(c)\n\n # Create and write Summary\n summary = tf.Summary(value=[tf.Summary.Value(tag=tag, histo=hist)])\n self.writer.add_summary(summary, step)\n self.writer.flush()\n\n def set_logger(self, log_path):\n \"\"\"Sets the logger to log info in terminal and file `log_path`.\n In general, it is useful to have a logger so that every output to the terminal is saved\n in a permanent file. Here we save it to `model_dir/train.log`.\n Example:\n ```\n logging.info(\"Starting training...\")\n ```\n Args:\n log_path: (string) where to log\n \"\"\"\n logger = logging.getLogger()\n logger.setLevel(logging.INFO)\n\n if not logger.handlers:\n # Logging to a file\n file_handler = logging.FileHandler(log_path)\n file_handler.setFormatter(logging.Formatter('%(asctime)s:%(levelname)s: %(message)s'))\n logger.addHandler(file_handler)\n\n # Logging to console\n stream_handler = logging.StreamHandler()\n stream_handler.setFormatter(logging.Formatter('%(message)s'))\n logger.addHandler(stream_handler)\n"
] | [
[
"tensorflow.summary.FileWriter",
"numpy.min",
"numpy.max",
"tensorflow.Summary.Value",
"numpy.prod",
"tensorflow.HistogramProto",
"tensorflow.Summary",
"numpy.histogram",
"numpy.sum"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
Hardly-Human/Instance-Segmentation-of-Images | [
"45b048a2eb7fa31d5007f3fcd70b03fcb57abad4"
] | [
"app.py"
] | [
"import streamlit as st\nfrom PIL import Image\nimport matplotlib.pyplot as plt\nfrom gluoncv import model_zoo, data, utils\n\n\[email protected]\ndef load_image(image_file):\n img = Image.open(image_file)\n return img\n\[email protected](allow_output_mutation=True)\ndef load_model(model_name):\n\tmodel = model_zoo.get_model(model_name, pretrained = True)\n\treturn model\n\ndef plot_image(model, x, orig_img):\n\tst.warning(\"Inferencing from Model..\")\n\tids, scores, bboxes, masks = [xx[0].asnumpy() for xx in model(x)]\n\n\twidth, height = orig_img.shape[1], orig_img.shape[0]\n\tmasks, _ = utils.viz.expand_mask(masks, bboxes, (width, height), scores)\n\torig_img = utils.viz.plot_mask(orig_img, masks)\n\n\tfig = plt.figure(figsize=(10, 10))\n\tax = fig.add_subplot(1, 1, 1)\n\tax = utils.viz.plot_bbox(orig_img, bboxes, scores, ids,\n\t class_names=model.classes, ax=ax)\n\tst.set_option('deprecation.showPyplotGlobalUse', False)\n\tst.success(\"Instance Segmentation Successful!! Plotting Image..\")\n\tst.pyplot(plt.show())\n\ndef footer():\n\tst.markdown(\"\"\"\n\t* * *\n\tBuilt with ❤️ by [Rehan uddin](https://hardly-human.github.io/)\n\t\"\"\")\n\tst.success(\"Rehan uddin (Hardly-Human)👋😉\")\n\n\n################################################################################\n# main()\n################################################################################\n\ndef main():\n \n\tst.title(\"Instance Segmentation App\")\n\tst.text(\"Built with gluoncv and Streamlit\")\n\tst.markdown(\"### [Instance Segmentation](https://missinglink.ai/guides/neural-network-concepts/instance-segmentation-deep-learning/)\\\n ` `[Mask RCNN Networks](https://alittlepain833.medium.com/simple-understanding-of-mask-rcnn-134b5b330e95) \\\n\t\t [[Paper]](https://arxiv.org/abs/1703.06870)\\\n ` `[[View Source]](https://github.com/Hardly-Human/Instance-Segmentation-of-Images)\")\n\n\timage_file = st.file_uploader(\"Upload Image\", type = ['jpg','png','jpeg'])\n\n\tif image_file is None:\n\t\tst.warning(\"Upload Image and Run Model\")\n\n\tif image_file is not None:\n\t\timage1 = Image.open(image_file)\n\t\trgb_im = image1.convert('RGB') \n\t\timage = rgb_im.save(\"saved_image.jpg\")\n\t\timage_path = \"saved_image.jpg\"\n\t\tst.image(image1)\n\t\t\n\tif st.button(\"Run Model\"):\n\t\tst.warning(\"Loading Model..🤞\")\n\t\tmodel = load_model('mask_rcnn_resnet50_v1b_coco')\n\t\tst.success(\"Loaded Model Succesfully!!🤩👍\")\n\n\t\tx, orig_img = data.transforms.presets.rcnn.load_test(image_path)\n\t\tplot_image(model,x,orig_img)\n\n\n\nif __name__== \"__main__\":\n\tmain()\n\tfooter()"
] | [
[
"matplotlib.pyplot.show",
"matplotlib.pyplot.figure"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
luh0907/nn_breaking_detection | [
"6e810a5296bea3c6ef975b4e62caa2d94e992b81",
"6e810a5296bea3c6ef975b4e62caa2d94e992b81"
] | [
"density_estimation.py",
"pca_detection.py"
] | [
"# Copyright (C) 2017, Nicholas Carlini <[email protected]>\n# All rights reserved.\n\nimport sys\nimport time\nimport tensorflow as tf\nimport numpy as np\nimport random\n\nfrom setup_cifar import CIFARModel, CIFAR\nfrom setup_mnist import MNISTModel, MNIST\n\nsys.path.append(\"../..\")\nfrom nn_robust_attacks.l2_attack import CarliniL2\nfrom fast_gradient_sign import FGS\n\nimport keras\nfrom keras import backend as K\n\n#import matplotlib\n#import matplotlib.pyplot as plt\n#import matplotlib.patches as mpatches\n#from matplotlib.backends.backend_pdf import PdfPages\nfrom scipy.stats import gaussian_kde\n\nBINARY_SEARCH_STEPS = 9 # number of times to adjust the constant with binary search\nMAX_ITERATIONS = 10000 # number of iterations to perform gradient descent\nABORT_EARLY = True # if we stop improving, abort gradient descent early\nLEARNING_RATE = 1e-2 # larger values converge faster to less accurate results\nTARGETED = True # should we target one specific class? or just be wrong?\nCONFIDENCE = 0 # how strong the adversarial example should be\nINITIAL_CONST = 1e-3 # the initial constant c to pick as a first guess\n\nclass CarliniL2New:\n def __init__(self, sess, model, batch_size=1, confidence = CONFIDENCE,\n targeted = TARGETED, learning_rate = LEARNING_RATE,\n binary_search_steps = BINARY_SEARCH_STEPS, max_iterations = MAX_ITERATIONS,\n abort_early = ABORT_EARLY, \n initial_const = INITIAL_CONST, extra_loss=None):\n \"\"\"\n The L_2 optimized attack. \n\n This attack is the most efficient and should be used as the primary \n attack to evaluate potential defenses.\n\n Returns adversarial examples for the supplied model.\n\n confidence: Confidence of adversarial examples: higher produces examples\n that are farther away, but more strongly classified as adversarial.\n batch_size: Number of attacks to run simultaneously.\n targeted: True if we should perform a targetted attack, False otherwise.\n learning_rate: The learning rate for the attack algorithm. Smaller values\n produce better results but are slower to converge.\n binary_search_steps: The number of times we perform binary search to\n find the optimal tradeoff-constant between distance and confidence. \n max_iterations: The maximum number of iterations. Larger values are more\n accurate; setting too small will require a large learning rate and will\n produce poor results.\n abort_early: If true, allows early aborts if gradient descent gets stuck.\n initial_const: The initial tradeoff-constant to use to tune the relative\n importance of distance and confidence. 
If binary_search_steps is large,\n the initial constant is not important.\n \"\"\"\n\n image_size, num_channels, num_labels = model.image_size, model.num_channels, model.num_labels\n self.sess = sess\n self.TARGETED = targeted\n self.LEARNING_RATE = learning_rate\n self.MAX_ITERATIONS = max_iterations\n self.BINARY_SEARCH_STEPS = binary_search_steps\n self.ABORT_EARLY = abort_early\n self.CONFIDENCE = confidence\n self.initial_const = initial_const\n self.batch_size = batch_size\n\n self.repeat = binary_search_steps >= 10\n\n shape = (batch_size,image_size,image_size,num_channels)\n \n # the variable we're going to optimize over\n modifier = tf.Variable(np.zeros(shape,dtype=np.float32))\n\n # these are variables to be more efficient in sending data to tf\n self.origs = tf.Variable(np.zeros(shape), dtype=tf.float32)\n self.timg = tf.Variable(np.zeros(shape), dtype=tf.float32)\n self.tlab = tf.Variable(np.zeros((batch_size,num_labels)), dtype=tf.float32)\n self.const = tf.Variable(np.zeros(batch_size), dtype=tf.float32)\n self.const2 = tf.Variable(np.zeros(batch_size), dtype=tf.float32)\n\n # and here's what we use to assign them\n self.assign_origs = tf.placeholder(tf.float32, shape)\n self.assign_timg = tf.placeholder(tf.float32, shape)\n self.assign_tlab = tf.placeholder(tf.float32, (batch_size,num_labels))\n self.assign_const = tf.placeholder(tf.float32, [batch_size])\n self.assign_const2 = tf.placeholder(tf.float32, [batch_size])\n \n # the resulting image, tanh'd to keep bounded from -0.5 to 0.5\n self.newimg = tf.tanh(modifier + self.timg)/2\n \n # prediction BEFORE-SOFTMAX of the model\n self.output = model.predict(self.newimg)\n \n # distance to the input data\n self.l2dist = tf.reduce_sum(tf.square(self.newimg-tf.tanh(self.origs)/2),[1,2,3])\n \n # compute the probability of the label class versus the maximum other\n self.real = real = tf.reduce_sum((self.tlab)*self.output,1)\n self.other = other = tf.reduce_max((1-self.tlab)*self.output - (self.tlab*10000),1)\n\n if self.TARGETED:\n # if targetted, optimize for making the other class most likely\n loss1 = tf.maximum(0.0, other-real+self.CONFIDENCE)\n else:\n # if untargeted, optimize for making this class least likely.\n loss1 = tf.maximum(0.0, real-other+self.CONFIDENCE)\n\n # sum up the losses\n self.loss2 = tf.reduce_sum(self.l2dist)\n self.loss1 = tf.reduce_sum(self.const*loss1)\n if extra_loss != None:\n self.extra_loss = extra_loss(self.newimg, self.output)\n else:\n self.extra_loss = 0\n self.loss = self.loss1+self.loss2+self.const*tf.reduce_sum(self.extra_loss)\n \n # Setup the adam optimizer and keep track of variables we're creating\n start_vars = set(x.name for x in tf.global_variables())\n optimizer = tf.train.AdamOptimizer(self.LEARNING_RATE)\n self.train = optimizer.minimize(self.loss, var_list=[modifier])\n end_vars = tf.global_variables()\n new_vars = [x for x in end_vars if x.name not in start_vars]\n\n # these are the variables to initialize when we run\n self.setup = []\n self.setup.append(self.origs.assign(self.assign_origs))\n self.setup.append(self.timg.assign(self.assign_timg))\n self.setup.append(self.tlab.assign(self.assign_tlab))\n self.setup.append(self.const.assign(self.assign_const))\n self.setup.append(self.const2.assign(self.assign_const2))\n \n self.init = tf.variables_initializer(var_list=[modifier]+new_vars)\n\n def attack(self, origs, imgs, targets):\n \"\"\"\n Perform the L_2 attack on the given images for the given targets.\n\n If self.targeted is true, then the targets represents the target 
labels.\n If self.targeted is false, then targets are the original class labels.\n \"\"\"\n r = []\n print('go up to',len(imgs))\n for i in range(0,len(imgs),self.batch_size):\n print('tick',i)\n r.extend(self.attack_batch(origs[i:i+self.batch_size], \n imgs[i:i+self.batch_size], \n targets[i:i+self.batch_size]))\n return np.array(r)\n\n def attack_batch(self, origs, imgs, labs):\n \"\"\"\n Run the attack on a batch of images and labels.\n \"\"\"\n def compare(x,y):\n if not isinstance(x, (float, int, np.int64)):\n x = np.copy(x)\n x[y] -= self.CONFIDENCE\n x = np.argmax(x)\n if self.TARGETED:\n return x == y\n else:\n return x != y\n\n batch_size = self.batch_size\n\n # convert to tanh-space\n imgs = np.arctanh(imgs*1.999999)\n origs = np.arctanh(origs*1.999999)\n\n # set the lower and upper bounds accordingly\n lower_bound = np.zeros(batch_size)\n CONST = np.ones(batch_size)*self.initial_const\n upper_bound = np.ones(batch_size)*1e10\n\n CONST2 = np.ones(batch_size)*self.initial_const\n\n # the best l2, score, and image attack\n o_bestl2 = [1e10]*batch_size\n o_bestscore = [-1]*batch_size\n o_bestattack = [np.zeros(imgs[0].shape)]*batch_size\n \n for outer_step in range(self.BINARY_SEARCH_STEPS):\n # completely reset adam's internal state.\n self.sess.run(self.init)\n batch = imgs[:batch_size]\n batchlab = labs[:batch_size]\n \n bestl2 = [1e10]*batch_size\n bestscore = [-1]*batch_size\n\n # The last iteration (if we run many steps) repeat the search once.\n if self.repeat == True and outer_step == self.BINARY_SEARCH_STEPS-1:\n CONST = upper_bound\n\n # set the variables so that we don't have to send them over again\n self.sess.run(self.setup, {self.assign_timg: batch,\n self.assign_origs: origs,\n self.assign_tlab: batchlab,\n self.assign_const: CONST,\n self.assign_const2: CONST2})\n \n print('set new const',CONST)\n prev = 1e20\n for iteration in range(self.MAX_ITERATIONS):\n # perform the attack \n _, l, l2s, scores, nimg, extra = self.sess.run([self.train, self.loss, \n self.l2dist, self.output, \n self.newimg, self.extra_loss])\n #print(np.argmax(scores))\n # print out the losses every 10%\n if iteration%(self.MAX_ITERATIONS//10) == 0:\n print(iteration,*self.sess.run((self.loss,self.loss1,self.loss2,self.extra_loss)))\n\n # check if we should abort search if we're getting nowhere.\n if self.ABORT_EARLY and iteration%(self.MAX_ITERATIONS//10) == 0:\n if l > prev*.9999:\n break\n prev = l\n\n # adjust the best result found so far\n for e,(l2,sc,ii) in enumerate(zip(l2s,scores,nimg)):\n if l2 < bestl2[e] and compare(sc, np.argmax(batchlab[e])) and extra[e] <= 0:\n bestl2[e] = l2\n bestscore[e] = np.argmax(sc)\n #print(l2,o_bestl2[e],np.argmax(sc),np.argmax(batchlab[e]),\n # extra[e])\n if l2 < o_bestl2[e] and compare(sc, np.argmax(batchlab[e])) and extra[e] <= 0:\n #print('set')\n o_bestl2[e] = l2\n o_bestscore[e] = np.argmax(sc)\n o_bestattack[e] = ii\n\n # adjust the constant as needed\n for e in range(batch_size):\n if compare(bestscore[e], np.argmax(batchlab[e])) and bestscore[e] != -1:\n # success, divide const by two\n upper_bound[e] = min(upper_bound[e],CONST[e])\n if upper_bound[e] < 1e9:\n CONST[e] = (lower_bound[e] + upper_bound[e])/2\n else:\n # failure, either multiply by 10 if no solution found yet\n # or do binary search with the known upper bound\n lower_bound[e] = max(lower_bound[e],CONST[e])\n if upper_bound[e] < 1e9:\n CONST[e] = (lower_bound[e] + upper_bound[e])/2\n else:\n CONST[e] *= 10\n\n # return the best solution found\n o_bestl2 = np.array(o_bestl2)\n return 
o_bestattack\n\ndef pop(model):\n '''Removes a layer instance on top of the layer stack.\n This code is thanks to @joelthchao https://github.com/fchollet/keras/issues/2371#issuecomment-211734276\n '''\n if not model.outputs:\n raise Exception('Sequential model cannot be popped: model is empty.')\n else:\n model.layers.pop()\n if not model.layers:\n model.outputs = []\n model.inbound_nodes = []\n model.outbound_nodes = []\n else:\n model.layers[-1].outbound_nodes = []\n model.outputs = [model.layers[-1].output]\n model.built = False\n\n return model\n\nclass DensityEstimate:\n\n def __init__(self, sess, hidden, centers, image_size, num_channels, sigma=20):\n self.sess = sess\n\n centers = hidden.predict(centers).reshape((centers.shape[0],1,-1))\n print(centers.shape)\n self.centers = centers\n\n self.sigma = sigma\n\n self.gaussian_means = tf.constant(centers)\n\n self.X = tf.placeholder(tf.float32, (None, image_size, image_size, num_channels))\n\n self.dist = tf.reduce_sum(tf.square(self.gaussian_means - hidden(self.X)[tf.newaxis,:,:]),axis=2)\n\n self.Y = tf.reduce_mean(tf.exp(-self.dist/self.sigma),axis=0)\n self.hidden = hidden\n\n def make(self, X):\n dist = tf.reduce_sum(tf.square(self.gaussian_means - self.hidden(X)[tf.newaxis,:,:]),axis=2)\n \n return tf.reduce_mean(tf.exp(-dist/self.sigma),axis=0)\n \n \n def slow(self, x):\n x = x.flatten()\n dist = np.sum((self.centers.reshape((self.centers.shape[0],-1))-x)**2,axis=(1))\n dist = np.sort(dist)\n\n print(dist)\n \n #plt.plot(np.cumsum(np.exp(-dist/self.sigma)))\n #plt.show()\n return np.mean(np.exp(-dist/self.sigma))\n\n def predict(self, xs):\n return self.sess.run(self.Y, {self.X: xs})\n\n\ndef estimate_density(model, de, data):\n labels = model.model.predict(data)\n\n res = []\n\n for i in range(10):\n r = []\n this_class = data[np.argmax(labels,axis=1)==i]\n for j in range(0,len(this_class),10):\n probs = de[i].predict(this_class[j:j+10])\n r.extend(probs)\n res.append((r))\n return res\n\ndef estimate_density_full(model, de, data):\n labels = model.model.predict(data)\n\n res = []\n for j in range(0,len(data),1):\n i = np.argmax(labels[j])\n probs = de[i].predict(data[j:j+1])\n res.extend(probs)\n return np.array(res)\n\nclass RobustWrap:\n image_size = 28\n num_channels = 1\n num_labels = 11\n\n def __init__(self, model, de):\n self.model = model\n self.de = de\n\n def predict(self, xs):\n de = self.de.make(xs)\n \n padded = tf.pad(self.model.predict(xs), [[0, 0], [0, 1]], \"CONSTANT\")\n\n maximum = tf.reshape(tf.reduce_max(padded,axis=1),(-1,1))\n\n de = -8-tf.log(de) #TODO\n\n dee = tf.pad(tf.reshape(de, (-1,1)), [[0, 0], [0, self.num_labels-1]], \"CONSTANT\")\n\n padded = padded + 1*maximum*dee\n\n return padded, de\n\ndef extra_loss(de, target_lab):\n def fn(img, out):\n return tf.nn.relu(-tf.log(de[target_lab].make(img))-DECONST)\n return fn\n\ndef compute_optimal_sigma(sess, model, hidden_layer, data):\n sigma = tf.Variable(np.ones(1)*100,dtype=tf.float32)\n de = [DensityEstimate(sess, hidden_layer, data.train_data[np.argmax(data.train_labels,axis=1)==i], model.image_size, model.num_channels, sigma) for i in range(10)]\n #print(de[0].centers)\n #print(estimate_density(model, de, data.test_data))\n xs = []\n for const in np.arange(0,5,.1):\n sess.run(sigma.assign(np.ones(1)*(10**const)))\n r = []\n for labA in range(10):\n print(labA)\n for labB in range(10):\n subset = data.validation_data[np.argmax(data.validation_labels,axis=1)==labB,:,:,:]\n r.append(np.mean(np.log(1e-30+de[labA].predict(subset))))\n r = 
np.array(r).reshape((10,10))\n diag = np.mean(r[np.arange(10),np.arange(10)])\n r[np.arange(10),np.arange(10)] = 0\n rest = np.mean(r)\n value = diag-rest\n xs.append(value)\n print(xs)\n plt.plot(np.arange(0,5,.1),xs)\n plt.xlabel('sigma')\n plt.ylabel('Log liklihood difference')\n \n plt.show()\n \n exit(0)\n \n \n \ndef run_kde(Data, Model, path):\n global DECONST\n sess = K.get_session()\n K.set_learning_phase(False)\n data, model = Data(), Model(path)\n\n model2 = Model(path)\n\n hidden_layer = pop(model2.model) # once to remove dense(10)\n hidden_layer = pop(hidden_layer) # once to remove ReLU\n\n #compute_optimal_sigma(sess, model, hidden_layer, data)\n #MNIST SIGMA: 20\n \n de = [DensityEstimate(sess, hidden_layer, data.train_data[np.argmax(data.train_labels,axis=1)==i], model.image_size, model.num_channels, sigma=20) for i in range(10)]\n de2 = [DensityEstimate(sess, hidden_layer, data.train_data[np.argmax(data.train_labels,axis=1)==i][:100], model.image_size, model.num_channels, sigma=20) for i in range(10)]\n\n p = tf.placeholder(tf.float32, (None, model.image_size, model.image_size, model.num_channels))\n\n #print(np.log(de[0].predict(data.test_data[:10])))\n #print(sess.run(rmodel.predict(p)[1], {p: data.test_data[:10]}))\n #exit(0)\n\n N = 1\n print(model.model.predict(data.train_data[:N]))\n print(hidden_layer.predict(data.train_data[:N]))\n\n for i in range(10):\n print(de[i].predict(data.train_data[:N]))\n \n start_density = estimate_density_full(model, de, data.test_data[M:M+N])+1e-30\n print(\"starting density\", np.log(start_density))\n\n DECONST = -np.log(start_density)\n\n l = np.zeros((N,10))\n #l[np.arange(N),np.random.random_integers(0,9,N)] = 1\n for i in range(N):\n r = np.random.random_integers(0,9)\n while r == np.argmax(data.test_labels[i]):\n r = np.random.random_integers(0,9)\n l[i,r] = 1\n\n attack1 = CarliniL2(sess, model, batch_size=1, max_iterations=3000,\n binary_search_steps=3, initial_const=1.0, learning_rate=1e-1,\n targeted=True)\n attack2 = CarliniL2New(sess, model, batch_size=1, max_iterations=10000,\n binary_search_steps=5, initial_const=1.0, learning_rate=1e-2,\n targeted=True, extra_loss=extra_loss(de2, np.argmax(l)))\n #l = data.test_labels[:N]\n #l = np.zeros((N,10))\n #l[np.arange(N),1] = 1\n print(\"RUN PHASE 1\")\n adv = attack1.attack(data.test_data[M:M+N], l)\n print('mean distortion',np.mean(np.sum((adv-data.test_data[M:M+N])**2,axis=(1,2,3))**.5))\n\n print(\"RUN PHASE 2\")\n adv = attack2.attack(data.test_data[M:M+N], adv, l)\n\n np.save(\"/tmp/q\"+str(M),adv)\n #adv = np.load(\"/tmp/qq.npy\")\n\n print('labels',np.mean(np.argmax(sess.run(model.predict(p), {p: adv}),axis=1)==l))\n\n print('mean distortion',np.mean(np.sum((adv-data.test_data[M:M+N])**2,axis=(1,2,3))**.5))\n \n a = estimate_density_full(model, de, data.test_data[M:M+N])+1e-30\n b = estimate_density_full(model, de, adv)+1e-30\n\n show(adv)\n\n print('de of test', np.mean(np.log(a)))\n print('de of adv', np.mean(np.log(b)))\n\n print('better ratio', np.mean(np.array(a)>np.array(b)))\n exit(0)\n\n #density = gaussian_kde(np.array(np.log(a))-np.array(np.log(b)))\n #density_a = gaussian_kde(np.log(a))\n #density_b = gaussian_kde(np.log(b))\n\n xs = np.linspace(-25,25,200)\n \n fig = plt.figure(figsize=(4,3))\n fig.subplots_adjust(bottom=0.17,left=.15, right=.85)\n \n plt.xlabel('log(KDE(valid))-log(KDE(adversarial))')\n plt.ylabel('Occurrances')\n \n #plt.hist(np.log(a),100)\n #plt.hist(np.log(b),100)\n plt.hist(np.log(a)-np.log(b),100)\n 
#plt.hist(np.array(np.log(a))-np.array(np.log(b)),100)\n #a = plt.plot(xs,density_a(xs), 'r--',color='blue', label='Valid')\n #b = plt.plot(xs,density_b(xs), color='red', label='Adversarial')\n #plt.plot(xs,density(xs))\n \n #plt.legend(handles=[a[0], b[0]])\n \n pp = PdfPages('/tmp/a.pdf')\n plt.savefig(pp, format='pdf')\n pp.close()\n plt.show()\n\ndef show(img):\n remap = \" .*#\"+\"#\"*100\n img = (img.flatten()+.5)*3\n print(\"START\")\n for i in range(28):\n print(\"\".join([remap[int(round(x))] for x in img[i*28:i*28+28]]))\n\n#M = int(sys.argv[1])\nM = 0\nrun_kde(MNIST, MNISTModel, \"models/mnist\")\n#run_kde(CIFAR, CIFARModel, \"models/cifar\")\n",
"## pca_detect.py -- break inner-layer pca-based detection\n##\n## Copyright (C) 2017, Nicholas Carlini <[email protected]>.\n##\n## This program is licenced under the BSD 2-Clause licence,\n## contained in the LICENCE file in this directory.\n\n\nimport sys\nimport time\nimport tensorflow as tf\nimport numpy as np\nimport random\n\nimport sklearn.decomposition\nfrom sklearn.svm import LinearSVC\n\nfrom setup_cifar import CIFARModel, CIFAR\nfrom setup_mnist import MNISTModel, MNIST\n\nfrom nn_robust_attacks.l2_attack import CarliniL2\nfrom fast_gradient_sign import FGS\n\nfrom keras import backend as K\nimport pickle\n\nimport matplotlib\nimport matplotlib.pyplot as plt\nfrom matplotlib.backends.backend_pdf import PdfPages\n\ndef pop(model):\n '''Removes a layer instance on top of the layer stack.\n This code is thanks to @joelthchao https://github.com/fchollet/keras/issues/2371#issuecomment-211734276\n '''\n if not model.outputs:\n raise Exception('Sequential model cannot be popped: model is empty.')\n else:\n model.layers.pop()\n if not model.layers:\n model.outputs = []\n model.inbound_nodes = []\n model.outbound_nodes = []\n else:\n model.layers[-1].outbound_nodes = []\n model.outputs = [model.layers[-1].output]\n model.built = False\n\n return model\n\n\ndef run_hidden_pca(Data, Model, path=None):\n sess = K.get_session()\n K.set_learning_phase(False)\n\n data = Data()\n model = Model(path)\n model2 = Model(path)\n\n hidden_layer = pop(model2.model) # once to remove dense(10)\n hidden_layer = pop(hidden_layer) # once to remove ReLU\n train_hidden = hidden_layer.predict(data.test_data)\n #val_hidden = hidden_layer.predict(data.validation_data)\n test_hidden = hidden_layer.predict(data.test_data)\n \n pca = sklearn.decomposition.PCA(n_components=test_hidden.shape[1])\n \n pca.fit(train_hidden)\n\n #r_val = pca.transform(hidden_layer.predict(data.validation_data))\n r_test = pca.transform(hidden_layer.predict(data.test_data))\n\n attack = FGS(sess, model, eps=.2)\n #attack = CarliniL2(sess, model, batch_size=100, max_iterations=1000, \n # binary_search_steps=2, targeted=False)\n\n N = 10000\n\n test_adv = attack.attack(data.test_data[:N], data.test_labels[:N])\n\n r_test_adv = pca.transform(hidden_layer.predict(test_adv[:N]))\n\n print(r_test_adv[0])\n\n show(test_adv[0])\n\n #compute_thresholds(r_val, r_val_adv)\n\n plt.figure(figsize=(4,3))\n plt.xlabel('Component Number')\n plt.ylabel('Mean Absolute Value (log scale)')\n\n plt.semilogy(range(r_test.shape[1]),np.mean(np.abs(r_test),axis=0))\n plt.semilogy(range(r_test_adv.shape[1]),np.mean(np.abs(r_test_adv),axis=0))\n \n plt.show()\n\ndef run_pca(Data, Model, path=None):\n sess = K.get_session()\n K.set_learning_phase(False)\n\n data = Data()\n model = Model(path)\n\n shape = (-1, model.num_channels*model.image_size**2)\n \n pca = sklearn.decomposition.PCA(n_components=shape[1])\n\n pca.fit(data.train_data.reshape(shape))\n\n print(pca.explained_variance_ratio_)\n\n r_test = pca.transform(data.test_data.reshape(shape))\n\n #attack = FGS(sess, model, eps=.3)\n attack = CarliniL2(sess, model, batch_size=100, max_iterations=1000, \n binary_search_steps=2, targeted=False,\n initial_const=10)\n\n N = 10000\n\n #test_adv = attack.attack(data.test_data[:N], data.test_labels[:N])\n test_adv = np.load(\"tmp/outlieradvtest.npy\")\n\n r_test_adv = pca.transform(test_adv[:N].reshape(shape))\n\n fig = plt.figure(figsize=(4,3))\n fig.subplots_adjust(bottom=0.17,left=.19)\n \n plt.xlabel('Component Number')\n plt.ylabel('Mean Absolute Value (log 
scale)')\n\n plt.semilogy(range(r_test.shape[1]),np.mean(np.abs(r_test),axis=0),label='Valid')\n plt.semilogy(range(r_test_adv.shape[1]),np.mean(np.abs(r_test_adv),axis=0), label='Adversarial')\n\n plt.legend()\n \n pp = PdfPages('/tmp/a.pdf')\n plt.savefig(pp, format='pdf')\n pp.close()\n plt.show()\n\ndef run_convolution_pca(Data, Model, path):\n sess = K.get_session()\n K.set_learning_phase(False)\n\n data = Data()\n model = Model(path)\n\n \"\"\"\n for i in range(4):\n model2 = Model(path)\n\n layer = i\n hidden_layer = model2.model\n while True:\n hidden_layer = pop(hidden_layer)\n if 'conv2d' in str(hidden_layer.outputs):\n if layer == 0: \n shape = hidden_layer.outputs[0].get_shape().as_list()\n shape = tuple([-1]+list(shape[1:]))\n flatshape = (-1, shape[1]*shape[2]*shape[3])\n break\n layer -= 1\n \n pca = sklearn.decomposition.PCA(n_components=flatshape[-1])\n\n print('fitting',flatshape)\n pca.fit(hidden_layer.predict(data.train_data[::5]).reshape(flatshape))\n print('done')\n open(\"tmp/pcalayer%d.p\"%i,\"wb\").write(pickle.dumps(pca))\n #\"\"\"\n\n pcas = []\n for i in range(2):\n layer = i\n model2 = Model(path)\n hidden_layer = model2.model\n while True:\n hidden_layer = pop(hidden_layer)\n if 'conv2d' in str(hidden_layer.outputs):\n if layer == 0: \n shape = hidden_layer.outputs[0].get_shape().as_list()\n shape = tuple([-1]+list(shape[1:]))\n flatshape = (-1, shape[1]*shape[2]*shape[3])\n break\n layer -= 1\n\n print(\"shape\",shape,flatshape)\n\n pca = pickle.load(open(\"tmp/pcalayer%d.p\"%i,\"rb\"))\n\n print('loaded')\n \n test_adv = np.load(\"tmp/outlieradvtest.npy\")\n hidden_adv = pca.transform(hidden_layer.predict(data.test_data).reshape(flatshape))\n hidden = pca.transform(hidden_layer.predict(test_adv).reshape(flatshape))\n\n print(hidden_adv.shape)\n\n np.save(\"/tmp/hidden_adv\", hidden_adv)\n np.save(\"/tmp/hidden\", hidden)\n print('complete')\n \n hidden_adv = np.load(\"/tmp/hidden_adv.npy\").reshape(shape)\n hidden = np.load(\"/tmp/hidden.npy\").reshape(shape)\n \n stdev = np.std(hidden,axis=0)\n \n hidden = np.mean(np.abs(hidden/stdev),axis=(1,2))\n hidden_adv = np.mean(np.abs(hidden_adv/stdev),axis=(1,2))\n \n print('fit model')\n svm = LinearSVC()\n svm.fit(np.concatenate([hidden_adv[:1000],hidden[:1000]],axis=0),[1]*1000+[0]*1000)\n print(np.mean(svm.predict(hidden)))\n print(np.mean(svm.predict(hidden_adv)))\n \n print(hidden.shape)\n \n\n\ndef show(img):\n remap = \" .*#\"+\"#\"*100\n img = (img.flatten()+.5)*3\n print(\"START\")\n for i in range(28):\n print(\"\".join([remap[int(round(x))] for x in img[i*28:i*28+28]]))\n\n#run_pca(MNIST, MNISTModel, \"models/mnist\")\n#run_hidden_pca(CIFAR, CIFARModel, \"models/cifar\")\n\nrun_convolution_pca(MNIST, MNISTModel, \"models/mnist\")\n"
] | [
[
"numpy.arctanh",
"numpy.linspace",
"tensorflow.reduce_sum",
"tensorflow.global_variables",
"tensorflow.variables_initializer",
"tensorflow.tanh",
"numpy.mean",
"tensorflow.train.AdamOptimizer",
"numpy.exp",
"numpy.arange",
"numpy.copy",
"numpy.argmax",
"numpy.zeros",
"numpy.log",
"tensorflow.placeholder",
"tensorflow.exp",
"numpy.random.random_integers",
"numpy.array",
"numpy.sum",
"tensorflow.reduce_max",
"tensorflow.constant",
"tensorflow.maximum",
"tensorflow.reshape",
"numpy.sort",
"numpy.ones",
"tensorflow.log"
],
[
"matplotlib.pyplot.legend",
"matplotlib.backends.backend_pdf.PdfPages",
"numpy.abs",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.savefig",
"numpy.save",
"numpy.concatenate",
"numpy.std",
"matplotlib.pyplot.xlabel",
"sklearn.svm.LinearSVC",
"numpy.load",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylabel"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
coldfix/probnum | [
"9b93d822c8d6501f9a12a783da84867ea54e6f6c"
] | [
"src/probnum/prob/distributions/dirac.py"
] | [
"\"\"\"\nDirac delta distribution.\n\"\"\"\nimport operator\n\nimport numpy as np\n\nfrom probnum.prob.distributions.distribution import Distribution\n\n\nclass Dirac(Distribution):\n \"\"\"\n The Dirac delta distribution.\n\n This distribution models a point mass and can be useful to represent\n numbers as random variables with Dirac measure. It has the useful\n property that arithmetic operations between a :class:`Dirac` random\n variable and an arbitrary :class:`RandomVariable` acts in the same\n way as the arithmetic operation with a constant.\n\n Note, that a Dirac measure does not admit a probability density\n function but can be viewed as a distribution (generalized function).\n\n Parameters\n ----------\n support : scalar or array-like or LinearOperator\n The support of the dirac delta function.\n\n See Also\n --------\n Distribution : Class representing general probability distribution.\n\n Examples\n --------\n >>> from probnum.prob import RandomVariable, Dirac\n >>> dist1 = Dirac(support=0.)\n >>> dist2 = Dirac(support=1.)\n >>> rv = RandomVariable(distribution=dist1 + dist2)\n >>> rv.sample(size=5)\n array([1., 1., 1., 1., 1.])\n \"\"\"\n\n def __init__(self, support, random_state=None):\n if np.isscalar(support):\n _dtype = np.dtype(type(support))\n else:\n _dtype = support.dtype\n super().__init__(\n parameters={\"support\": support}, dtype=_dtype, random_state=random_state\n )\n\n def cdf(self, x):\n if np.any(x < self.parameters[\"support\"]):\n return 0.0\n else:\n return 1.0\n\n def median(self):\n return self.parameters[\"support\"]\n\n def mode(self):\n return self.parameters[\"support\"]\n\n def mean(self):\n return self.parameters[\"support\"]\n\n def var(self):\n return 0.0\n\n def cov(self):\n if np.isscalar(self.parameters[\"support\"]):\n return self.var()\n else:\n return np.zeros(\n (len(self.parameters[\"support\"]), len(self.parameters[\"support\"]))\n )\n\n def sample(self, size=(), seed=None):\n ndims = len(self.shape)\n if size == 1 or size == ():\n return self.parameters[\"support\"]\n elif isinstance(size, int) and ndims == 0:\n return np.tile(A=self.parameters[\"support\"], reps=size)\n elif isinstance(size, int):\n return np.tile(\n A=self.parameters[\"support\"], reps=[size, *np.repeat(1, ndims)]\n )\n else:\n return np.tile(\n A=self.parameters[\"support\"], reps=tuple([*size, *np.repeat(1, ndims)])\n )\n\n def reshape(self, newshape):\n try:\n # Reshape support\n self._parameters[\"support\"].reshape(newshape=newshape)\n except ValueError:\n raise ValueError(\n \"Cannot reshape this Dirac distribution to the given shape: {}\".format(\n str(newshape)\n )\n )\n\n # Binary arithmetic operations\n def __add__(self, other):\n if isinstance(other, Dirac):\n support_ = self.parameters[\"support\"] + other.parameters[\"support\"]\n return Dirac(support=support_, random_state=self.random_state)\n else:\n return other.__add__(other=self)\n\n def __sub__(self, other):\n if isinstance(other, Dirac):\n support_ = self.parameters[\"support\"] - other.parameters[\"support\"]\n return Dirac(support=support_, random_state=self.random_state)\n else:\n return other.__rsub__(other=self)\n\n def __mul__(self, other):\n if isinstance(other, Dirac):\n support_ = self.parameters[\"support\"] * other.parameters[\"support\"]\n return Dirac(support=support_, random_state=self.random_state)\n else:\n return other.__mul__(other=self)\n\n def __matmul__(self, other):\n if isinstance(other, Dirac):\n support_ = self.parameters[\"support\"] @ other.parameters[\"support\"]\n return 
Dirac(support=support_, random_state=self.random_state)\n else:\n return other.__rmatmul__(other=self)\n\n def __truediv__(self, other):\n if isinstance(other, Dirac):\n support_ = operator.truediv(\n self.parameters[\"support\"], other.parameters[\"support\"]\n )\n return Dirac(support=support_, random_state=self.random_state)\n else:\n return other.__rtruediv__(other=self)\n\n def __pow__(self, power, modulo=None):\n if isinstance(power, Dirac):\n support_ = pow(\n self.parameters[\"support\"], power.parameters[\"support\"], modulo\n )\n return Dirac(support=support_, random_state=self.random_state)\n else:\n return power.__rpow__(power=self, modulo=modulo)\n\n # Binary arithmetic operations with reflected (swapped) operands\n\n def __radd__(self, other):\n return other.__add__(other=self)\n\n def __rsub__(self, other):\n return other.__sub__(other=self)\n\n def __rmul__(self, other):\n return other.__mul__(other=self)\n\n def __rmatmul__(self, other):\n return other.__matmul__(other=self)\n\n def __rtruediv__(self, other):\n return other.__truediv__(other=self)\n\n def __rpow__(self, power, modulo=None):\n return power.__pow__(power=self)\n\n # Augmented arithmetic assignments (+=, -=, *=, ...)\n # attempting to do the operation in place\n\n def __iadd__(self, other):\n if isinstance(other, Dirac):\n support_ = self.parameters[\"support\"] + other.parameters[\"support\"]\n self.parameters[\"support\"] = support_\n return self\n else:\n return NotImplemented\n\n def __isub__(self, other):\n if isinstance(other, Dirac):\n support_ = self.parameters[\"support\"] - other.parameters[\"support\"]\n self.parameters[\"support\"] = support_\n return self\n else:\n return NotImplemented\n\n def __imul__(self, other):\n if isinstance(other, Dirac):\n support_ = self.parameters[\"support\"] * other.parameters[\"support\"]\n self.parameters[\"support\"] = support_\n return self\n else:\n return NotImplemented\n\n def __imatmul__(self, other):\n if isinstance(other, Dirac):\n support_ = self.parameters[\"support\"] @ other.parameters[\"support\"]\n self.parameters[\"support\"] = support_\n return self\n else:\n return NotImplemented\n\n def __itruediv__(self, other):\n if isinstance(other, Dirac):\n support_ = operator.truediv(\n self.parameters[\"support\"], other.parameters[\"support\"]\n )\n self.parameters[\"support\"] = support_\n return self\n else:\n return NotImplemented\n\n def __ipow__(self, power, modulo=None):\n if isinstance(power, Dirac):\n support_ = pow(\n self.parameters[\"support\"], power.parameters[\"support\"], modulo\n )\n self.parameters[\"support\"] = support_\n return self\n else:\n return NotImplemented\n\n # Unary arithmetic operations\n\n def __neg__(self):\n self.parameters[\"support\"] = operator.neg(self.parameters[\"support\"])\n return self\n\n def __pos__(self):\n self.parameters[\"support\"] = operator.pos(self.parameters[\"support\"])\n return self\n\n def __abs__(self):\n self.parameters[\"support\"] = operator.abs(self.parameters[\"support\"])\n return self\n\n def __invert__(self):\n support_ = self.parameters[\"support\"]\n self.parameters[\"support\"] = operator.invert(support_)\n return self\n"
] | [
[
"numpy.repeat",
"numpy.tile",
"numpy.any",
"numpy.isscalar"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
SJTU-Det/R3Det | [
"3e092fa65dee2b9f7722b0985b3791811a1de5ae",
"3e092fa65dee2b9f7722b0985b3791811a1de5ae",
"3e092fa65dee2b9f7722b0985b3791811a1de5ae",
"3e092fa65dee2b9f7722b0985b3791811a1de5ae",
"3e092fa65dee2b9f7722b0985b3791811a1de5ae"
] | [
"libs/configs/DOTA1.0/r3det_plusplus/cfgs_res50_dota_r3det_plusplus_v8.py",
"libs/detection_oprations/refine_proposal_opr_csl.py",
"libs/configs/DOTA1.0/csl/cfgs_res152_dota_r3det_csl_v4.py",
"libs/configs/DOTA1.0/baseline/cfgs_res50_dota_v13.py",
"libs/configs/ICDAR2015/csl/cfgs_res101_icdar2015_csl_v1.py"
] | [
"# -*- coding: utf-8 -*-\nfrom __future__ import division, print_function, absolute_import\nimport os\nimport tensorflow as tf\nimport math\n\n\"\"\"\nv3 + weight\n\nThis is your result for task 1:\n\n mAP: 0.694278910759864\n ap of each class:\n plane:0.88866159706304,\n baseball-diamond:0.7860352276239824,\n bridge:0.47338301497690105,\n ground-track-field:0.6216372729671545,\n small-vehicle:0.6994177931102508,\n large-vehicle:0.7458671012655077,\n ship:0.785294772102568,\n tennis-court:0.9075708653156096,\n basketball-court:0.7834021499469714,\n storage-tank:0.8172385380195397,\n soccer-ball-field:0.5645662115849255,\n roundabout:0.6018272737599449,\n harbor:0.5750654725229614,\n swimming-pool:0.6652388936929979,\n helicopter:0.49897747744560683\n\nThe submitted information is :\n\nDescription: RetinaNet_DOTA_R3Det_plusplus_2x_20200405_108w\nUsername: SJTU-Det\nInstitute: SJTU\nEmailadress: [email protected]\nTeamMembers: yangxue\n\n\"\"\"\n\n# ------------------------------------------------\nVERSION = 'RetinaNet_DOTA_R3Det_plusplus_2x_20200405'\nNET_NAME = 'resnet50_v1d' # 'MobilenetV2'\nADD_BOX_IN_TENSORBOARD = True\n\n# ---------------------------------------- System_config\nROOT_PATH = os.path.abspath('../')\nprint(20*\"++--\")\nprint(ROOT_PATH)\nGPU_GROUP = \"0,1,2,3\"\nNUM_GPU = len(GPU_GROUP.strip().split(','))\nSHOW_TRAIN_INFO_INTE = 20\nSMRY_ITER = 200\nSAVE_WEIGHTS_INTE = 27000 * 2\n\nSUMMARY_PATH = ROOT_PATH + '/output/summary'\nTEST_SAVE_PATH = ROOT_PATH + '/tools/test_result'\n\nif NET_NAME.startswith(\"resnet\"):\n weights_name = NET_NAME\nelif NET_NAME.startswith(\"MobilenetV2\"):\n weights_name = \"mobilenet/mobilenet_v2_1.0_224\"\nelse:\n raise Exception('net name must in [resnet_v1_101, resnet_v1_50, MobilenetV2]')\n\nPRETRAINED_CKPT = ROOT_PATH + '/data/pretrained_weights/' + weights_name + '.ckpt'\nTRAINED_CKPT = os.path.join(ROOT_PATH, 'output/trained_weights')\nEVALUATE_DIR = ROOT_PATH + '/output/evaluate_result_pickle/'\n\n# ------------------------------------------ Train config\nRESTORE_FROM_RPN = False\nFIXED_BLOCKS = 1 # allow 0~3\nFREEZE_BLOCKS = [True, False, False, False, False] # for gluoncv backbone\nUSE_07_METRIC = True\n\nMUTILPY_BIAS_GRADIENT = 2.0 # if None, will not multipy\nGRADIENT_CLIPPING_BY_NORM = 10.0 # if None, will not clip\n\nCLS_WEIGHT = 1.0\nREG_WEIGHT = 1.0\nUSE_IOU_FACTOR = False\n\nBATCH_SIZE = 1\nEPSILON = 1e-5\nMOMENTUM = 0.9\nLR = 5e-4\nDECAY_STEP = [SAVE_WEIGHTS_INTE*12, SAVE_WEIGHTS_INTE*16, SAVE_WEIGHTS_INTE*20]\nMAX_ITERATION = SAVE_WEIGHTS_INTE*20\nWARM_SETP = int(1.0 / 4.0 * SAVE_WEIGHTS_INTE)\n\n# -------------------------------------------- Data_preprocess_config\nDATASET_NAME = 'DOTA' # 'pascal', 'coco'\nPIXEL_MEAN = [123.68, 116.779, 103.939] # R, G, B. In tf, channel is RGB. In openCV, channel is BGR\nPIXEL_MEAN_ = [0.485, 0.456, 0.406]\nPIXEL_STD = [0.229, 0.224, 0.225] # R, G, B. In tf, channel is RGB. 
In openCV, channel is BGR\nIMG_SHORT_SIDE_LEN = 800\nIMG_MAX_LENGTH = 800\nCLASS_NUM = 15\n\nIMG_ROTATE = False\nRGB2GRAY = False\nVERTICAL_FLIP = False\nHORIZONTAL_FLIP = True\nIMAGE_PYRAMID = False\n\n# --------------------------------------------- Network_config\nSUBNETS_WEIGHTS_INITIALIZER = tf.random_normal_initializer(mean=0.0, stddev=0.01, seed=None)\nSUBNETS_BIAS_INITIALIZER = tf.constant_initializer(value=0.0)\nPROBABILITY = 0.01\nFINAL_CONV_BIAS_INITIALIZER = tf.constant_initializer(value=-math.log((1.0 - PROBABILITY) / PROBABILITY))\nWEIGHT_DECAY = 1e-4\nUSE_GN = False\nNUM_SUBNET_CONV = 4\nNUM_REFINE_STAGE = 1\nUSE_RELU = False\nFPN_CHANNEL = 256\n\n# ---------------------------------------------Anchor config\nLEVEL = ['P3', 'P4', 'P5', 'P6', 'P7']\nBASE_ANCHOR_SIZE_LIST = [32, 64, 128, 256, 512]\nANCHOR_STRIDE = [8, 16, 32, 64, 128]\nANCHOR_SCALES = [2 ** 0, 2 ** (1.0 / 3.0), 2 ** (2.0 / 3.0)]\nANCHOR_RATIOS = [1, 1 / 2, 2., 1 / 3., 3., 5., 1 / 5.]\nANCHOR_ANGLES = [-90, -75, -60, -45, -30, -15]\nANCHOR_SCALE_FACTORS = None\nUSE_CENTER_OFFSET = True\nMETHOD = 'H'\nUSE_ANGLE_COND = False\nANGLE_RANGE = 90\n\n# --------------------------------------------RPN config\nSHARE_NET = True\nUSE_P5 = True\nIOU_POSITIVE_THRESHOLD = 0.5\nIOU_NEGATIVE_THRESHOLD = 0.4\nREFINE_IOU_POSITIVE_THRESHOLD = [0.6, 0.7]\nREFINE_IOU_NEGATIVE_THRESHOLD = [0.5, 0.6]\n\nNMS = True\nNMS_IOU_THRESHOLD = 0.1\nMAXIMUM_DETECTIONS = 100\nFILTERED_SCORE = 0.05\nVIS_SCORE = 0.4\n\n# --------------------------------------------MASK config\nUSE_SUPERVISED_MASK = True\nMASK_TYPE = 'r' # r or h\nBINARY_MASK = False\nSIGMOID_ON_DOT = False\nMASK_ACT_FET = True # weather use mask generate 256 channels to dot feat.\nGENERATE_MASK_LIST = [\"P3\", \"P4\", \"P5\", \"P6\", \"P7\"]\nADDITION_LAYERS = [1, 1, 1, 1, 1] # add 4 layer to generate P2_mask, 2 layer to generate P3_mask\nENLAEGE_RF_LIST = [\"P3\", \"P4\", \"P5\", \"P6\", \"P7\"]\nSUPERVISED_MASK_LOSS_WEIGHT = 1.0\n",
"# encoding: utf-8\nfrom libs.configs import cfgs\nfrom libs.box_utils import bbox_transform\nfrom libs.box_utils import nms_rotate\nimport tensorflow as tf\n\nfrom libs.box_utils.coordinate_convert import coordinate_present_convert\n\n\ndef filter_detections(boxes, scores, is_training, gpu_id):\n \"\"\"\n :param boxes: [-1, 4]\n :param scores: [-1, ]\n :param labels: [-1, ]\n :return:\n \"\"\"\n if is_training:\n indices = tf.reshape(tf.where(tf.greater(scores, cfgs.VIS_SCORE)), [-1, ])\n else:\n indices = tf.reshape(tf.where(tf.greater(scores, cfgs.FILTERED_SCORE)), [-1, ])\n\n if cfgs.NMS:\n filtered_boxes = tf.gather(boxes, indices)\n filtered_scores = tf.gather(scores, indices)\n\n if cfgs.ANGLE_RANGE == 180:\n # _, _, _, _, theta = tf.unstack(boxes_pred, axis=1)\n # indx = tf.reshape(tf.where(tf.logical_and(tf.less(theta, 0), tf.greater_equal(theta, -180))), [-1, ])\n # boxes_pred = tf.gather(boxes_pred, indx)\n # scores = tf.gather(scores, indx)\n\n filtered_boxes = tf.py_func(coordinate_present_convert,\n inp=[filtered_boxes, 1],\n Tout=[tf.float32])\n filtered_boxes = tf.reshape(filtered_boxes, [-1, 5])\n\n # perform NMS\n max_output_size = 4000 if 'DOTA' in cfgs.NET_NAME else 200\n nms_indices = nms_rotate.nms_rotate(decode_boxes=filtered_boxes,\n scores=filtered_scores,\n iou_threshold=cfgs.NMS_IOU_THRESHOLD,\n max_output_size=100 if is_training else max_output_size,\n use_angle_condition=False,\n angle_threshold=15,\n use_gpu=True,\n gpu_id=gpu_id)\n\n # filter indices based on NMS\n indices = tf.gather(indices, nms_indices)\n\n # add indices to list of all indices\n return indices\n\n\ndef postprocess_detctions(refine_bbox_pred, refine_cls_prob, refine_angle_prob, anchors, is_training, gpu_id):\n\n boxes_pred = bbox_transform.rbbox_transform_inv(boxes=anchors, deltas=refine_bbox_pred,\n scale_factors=cfgs.ANCHOR_SCALE_FACTORS)\n angle_cls = tf.cast(tf.argmax(refine_angle_prob, axis=1), tf.float32)\n angle_cls = (tf.reshape(angle_cls, [-1, ]) * -1 - 0.5) * cfgs.OMEGA\n x, y, w, h, theta = tf.unstack(boxes_pred, axis=1)\n boxes_pred_angle = tf.transpose(tf.stack([x, y, w, h, angle_cls]))\n\n return_boxes_pred = []\n return_boxes_pred_angle = []\n return_scores = []\n return_labels = []\n for j in range(0, cfgs.CLASS_NUM):\n indices = filter_detections(boxes_pred_angle, refine_cls_prob[:, j], is_training, gpu_id)\n tmp_boxes_pred_angle = tf.reshape(tf.gather(boxes_pred_angle, indices), [-1, 5])\n tmp_boxes_pred = tf.reshape(tf.gather(boxes_pred, indices), [-1, 5])\n tmp_scores = tf.reshape(tf.gather(refine_cls_prob[:, j], indices), [-1, ])\n\n if cfgs.ANGLE_RANGE == 180:\n # _, _, _, _, theta = tf.unstack(boxes_pred, axis=1)\n # indx = tf.reshape(tf.where(tf.logical_and(tf.less(theta, 0), tf.greater_equal(theta, -180))), [-1, ])\n # boxes_pred = tf.gather(boxes_pred, indx)\n # scores = tf.gather(scores, indx)\n\n tmp_boxes_pred_angle = tf.py_func(coordinate_present_convert,\n inp=[tmp_boxes_pred_angle, 1],\n Tout=[tf.float32])\n tmp_boxes_pred_angle = tf.reshape(tmp_boxes_pred_angle, [-1, 5])\n\n tmp_boxes_pred = tf.py_func(coordinate_present_convert,\n inp=[tmp_boxes_pred, 1],\n Tout=[tf.float32])\n tmp_boxes_pred = tf.reshape(tmp_boxes_pred, [-1, 5])\n\n return_boxes_pred.append(tmp_boxes_pred)\n return_boxes_pred_angle.append(tmp_boxes_pred_angle)\n return_scores.append(tmp_scores)\n return_labels.append(tf.ones_like(tmp_scores)*(j+1))\n\n return_boxes_pred = tf.concat(return_boxes_pred, axis=0)\n return_boxes_pred_angle = tf.concat(return_boxes_pred_angle, axis=0)\n 
return_scores = tf.concat(return_scores, axis=0)\n return_labels = tf.concat(return_labels, axis=0)\n\n return return_boxes_pred, return_scores, return_labels, return_boxes_pred_angle\n",
"# -*- coding: utf-8 -*-\nfrom __future__ import division, print_function, absolute_import\nimport os\nimport tensorflow as tf\nimport math\n\n\"\"\"\n\n\"\"\"\n\n# ------------------------------------------------\nVERSION = 'RetinaNet_DOTA_R3Det_CSL_4x_20200827'\nNET_NAME = 'resnet152_v1d' # 'MobilenetV2'\nADD_BOX_IN_TENSORBOARD = True\n\n# ---------------------------------------- System_config\nROOT_PATH = os.path.abspath('../')\nprint(20*\"++--\")\nprint(ROOT_PATH)\nGPU_GROUP = \"1,2,3\"\nNUM_GPU = len(GPU_GROUP.strip().split(','))\nSHOW_TRAIN_INFO_INTE = 20\nSMRY_ITER = 200\nSAVE_WEIGHTS_INTE = 27000 * 4\n\nSUMMARY_PATH = ROOT_PATH + '/output/summary'\nTEST_SAVE_PATH = ROOT_PATH + '/tools/test_result'\n\nif NET_NAME.startswith(\"resnet\"):\n weights_name = NET_NAME\nelif NET_NAME.startswith(\"MobilenetV2\"):\n weights_name = \"mobilenet/mobilenet_v2_1.0_224\"\nelse:\n raise Exception('net name must in [resnet_v1_101, resnet_v1_50, MobilenetV2]')\n\nPRETRAINED_CKPT = ROOT_PATH + '/data/pretrained_weights/' + weights_name + '.ckpt'\nTRAINED_CKPT = os.path.join(ROOT_PATH, 'output/trained_weights')\nEVALUATE_DIR = ROOT_PATH + '/output/evaluate_result_pickle/'\n\n# ------------------------------------------ Train config\nRESTORE_FROM_RPN = False\nFIXED_BLOCKS = 1 # allow 0~3\nFREEZE_BLOCKS = [True, False, False, False, False] # for gluoncv backbone\nUSE_07_METRIC = True\n\nMUTILPY_BIAS_GRADIENT = 2.0 # if None, will not multipy\nGRADIENT_CLIPPING_BY_NORM = 10.0 # if None, will not clip\n\nCLS_WEIGHT = 1.0\nREG_WEIGHT = 1.0\nANGLE_CLS_WEIGHT = 0.5\nUSE_IOU_FACTOR = True\n\nBATCH_SIZE = 1\nEPSILON = 1e-5\nMOMENTUM = 0.9\nLR = 5e-4\nDECAY_STEP = [SAVE_WEIGHTS_INTE*12, SAVE_WEIGHTS_INTE*16, SAVE_WEIGHTS_INTE*20]\nMAX_ITERATION = SAVE_WEIGHTS_INTE*20\nWARM_SETP = int(1.0 / 8.0 * SAVE_WEIGHTS_INTE)\n\n\n# -------------------------------------------- Data_preprocess_config\nDATASET_NAME = 'DOTA' # 'pascal', 'coco'\nPIXEL_MEAN = [123.68, 116.779, 103.939] # R, G, B. In tf, channel is RGB. In openCV, channel is BGR\nPIXEL_MEAN_ = [0.485, 0.456, 0.406]\nPIXEL_STD = [0.229, 0.224, 0.225] # R, G, B. In tf, channel is RGB. 
In openCV, channel is BGR\nIMG_SHORT_SIDE_LEN = [800, 400, 600, 720, 900, 1000, 1100]\nIMG_MAX_LENGTH = 1100\nCLASS_NUM = 15\nLABEL_TYPE = 0\nRADUIUS = 4\nOMEGA = 1\n\nIMG_ROTATE = True\nRGB2GRAY = True\nVERTICAL_FLIP = True\nHORIZONTAL_FLIP = True\nIMAGE_PYRAMID = True\n\n# --------------------------------------------- Network_config\nSUBNETS_WEIGHTS_INITIALIZER = tf.random_normal_initializer(mean=0.0, stddev=0.01, seed=None)\nSUBNETS_BIAS_INITIALIZER = tf.constant_initializer(value=0.0)\nPROBABILITY = 0.01\nFINAL_CONV_BIAS_INITIALIZER = tf.constant_initializer(value=-math.log((1.0 - PROBABILITY) / PROBABILITY))\nWEIGHT_DECAY = 1e-4\nUSE_GN = False\nNUM_SUBNET_CONV = 4\nNUM_REFINE_STAGE = 1\nUSE_RELU = False\nFPN_CHANNEL = 256\n\n# ---------------------------------------------Anchor config\nLEVEL = ['P3', 'P4', 'P5', 'P6', 'P7']\nBASE_ANCHOR_SIZE_LIST = [32, 64, 128, 256, 512]\nANCHOR_STRIDE = [8, 16, 32, 64, 128]\nANCHOR_SCALES = [2 ** 0, 2 ** (1.0 / 3.0), 2 ** (2.0 / 3.0)]\nANCHOR_RATIOS = [1, 1 / 2, 2., 1 / 3., 3., 5., 1 / 5.]\nANCHOR_ANGLES = [-90, -75, -60, -45, -30, -15]\nANCHOR_SCALE_FACTORS = None\nUSE_CENTER_OFFSET = True\nMETHOD = 'H'\nUSE_ANGLE_COND = False\nANGLE_RANGE = 90\n\n# --------------------------------------------RPN config\nSHARE_NET = True\nUSE_P5 = True\nIOU_POSITIVE_THRESHOLD = 0.5\nIOU_NEGATIVE_THRESHOLD = 0.4\nREFINE_IOU_POSITIVE_THRESHOLD = [0.6, 0.7]\nREFINE_IOU_NEGATIVE_THRESHOLD = [0.5, 0.6]\n\nNMS = True\nNMS_IOU_THRESHOLD = 0.1\nMAXIMUM_DETECTIONS = 100\nFILTERED_SCORE = 0.05\nVIS_SCORE = 0.4\n\n# --------------------------------------------MASK config\nUSE_SUPERVISED_MASK = False\nMASK_TYPE = 'r' # r or h\nBINARY_MASK = False\nSIGMOID_ON_DOT = False\nMASK_ACT_FET = True # weather use mask generate 256 channels to dot feat.\nGENERATE_MASK_LIST = [\"P3\", \"P4\", \"P5\", \"P6\", \"P7\"]\nADDITION_LAYERS = [4, 4, 3, 2, 2] # add 4 layer to generate P2_mask, 2 layer to generate P3_mask\nENLAEGE_RF_LIST = [\"P3\", \"P4\", \"P5\", \"P6\", \"P7\"]\nSUPERVISED_MASK_LOSS_WEIGHT = 1.0\n",
"# -*- coding: utf-8 -*-\nfrom __future__ import division, print_function, absolute_import\nimport os\nimport tensorflow as tf\nimport math\n\n\"\"\"\nv5 repeat\nThis is your result for task 1:\n\n mAP: 0.6887560239475758\n ap of each class:\n plane:0.8940529652135962,\n baseball-diamond:0.7474486698392037,\n bridge:0.3643166649129069,\n ground-track-field:0.662924268693916,\n small-vehicle:0.6616918317735657,\n large-vehicle:0.7637966336747759,\n ship:0.7775309381402764,\n tennis-court:0.907857598484631,\n basketball-court:0.8105264053192953,\n storage-tank:0.8012307433913947,\n soccer-ball-field:0.5642786809836354,\n roundabout:0.6258372702184064,\n harbor:0.6188599073641682,\n swimming-pool:0.6492325004864525,\n helicopter:0.4817552807174126\n\nThe submitted information is :\n\nDescription: RetinaNet_DOTA_1x_20191119_54w\nUsername: liuqingiqng\nInstitute: Central South University\nEmailadress: [email protected]\nTeamMembers: liuqingqing\n\"\"\"\n\n# ------------------------------------------------\nVERSION = 'RetinaNet_DOTA_1x_20191119'\nNET_NAME = 'resnet50_v1d' # 'MobilenetV2'\nADD_BOX_IN_TENSORBOARD = True\n\n# ---------------------------------------- System_config\nROOT_PATH = os.path.abspath('../')\nprint(20*\"++--\")\nprint(ROOT_PATH)\nGPU_GROUP = \"0\"\nNUM_GPU = len(GPU_GROUP.strip().split(','))\nSHOW_TRAIN_INFO_INTE = 20\nSMRY_ITER = 200\nSAVE_WEIGHTS_INTE = 27000\n\nSUMMARY_PATH = ROOT_PATH + '/output/summary'\nTEST_SAVE_PATH = ROOT_PATH + '/tools/test_result'\n\nif NET_NAME.startswith(\"resnet\"):\n weights_name = NET_NAME\nelif NET_NAME.startswith(\"MobilenetV2\"):\n weights_name = \"mobilenet/mobilenet_v2_1.0_224\"\nelse:\n raise Exception('net name must in [resnet_v1_101, resnet_v1_50, MobilenetV2]')\n\nPRETRAINED_CKPT = ROOT_PATH + '/data/pretrained_weights/' + weights_name + '.ckpt'\nTRAINED_CKPT = os.path.join(ROOT_PATH, 'output/trained_weights')\nEVALUATE_DIR = ROOT_PATH + '/output/evaluate_result_pickle/'\n\n# ------------------------------------------ Train config\nRESTORE_FROM_RPN = False\nFIXED_BLOCKS = 1 # allow 0~3\nFREEZE_BLOCKS = [True, False, False, False, False] # for gluoncv backbone\nUSE_07_METRIC = True\n\nMUTILPY_BIAS_GRADIENT = 2.0 # if None, will not multipy\nGRADIENT_CLIPPING_BY_NORM = 10.0 # if None, will not clip\n\nCLS_WEIGHT = 1.0\nREG_WEIGHT = 1.0\nREG_LOSS_MODE = 0\n\nBATCH_SIZE = 1\nEPSILON = 1e-5\nMOMENTUM = 0.9\nLR = 5e-4 # * NUM_GPU * BATCH_SIZE\nDECAY_STEP = [SAVE_WEIGHTS_INTE*12, SAVE_WEIGHTS_INTE*16, SAVE_WEIGHTS_INTE*20]\nMAX_ITERATION = SAVE_WEIGHTS_INTE*20\nWARM_SETP = int(1.0 / 8.0 * SAVE_WEIGHTS_INTE)\n\n# -------------------------------------------- Data_preprocess_config\nDATASET_NAME = 'DOTA' # 'pascal', 'coco'\nPIXEL_MEAN = [123.68, 116.779, 103.939] # R, G, B. In tf, channel is RGB. In openCV, channel is BGR\nPIXEL_MEAN_ = [0.485, 0.456, 0.406]\nPIXEL_STD = [0.229, 0.224, 0.225] # R, G, B. In tf, channel is RGB. 
In openCV, channel is BGR\nIMG_SHORT_SIDE_LEN = 800\nIMG_MAX_LENGTH = 800\nCLASS_NUM = 15\n\n# --------------------------------------------- Network_config\nSUBNETS_WEIGHTS_INITIALIZER = tf.random_normal_initializer(mean=0.0, stddev=0.01, seed=None)\nSUBNETS_BIAS_INITIALIZER = tf.constant_initializer(value=0.0)\nPROBABILITY = 0.01\nFINAL_CONV_BIAS_INITIALIZER = tf.constant_initializer(value=-math.log((1.0 - PROBABILITY) / PROBABILITY))\nWEIGHT_DECAY = 1e-4\nUSE_GN = False\n\n# ---------------------------------------------Anchor config\nLEVEL = ['P3', 'P4', 'P5', 'P6', 'P7']\nBASE_ANCHOR_SIZE_LIST = [32, 64, 128, 256, 512]\nANCHOR_STRIDE = [8, 16, 32, 64, 128]\nANCHOR_SCALES = [2 ** 0, 2 ** (1.0 / 3.0), 2 ** (2.0 / 3.0)]\nANCHOR_RATIOS = [1, 1 / 3., 3., 5., 1 / 5.]\nANCHOR_ANGLES = [-90, -75, -60, -45, -30, -15]\nANCHOR_SCALE_FACTORS = None\nUSE_CENTER_OFFSET = True\nMETHOD = 'R'\nUSE_ANGLE_COND = False\nANGLE_RANGE = 90 # or 180\n\n# --------------------------------------------RPN config\nSHARE_NET = True\nUSE_P5 = True\nIOU_POSITIVE_THRESHOLD = 0.5\nIOU_NEGATIVE_THRESHOLD = 0.4\n\nNMS = True\nNMS_IOU_THRESHOLD = 0.1\nMAXIMUM_DETECTIONS = 100\nFILTERED_SCORE = 0.05\nVIS_SCORE = 0.4\n\n\n",
"# -*- coding: utf-8 -*-\nfrom __future__ import division, print_function, absolute_import\nimport os\nimport tensorflow as tf\nimport math\n\n\"\"\"\nCSL + gaussian label, omega=1, r=6\n2020-10-04 CSL\t80.50%\t87.40%\t83.81%\n\n\"\"\"\n\n# ------------------------------------------------\nVERSION = 'RetinaNet_ICDAR2015_CSL_2x_20201001'\nNET_NAME = 'resnet50_v1d' # 'MobilenetV2'\nADD_BOX_IN_TENSORBOARD = True\n\n# ---------------------------------------- System_config\nROOT_PATH = os.path.abspath('../')\nprint(20*\"++--\")\nprint(ROOT_PATH)\nGPU_GROUP = \"0,1,2\"\nNUM_GPU = len(GPU_GROUP.strip().split(','))\nSHOW_TRAIN_INFO_INTE = 20\nSMRY_ITER = 2000\nSAVE_WEIGHTS_INTE = 10000 * 2\n\nSUMMARY_PATH = ROOT_PATH + '/output/summary'\nTEST_SAVE_PATH = ROOT_PATH + '/tools/test_result'\n\nif NET_NAME.startswith(\"resnet\"):\n weights_name = NET_NAME\nelif NET_NAME.startswith(\"MobilenetV2\"):\n weights_name = \"mobilenet/mobilenet_v2_1.0_224\"\nelse:\n raise Exception('net name must in [resnet_v1_101, resnet_v1_50, MobilenetV2]')\n\nPRETRAINED_CKPT = ROOT_PATH + '/data/pretrained_weights/' + weights_name + '.ckpt'\nTRAINED_CKPT = os.path.join(ROOT_PATH, 'output/trained_weights')\nEVALUATE_DIR = ROOT_PATH + '/output/evaluate_result_pickle/'\n\n# ------------------------------------------ Train config\nRESTORE_FROM_RPN = False\nFIXED_BLOCKS = 1 # allow 0~3\nFREEZE_BLOCKS = [True, False, False, False, False] # for gluoncv backbone\nUSE_07_METRIC = True\n\nMUTILPY_BIAS_GRADIENT = 2.0 # if None, will not multipy\nGRADIENT_CLIPPING_BY_NORM = 10.0 # if None, will not clip\n\nCLS_WEIGHT = 1.0\nREG_WEIGHT = 1.0\nANGLE_WEIGHT = 0.5\nREG_LOSS_MODE = None\n\nBATCH_SIZE = 1\nEPSILON = 1e-5\nMOMENTUM = 0.9\nLR = 5e-4\nDECAY_STEP = [SAVE_WEIGHTS_INTE*12, SAVE_WEIGHTS_INTE*16, SAVE_WEIGHTS_INTE*20]\nMAX_ITERATION = SAVE_WEIGHTS_INTE*20\nWARM_SETP = int(1.0 / 4.0 * SAVE_WEIGHTS_INTE)\n\n# -------------------------------------------- Data_preprocess_config\nDATASET_NAME = 'ICDAR2015' # 'pascal', 'coco'\nPIXEL_MEAN = [123.68, 116.779, 103.939] # R, G, B. In tf, channel is RGB. In openCV, channel is BGR\nPIXEL_MEAN_ = [0.485, 0.456, 0.406]\nPIXEL_STD = [0.229, 0.224, 0.225] # R, G, B. In tf, channel is RGB. 
In openCV, channel is BGR\nIMG_SHORT_SIDE_LEN = [800, 600, 1000, 1200]\nIMG_MAX_LENGTH = 1500\nCLASS_NUM = 1\nLABEL_TYPE = 0\nRADUIUS = 6\nOMEGA = 1\n\nIMG_ROTATE = True\nRGB2GRAY = True\nVERTICAL_FLIP = True\nHORIZONTAL_FLIP = True\nIMAGE_PYRAMID = True\n\n# --------------------------------------------- Network_config\nSUBNETS_WEIGHTS_INITIALIZER = tf.random_normal_initializer(mean=0.0, stddev=0.01, seed=None)\nSUBNETS_BIAS_INITIALIZER = tf.constant_initializer(value=0.0)\nPROBABILITY = 0.01\nFINAL_CONV_BIAS_INITIALIZER = tf.constant_initializer(value=-math.log((1.0 - PROBABILITY) / PROBABILITY))\nWEIGHT_DECAY = 1e-4\nUSE_GN = False\nFPN_CHANNEL = 256\n\n# ---------------------------------------------Anchor config\nLEVEL = ['P3', 'P4', 'P5', 'P6', 'P7']\nBASE_ANCHOR_SIZE_LIST = [32, 64, 128, 256, 512]\nANCHOR_STRIDE = [8, 16, 32, 64, 128]\nANCHOR_SCALES = [2 ** 0, 2 ** (1.0 / 3.0), 2 ** (2.0 / 3.0)]\nANCHOR_RATIOS = [1, 1 / 2, 2., 1 / 3., 3., 5., 1 / 5.]\nANCHOR_ANGLES = [-90, -75, -60, -45, -30, -15]\nANCHOR_SCALE_FACTORS = None\nUSE_CENTER_OFFSET = True\nMETHOD = 'H'\nUSE_ANGLE_COND = False\nANGLE_RANGE = 180 # 90 or 180\n\n# --------------------------------------------RPN config\nSHARE_NET = True\nUSE_P5 = True\nIOU_POSITIVE_THRESHOLD = 0.5\nIOU_NEGATIVE_THRESHOLD = 0.4\n\nNMS = True\nNMS_IOU_THRESHOLD = 0.1\nMAXIMUM_DETECTIONS = 100\nFILTERED_SCORE = 0.05\nVIS_SCORE = 0.7\n\n"
] | [
[
"tensorflow.constant_initializer",
"tensorflow.random_normal_initializer"
],
[
"tensorflow.concat",
"tensorflow.unstack",
"tensorflow.greater",
"tensorflow.stack",
"tensorflow.reshape",
"tensorflow.ones_like",
"tensorflow.gather",
"tensorflow.argmax",
"tensorflow.py_func"
],
[
"tensorflow.constant_initializer",
"tensorflow.random_normal_initializer"
],
[
"tensorflow.constant_initializer",
"tensorflow.random_normal_initializer"
],
[
"tensorflow.constant_initializer",
"tensorflow.random_normal_initializer"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.4",
"1.5",
"1.7",
"0.12",
"1.0",
"1.2"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"1.12",
"1.4",
"1.13",
"1.5",
"1.7",
"0.12",
"1.0",
"1.2"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.4",
"1.5",
"1.7",
"0.12",
"1.0",
"1.2"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.4",
"1.5",
"1.7",
"0.12",
"1.0",
"1.2"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.4",
"1.5",
"1.7",
"0.12",
"1.0",
"1.2"
]
}
] |
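Note on the row above: its postprocess_detctions snippet converts the CSL angle-classification head back to a continuous angle via (argmax(angle_prob) * -1 - 0.5) * OMEGA. A minimal NumPy restatement of that decoding step, assuming OMEGA = 1 as in the accompanying config strings (illustrative sketch only, not part of the stored code field):

import numpy as np

OMEGA = 1  # angle granularity, matching the config rows above

def decode_csl_angle(refine_angle_prob):
    # per-anchor angle-class probabilities -> angle in degrees, mirroring
    # (tf.argmax(refine_angle_prob, axis=1) * -1 - 0.5) * cfgs.OMEGA
    angle_cls = np.argmax(refine_angle_prob, axis=1).astype(np.float32)
    return (angle_cls * -1.0 - 0.5) * OMEGA

one_hot = np.eye(180)[[0, 45, 90]]      # three anchors, 180 angle bins
print(decode_csl_angle(one_hot))        # [ -0.5 -45.5 -90.5]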
bbrzycki/setigen | [
"3106c32a629c76c71768ea02b7661474e1cf7ff6"
] | [
"setigen/distributions.py"
] | [
"import numpy as np\n\nfwhm_m = 2 * np.sqrt(2 * np.log(2))\n\ndef fwhm(sigma):\n \"\"\"\n Get full width at half maximum (FWHM) for a provided sigma / \n standard deviation, assuming a Gaussian distribution.\n \"\"\"\n return fwhm_m * sigma\n \n\ndef gaussian(x_mean, x_std, shape):\n return np.random.normal(x_mean, x_std, shape)\n\n\ndef truncated_gaussian(x_mean, x_std, x_min, shape):\n \"\"\"\n Sample from a normal distribution, but enforces a minimum value.\n \"\"\"\n return np.maximum(gaussian(x_mean, x_std, shape), x_min)\n\n\ndef chi2(x_mean, chi2_df, shape):\n \"\"\"\n Chi-squared distribution centered at a specific mean.\n \n Parameters\n ----------\n x_mean : float\n chi2_df : int\n Degrees of freedom for chi-squared\n shape : list\n Shape of output noise array\n \n Returns\n -------\n dist : ndarray\n Array of chi-squared noise\n \"\"\"\n return np.random.chisquare(df=chi2_df, size=shape) * x_mean / chi2_df"
] | [
[
"numpy.random.normal",
"numpy.log",
"numpy.random.chisquare"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
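Note on the row above: a short usage sketch for the setigen/distributions.py helpers listed in its code field (illustrative only; seeds and shapes are arbitrary):

import numpy as np

fwhm_m = 2 * np.sqrt(2 * np.log(2))          # same constant as in the module

sigma = 3.0
print(fwhm_m * sigma)                        # FWHM of a Gaussian with std 3 (~7.06)

# truncated Gaussian: normal draw clipped from below, as in truncated_gaussian()
np.random.seed(0)
samples = np.maximum(np.random.normal(5.0, 2.0, (4,)), 0.0)

# chi-squared noise rescaled to a target mean, as in chi2(x_mean, chi2_df, shape)
x_mean, chi2_df = 10.0, 2
noise = np.random.chisquare(df=chi2_df, size=(1000,)) * x_mean / chi2_df
print(round(noise.mean(), 1))                # close to x_mean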
mdblackledge/SimpleITK-Image-Symmetry | [
"b18343394852a1514bf45ddb9078e27c4d6f6718"
] | [
"image_symmetry.py"
] | [
"import SimpleITK as sitk\nimport numpy as np\nfrom scipy.optimize import minimize\nfrom pywt import wavedecn\n\nclass ImageSymmetry(object):\n\n def plane_normalised(self, plane):\n if not type(plane) is np.ndarray:\n plane = np.array(plane)\n if not np.abs(np.sum(plane[0:-1]**2) - 1.0) < 1e-10:\n return False\n return True\n\n def normalise_plane(self, plane):\n norm = np.sqrt(np.sum(np.array(plane[0:-1])**2))\n return np.array(plane)/norm\n\n def cartesian_2_polar(self, plane):\n if not self.plane_normalised(plane):\n raise Exception(\"Input plane contents not normalised\")\n plane_polar = np.zeros(len(plane)-1)\n plane_polar[-1] = plane[-1]\n plane_polar[-2] = np.arcsin(plane[-2])\n for i in range(len(plane_polar)-3, -1, -1):\n plane_polar[i] = np.arcsin(plane[i+1] / np.prod(np.cos(plane_polar[i+1:-1])))\n return plane_polar\n\n def polar_2_cartesian(self, plane_polar):\n plane = np.zeros(len(plane_polar)+1)\n plane[0] = np.prod(np.cos(plane_polar[0:-1]))\n for i in range(1, len(plane)-2):\n plane[i] = np.sin(plane_polar[i-1]) * np.prod(np.cos(plane_polar[i:-1]))\n plane[-2] = np.sin(plane_polar[-2])\n plane[-1] = plane_polar[-1]\n return plane\n\n def __reflection_cost__(self, plane_polar, im):\n plane = self.polar_2_cartesian(plane_polar)\n imN = self.reflect_image(plane, im)\n cost = np.mean(np.abs(sitk.GetArrayFromImage(im-imN)))\n return cost\n\n def reflect_image(self, plane, im):\n trans = self.reflection_transform(plane)\n imN = sitk.Resample(im, im, trans, sitk.sitkLinear, 0.0, im.GetPixelID())\n return imN\n\n def plane_of_reflection(self, im, plane=None, levels=(2, 0)):\n if plane is None:\n plane = np.zeros(len(im.GetSize())+1)\n plane[0] = 1.0\n if not self.plane_normalised(plane):\n raise Exception(\"Input plane is not normalised\")\n origin = im.GetOrigin()\n shape = np.array(im.GetSize())\n spacing = np.array(im.GetSpacing())\n plane_polar = self.cartesian_2_polar(plane)\n for level in levels:\n arr = wavedecn(sitk.GetArrayFromImage(im), 'db1', level=level)[0]\n im_ = sitk.GetImageFromArray(arr)\n im_.SetSpacing(shape / arr.shape[::-1] * spacing)\n im_.SetOrigin(origin + 0.5 * (im_.GetSpacing() - spacing))\n plane_polar = minimize(self.__reflection_cost__, plane_polar, (im_), method='Nelder-Mead', tol=1e-10).x\n plane = self.polar_2_cartesian(plane_polar)\n return plane\n\n def reflection_matrix(self, plane):\n mat = np.zeros((len(plane), len(plane)))\n for i in range(len(plane)-1):\n for j in range(len(plane)):\n if i == j:\n mat[i, j] = 1 - 2 * plane[i] * plane[j]\n else:\n mat[i, j] = - 2 * plane[i] * plane[j]\n mat[-1, -1] = 1.0\n return mat\n\n def reflection_transform(self, plane):\n trans_arr = self.reflection_matrix(plane)\n trans = sitk.AffineTransform(len(plane)-1)\n trans_params = []\n for i in range(len(plane)-1):\n trans_params = np.r_[trans_params, trans_arr[i, 0:-1].ravel()]\n trans_params = np.r_[trans_params, trans_arr[0:-1, -1].ravel()]\n trans.SetParameters(trans_params)\n return trans\n\n def plane_2d(self, x, plane):\n a = plane[0]\n b = plane[1]\n c = plane[2]\n return (a * x + c) / (-1. 
* b)\n\n def plane(self, X, plane):\n d = plane[-1]\n plane = plane[0:-1]\n return (np.einsum(\"ij,j->i\", X, plane[0:-2]) + d)/(-1.*plane[-1])\n\n\nif __name__ == \"__main__\":\n\n from scipy.misc import face\n import matplotlib.pyplot as pl\n\n image_sym = ImageSymmetry()\n\n # Create a mock image with symmetry\n arr = face(gray=True).astype('float')\n arr = np.pad(arr, ((arr.shape[0], arr.shape[0]), (arr.shape[1], arr.shape[1])), 'constant', constant_values=0.0)\n\n im = sitk.GetImageFromArray(arr)\n im.SetOrigin((-arr.shape[1]/2, -arr.shape[0]/2))\n plane = image_sym.normalise_plane([1.0, 0.5, 100])\n trans = image_sym.reflection_transform(plane)\n im_reflected = sitk.Resample(im, im, trans, sitk.sitkLinear, 0.0, im.GetPixelID())\n im = im + im_reflected\n\n # Initialise the plane as something different and try to fit\n plane_init = [0.80, 0.7, 0.22]\n plane_init = image_sym.normalise_plane(plane_init)\n plane_est = image_sym.plane_of_reflection(im, plane_init, levels=[4])\n print('Initial plane: ', plane_init)\n print('Estimated plane: ', plane_est)\n print('True plane: ', plane)\n\n # Show the result\n f = pl.figure()\n pl.imshow(sitk.GetArrayFromImage(im),\n cmap = 'gray',\n origin='lower',\n extent = (-arr.shape[1]/2, arr.shape[1]/2, -arr.shape[0]/2, arr.shape[0]/2))\n x = np.linspace(-arr.shape[1]/2, arr.shape[1]/2, 100)\n y = image_sym.plane_2d(x, plane)\n pl.plot(x, y, 'r-', label = \"Truth\")\n y_ = image_sym.plane_2d(x, plane_init)\n pl.plot(x, y_, 'b-', label = \"Init.\")\n y__ = image_sym.plane_2d(x, plane_est)\n pl.plot(x, y__, 'g--', label = \"Est.\")\n pl.plot((0, 0), (0, 0), 'ro')\n pl.xlim(-arr.shape[1]/2, arr.shape[1]/2)\n pl.ylim(-arr.shape[0]/2, arr.shape[0]/2)\n pl.legend(loc = 1)\n pl.show()\n"
] | [
[
"matplotlib.pyplot.legend",
"numpy.pad",
"numpy.linspace",
"numpy.arcsin",
"numpy.einsum",
"matplotlib.pyplot.ylim",
"scipy.misc.face",
"numpy.cos",
"numpy.sin",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.xlim",
"scipy.optimize.minimize",
"numpy.array",
"matplotlib.pyplot.show",
"numpy.sum",
"matplotlib.pyplot.figure"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"1.6",
"0.14",
"1.10",
"0.15",
"1.4",
"1.3",
"1.9",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"0.12",
"1.0",
"0.17",
"0.16",
"1.8"
],
"tensorflow": []
}
] |
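Note on the row above: image_symmetry.py builds its reflection transform from a plane given as a unit normal plus offset (n·x + d = 0). A compact NumPy restatement of the homogeneous matrix that its reflection_matrix() constructs, assuming the same plane convention (sketch only, not part of the stored file):

import numpy as np

def reflection_matrix(plane):
    # plane = [n_0, ..., n_{k-1}, d] with the normal n already normalised
    n, d = np.asarray(plane[:-1], dtype=float), float(plane[-1])
    k = len(n)
    mat = np.eye(k + 1)
    mat[:k, :k] = np.eye(k) - 2.0 * np.outer(n, n)   # Householder block
    mat[:k, -1] = -2.0 * d * n                       # translation part
    return mat

plane = [1.0, 0.0, -2.0]              # the line x = 2 in 2-D
p = np.array([5.0, 1.0, 1.0])         # homogeneous point (5, 1)
print(reflection_matrix(plane) @ p)   # [-1.  1.  1.] -> reflected to (-1, 1)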
shbe-aau/multi-pose-estimation | [
"22cea6cd09684fe655fb2214bc14856f589048e1",
"22cea6cd09684fe655fb2214bc14856f589048e1",
"22cea6cd09684fe655fb2214bc14856f589048e1"
] | [
"multi-pose/utils/sundermeyer/pysixd/view_sampler.py",
"aae/auto_pose/ae/ae_train.py",
"aae/auto_pose/ae/ae_factory.py"
] | [
"# Author: Tomas Hodan ([email protected])\n# Center for Machine Perception, Czech Technical University in Prague\n\n# Samples views from a sphere.\n\nimport math\nimport numpy as np\n#import transform\nfrom utils.sundermeyer.pysixd import transform\n\ndef calc_2d_bbox(xs, ys, im_size):\n bbTL = (max(xs.min() - 1, 0),\n max(ys.min() - 1, 0))\n bbBR = (min(xs.max() + 1, im_size[0] - 1),\n min(ys.max() + 1, im_size[1] - 1))\n return [bbTL[0], bbTL[1], bbBR[0] - bbTL[0], bbBR[1] - bbTL[1]]\n\n\n\ndef hinter_sampling(min_n_pts, radius=1):\n '''\n Sphere sampling based on refining icosahedron as described in:\n Hinterstoisser et al., Simultaneous Recognition and Homography Extraction of\n Local Patches with a Simple Linear Classifier, BMVC 2008\n\n :param min_n_pts: Minimum required number of points on the whole view sphere.\n :param radius: Radius of the view sphere.\n :return: 3D points on the sphere surface and a list that indicates on which\n refinement level the points were created.\n '''\n\n # Get vertices and faces of icosahedron\n a, b, c = 0.0, 1.0, (1.0 + math.sqrt(5.0)) / 2.0\n pts = [(-b, c, a), (b, c, a), (-b, -c, a), (b, -c, a), (a, -b, c), (a, b, c),\n (a, -b, -c), (a, b, -c), (c, a, -b), (c, a, b), (-c, a, -b), (-c, a, b)]\n faces = [(0, 11, 5), (0, 5, 1), (0, 1, 7), (0, 7, 10), (0, 10, 11), (1, 5, 9),\n (5, 11, 4), (11, 10, 2), (10, 7, 6), (7, 1, 8), (3, 9, 4), (3, 4, 2),\n (3, 2, 6), (3, 6, 8), (3, 8, 9), (4, 9, 5), (2, 4, 11), (6, 2, 10),\n (8, 6, 7), (9, 8, 1)]\n\n # Refinement level on which the points were created\n pts_level = [0 for _ in range(len(pts))]\n\n ref_level = 0\n while len(pts) < min_n_pts:\n ref_level += 1\n edge_pt_map = {} # Mapping from an edge to a newly added point on that edge\n faces_new = [] # New set of faces\n\n # Each face is replaced by 4 new smaller faces\n for face in faces:\n pt_inds = list(face) # List of point IDs involved in the new faces\n for i in range(3):\n # Add a new point if this edge hasn't been processed yet,\n # or get ID of the already added point.\n edge = (face[i], face[(i + 1) % 3])\n edge = (min(edge), max(edge))\n if edge not in list(edge_pt_map.keys()):\n pt_new_id = len(pts)\n edge_pt_map[edge] = pt_new_id\n pt_inds.append(pt_new_id)\n\n pt_new = 0.5 * (np.array(pts[edge[0]]) + np.array(pts[edge[1]]))\n pts.append(pt_new.tolist())\n pts_level.append(ref_level)\n else:\n pt_inds.append(edge_pt_map[edge])\n\n # Replace the current face with 4 new faces\n faces_new += [(pt_inds[0], pt_inds[3], pt_inds[5]),\n (pt_inds[3], pt_inds[1], pt_inds[4]),\n (pt_inds[3], pt_inds[4], pt_inds[5]),\n (pt_inds[5], pt_inds[4], pt_inds[2])]\n faces = faces_new\n\n # Project the points to a sphere\n pts = np.array(pts)\n pts *= np.reshape(radius / np.linalg.norm(pts, axis=1), (pts.shape[0], 1))\n\n # Collect point connections\n pt_conns = {}\n for face in faces:\n for i in range(len(face)):\n pt_conns.setdefault(face[i], set()).add(face[(i + 1) % len(face)])\n pt_conns[face[i]].add(face[(i + 2) % len(face)])\n\n # Order the points - starting from the top one and adding the connected points\n # sorted by azimuth\n top_pt_id = np.argmax(pts[:, 2])\n pts_ordered = []\n pts_todo = [top_pt_id]\n pts_done = [False for _ in range(pts.shape[0])]\n\n def calc_azimuth(x, y):\n two_pi = 2.0 * math.pi\n return (math.atan2(y, x) + two_pi) % two_pi\n\n while len(pts_ordered) != pts.shape[0]:\n # Sort by azimuth\n pts_todo = sorted(pts_todo, key=lambda i: calc_azimuth(pts[i][0], pts[i][1]))\n pts_todo_new = []\n for pt_id in pts_todo:\n 
pts_ordered.append(pt_id)\n pts_done[pt_id] = True\n pts_todo_new += [i for i in pt_conns[pt_id]] # Find the connected points\n\n # Points to be processed in the next iteration\n pts_todo = [i for i in set(pts_todo_new) if not pts_done[i]]\n\n # Re-order the points and faces\n pts = pts[np.array(pts_ordered), :]\n pts_level = [pts_level[i] for i in pts_ordered]\n pts_order = np.zeros((pts.shape[0],))\n pts_order[np.array(pts_ordered)] = np.arange(pts.shape[0])\n for face_id in range(len(faces)):\n faces[face_id] = [pts_order[i] for i in faces[face_id]]\n\n # import inout\n # inout.save_ply('output/hinter_sampling.ply', pts=pts, faces=np.array(faces))\n\n return pts, pts_level\n\ndef sample_views(min_n_views, radius=1,\n azimuth_range=(0, 2 * math.pi),\n elev_range=(-0.5 * math.pi, 0.5 * math.pi)):\n '''\n Viewpoint sampling from a view sphere.\n\n :param min_n_views: Minimum required number of views on the whole view sphere.\n :param radius: Radius of the view sphere.\n :param azimuth_range: Azimuth range from which the viewpoints are sampled.\n :param elev_range: Elevation range from which the viewpoints are sampled.\n :return: List of views, each represented by a 3x3 rotation matrix and\n a 3x1 translation vector.\n '''\n\n # Get points on a sphere\n if True:\n pts, pts_level = hinter_sampling(min_n_views, radius=radius)\n else:\n pts = fibonacci_sampling(min_n_views + 1, radius=radius)\n pts_level = [0 for _ in range(len(pts))]\n\n views = []\n for pt in pts:\n # Azimuth from (0, 2 * pi)\n azimuth = math.atan2(pt[1], pt[0])\n if azimuth < 0:\n azimuth += 2.0 * math.pi\n\n # Elevation from (-0.5 * pi, 0.5 * pi)\n a = np.linalg.norm(pt)\n b = np.linalg.norm([pt[0], pt[1], 0])\n elev = math.acos(b / a)\n if pt[2] < 0:\n elev = -elev\n\n # if hemisphere and (pt[2] < 0 or pt[0] < 0 or pt[1] < 0):\n if not (azimuth_range[0] <= azimuth <= azimuth_range[1] and\n elev_range[0] <= elev <= elev_range[1]):\n continue\n\n # Rotation matrix\n # The code was adopted from gluLookAt function (uses OpenGL coordinate system):\n # [1] http://stackoverflow.com/questions/5717654/glulookat-explanation\n # [2] https://www.opengl.org/wiki/GluLookAt_code\n f = -np.array(pt) # Forward direction\n f /= np.linalg.norm(f)\n u = np.array([0.0, 0.0, 1.0]) # Up direction\n s = np.cross(f, u) # Side direction\n if np.count_nonzero(s) == 0:\n # f and u are parallel, i.e. we are looking along or against Z axis\n s = np.array([1.0, 0.0, 0.0])\n s /= np.linalg.norm(s)\n u = np.cross(s, f) # Recompute up\n R = np.array([[s[0], s[1], s[2]],\n [u[0], u[1], u[2]],\n [-f[0], -f[1], -f[2]]])\n\n # Convert from OpenGL to OpenCV coordinate system\n R_yz_flip = transform.rotation_matrix(math.pi, [1, 0, 0])[:3, :3]\n R = R_yz_flip.dot(R)\n\n # Translation vector\n t = -R.dot(np.array(pt).reshape((3, 1)))\n\n views.append({'R': R, 't': t})\n\n return views, pts_level\n",
" # -*- coding: utf-8 -*-\nimport os\nimport configparser\nimport argparse\nimport numpy as np\nimport signal\nimport shutil\nimport cv2\n\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'\nimport progressbar\nimport tensorflow as tf\n\nfrom auto_pose.ae import ae_factory as factory\nfrom auto_pose.ae import utils as u\n\ntry:\n range = xrange\nexcept NameError:\n # when running on Python3\n pass\n\n\ndef main():\n workspace_path = os.environ.get('AE_WORKSPACE_PATH')\n\n if workspace_path is None:\n print('Please define a workspace path:\\n')\n print('export AE_WORKSPACE_PATH=/path/to/workspace\\n')\n exit(-1)\n\n gentle_stop = np.array((1,), dtype=np.bool)\n gentle_stop[0] = False\n def on_ctrl_c(signal, frame):\n gentle_stop[0] = True\n signal.signal(signal.SIGINT, on_ctrl_c)\n\n parser = argparse.ArgumentParser()\n parser.add_argument(\"experiment_name\")\n parser.add_argument(\"-d\", action='store_true', default=False)\n parser.add_argument(\"-gen\", action='store_true', default=False)\n parser.add_argument('--at_step', default=None, type=int, required=False)\n\n arguments = parser.parse_args()\n\n full_name = arguments.experiment_name.split('/')\n \n experiment_name = full_name.pop()\n experiment_group = full_name.pop() if len(full_name) > 0 else ''\n \n debug_mode = arguments.d\n generate_data = arguments.gen\n at_step = arguments.at_step\n\n cfg_file_path = u.get_config_file_path(workspace_path, experiment_name, experiment_group)\n log_dir = u.get_log_dir(workspace_path, experiment_name, experiment_group)\n checkpoint_file = u.get_checkpoint_basefilename(log_dir)\n ckpt_dir = u.get_checkpoint_dir(log_dir)\n train_fig_dir = u.get_train_fig_dir(log_dir)\n dataset_path = u.get_dataset_path(workspace_path)\n \n if not os.path.exists(cfg_file_path):\n print('Could not find config file:\\n')\n print(('{}\\n'.format(cfg_file_path)))\n exit(-1)\n \n if not os.path.exists(ckpt_dir):\n os.makedirs(ckpt_dir)\n if not os.path.exists(train_fig_dir):\n os.makedirs(train_fig_dir)\n if not os.path.exists(dataset_path):\n os.makedirs(dataset_path)\n\n args = configparser.ConfigParser(inline_comment_prefixes=\"#\")\n args.read(cfg_file_path)\n\n shutil.copy2(cfg_file_path, log_dir)\n\n num_iter = args.getint('Training', 'NUM_ITER') if not debug_mode else np.iinfo(np.int32).max\n save_interval = args.getint('Training', 'SAVE_INTERVAL')\n num_gpus = args.getint('Training', 'NUM_GPUS')\n\n with tf.device('/device:CPU:0'): \n with tf.variable_scope(experiment_name, reuse=tf.AUTO_REUSE):\n \n dataset = factory.build_dataset(dataset_path, args)\n multi_queue = factory.build_multi_queue(dataset, args)\n if generate_data:\n # dataset.load_bg_images(dataset_path)\n multi_queue.create_tfrecord_training_images(dataset_path, args)\n print('finished generating training images')\n exit()\n\n dev_splits = np.array_split(np.arange(multi_queue._num_objects), num_gpus)\n\n iterator = multi_queue.create_iterator(dataset_path, args)\n\n all_x, all_y = list(zip(*[(inp[0], inp[2]) for inp in multi_queue.next_element]))\n all_x, all_y = tf.concat(all_x, axis=0), tf.concat(all_y, axis=0)\n print(all_x.shape)\n encoding_splits = []\n for dev in range(num_gpus):\n with tf.device('/device:GPU:%s' % dev): \n sta = dev_splits[dev][0] * multi_queue._batch_size\n end = (dev_splits[dev][-1]+1) * multi_queue._batch_size\n print(sta, end)\n encoder = factory.build_encoder(all_x[sta:end], args, target=all_y[sta:end], is_training=True)\n encoding_splits.append(tf.split(encoder.z, len(dev_splits[dev]),0))\n\n with 
tf.variable_scope(experiment_name):\n decoders = []\n for dev in range(num_gpus): \n with tf.device('/device:GPU:%s' % dev): \n for j,i in enumerate(dev_splits[dev]):\n print(len(encoding_splits))\n decoders.append(factory.build_decoder(multi_queue.next_element[i], encoding_splits[dev][j], args, is_training=True, idx=i))\n \n ae = factory.build_ae(encoder, decoders, args)\n codebook = factory.build_codebook(encoder, dataset, args)\n train_op = factory.build_train_op(ae, args)\n saver = tf.train.Saver(save_relative_paths=True, max_to_keep=1)\n\n # dataset.get_training_images(dataset_path, args)\n # dataset.load_bg_images(dataset_path)\n multi_queue.create_tfrecord_training_images(dataset_path, args)\n\n if generate_data:\n print(('finished generating synthetic training data for ' + experiment_name))\n print('exiting...')\n exit()\n\n\n widgets = ['Training: ', progressbar.Percentage(),\n ' ', progressbar.Bar(),\n ' ', progressbar.Counter(), ' / %s' % num_iter,\n ' ', progressbar.ETA(), ' ']\n bar = progressbar.ProgressBar(maxval=num_iter,widgets=widgets)\n\n\n gpu_options = tf.GPUOptions(allow_growth=True, per_process_gpu_memory_fraction = 0.9)\n config = tf.ConfigProto(gpu_options=gpu_options,allow_soft_placement=True)\n\n with tf.Session(config=config) as sess:\n\n sess.run(multi_queue.bg_img_init.initializer)\n sess.run(iterator.initializer)\n \n u.create_summaries(multi_queue, decoders, ae)\n merged_loss_summary = tf.summary.merge_all()\n summary_writer = tf.summary.FileWriter(ckpt_dir, sess.graph)\n\n chkpt = tf.train.get_checkpoint_state(ckpt_dir)\n if chkpt and chkpt.model_checkpoint_path:\n if at_step is None:\n # checkpoint_file_basename = u.get_checkpoint_basefilename(log_dir,latest=args.getint('Training', 'NUM_ITER'))\n checkpoint_file_basename = chkpt.model_checkpoint_path\n else:\n checkpoint_file_basename = u.get_checkpoint_basefilename(log_dir,latest=at_step)\n print(('loading ', checkpoint_file_basename))\n saver.restore(sess, checkpoint_file_basename)\n # except:\n # print 'loading ', chkpt.model_checkpoint_path\n # saver.restore(sess, chkpt.model_checkpoint_path)\n else: \n if encoder._pre_trained_model != 'False':\n encoder.saver.restore(sess, encoder._pre_trained_model)\n all_vars = set([var for var in tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)])\n var_list = all_vars.symmetric_difference([v[1] for v in list(encoder.fil_var_list.items())])\n sess.run(tf.variables_initializer(var_list))\n print(sess.run(tf.report_uninitialized_variables()))\n else:\n sess.run(tf.global_variables_initializer())\n\n if not debug_mode:\n print(('Training with %s model' % args.get('Dataset','MODEL'), os.path.basename(args.get('Paths','MODEL_PATH'))))\n bar.start()\n\n for i in range(encoder.global_step.eval(), num_iter):\n if not debug_mode:\n # print 'before optimize'\n sess.run([train_op,multi_queue.next_bg_element])\n # print 'after optimize'\n if (i+1) % 100 == 0:\n merged_summaries = sess.run(merged_loss_summary)\n summary_writer.add_summary(merged_summaries, i)\n\n bar.update(i)\n\n if (i+1) % save_interval == 0:\n saver.save(sess, checkpoint_file, global_step=encoder.global_step)\n\n # this_x, this_y = sess.run([queue.x, queue.y])\n # reconstr_train = sess.run(decoder.x,feed_dict={queue.x:this_x})\n\n this, reconstr_train = sess.run([multi_queue.next_element,[decoder.x for decoder in decoders]])\n this_x = np.concatenate([el[0] for el in this])\n this_y = np.concatenate([el[2] for el in this])\n # reconstr_train = sess.run(,feed_dict={queue.x:this_x})\n reconstr_train = 
np.concatenate(reconstr_train)\n for imgs in [this_x,this_y,reconstr_train]:\n np.random.seed(0)\n np.random.shuffle(imgs)\n train_imgs = np.hstack(( u.tiles(this_x, 4, 4), u.tiles(reconstr_train, 4,4),u.tiles(this_y, 4, 4)))\n cv2.imwrite(os.path.join(train_fig_dir,'training_images_%s.png' % i), train_imgs*255)\n else:\n \n this,_,reconstr_train = sess.run([multi_queue.next_element,multi_queue.next_bg_element,[decoder.x for decoder in decoders]])\n\n this_x = np.concatenate([el[0] for el in this])\n this_y = np.concatenate([el[2] for el in this])\n print(this_x.shape, reconstr_train[0].shape, len(reconstr_train))\n reconstr_train = np.concatenate(reconstr_train,axis=0)\n for imgs in [this_x,this_y,reconstr_train]:\n np.random.seed(0)\n np.random.shuffle(imgs)\n print(this_x.shape)\n cv2.imshow('sample batch', np.hstack(( u.tiles(this_x, 4, 6), u.tiles(reconstr_train, 4,6),u.tiles(this_y, 4, 6))) )\n k = cv2.waitKey(0)\n if k == 27:\n break\n\n if gentle_stop[0]:\n break\n\n if not debug_mode:\n bar.finish()\n if not gentle_stop[0] and not debug_mode:\n print('To create the embedding run:\\n')\n print(('ae_embed {}\\n'.format(full_name)))\n\nif __name__ == '__main__':\n main()\n \n",
"# -*- coding: utf-8 -*-\nimport tensorflow as tf\nfrom tensorflow.contrib.framework.python.framework import checkpoint_utils\n\nfrom .dataset import Dataset\nfrom .queue import Queue\nfrom .multi_queue import MultiQueue\nfrom .ae import AE\nfrom .encoder import Encoder\nfrom .decoder import Decoder\nfrom .codebook_multi import Codebook\n\n\ndef build_dataset(dataset_path, args):\n dataset_args = { k:v for k,v in \n args.items('Dataset') + \n args.items('Paths') + \n args.items('Augmentation')+ \n args.items('Queue') +\n args.items('Embedding')}\n dataset = Dataset(dataset_path, **dataset_args)\n return dataset\n\ndef build_queue(dataset, args):\n NUM_THREADS = args.getint('Queue', 'NUM_THREADS')\n QUEUE_SIZE = args.getint('Queue', 'QUEUE_SIZE')\n BATCH_SIZE = args.getint('Training', 'BATCH_SIZE')\n queue = Queue(\n dataset, \n NUM_THREADS, \n QUEUE_SIZE, \n BATCH_SIZE\n )\n return queue\n\ndef build_multi_queue(dataset, args):\n BATCH_SIZE = args.getint('Training', 'BATCH_SIZE')\n SHAPE = (args.getint('Dataset', 'W'), args.getint('Dataset', 'H'), args.getint('Dataset', 'C'))\n NOOF_TRAINING_IMGS = args.getint('Dataset', 'NOOF_TRAINING_IMGS')\n MODEL_PATHS = eval(args.get('Paths', 'MODEL_PATH'))\n AUG_ARGS = { k:v for k,v in args.items('Augmentation')}\n queue = MultiQueue(\n dataset, \n BATCH_SIZE,\n NOOF_TRAINING_IMGS,\n MODEL_PATHS,\n SHAPE,\n AUG_ARGS\n )\n return queue\n\ndef build_encoder(x, args, target=None, is_training=False):\n LATENT_SPACE_SIZE = args.getint('Network', 'LATENT_SPACE_SIZE')\n NUM_FILTER = eval(args.get('Network', 'NUM_FILTER'))\n KERNEL_SIZE_ENCODER = args.getint('Network', 'KERNEL_SIZE_ENCODER')\n STRIDES = eval(args.get('Network', 'STRIDES'))\n BATCH_NORM = args.getboolean('Network', 'BATCH_NORMALIZATION')\n RESNET50 = args.getboolean('Network', 'RESNET50')\n RESNET101 = args.getboolean('Network', 'RESNET101')\n ASPP = eval(args.get('Network', 'ASPP'))\n PRE_TRAINED_MODEL = args.get('Training', 'PRE_TRAINED_MODEL')\n EMB_INVARIANCE_LOSS = args.getfloat('Network', 'EMB_INVARIANCE_LOSS')\n\n if target is not None and EMB_INVARIANCE_LOSS > 0:\n x = tf.concat((x, target), axis=0)\n\n encoder = Encoder(\n x,\n LATENT_SPACE_SIZE, \n NUM_FILTER, \n KERNEL_SIZE_ENCODER, \n STRIDES,\n BATCH_NORM,\n RESNET50,\n RESNET101,\n ASPP,\n PRE_TRAINED_MODEL,\n EMB_INVARIANCE_LOSS,\n is_training=is_training\n )\n return encoder\n\ndef build_decoder(reconstruction_target, encoder_z_split, args, is_training=False,idx=0):\n NUM_FILTER = eval(args.get('Network', 'NUM_FILTER'))\n KERNEL_SIZE_DECODER = args.getint('Network', 'KERNEL_SIZE_DECODER')\n STRIDES = eval(args.get('Network', 'STRIDES'))\n LOSS = args.get('Network', 'LOSS')\n BOOTSTRAP_RATIO = args.getint('Network', 'BOOTSTRAP_RATIO')\n VARIATIONAL = args.getfloat('Network', 'VARIATIONAL') if is_training else False\n AUXILIARY_MASK = args.getboolean('Network', 'AUXILIARY_MASK')\n BATCH_NORM = args.getboolean('Network', 'BATCH_NORMALIZATION')\n decoder = Decoder(\n reconstruction_target,\n encoder_z_split,\n list( reversed(NUM_FILTER) ),\n KERNEL_SIZE_DECODER,\n list( reversed(STRIDES) ),\n LOSS,\n BOOTSTRAP_RATIO,\n AUXILIARY_MASK,\n BATCH_NORM,\n is_training=is_training,\n idx=idx\n )\n return decoder\n\ndef build_ae(encoder, decoder, args):\n NORM_REGULARIZE = args.getfloat('Network', 'NORM_REGULARIZE')\n VARIATIONAL = args.getfloat('Network', 'VARIATIONAL')\n EMB_INVARIANCE_LOSS = args.getfloat('Network', 'EMB_INVARIANCE_LOSS')\n ae = AE(encoder, decoder, NORM_REGULARIZE, VARIATIONAL, EMB_INVARIANCE_LOSS)\n return ae\n\ndef 
build_train_op(ae, args):\n import tensorflow as tf\n\n LEARNING_RATE = args.getfloat('Training', 'LEARNING_RATE')\n LEARNING_RATE_SCHEDULE = args.get('Training','LEARNING_RATE_SCHEDULE')\n LAYERS_TO_FREEZE = eval(args.get('Training', 'LAYERS_TO_FREEZE'))\n\n if LEARNING_RATE_SCHEDULE=='poly':\n FINAL_LEARNING_RATE = args.getfloat('Training','FINAL_LEARNING_RATE')\n NUM_ITER = args.getfloat('Training','NUM_ITER')\n print('using poly learning rate schedule')\n LEARNING_RATE = tf.train.polynomial_decay(LEARNING_RATE, ae._encoder.global_step,\n NUM_ITER, FINAL_LEARNING_RATE, power=0.9)\n \n\n OPTIMIZER_NAME = args.get('Training', 'OPTIMIZER')\n\n optimizer = eval('tf.train.{}Optimizer'.format(OPTIMIZER_NAME))\n optim = optimizer(LEARNING_RATE)\n if len(LAYERS_TO_FREEZE)>0:\n freeze_vars = []\n all_vars = set([var for var in tf.trainable_variables()])\n for layer_to_freeze in LAYERS_TO_FREEZE:\n freeze_vars += [v for v in all_vars if layer_to_freeze in v.name]\n train_vars = list(all_vars.symmetric_difference(freeze_vars))\n train_op = tf.contrib.training.create_train_op(ae.loss, \n optim, \n global_step=ae._encoder.global_step, \n variables_to_train=train_vars,\n colocate_gradients_with_ops=True)\n else:\n train_op = tf.contrib.training.create_train_op(ae.loss, \n optim, \n global_step=ae._encoder.global_step, \n colocate_gradients_with_ops=True)\n\n return train_op\n\ndef build_codebook(encoder, dataset, args):\n embed_bb = args.getboolean('Embedding', 'EMBED_BB')\n from .codebook import Codebook\n codebook = Codebook(encoder, dataset, embed_bb)\n return codebook\n\ndef build_codebook_multi(encoder, dataset, args, checkpoint_file_basename=None):\n embed_bb = args.getboolean('Embedding', 'EMBED_BB')\n\n existing_embs = []\n if checkpoint_file_basename is not None:\n var_list = checkpoint_utils.list_variables(checkpoint_file_basename)\n for v in var_list:\n if 'embedding_normalized_' in v[0]:\n print(v)\n existing_embs.append(v[0].split('/embedding_normalized_')[-1].split('.')[0])\n\n print(existing_embs)\n codebook = Codebook(encoder, dataset, embed_bb, existing_embs)\n return codebook\n\ndef build_codebook_from_name(experiment_name, experiment_group='', return_dataset=False, return_decoder = False, joint=False):\n import os\n import configparser\n workspace_path = os.environ.get('AE_WORKSPACE_PATH')\n\n if workspace_path == None:\n print('Please define a workspace path:\\n')\n print('export AE_WORKSPACE_PATH=/path/to/workspace\\n')\n exit(-1)\n\n from . 
import utils as u\n import tensorflow as tf\n\n log_dir = u.get_log_dir(workspace_path, experiment_name, experiment_group)\n cfg_file_path = u.get_train_config_exp_file_path(log_dir, experiment_name)\n dataset_path = u.get_dataset_path(workspace_path)\n\n if os.path.exists(cfg_file_path):\n args = configparser.ConfigParser(inline_comment_prefixes=\"#\")\n args.read(cfg_file_path)\n else:\n print(('ERROR: Config File not found: ', cfg_file_path))\n exit()\n\n if joint:\n checkpoint_file = u.get_checkpoint_basefilename(log_dir, joint=joint, latest=args.getint('Training', 'NUM_ITER'))\n else:\n checkpoint_file = u.get_checkpoint_basefilename(log_dir, joint=joint)\n\n with tf.variable_scope(experiment_name):\n dataset = build_dataset(dataset_path, args)\n x = tf.placeholder(tf.float32, [None,] + list(dataset.shape))\n encoder = build_encoder(x, args)\n if joint:\n codebook = build_codebook_multi(encoder, dataset, args, checkpoint_file)\n else:\n codebook = build_codebook(encoder, dataset, args)\n\n if return_decoder:\n reconst_target = tf.placeholder(tf.float32, [None,] + list(dataset.shape))\n decoder = build_decoder(reconst_target, encoder, args)\n\n if return_dataset:\n if return_decoder:\n return codebook, dataset, decoder\n else:\n return codebook, dataset\n else:\n return codebook\n\n\ndef restore_checkpoint(session, saver, ckpt_dir, at_step=None):\n\n import tensorflow as tf\n import os\n\n chkpt = tf.train.get_checkpoint_state(ckpt_dir)\n\n if chkpt and chkpt.model_checkpoint_path:\n if at_step is None:\n saver.restore(session, chkpt.model_checkpoint_path)\n else:\n for ckpt_path in chkpt.all_model_checkpoint_paths:\n \n if str(at_step) in str(ckpt_path):\n saver.restore(session, ckpt_path)\n print(('restoring' , os.path.basename(ckpt_path)))\n else:\n print('No checkpoint found. Expected one in:\\n')\n print(('{}\\n'.format(ckpt_dir)))\n exit(-1)\n\n"
] | [
[
"numpy.arange",
"numpy.linalg.norm",
"numpy.argmax",
"numpy.count_nonzero",
"numpy.cross",
"numpy.array",
"numpy.zeros"
],
[
"tensorflow.device",
"tensorflow.concat",
"tensorflow.variables_initializer",
"numpy.concatenate",
"tensorflow.GPUOptions",
"numpy.iinfo",
"tensorflow.get_collection",
"numpy.arange",
"tensorflow.report_uninitialized_variables",
"tensorflow.ConfigProto",
"tensorflow.Session",
"tensorflow.train.Saver",
"tensorflow.global_variables_initializer",
"tensorflow.summary.merge_all",
"numpy.array",
"tensorflow.train.get_checkpoint_state",
"tensorflow.summary.FileWriter",
"numpy.random.seed",
"numpy.random.shuffle",
"tensorflow.variable_scope"
],
[
"tensorflow.train.get_checkpoint_state",
"tensorflow.train.polynomial_decay",
"tensorflow.contrib.framework.python.framework.checkpoint_utils.list_variables",
"tensorflow.concat",
"tensorflow.contrib.training.create_train_op",
"tensorflow.trainable_variables",
"tensorflow.variable_scope"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
}
] |
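Note on the row above: in its view_sampler.py snippet, each sampled sphere point is filtered by azimuth and elevation before a rotation matrix is built. A small standalone restatement of that angle computation (not an import from the repo):

import math
import numpy as np

def azimuth_elevation(pt):
    # azimuth in [0, 2*pi), elevation in [-pi/2, pi/2], as in sample_views()
    azimuth = math.atan2(pt[1], pt[0]) % (2.0 * math.pi)
    a = np.linalg.norm(pt)
    b = np.linalg.norm([pt[0], pt[1], 0.0])
    elev = math.acos(b / a)
    return azimuth, (elev if pt[2] >= 0 else -elev)

print(azimuth_elevation([0.0, 1.0, 1.0]))   # (pi/2, pi/4)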
burcgokden/SDPA-Transformer-Wrapper | [
"81371d1bd7d9ae26a70a549740539242f1a76199"
] | [
"nmt_data_prep.py"
] | [
"\nimport logging\n\nimport tensorflow_datasets as tfds\nimport tensorflow as tf\nimport tensorflow_text as text\n\nlogging.getLogger('tensorflow').setLevel(logging.ERROR)\n\n\nclass src_tgt_data_prep:\n '''\n Prepares data for encoder-decoder architecture for machine translation task\n default inputs are for portuguese to english dataset from TED Talks Open Translation Project.\n '''\n def __init__(self,\n src_lang='pt',\n tgt_lang='en',\n BUFFER_SIZE=20000,\n BATCH_SIZE = 64,\n dataset_file='ted_hrlr_translate/pt_to_en',\n load_dataset=True,\n train_percent=None,\n model_name = \"./ted_hrlr_translate_pt_en_tokenizer\",\n revert_order=False,\n shuffle_set=True,\n shuffle_files=True,\n MAX_LENGTH=None,\n verbose=False):\n '''\n This init method asks for tokenizer source and target object loaded and ready to provide.\n The dataset may have order reverted, this method does the conversion to intended source target order.\n\n Args:\n src_lang: source language abbreviation as string\n tgt_lang: target language abbreviation as string\n BUFFER_SIZE: Buffer size for shuffling\n BATCH_SIZE: batch size for dataset\n dataset_file: path to tensorflow dataset\n load_dataset: if True load the dataset\n train_percent: Percentage of train data to be loaded. 1-100. None loads all training data.\n model_name: file path for tokenizer model.\n revert_order: If True, it reverts the order of language pairs in dataset_file. Reverted order should match\n src_lang/tgt_lang assignment.\n shuffle_set:If True shuffle the dataset while loading\n shuffle_files: shuffle dataset files while loading\n MAX_LENGTH: Maximum number of tokens in each sentence.\n verbose: If True print out more details.\n\n Returns batched, tokenized, filtered train, validation datasets and test dataset. 
Tokenizer methods are accessible\n through instance of this class object\n '''\n\n self.BUFFER_SIZE=BUFFER_SIZE\n self.BATCH_SIZE=BATCH_SIZE\n self.MAX_LENGTH = MAX_LENGTH\n self.model_name = model_name\n self.revert_order=revert_order\n self.src_lang = src_lang\n self.tgt_lang = tgt_lang\n self.tokenizers_src, self.tokenizers_tgt, self.tokenizers = self.load_tokenizer()\n\n #load dataset\n if load_dataset:\n print(\"LOADING DATASET\")\n if train_percent:\n #load only percentage of train data\n examples, metadata = tfds.load(dataset_file,\n split=[f'train[:{train_percent}%]', 'validation', 'test'],\n with_info=True, as_supervised=True, shuffle_files=shuffle_files)\n else:\n #load all data\n examples, metadata = tfds.load(dataset_file,\n split=['train', 'validation', 'test'],\n with_info=True, as_supervised=True, shuffle_files=shuffle_files)\n\n if self.revert_order:\n #revert the order if intended source and target language orders are reversed\n #tokenizer source and tokenizer target are intended values.\n print(f\"REVERTING ORDER OF DATASET TUPLES TO (SRC, TGT) : {self.src_lang},{self.tgt_lang}\")\n self.train_examples = examples[0].map(lambda dsl1, dsl2: [dsl2, dsl1])\n self.val_examples = examples[1].map(lambda dsl1, dsl2: [dsl2, dsl1])\n self.test_examples=examples[2].map(lambda dsl1, dsl2: [dsl2, dsl1])\n self.examples = examples\n self.metadata = metadata\n else:\n print(f\"ORDER OF DATASET TUPLES (SRC, TGT) : {self.src_lang},{self.tgt_lang}\")\n self.train_examples = examples[0]\n self.val_examples = examples[1]\n self.test_examples=examples[2]\n self.examples=None\n self.metadata=metadata\n else:\n print(\"SKIPPED LOADING DATASET\")\n\n #print some info about tokenizer model\n load_tokenizer_model= self.tokenizers_src and self.tokenizers_tgt\n if load_tokenizer_model:\n print(\"SOURCE AND TARGET TOKENIZERS INFO\")\n print(f\"Methods for source lang: {self.src_lang}\")\n print([item for item in dir(self.tokenizers_src) if not item.startswith('_')])\n print((f\"Methods for tgt lang: {self.tgt_lang}\"))\n print([item for item in dir(self.tokenizers_tgt) if not item.startswith('_')])\n else:\n print(\"PLEASE PROVIDE TOKENIZERS CORRECTLY\")\n\n if self.MAX_LENGTH is None:\n #create batched and tokenized datasets.\n print(\"CREATING SHUFFLED BATCHED DATASETS FOR TRAINING AND VALIDATION\")\n self.train_batches=self.make_batches(self.train_examples, map_tokenize=load_tokenizer_model, shuffle_set=shuffle_set)\n self.val_batches=self.make_batches(self.val_examples, map_tokenize=load_tokenizer_model, shuffle_set=False)\n self.test_examples = self.test_examples.prefetch(tf.data.AUTOTUNE)\n else:\n self.train_batches=self.make_padded_batches(self.train_examples, shuffle_set=shuffle_set)\n self.val_batches=self.make_padded_batches(self.val_examples, shuffle_set=False)\n self.test_examples=self.filter_test(self.test_examples)\n if verbose:\n #these operations are very slow so for large datasets should be avoided.\n print(f\"FILTERED BATCHED TRAIN DATASET ELEMENT COUNT: {self.dataset_batch_cardinality(self.train_batches)*self.BATCH_SIZE}\")\n print(f\"FILTERED BATCHED VAL DATASET ELEMENT COUNT: {self.dataset_batch_cardinality(self.val_batches)*self.BATCH_SIZE}\")\n\n @staticmethod\n def dataset_batch_cardinality(ds):\n cnt = 0\n for _ in ds:\n cnt += 1\n return cnt\n\n def filter_test(self, test_ds):\n '''\n The test needs to be first tokenized,\n filter for token length and then detokenized.\n '''\n\n print(f\"ORIGINAL TEST DATASET LENGTH: {len(test_ds)}\")\n\n 
test_ds=test_ds.batch(1).map(self.tokenize_pairs_src_tgt)\n test_ds=test_ds.unbatch().filter(self.filter_max_length)\n test_ds=test_ds.batch(1).map(self.detokenize_pairs_src_tgt)\n test_ds=test_ds.unbatch().prefetch(tf.data.AUTOTUNE)\n\n for ts in test_ds.take(3):\n print(f\"DETOKENIZED TEST SAMPLE LESS THAN LENGTH {self.MAX_LENGTH}: {ts}\")\n print(f\"FILTERED TEST LENGTH: {self.dataset_batch_cardinality(test_ds)}\")\n\n return test_ds\n\n def detokenize_pairs_src_tgt(self, src, tgt):\n\n src = self.tokenizers_src.detokenize(src)\n tgt = self.tokenizers_tgt.detokenize(tgt)\n\n return src, tgt\n\n\n\n def load_tokenizer(self):\n '''\n Run this first to get tokenizers pairs for intended source and target language.\n Returns source tokenizer, target tokenizer and tokenizer object\n '''\n print(f\"LOADING TOKENIZER AT {self.model_name}\")\n tokenizers = tf.saved_model.load(self.model_name)\n print(\"THE TOKENIZER LANGUAGES AVAILABLE ARE:\")\n print([item for item in dir(tokenizers) if not item.startswith('_')])\n tokenizers_src=getattr(tokenizers, self.src_lang, None)\n tokenizers_tgt=getattr(tokenizers, self.tgt_lang, None)\n\n return tokenizers_src, tokenizers_tgt, tokenizers\n\n\n\n def tokenize_pairs_src_tgt(self, src, tgt):\n '''\n Use tokenizer model to create tokenized pairs.\n '''\n src = self.tokenizers_src.tokenize(src)\n # Convert from ragged to dense, padding with zeros.\n src = src.to_tensor()\n\n tgt = self.tokenizers_tgt.tokenize(tgt)\n # Convert from ragged to dense, padding with zeros.\n tgt = tgt.to_tensor()\n\n return src, tgt\n\n def make_batches(self, ds, map_tokenize=True, shuffle_set=True):\n '''\n method to create dataset batches and map each element with tokenizer model\n it takes a dataset that contains lang1, lang2 pairs.\n '''\n #shuffle dataset and make batches\n ds_batched=ds\n if shuffle_set:\n ds_batched = ds_batched.shuffle(self.BUFFER_SIZE)\n\n ds_batched=ds_batched.batch(self.BATCH_SIZE)\n if map_tokenize:\n ds_batched = ds_batched.map(self.tokenize_pairs_src_tgt, num_parallel_calls=tf.data.AUTOTUNE)\n\n ds_batched=ds_batched.prefetch(tf.data.AUTOTUNE)\n print(\"Dataset element spec:\", ds_batched.element_spec)\n\n return ds_batched\n\n def filter_max_length(self, x, y):\n return tf.logical_and(tf.size(x) <= self.MAX_LENGTH,\n tf.size(y) <= self.MAX_LENGTH)\n\n def make_padded_batches(self, ds, shuffle_set=True):\n '''\n If a max length is specified, the dataset is filtered, padded then batched.\n '''\n\n ds_batched = ds.batch(1)\n ds_batched = ds_batched.map(self.tokenize_pairs_src_tgt, num_parallel_calls=tf.data.AUTOTUNE)\n ds_batched=ds_batched.unbatch()\n if shuffle_set:\n ds_batched=ds_batched.shuffle(self.BUFFER_SIZE)\n ds_batched=ds_batched.filter(self.filter_max_length).padded_batch(self.BATCH_SIZE, padded_shapes=(self.MAX_LENGTH, self.MAX_LENGTH))\n ds_batched = ds_batched.prefetch(tf.data.AUTOTUNE)\n\n return ds_batched\n\ndef download_tokenizer_model(model_name = \"ted_hrlr_translate_pt_en_converter\", cache_dir=\".\"):\n '''\n Downloads a pretrained tokenizer model to a cache dir where model can be loaded from.\n Can be used once to download the model. model_name needs to match exactly the name of the model.\n '''\n\n tf.keras.utils.get_file(\n f\"{model_name}.zip\",\n f\"https://storage.googleapis.com/download.tensorflow.org/models/{model_name}.zip\",\n cache_dir=cache_dir, cache_subdir='', extract=True\n )\n"
] | [
[
"tensorflow.size",
"tensorflow.keras.utils.get_file",
"tensorflow.saved_model.load"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
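Note on the row above: a hedged usage sketch for its nmt_data_prep.src_tgt_data_prep class. It assumes the file is on the import path, tensorflow_datasets / tensorflow_text are installed, and that the archive fetched by download_tokenizer_model() is what gets passed as model_name (the extracted folder name may differ):

from nmt_data_prep import src_tgt_data_prep, download_tokenizer_model

# one-off download of the pretrained pt->en tokenizer (default model name)
download_tokenizer_model("ted_hrlr_translate_pt_en_converter", cache_dir=".")

data = src_tgt_data_prep(
    src_lang="pt",
    tgt_lang="en",
    BATCH_SIZE=64,
    model_name="./ted_hrlr_translate_pt_en_converter",  # path to the extracted tokenizer
    MAX_LENGTH=40,   # drop sentence pairs longer than 40 tokens, pad the rest
)

for src, tgt in data.train_batches.take(1):
    print(src.shape, tgt.shape)   # padded (batch, MAX_LENGTH) token-id tensors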
rostyboost/scipy | [
"2f5aa264724099c03772ed784e7a947d2bea8398",
"2f5aa264724099c03772ed784e7a947d2bea8398",
"2f5aa264724099c03772ed784e7a947d2bea8398",
"2f5aa264724099c03772ed784e7a947d2bea8398"
] | [
"scipy/sparse/linalg/tests/test_matfuncs.py",
"scipy/linalg/tests/test_solve_toeplitz.py",
"tools/osx/install_and_test.py",
"scipy/optimize/tests/test_regression.py"
] | [
"#\n# Created by: Pearu Peterson, March 2002\n#\n\"\"\" Test functions for scipy.linalg.matfuncs module\n\n\"\"\"\nfrom __future__ import division, print_function, absolute_import\n\nimport math\n\nimport numpy as np\nfrom numpy import array, eye, exp, random\nfrom numpy.linalg import matrix_power\nfrom numpy.testing import (\n assert_allclose, assert_, assert_array_almost_equal, assert_equal,\n assert_array_almost_equal_nulp)\nfrom scipy._lib._numpy_compat import suppress_warnings\n\nfrom scipy.sparse import csc_matrix, SparseEfficiencyWarning\nfrom scipy.sparse.construct import eye as speye\nfrom scipy.sparse.linalg.matfuncs import (expm, _expm,\n ProductOperator, MatrixPowerOperator,\n _onenorm_matrix_power_nnm)\nfrom scipy.linalg import logm\nfrom scipy.special import factorial\nimport scipy.sparse\nimport scipy.sparse.linalg\n\n\ndef _burkardt_13_power(n, p):\n \"\"\"\n A helper function for testing matrix functions.\n\n Parameters\n ----------\n n : integer greater than 1\n Order of the square matrix to be returned.\n p : non-negative integer\n Power of the matrix.\n\n Returns\n -------\n out : ndarray representing a square matrix\n A Forsythe matrix of order n, raised to the power p.\n\n \"\"\"\n # Input validation.\n if n != int(n) or n < 2:\n raise ValueError('n must be an integer greater than 1')\n n = int(n)\n if p != int(p) or p < 0:\n raise ValueError('p must be a non-negative integer')\n p = int(p)\n\n # Construct the matrix explicitly.\n a, b = divmod(p, n)\n large = np.power(10.0, -n*a)\n small = large * np.power(10.0, -n)\n return np.diag([large]*(n-b), b) + np.diag([small]*b, b-n)\n\n\ndef test_onenorm_matrix_power_nnm():\n np.random.seed(1234)\n for n in range(1, 5):\n for p in range(5):\n M = np.random.random((n, n))\n Mp = np.linalg.matrix_power(M, p)\n observed = _onenorm_matrix_power_nnm(M, p)\n expected = np.linalg.norm(Mp, 1)\n assert_allclose(observed, expected)\n\n\nclass TestExpM(object):\n def test_zero_ndarray(self):\n a = array([[0.,0],[0,0]])\n assert_array_almost_equal(expm(a),[[1,0],[0,1]])\n\n def test_zero_sparse(self):\n a = csc_matrix([[0.,0],[0,0]])\n assert_array_almost_equal(expm(a).toarray(),[[1,0],[0,1]])\n\n def test_zero_matrix(self):\n a = np.matrix([[0.,0],[0,0]])\n assert_array_almost_equal(expm(a),[[1,0],[0,1]])\n\n def test_misc_types(self):\n A = expm(np.array([[1]]))\n assert_allclose(expm(((1,),)), A)\n assert_allclose(expm([[1]]), A)\n assert_allclose(expm(np.matrix([[1]])), A)\n assert_allclose(expm(np.array([[1]])), A)\n assert_allclose(expm(csc_matrix([[1]])).A, A)\n B = expm(np.array([[1j]]))\n assert_allclose(expm(((1j,),)), B)\n assert_allclose(expm([[1j]]), B)\n assert_allclose(expm(np.matrix([[1j]])), B)\n assert_allclose(expm(csc_matrix([[1j]])).A, B)\n\n def test_bidiagonal_sparse(self):\n A = csc_matrix([\n [1, 3, 0],\n [0, 1, 5],\n [0, 0, 2]], dtype=float)\n e1 = math.exp(1)\n e2 = math.exp(2)\n expected = np.array([\n [e1, 3*e1, 15*(e2 - 2*e1)],\n [0, e1, 5*(e2 - e1)],\n [0, 0, e2]], dtype=float)\n observed = expm(A).toarray()\n assert_array_almost_equal(observed, expected)\n\n def test_padecases_dtype_float(self):\n for dtype in [np.float32, np.float64]:\n for scale in [1e-2, 1e-1, 5e-1, 1, 10]:\n A = scale * eye(3, dtype=dtype)\n observed = expm(A)\n expected = exp(scale) * eye(3, dtype=dtype)\n assert_array_almost_equal_nulp(observed, expected, nulp=100)\n\n def test_padecases_dtype_complex(self):\n for dtype in [np.complex64, np.complex128]:\n for scale in [1e-2, 1e-1, 5e-1, 1, 10]:\n A = scale * eye(3, dtype=dtype)\n 
observed = expm(A)\n expected = exp(scale) * eye(3, dtype=dtype)\n assert_array_almost_equal_nulp(observed, expected, nulp=100)\n\n def test_padecases_dtype_sparse_float(self):\n # float32 and complex64 lead to errors in spsolve/UMFpack\n dtype = np.float64\n for scale in [1e-2, 1e-1, 5e-1, 1, 10]:\n a = scale * speye(3, 3, dtype=dtype, format='csc')\n e = exp(scale) * eye(3, dtype=dtype)\n with suppress_warnings() as sup:\n sup.filter(SparseEfficiencyWarning,\n \"Changing the sparsity structure of a csc_matrix is expensive.\")\n exact_onenorm = _expm(a, use_exact_onenorm=True).toarray()\n inexact_onenorm = _expm(a, use_exact_onenorm=False).toarray()\n assert_array_almost_equal_nulp(exact_onenorm, e, nulp=100)\n assert_array_almost_equal_nulp(inexact_onenorm, e, nulp=100)\n\n def test_padecases_dtype_sparse_complex(self):\n # float32 and complex64 lead to errors in spsolve/UMFpack\n dtype = np.complex128\n for scale in [1e-2, 1e-1, 5e-1, 1, 10]:\n a = scale * speye(3, 3, dtype=dtype, format='csc')\n e = exp(scale) * eye(3, dtype=dtype)\n with suppress_warnings() as sup:\n sup.filter(SparseEfficiencyWarning,\n \"Changing the sparsity structure of a csc_matrix is expensive.\")\n assert_array_almost_equal_nulp(expm(a).toarray(), e, nulp=100)\n\n def test_logm_consistency(self):\n random.seed(1234)\n for dtype in [np.float64, np.complex128]:\n for n in range(1, 10):\n for scale in [1e-4, 1e-3, 1e-2, 1e-1, 1, 1e1, 1e2]:\n # make logm(A) be of a given scale\n A = (eye(n) + random.rand(n, n) * scale).astype(dtype)\n if np.iscomplexobj(A):\n A = A + 1j * random.rand(n, n) * scale\n assert_array_almost_equal(expm(logm(A)), A)\n\n def test_integer_matrix(self):\n Q = np.array([\n [-3, 1, 1, 1],\n [1, -3, 1, 1],\n [1, 1, -3, 1],\n [1, 1, 1, -3]])\n assert_allclose(expm(Q), expm(1.0 * Q))\n\n def test_triangularity_perturbation(self):\n # Experiment (1) of\n # Awad H. Al-Mohy and Nicholas J. 
Higham (2012)\n # Improved Inverse Scaling and Squaring Algorithms\n # for the Matrix Logarithm.\n A = np.array([\n [3.2346e-1, 3e4, 3e4, 3e4],\n [0, 3.0089e-1, 3e4, 3e4],\n [0, 0, 3.221e-1, 3e4],\n [0, 0, 0, 3.0744e-1]],\n dtype=float)\n A_logm = np.array([\n [-1.12867982029050462e+00, 9.61418377142025565e+04,\n -4.52485573953179264e+09, 2.92496941103871812e+14],\n [0.00000000000000000e+00, -1.20101052953082288e+00,\n 9.63469687211303099e+04, -4.68104828911105442e+09],\n [0.00000000000000000e+00, 0.00000000000000000e+00,\n -1.13289322264498393e+00, 9.53249183094775653e+04],\n [0.00000000000000000e+00, 0.00000000000000000e+00,\n 0.00000000000000000e+00, -1.17947533272554850e+00]],\n dtype=float)\n assert_allclose(expm(A_logm), A, rtol=1e-4)\n\n # Perturb the upper triangular matrix by tiny amounts,\n # so that it becomes technically not upper triangular.\n random.seed(1234)\n tiny = 1e-17\n A_logm_perturbed = A_logm.copy()\n A_logm_perturbed[1, 0] = tiny\n A_expm_logm_perturbed = expm(A_logm_perturbed)\n rtol = 1e-4\n atol = 100 * tiny\n assert_(not np.allclose(A_expm_logm_perturbed, A, rtol=rtol, atol=atol))\n\n def test_burkardt_1(self):\n # This matrix is diagonal.\n # The calculation of the matrix exponential is simple.\n #\n # This is the first of a series of matrix exponential tests\n # collected by John Burkardt from the following sources.\n #\n # Alan Laub,\n # Review of \"Linear System Theory\" by Joao Hespanha,\n # SIAM Review,\n # Volume 52, Number 4, December 2010, pages 779--781.\n #\n # Cleve Moler and Charles Van Loan,\n # Nineteen Dubious Ways to Compute the Exponential of a Matrix,\n # Twenty-Five Years Later,\n # SIAM Review,\n # Volume 45, Number 1, March 2003, pages 3--49.\n #\n # Cleve Moler,\n # Cleve's Corner: A Balancing Act for the Matrix Exponential,\n # 23 July 2012.\n #\n # Robert Ward,\n # Numerical computation of the matrix exponential\n # with accuracy estimate,\n # SIAM Journal on Numerical Analysis,\n # Volume 14, Number 4, September 1977, pages 600--610.\n exp1 = np.exp(1)\n exp2 = np.exp(2)\n A = np.array([\n [1, 0],\n [0, 2],\n ], dtype=float)\n desired = np.array([\n [exp1, 0],\n [0, exp2],\n ], dtype=float)\n actual = expm(A)\n assert_allclose(actual, desired)\n\n def test_burkardt_2(self):\n # This matrix is symmetric.\n # The calculation of the matrix exponential is straightforward.\n A = np.array([\n [1, 3],\n [3, 2],\n ], dtype=float)\n desired = np.array([\n [39.322809708033859, 46.166301438885753],\n [46.166301438885768, 54.711576854329110],\n ], dtype=float)\n actual = expm(A)\n assert_allclose(actual, desired)\n\n def test_burkardt_3(self):\n # This example is due to Laub.\n # This matrix is ill-suited for the Taylor series approach.\n # As powers of A are computed, the entries blow up too quickly.\n exp1 = np.exp(1)\n exp39 = np.exp(39)\n A = np.array([\n [0, 1],\n [-39, -40],\n ], dtype=float)\n desired = np.array([\n [\n 39/(38*exp1) - 1/(38*exp39),\n -np.expm1(-38) / (38*exp1)],\n [\n 39*np.expm1(-38) / (38*exp1),\n -1/(38*exp1) + 39/(38*exp39)],\n ], dtype=float)\n actual = expm(A)\n assert_allclose(actual, desired)\n\n def test_burkardt_4(self):\n # This example is due to Moler and Van Loan.\n # The example will cause problems for the series summation approach,\n # as well as for diagonal Pade approximations.\n A = np.array([\n [-49, 24],\n [-64, 31],\n ], dtype=float)\n U = np.array([[3, 1], [4, 2]], dtype=float)\n V = np.array([[1, -1/2], [-2, 3/2]], dtype=float)\n w = np.array([-17, -1], dtype=float)\n desired = np.dot(U * np.exp(w), 
V)\n actual = expm(A)\n assert_allclose(actual, desired)\n\n def test_burkardt_5(self):\n # This example is due to Moler and Van Loan.\n # This matrix is strictly upper triangular\n # All powers of A are zero beyond some (low) limit.\n # This example will cause problems for Pade approximations.\n A = np.array([\n [0, 6, 0, 0],\n [0, 0, 6, 0],\n [0, 0, 0, 6],\n [0, 0, 0, 0],\n ], dtype=float)\n desired = np.array([\n [1, 6, 18, 36],\n [0, 1, 6, 18],\n [0, 0, 1, 6],\n [0, 0, 0, 1],\n ], dtype=float)\n actual = expm(A)\n assert_allclose(actual, desired)\n\n def test_burkardt_6(self):\n # This example is due to Moler and Van Loan.\n # This matrix does not have a complete set of eigenvectors.\n # That means the eigenvector approach will fail.\n exp1 = np.exp(1)\n A = np.array([\n [1, 1],\n [0, 1],\n ], dtype=float)\n desired = np.array([\n [exp1, exp1],\n [0, exp1],\n ], dtype=float)\n actual = expm(A)\n assert_allclose(actual, desired)\n\n def test_burkardt_7(self):\n # This example is due to Moler and Van Loan.\n # This matrix is very close to example 5.\n # Mathematically, it has a complete set of eigenvectors.\n # Numerically, however, the calculation will be suspect.\n exp1 = np.exp(1)\n eps = np.spacing(1)\n A = np.array([\n [1 + eps, 1],\n [0, 1 - eps],\n ], dtype=float)\n desired = np.array([\n [exp1, exp1],\n [0, exp1],\n ], dtype=float)\n actual = expm(A)\n assert_allclose(actual, desired)\n\n def test_burkardt_8(self):\n # This matrix was an example in Wikipedia.\n exp4 = np.exp(4)\n exp16 = np.exp(16)\n A = np.array([\n [21, 17, 6],\n [-5, -1, -6],\n [4, 4, 16],\n ], dtype=float)\n desired = np.array([\n [13*exp16 - exp4, 13*exp16 - 5*exp4, 2*exp16 - 2*exp4],\n [-9*exp16 + exp4, -9*exp16 + 5*exp4, -2*exp16 + 2*exp4],\n [16*exp16, 16*exp16, 4*exp16],\n ], dtype=float) * 0.25\n actual = expm(A)\n assert_allclose(actual, desired)\n\n def test_burkardt_9(self):\n # This matrix is due to the NAG Library.\n # It is an example for function F01ECF.\n A = np.array([\n [1, 2, 2, 2],\n [3, 1, 1, 2],\n [3, 2, 1, 2],\n [3, 3, 3, 1],\n ], dtype=float)\n desired = np.array([\n [740.7038, 610.8500, 542.2743, 549.1753],\n [731.2510, 603.5524, 535.0884, 542.2743],\n [823.7630, 679.4257, 603.5524, 610.8500],\n [998.4355, 823.7630, 731.2510, 740.7038],\n ], dtype=float)\n actual = expm(A)\n assert_allclose(actual, desired)\n\n def test_burkardt_10(self):\n # This is Ward's example #1.\n # It is defective and nonderogatory.\n A = np.array([\n [4, 2, 0],\n [1, 4, 1],\n [1, 1, 4],\n ], dtype=float)\n assert_allclose(sorted(scipy.linalg.eigvals(A)), (3, 3, 6))\n desired = np.array([\n [147.8666224463699, 183.7651386463682, 71.79703239999647],\n [127.7810855231823, 183.7651386463682, 91.88256932318415],\n [127.7810855231824, 163.6796017231806, 111.9681062463718],\n ], dtype=float)\n actual = expm(A)\n assert_allclose(actual, desired)\n\n def test_burkardt_11(self):\n # This is Ward's example #2.\n # It is a symmetric matrix.\n A = np.array([\n [29.87942128909879, 0.7815750847907159, -2.289519314033932],\n [0.7815750847907159, 25.72656945571064, 8.680737820540137],\n [-2.289519314033932, 8.680737820540137, 34.39400925519054],\n ], dtype=float)\n assert_allclose(scipy.linalg.eigvalsh(A), (20, 30, 40))\n desired = np.array([\n [\n 5.496313853692378E+15,\n -1.823188097200898E+16,\n -3.047577080858001E+16],\n [\n -1.823188097200899E+16,\n 6.060522870222108E+16,\n 1.012918429302482E+17],\n [\n -3.047577080858001E+16,\n 1.012918429302482E+17,\n 1.692944112408493E+17],\n ], dtype=float)\n actual = expm(A)\n 
assert_allclose(actual, desired)\n\n def test_burkardt_12(self):\n # This is Ward's example #3.\n # Ward's algorithm has difficulty estimating the accuracy\n # of its results.\n A = np.array([\n [-131, 19, 18],\n [-390, 56, 54],\n [-387, 57, 52],\n ], dtype=float)\n assert_allclose(sorted(scipy.linalg.eigvals(A)), (-20, -2, -1))\n desired = np.array([\n [-1.509644158793135, 0.3678794391096522, 0.1353352811751005],\n [-5.632570799891469, 1.471517758499875, 0.4060058435250609],\n [-4.934938326088363, 1.103638317328798, 0.5413411267617766],\n ], dtype=float)\n actual = expm(A)\n assert_allclose(actual, desired)\n\n def test_burkardt_13(self):\n # This is Ward's example #4.\n # This is a version of the Forsythe matrix.\n # The eigenvector problem is badly conditioned.\n # Ward's algorithm has difficulty esimating the accuracy\n # of its results for this problem.\n #\n # Check the construction of one instance of this family of matrices.\n A4_actual = _burkardt_13_power(4, 1)\n A4_desired = [[0, 1, 0, 0],\n [0, 0, 1, 0],\n [0, 0, 0, 1],\n [1e-4, 0, 0, 0]]\n assert_allclose(A4_actual, A4_desired)\n # Check the expm for a few instances.\n for n in (2, 3, 4, 10):\n # Approximate expm using Taylor series.\n # This works well for this matrix family\n # because each matrix in the summation,\n # even before dividing by the factorial,\n # is entrywise positive with max entry 10**(-floor(p/n)*n).\n k = max(1, int(np.ceil(16/n)))\n desired = np.zeros((n, n), dtype=float)\n for p in range(n*k):\n Ap = _burkardt_13_power(n, p)\n assert_equal(np.min(Ap), 0)\n assert_allclose(np.max(Ap), np.power(10, -np.floor(p/n)*n))\n desired += Ap / factorial(p)\n actual = expm(_burkardt_13_power(n, 1))\n assert_allclose(actual, desired)\n\n def test_burkardt_14(self):\n # This is Moler's example.\n # This badly scaled matrix caused problems for MATLAB's expm().\n A = np.array([\n [0, 1e-8, 0],\n [-(2e10 + 4e8/6.), -3, 2e10],\n [200./3., 0, -200./3.],\n ], dtype=float)\n desired = np.array([\n [0.446849468283175, 1.54044157383952e-09, 0.462811453558774],\n [-5743067.77947947, -0.0152830038686819, -4526542.71278401],\n [0.447722977849494, 1.54270484519591e-09, 0.463480648837651],\n ], dtype=float)\n actual = expm(A)\n assert_allclose(actual, desired)\n\n\nclass TestOperators(object):\n\n def test_product_operator(self):\n random.seed(1234)\n n = 5\n k = 2\n nsamples = 10\n for i in range(nsamples):\n A = np.random.randn(n, n)\n B = np.random.randn(n, n)\n C = np.random.randn(n, n)\n D = np.random.randn(n, k)\n op = ProductOperator(A, B, C)\n assert_allclose(op.matmat(D), A.dot(B).dot(C).dot(D))\n assert_allclose(op.T.matmat(D), (A.dot(B).dot(C)).T.dot(D))\n\n def test_matrix_power_operator(self):\n random.seed(1234)\n n = 5\n k = 2\n p = 3\n nsamples = 10\n for i in range(nsamples):\n A = np.random.randn(n, n)\n B = np.random.randn(n, k)\n op = MatrixPowerOperator(A, p)\n assert_allclose(op.matmat(B), matrix_power(A, p).dot(B))\n assert_allclose(op.T.matmat(B), matrix_power(A, p).T.dot(B))\n\n",
"\"\"\"Test functions for linalg._solve_toeplitz module\n\"\"\"\nfrom __future__ import division, print_function, absolute_import\nimport numpy as np\nfrom scipy.linalg._solve_toeplitz import levinson\nfrom scipy.linalg import solve, toeplitz, solve_toeplitz\nfrom numpy.testing import (assert_equal, assert_allclose,\n assert_raises)\nimport pytest\n\n\ndef test_solve_equivalence():\n # For toeplitz matrices, solve_toeplitz() should be equivalent to solve().\n random = np.random.RandomState(1234)\n for n in (1, 2, 3, 10):\n c = random.randn(n)\n if random.rand() < 0.5:\n c = c + 1j * random.randn(n)\n r = random.randn(n)\n if random.rand() < 0.5:\n r = r + 1j * random.randn(n)\n y = random.randn(n)\n if random.rand() < 0.5:\n y = y + 1j * random.randn(n)\n\n # Check equivalence when both the column and row are provided.\n actual = solve_toeplitz((c,r), y)\n desired = solve(toeplitz(c, r=r), y)\n assert_allclose(actual, desired)\n\n # Check equivalence when the column is provided but not the row.\n actual = solve_toeplitz(c, b=y)\n desired = solve(toeplitz(c), y)\n assert_allclose(actual, desired)\n\n\ndef test_multiple_rhs():\n random = np.random.RandomState(1234)\n c = random.randn(4)\n r = random.randn(4)\n for offset in [0, 1j]:\n for yshape in ((4,), (4, 3), (4, 3, 2)):\n y = random.randn(*yshape) + offset\n actual = solve_toeplitz((c,r), b=y)\n desired = solve(toeplitz(c, r=r), y)\n assert_equal(actual.shape, yshape)\n assert_equal(desired.shape, yshape)\n assert_allclose(actual, desired)\n \n \ndef test_native_list_arguments():\n c = [1,2,4,7]\n r = [1,3,9,12]\n y = [5,1,4,2]\n actual = solve_toeplitz((c,r), y)\n desired = solve(toeplitz(c, r=r), y)\n assert_allclose(actual, desired)\n\n\ndef test_zero_diag_error():\n # The Levinson-Durbin implementation fails when the diagonal is zero.\n random = np.random.RandomState(1234)\n n = 4\n c = random.randn(n)\n r = random.randn(n)\n y = random.randn(n)\n c[0] = 0\n assert_raises(np.linalg.LinAlgError,\n solve_toeplitz, (c, r), b=y)\n\n\ndef test_wikipedia_counterexample():\n # The Levinson-Durbin implementation also fails in other cases.\n # This example is from the talk page of the wikipedia article.\n random = np.random.RandomState(1234)\n c = [2, 2, 1]\n y = random.randn(3)\n assert_raises(np.linalg.LinAlgError, solve_toeplitz, c, b=y)\n\n\ndef test_reflection_coeffs():\n # check that that the partial solutions are given by the reflection\n # coefficients\n\n random = np.random.RandomState(1234)\n y_d = random.randn(10)\n y_z = random.randn(10) + 1j\n reflection_coeffs_d = [1]\n reflection_coeffs_z = [1]\n for i in range(2, 10):\n reflection_coeffs_d.append(solve_toeplitz(y_d[:(i-1)], b=y_d[1:i])[-1])\n reflection_coeffs_z.append(solve_toeplitz(y_z[:(i-1)], b=y_z[1:i])[-1])\n\n y_d_concat = np.concatenate((y_d[-2:0:-1], y_d[:-1]))\n y_z_concat = np.concatenate((y_z[-2:0:-1].conj(), y_z[:-1]))\n _, ref_d = levinson(y_d_concat, b=y_d[1:])\n _, ref_z = levinson(y_z_concat, b=y_z[1:])\n\n assert_allclose(reflection_coeffs_d, ref_d[:-1])\n assert_allclose(reflection_coeffs_z, ref_z[:-1])\n\n\[email protected](reason='Instability of Levinson iteration')\ndef test_unstable():\n # this is a \"Gaussian Toeplitz matrix\", as mentioned in Example 2 of\n # I. Gohbert, T. Kailath and V. 
Olshevsky \"Fast Gaussian Elimination with\n # Partial Pivoting for Matrices with Displacement Structure\"\n # Mathematics of Computation, 64, 212 (1995), pp 1557-1576\n # which can be unstable for levinson recursion.\n\n # other fast toeplitz solvers such as GKO or Burg should be better.\n random = np.random.RandomState(1234)\n n = 100\n c = 0.9 ** (np.arange(n)**2)\n y = random.randn(n)\n\n solution1 = solve_toeplitz(c, b=y)\n solution2 = solve(toeplitz(c), y)\n\n assert_allclose(solution1, solution2)\n\n",
"#!/usr/bin/env python\n\"\"\"Install the built package and run the tests.\"\"\"\nfrom __future__ import print_function\nimport os\n\n# FIXME: Should handle relative import better!\n#from .build import DIST_DIR\nfrom build import SRC_DIR, DIST_DIR, shellcmd\n\nclrgreen = '\\033[0;32m'\nclrnull = '\\033[0m'\n# print '\\033[0;32m foobar \\033[0m'\ndef color_print(msg):\n \"\"\"Add color to this print output.\"\"\"\n clrmsg = clrgreen + msg + clrnull\n print(clrmsg)\n\ndistdir = os.path.join(SRC_DIR, DIST_DIR)\n\n# Find the package and build abspath to it\npkg = None\nfilelist = os.listdir(distdir)\nfor fn in filelist:\n if fn.endswith('mpkg'):\n pkg = fn\n break\nif pkg is None:\n raise IOError('Package is not found in directory %s' % distdir)\n\npkgpath = os.path.abspath(os.path.join(SRC_DIR, DIST_DIR, pkg))\ncolor_print('Installing package: %s' % pkgpath)\n\n# Run the installer\nprint(\"\")\ncolor_print('Installer requires admin rights, you will be prompted for sudo')\nprint(\"\")\ncmd = 'sudo installer -verbose -package %s -target /' % pkgpath\n#color_print(cmd)\nshellcmd(cmd)\n\n# Null out the PYTHONPATH so we're sure to test the Installed version of scipy\nos.environ['PYTHONPATH'] = '0'\n\nprint(\"\")\ncolor_print('Install successful!')\ncolor_print('Running scipy test suite!')\nprint(\"\")\nimport scipy\nscipy.test()\n",
"\"\"\"Regression tests for optimize.\n\n\"\"\"\nfrom __future__ import division, print_function, absolute_import\n\nimport numpy as np\nfrom numpy.testing import assert_almost_equal, \\\n assert_raises\n\nimport scipy.optimize\n\n\nclass TestRegression(object):\n\n def test_newton_x0_is_0(self):\n # Regression test for gh-1601\n tgt = 1\n res = scipy.optimize.newton(lambda x: x - 1, 0)\n assert_almost_equal(res, tgt)\n\n def test_newton_integers(self):\n # Regression test for gh-1741\n root = scipy.optimize.newton(lambda x: x**2 - 1, x0=2,\n fprime=lambda x: 2*x)\n assert_almost_equal(root, 1.0)\n\n def test_lmdif_errmsg(self):\n # This shouldn't cause a crash on Python 3\n class SomeError(Exception):\n pass\n counter = [0]\n\n def func(x):\n counter[0] += 1\n if counter[0] < 3:\n return x**2 - np.array([9, 10, 11])\n else:\n raise SomeError()\n assert_raises(SomeError,\n scipy.optimize.leastsq,\n func, [1, 2, 3])\n\n"
] | [
[
"numpy.diag",
"numpy.matrix",
"scipy._lib._numpy_compat.suppress_warnings",
"scipy.sparse.linalg.matfuncs._expm",
"numpy.linalg.matrix_power",
"numpy.max",
"scipy.linalg.logm",
"numpy.random.randn",
"numpy.iscomplexobj",
"numpy.exp",
"scipy.sparse.linalg.matfuncs.MatrixPowerOperator",
"scipy.sparse.construct.eye",
"numpy.allclose",
"numpy.eye",
"scipy.sparse.linalg.matfuncs.expm",
"scipy.sparse.linalg.matfuncs._onenorm_matrix_power_nnm",
"numpy.ceil",
"scipy.special.factorial",
"numpy.zeros",
"numpy.testing.assert_array_almost_equal",
"scipy.sparse.csc_matrix",
"numpy.spacing",
"numpy.power",
"numpy.testing.assert_array_almost_equal_nulp",
"numpy.min",
"numpy.random.rand",
"numpy.floor",
"numpy.testing.assert_allclose",
"numpy.array",
"numpy.random.random",
"numpy.random.seed",
"numpy.linalg.norm",
"numpy.expm1",
"scipy.sparse.linalg.matfuncs.ProductOperator"
],
[
"numpy.testing.assert_equal",
"scipy.linalg._solve_toeplitz.levinson",
"scipy.linalg.toeplitz",
"numpy.arange",
"numpy.concatenate",
"scipy.linalg.solve_toeplitz",
"numpy.testing.assert_raises",
"numpy.testing.assert_allclose",
"numpy.random.RandomState"
],
[
"scipy.test"
],
[
"numpy.testing.assert_almost_equal",
"numpy.array",
"numpy.testing.assert_raises"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.14",
"1.6",
"0.15",
"1.4",
"1.3",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"1.0",
"0.17",
"0.16"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
Oriolac/data-utils | [
"87423d7f7f408c26ea31cbeb7a55a77a55a9ee27"
] | [
"src/dataut/visual.py"
] | [
"\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport seaborn as sns\n\ndef show_corr(X, mask=True, figsize=(7,7)):\n fig, ax = plt.subplots(figsize=figsize)\n corr = X.corr()\n mask = np.triu(np.ones_like(corr, dtype=bool)) if mask else np.ones_like(corr, dtype=bool)\n sns.heatmap(corr, mask=mask, square=True, annot=True, ax=ax)\n plt.show()"
] | [
[
"matplotlib.pyplot.show",
"numpy.ones_like",
"matplotlib.pyplot.subplots"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
wingkitlee0/lifelines | [
"46a225ab1d8845f0366921d2b61151fb9a69d398"
] | [
"lifelines/fitters/kaplan_meier_fitter.py"
] | [
"# -*- coding: utf-8 -*-\nimport functools\nimport warnings\nimport numpy as np\nimport pandas as pd\n\nfrom lifelines.fitters import UnivariateFitter\nfrom lifelines.utils import (\n _preprocess_inputs,\n _additive_estimate,\n _to_1d_array,\n StatError,\n inv_normal_cdf,\n median_survival_times,\n qth_survival_time,\n check_nans_or_infs,\n StatisticalWarning,\n coalesce,\n CensoringType,\n)\nfrom lifelines.plotting import plot_loglogs, _plot_estimate\n\n\nclass KaplanMeierFitter(UnivariateFitter):\n\n \"\"\"\n Class for fitting the Kaplan-Meier estimate for the survival function.\n\n Parameters\n ----------\n alpha: float, option (default=0.05)\n The alpha value associated with the confidence intervals.\n\n\n Examples\n --------\n >>> from lifelines import KaplanMeierFitter\n >>> from lifelines.datasets import load_waltons\n >>> waltons = load_waltons()\n >>> kmf = KaplanMeierFitter()\n >>> kmf.fit(waltons['T'], waltons['E'])\n >>> kmf.plot()\n\n\n Attributes\n ----------\n survival_function_ : DataFrame\n The estimated survival function (with custom timeline if provided)\n median_ : float\n The estimated median time to event. np.inf if doesn't exist.\n confidence_interval_ : DataFrame\n The lower and upper confidence intervals for the survival function. An alias of\n ``confidence_interval_survival_function_``\n confidence_interval_survival_function_ : DataFrame\n The lower and upper confidence intervals for the survival function. An alias of\n ``confidence_interval_``\n cumumlative_density_ : DataFrame\n The estimated cumulative density function (with custom timeline if provided)\n confidence_interval_cumulative_density_ : DataFrame\n The lower and upper confidence intervals for the cumulative density\n durations: array\n The durations provided\n event_observed: array\n The event_observed variable provided\n timeline: array\n The time line to use for plotting and indexing\n entry: array or None\n The entry array provided, or None\n event_table: DataFrame\n A summary of the life table\n \"\"\"\n\n @CensoringType.right_censoring\n def fit(\n self,\n durations,\n event_observed=None,\n timeline=None,\n entry=None,\n label=\"KM_estimate\",\n left_censorship=False,\n alpha=None,\n ci_labels=None,\n weights=None,\n ): # pylint: disable=too-many-arguments,too-many-locals\n \"\"\"\n Fit the model to a right-censored dataset\n\n Parameters\n ----------\n durations: an array, list, pd.DataFrame or pd.Series\n length n -- duration subject was observed for\n event_observed: an array, list, pd.DataFrame, or pd.Series, optional\n True if the the death was observed, False if the event was lost (right-censored). Defaults all True if event_observed==None\n timeline: an array, list, pd.DataFrame, or pd.Series, optional\n return the best estimate at the values in timelines (postively increasing)\n entry: an array, list, pd.DataFrame, or pd.Series, optional\n relative time when a subject entered the study. This is useful for left-truncated (not left-censored) observations. If None, all members of the population\n entered study when they were \"born\".\n label: string, optional\n a string to name the column of the estimate.\n alpha: float, optional\n the alpha value in the confidence intervals. Overrides the initializing alpha for this call to fit only.\n left_censorship: bool, optional (default=False)\n Deprecated, use ``fit_left_censoring``\n ci_labels: tuple, optional\n add custom column names to the generated confidence intervals as a length-2 list: [<lower-bound name>, <upper-bound name>]. 
Default: <label>_lower_<1-alpha/2>\n weights: an array, list, pd.DataFrame, or pd.Series, optional\n if providing a weighted dataset. For example, instead\n of providing every subject as a single element of `durations` and `event_observed`, one could\n weigh subject differently.\n\n Returns\n -------\n self: KaplanMeierFitter\n self with new properties like ``survival_function_``, ``plot()``, ``median``\n\n \"\"\"\n if left_censorship:\n warnings.warn(\n \"kwarg left_censorship is deprecated and will be removed in a future release. Please use ``.fit_left_censoring`` instead.\",\n DeprecationWarning,\n )\n\n return self._fit(durations, event_observed, timeline, entry, label, alpha, ci_labels, weights)\n\n @CensoringType.left_censoring\n def fit_left_censoring(\n self,\n durations,\n event_observed=None,\n timeline=None,\n entry=None,\n label=\"KM_estimate\",\n alpha=None,\n ci_labels=None,\n weights=None,\n ):\n \"\"\"\n Fit the model to a left-censored dataset\n\n Parameters\n ----------\n durations: an array, list, pd.DataFrame or pd.Series\n length n -- duration subject was observed for\n event_observed: an array, list, pd.DataFrame, or pd.Series, optional\n True if the the death was observed, False if the event was lost (right-censored). Defaults all True if event_observed==None\n timeline: an array, list, pd.DataFrame, or pd.Series, optional\n return the best estimate at the values in timelines (postively increasing)\n entry: an array, list, pd.DataFrame, or pd.Series, optional\n relative time when a subject entered the study. This is useful for left-truncated (not left-censored) observations. If None, all members of the population\n entered study when they were \"born\".\n label: string, optional\n a string to name the column of the estimate.\n alpha: float, optional\n the alpha value in the confidence intervals. Overrides the initializing alpha for this call to fit only.\n left_censorship: bool, optional (default=False)\n Deprecated, use ``fit_left_censoring``\n ci_labels: tuple, optional\n add custom column names to the generated confidence intervals as a length-2 list: [<lower-bound name>, <upper-bound name>]. Default: <label>_lower_<1-alpha/2>\n weights: an array, list, pd.DataFrame, or pd.Series, optional\n if providing a weighted dataset. For example, instead\n of providing every subject as a single element of `durations` and `event_observed`, one could\n weigh subject differently.\n\n Returns\n -------\n self: KaplanMeierFitter\n self with new properties like ``survival_function_``, ``plot()``, ``median``\n\n \"\"\"\n return self._fit(durations, event_observed, timeline, entry, label, alpha, ci_labels, weights)\n\n def _fit(\n self,\n durations,\n event_observed=None,\n timeline=None,\n entry=None,\n label=\"KM_estimate\",\n alpha=None,\n ci_labels=None,\n weights=None,\n ): # pylint: disable=too-many-arguments,too-many-locals\n \"\"\"\n Parameters\n ----------\n durations: an array, list, pd.DataFrame or pd.Series\n length n -- duration subject was observed for\n event_observed: an array, list, pd.DataFrame, or pd.Series, optional\n True if the the death was observed, False if the event was lost (right-censored). Defaults all True if event_observed==None\n timeline: an array, list, pd.DataFrame, or pd.Series, optional\n return the best estimate at the values in timelines (postively increasing)\n entry: an array, list, pd.DataFrame, or pd.Series, optional\n relative time when a subject entered the study. This is useful for left-truncated (not left-censored) observations. 
If None, all members of the population\n entered study when they were \"born\".\n label: string, optional\n a string to name the column of the estimate.\n alpha: float, optional\n the alpha value in the confidence intervals. Overrides the initializing alpha for this call to fit only.\n left_censorship: bool, optional (default=False)\n True if durations and event_observed refer to left censorship events. Default False\n ci_labels: tuple, optional\n add custom column names to the generated confidence intervals as a length-2 list: [<lower-bound name>, <upper-bound name>]. Default: <label>_lower_<1-alpha/2>\n weights: an array, list, pd.DataFrame, or pd.Series, optional\n if providing a weighted dataset. For example, instead\n of providing every subject as a single element of `durations` and `event_observed`, one could\n weigh subject differently.\n\n Returns\n -------\n self: KaplanMeierFitter\n self with new properties like ``survival_function_``, ``plot()``, ``median``\n\n \"\"\"\n self._check_values(durations)\n if event_observed is not None:\n self._check_values(event_observed)\n\n self._label = label\n\n if weights is not None:\n weights = np.asarray(weights)\n if (weights.astype(int) != weights).any():\n warnings.warn(\n \"\"\"It looks like your weights are not integers, possibly propensity scores then?\n It's important to know that the naive variance estimates of the coefficients are biased. Instead use Monte Carlo to\n estimate the variances. See paper \"Variance estimation when using inverse probability of treatment weighting (IPTW) with survival analysis\"\n or \"Adjusted Kaplan-Meier estimator and log-rank test with inverse probability of treatment weighting for survival data.\"\n \"\"\",\n StatisticalWarning,\n )\n else:\n weights = np.ones_like(durations, dtype=float)\n\n # if the user is interested in left-censorship, we return the cumulative_density_, no survival_function_,\n is_left_censoring = CensoringType.is_left_censoring(self)\n primary_estimate_name = \"survival_function_\" if not is_left_censoring else \"cumulative_density_\"\n secondary_estimate_name = \"cumulative_density_\" if not is_left_censoring else \"survival_function_\"\n\n self.durations, self.event_observed, self.timeline, self.entry, self.event_table, self.weights = _preprocess_inputs(\n durations, event_observed, timeline, entry, weights\n )\n\n alpha = alpha if alpha else self.alpha\n log_estimate, cumulative_sq_ = _additive_estimate(\n self.event_table, self.timeline, self._additive_f, self._additive_var, is_left_censoring\n )\n\n if entry is not None:\n # a serious problem with KM is that when the sample size is small and there are too few early\n # truncation times, it may happen that is the number of patients at risk and the number of deaths is the same.\n # we adjust for this using the Breslow-Fleming-Harrington estimator\n n = self.event_table.shape[0]\n net_population = (self.event_table[\"entrance\"] - self.event_table[\"removed\"]).cumsum()\n if net_population.iloc[: int(n / 2)].min() == 0:\n ix = net_population.iloc[: int(n / 2)].idxmin()\n raise StatError(\n \"\"\"There are too few early truncation times and too many events. S(t)==0 for all t>%g. 
Recommend BreslowFlemingHarringtonFitter.\"\"\"\n % ix\n )\n\n # estimation\n setattr(self, primary_estimate_name, pd.DataFrame(np.exp(log_estimate), columns=[self._label]))\n setattr(self, secondary_estimate_name, pd.DataFrame(1 - np.exp(log_estimate), columns=[self._label]))\n\n self.__estimate = getattr(self, primary_estimate_name)\n self.confidence_interval_ = self._bounds(cumulative_sq_[:, None], alpha, ci_labels)\n self._median = median_survival_times(self.survival_function_)\n self.percentile = functools.partial(qth_survival_time, model_or_survival_function=self.survival_function_)\n self._cumulative_sq_ = cumulative_sq_\n\n setattr(self, \"confidence_interval_\" + primary_estimate_name, self.confidence_interval_)\n setattr(self, \"confidence_interval_\" + secondary_estimate_name, 1 - self.confidence_interval_)\n\n # estimation methods\n self._estimation_method = primary_estimate_name\n self._estimate_name = primary_estimate_name\n self._update_docstrings()\n\n return self\n\n @property\n def median_(self):\n warnings.warn(\n \"\"\"Please use `median_survival_time_` property instead. Future property `median_` will be removed.\"\"\",\n FutureWarning,\n )\n return self._median\n\n @property\n def median_survival_time_(self):\n return self._median\n\n def _check_values(self, array):\n check_nans_or_infs(array)\n\n def plot_loglogs(self, *args, **kwargs):\n r\"\"\"\n Plot :math:`\\log(S(t))` against :math:`\\log(t)`. Same arguments as ``.plot``.\n \"\"\"\n return plot_loglogs(self, *args, **kwargs)\n\n def survival_function_at_times(self, times, label=None):\n \"\"\"\n Return a Pandas series of the predicted survival value at specific times\n\n Parameters\n -----------\n times: iterable or float\n\n Returns\n --------\n pd.Series\n\n \"\"\"\n label = coalesce(label, self._label)\n return pd.Series(self.predict(times), index=_to_1d_array(times), name=label)\n\n def cumulative_density_at_times(self, times, label=None):\n \"\"\"\n Return a Pandas series of the predicted cumulative density at specific times\n\n Parameters\n -----------\n times: iterable or float\n\n Returns\n --------\n pd.Series\n\n \"\"\"\n label = coalesce(label, self._label)\n return pd.Series(1 - self.predict(times), index=_to_1d_array(times), name=label)\n\n def plot_survival_function(self, **kwargs):\n \"\"\"Alias of ``plot``\"\"\"\n return _plot_estimate(self, estimate=\"survival_function_\", **kwargs)\n\n def plot_cumulative_density(self, **kwargs):\n \"\"\"\n Plots a pretty figure of {0}.{1}\n\n Matplotlib plot arguments can be passed in inside the kwargs, plus\n\n Parameters\n -----------\n show_censors: bool\n place markers at censorship events. Default: False\n censor_styles: bool\n If show_censors, this dictionary will be passed into the plot call.\n ci_alpha: bool\n the transparency level of the confidence interval. Default: 0.3\n ci_force_lines: bool\n force the confidence intervals to be line plots (versus default shaded areas). Default: False\n ci_show: bool\n show confidence intervals. Default: True\n ci_legend: bool\n if ci_force_lines is True, this is a boolean flag to add the lines' labels to the legend. Default: False\n at_risk_counts: bool\n show group sizes at time points. See function ``add_at_risk_counts`` for details. Default: False\n loc: slice\n specify a time-based subsection of the curves to plot, ex:\n\n >>> model.plot(loc=slice(0.,10.))\n\n will plot the time values between t=0. 
and t=10.\n iloc: slice\n specify a location-based subsection of the curves to plot, ex:\n\n >>> model.plot(iloc=slice(0,10))\n\n will plot the first 10 time points.\n invert_y_axis: bool\n boolean to invert the y-axis, useful to show cumulative graphs instead of survival graphs. (Deprecated, use ``plot_cumulative_density()``)\n\n Returns\n -------\n ax:\n a pyplot axis object\n \"\"\"\n return _plot_estimate(\n self,\n estimate=self.cumulative_density_,\n confidence_intervals=self.confidence_interval_cumulative_density_,\n **kwargs\n )\n\n def _bounds(self, cumulative_sq_, alpha, ci_labels):\n # This method calculates confidence intervals using the exponential Greenwood formula.\n # See https://www.math.wustl.edu/%7Esawyer/handouts/greenwood.pdf\n z = inv_normal_cdf(1 - alpha / 2)\n df = pd.DataFrame(index=self.timeline)\n v = np.log(self.__estimate.values)\n\n if ci_labels is None:\n ci_labels = [\"%s_upper_%g\" % (self._label, 1 - alpha), \"%s_lower_%g\" % (self._label, 1 - alpha)]\n assert len(ci_labels) == 2, \"ci_labels should be a length 2 array.\"\n\n df[ci_labels[0]] = np.exp(-np.exp(np.log(-v) + z * np.sqrt(cumulative_sq_) / v))\n df[ci_labels[1]] = np.exp(-np.exp(np.log(-v) - z * np.sqrt(cumulative_sq_) / v))\n return df\n\n def _additive_f(self, population, deaths):\n np.seterr(invalid=\"ignore\", divide=\"ignore\")\n return np.log(population - deaths) - np.log(population)\n\n def _additive_var(self, population, deaths):\n np.seterr(divide=\"ignore\")\n population = population.astype(\"uint64\")\n return (deaths / (population * (population - deaths))).replace([np.inf], 0)\n\n def plot_cumulative_hazard(self, **kwargs):\n raise NotImplementedError(\n \"The Kaplan-Meier estimator is not used to estimate the cumulative hazard. Try the NelsonAalenFitter or any other parametric model\"\n )\n\n def plot_hazard(self, **kwargs):\n raise NotImplementedError(\n \"The Kaplan-Meier estimator is not used to estimate the hazard. Try the NelsonAalenFitter or any other parametric model\"\n )\n"
] | [
[
"numpy.log",
"numpy.ones_like",
"numpy.sqrt",
"numpy.asarray",
"pandas.DataFrame",
"numpy.seterr",
"numpy.exp"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
whfh3900/Tacotron-2-korea-example | [
"2799394f14e5d52bed2e5f7495bbd89e020a350a"
] | [
"wavenet_vocoder/feeder.py"
] | [
"import os\nimport threading\nimport time\n\nimport numpy as np\nimport tensorflow as tf\nfrom datasets import audio\nfrom infolog import log\nfrom keras.utils import np_utils\nfrom sklearn.model_selection import train_test_split\n\nfrom .util import is_mulaw_quantize, is_scalar_input\n\n\n\n_batches_per_group = 64\n\n\nclass Feeder:\n\t\"\"\"\n\t\tFeeds batches of data into queue in a background thread.\n\t\"\"\"\n\tdef __init__(self, coordinator, metadata_filename, base_dir, hparams):\n\t\tsuper(Feeder, self).__init__()\n\n\t\tself._coord = coordinator\n\t\tself._hparams = hparams\n\t\tself._train_offset = 0\n\t\tself._test_offset = 0\n\n\t\tif hparams.symmetric_mels:\n\t\t\tself._spec_pad = -hparams.max_abs_value\n\t\telse:\n\t\t\tself._spec_pad = 0.\n\n\t\t#Base directory of the project (to map files from different locations)\n\t\tself._base_dir = base_dir\n\n\t\t#Load metadata\n\t\tself._data_dir = os.path.dirname(metadata_filename)\n\t\twith open(metadata_filename, 'r', encoding='utf-8') as f:\n\t\t\tself._metadata = [line.strip().split('|') for line in f]\n\n\t\t#Train test split\n\t\tif hparams.wavenet_test_size is None:\n\t\t\tassert hparams.wavenet_test_batches is not None\n\n\t\ttest_size = (hparams.wavenet_test_size if hparams.wavenet_test_size is not None\n\t\t\telse hparams.wavenet_test_batches * hparams.wavenet_batch_size)\n\t\tindices = np.arange(len(self._metadata))\n\t\ttrain_indices, test_indices = train_test_split(indices,\n\t\t\ttest_size=test_size, random_state=hparams.wavenet_data_random_state)\n\n\t\t#Make sure test size is a multiple of batch size else round up\n\t\tlen_test_indices = _round_down(len(test_indices), hparams.wavenet_batch_size)\n\t\textra_test = test_indices[len_test_indices:]\n\t\ttest_indices = test_indices[:len_test_indices]\n\t\ttrain_indices = np.concatenate([train_indices, extra_test])\n\n\t\tself._train_meta = list(np.array(self._metadata)[train_indices])\n\t\tself._test_meta = list(np.array(self._metadata)[test_indices])\n\n\t\tself.test_steps = len(self._test_meta) // hparams.wavenet_batch_size\n\n\t\tif hparams.wavenet_test_size is None:\n\t\t\tassert hparams.wavenet_test_batches == self.test_steps\n\n\t\t#Get conditioning status\n\t\tself.local_condition, self.global_condition = self._check_conditions()\n\n\t\twith tf.device('/cpu:0'):\n\t\t\t# Create placeholders for inputs and targets. 
Don't specify batch size because we want\n\t\t\t# to be able to feed different batch sizes at eval time.\n\t\t\tif is_scalar_input(hparams.input_type):\n\t\t\t\tinput_placeholder = tf.placeholder(tf.float32, shape=(None, 1, None), name='audio_inputs')\n\t\t\t\ttarget_placeholder = tf.placeholder(tf.float32, shape=(None, None, 1), name='audio_targets')\n\t\t\t\ttarget_type = tf.float32\n\t\t\telse:\n\t\t\t\tinput_placeholder = tf.placeholder(tf.float32, shape=(None, hparams.quantize_channels, None), name='audio_inputs')\n\t\t\t\ttarget_placeholder = tf.placeholder(tf.int32, shape=(None, None, 1), name='audio_targets')\n\t\t\t\ttarget_type = tf.int32\n\n\t\t\tself._placeholders = [\n\t\t\tinput_placeholder,\n\t\t\ttarget_placeholder,\n\t\t\ttf.placeholder(tf.int32, shape=(None, ), name='input_lengths'),\n\t\t\t]\n\n\t\t\tqueue_types = [tf.float32, target_type, tf.int32]\n\n\t\t\tif self.local_condition:\n\t\t\t\tself._placeholders.append(tf.placeholder(tf.float32, shape=(None, hparams.num_mels, None), name='local_condition_features'))\n\t\t\t\tqueue_types.append(tf.float32)\n\t\t\tif self.global_condition:\n\t\t\t\tself._placeholders.append(tf.placeholder(tf.int32, shape=(None, 1), name='global_condition_features'))\n\t\t\t\tqueue_types.append(tf.int32)\n\n\t\t\t# Create queue for buffering data\n\t\t\tqueue = tf.FIFOQueue(8, queue_types, name='input_queue')\n\t\t\tself._enqueue_op = queue.enqueue(self._placeholders)\n\t\t\tvariables = queue.dequeue()\n\n\t\t\tself.inputs = variables[0]\n\t\t\tself.inputs.set_shape(self._placeholders[0].shape)\n\t\t\tself.targets = variables[1]\n\t\t\tself.targets.set_shape(self._placeholders[1].shape)\n\t\t\tself.input_lengths = variables[2]\n\t\t\tself.input_lengths.set_shape(self._placeholders[2].shape)\n\n\t\t\tidx = 3\n\n\t\t\t#If local conditioning disabled override c inputs with None\n\t\t\tif hparams.cin_channels < 0:\n\t\t\t\tself.local_condition_features = None\n\t\t\telse:\n\t\t\t\tself.local_condition_features = variables[idx]\n\t\t\t\tself.local_condition_features.set_shape(self._placeholders[idx].shape)\n\t\t\t\tidx += 1\n\n\t\t\t#If global conditioning disabled override g inputs with None\n\t\t\tif hparams.gin_channels < 0:\n\t\t\t\tself.global_condition_features = None\n\t\t\telse:\n\t\t\t\tself.global_condition_features = variables[idx]\n\t\t\t\tself.global_condition_features.set_shape(self._placeholders[idx].shape)\n\n\t\t\t# Create queue for buffering eval data\n\t\t\teval_queue = tf.FIFOQueue(1, queue_types, name='eval_queue')\n\t\t\tself._eval_enqueue_op = eval_queue.enqueue(self._placeholders)\n\t\t\teval_variables = eval_queue.dequeue()\n\n\t\t\tself.eval_inputs = eval_variables[0]\n\t\t\tself.eval_inputs.set_shape(self._placeholders[0].shape)\n\t\t\tself.eval_targets = eval_variables[1]\n\t\t\tself.eval_targets.set_shape(self._placeholders[1].shape)\n\t\t\tself.eval_input_lengths = eval_variables[2]\n\t\t\tself.eval_input_lengths.set_shape(self._placeholders[2].shape)\n\n\t\t\teval_idx = 3\n\n\t\t\t#If local conditioning disabled override c inputs with None\n\t\t\tif hparams.cin_channels < 0:\n\t\t\t\tself.eval_local_condition_features = None\n\t\t\telse:\n\t\t\t\tself.eval_local_condition_features = eval_variables[eval_idx]\n\t\t\t\tself.eval_local_condition_features.set_shape(self._placeholders[eval_idx].shape)\n\t\t\t\teval_idx += 1\n\n\t\t\t#If global conditioning disabled override g inputs with None\n\t\t\tif hparams.gin_channels < 0:\n\t\t\t\tself.eval_global_condition_features = 
None\n\t\t\telse:\n\t\t\t\tself.eval_global_condition_features = eval_variables[eval_idx]\n\t\t\t\tself.eval_global_condition_features.set_shape(self._placeholders[eval_idx].shape)\n\n\n\tdef start_threads(self, session):\n\t\tself._session = session\n\t\tthread = threading.Thread(name='background', target=self._enqueue_next_train_group)\n\t\tthread.daemon = True #Thread will close when parent quits\n\t\tthread.start()\n\n\t\tthread = threading.Thread(name='background', target=self._enqueue_next_test_group)\n\t\tthread.daemon = True #Thread will close when parent quits\n\t\tthread.start()\n\n\tdef _get_test_groups(self):\n\t\tmeta = self._test_meta[self._test_offset]\n\t\tself._test_offset += 1\n\n\t\tif self._hparams.train_with_GTA:\n\t\t\tmel_file = meta[2]\n\t\telse:\n\t\t\tmel_file = meta[1]\n\t\taudio_file = meta[0]\n\n\t\tinput_data = np.load(os.path.join(self._base_dir, audio_file))\n\n\t\tif self.local_condition:\n\t\t\tlocal_condition_features = np.load(os.path.join(self._base_dir, mel_file))\n\t\telse:\n\t\t\tlocal_condition_features = None\n\n\t\tif self.global_condition:\n\t\t\tglobal_condition_features = meta[3]\n\t\t\tif global_condition_features == '<no_g>':\n\t\t\t\traise RuntimeError('Please redo the wavenet preprocessing (or GTA synthesis) to assign global condition features!')\n\t\telse:\n\t\t\tglobal_condition_features = None\n\n\t\treturn (input_data, local_condition_features, global_condition_features, len(input_data))\n\n\tdef make_test_batches(self):\n\t\tstart = time.time()\n\n\t\t#Read one example for evaluation\n\t\tn = 1\n\n\t\t#Test on entire test set (one sample at an evaluation step)\n\t\texamples = [self._get_test_groups() for i in range(len(self._test_meta))]\n\t\tbatches = [examples[i: i+n] for i in range(0, len(examples), n)]\n\t\tnp.random.shuffle(batches)\n\n\t\tprint('\\nGenerated {} test batches of size {} in {:.3f} sec'.format(len(batches), n, time.time() - start))\n\t\tlog('\\nGenerated {} test batches of size {} in {:.3f} sec'.format(len(batches), n, time.time() - start))\n\t\treturn batches\n\n\tdef _enqueue_next_train_group(self):\n\t\twhile not self._coord.should_stop():\n\t\t\tstart = time.time()\n\n\t\t\t# Read a group of examples\n\t\t\tn = self._hparams.wavenet_batch_size\n\t\t\texamples = [self._get_next_example() for i in range(n * _batches_per_group)]\n\n\t\t\t# Bucket examples base on similiar output length for efficiency\n\t\t\texamples.sort(key=lambda x: x[-1])\n\t\t\tbatches = [examples[i: i+n] for i in range(0, len(examples), n)]\n\t\t\tnp.random.shuffle(batches)\n\n\t\t\tprint('\\nGenerated {} train batches of size {} in {:.3f} sec'.format(len(batches), n, time.time() - start))\n\t\t\tlog('\\nGenerated {} train batches of size {} in {:.3f} sec'.format(len(batches), n, time.time() - start))\n\t\t\tfor batch in batches:\n\t\t\t\tfeed_dict = dict(zip(self._placeholders, self._prepare_batch(batch)))\n\t\t\t\tself._session.run(self._enqueue_op, feed_dict=feed_dict)\n\n\tdef _enqueue_next_test_group(self):\n\t\ttest_batches = self.make_test_batches()\n\t\twhile not self._coord.should_stop():\n\t\t\tfor batch in test_batches:\n\t\t\t\tfeed_dict = dict(zip(self._placeholders, self._prepare_batch(batch)))\n\t\t\t\tself._session.run(self._eval_enqueue_op, feed_dict=feed_dict)\n\n\tdef _get_next_example(self):\n\t\t'''Get a single example (input, output, len_output) from disk\n\t\t'''\n\t\tif self._train_offset >= len(self._train_meta):\n\t\t\tself._train_offset = 0\n\t\t\tnp.random.shuffle(self._train_meta)\n\t\tmeta = 
self._train_meta[self._train_offset]\n\t\tself._train_offset += 1\n\n\t\tif self._hparams.train_with_GTA:\n\t\t\tmel_file = meta[2]\n\t\t\tif 'linear' in mel_file:\n\t\t\t\traise RuntimeError('Linear spectrogram files selected instead of GTA mels, did you specify the wrong metadata?')\n\t\telse:\n\t\t\tmel_file = meta[1]\n\t\taudio_file = meta[0]\n\n\t\tinput_data = np.load(os.path.join(self._base_dir, audio_file))\n\n\t\tif self.local_condition:\n\t\t\tlocal_condition_features = np.load(os.path.join(self._base_dir, mel_file))\n\t\telse:\n\t\t\tlocal_condition_features = None\n\n\t\tif self.global_condition:\n\t\t\tglobal_condition_features = meta[3]\n\t\t\tif global_condition_features == '<no_g>':\n\t\t\t\traise RuntimeError('Please redo the wavenet preprocessing (or GTA synthesis) to assign global condition features!')\n\t\telse:\n\t\t\tglobal_condition_features = None\n\n\t\treturn (input_data, local_condition_features, global_condition_features, len(input_data))\n\n\n\tdef _prepare_batch(self, batches):\n\t\tassert 0 == len(batches) % self._hparams.wavenet_num_gpus\n\t\tsize_per_device = int(len(batches) / self._hparams.wavenet_num_gpus)\n\t\tnp.random.shuffle(batches)\n\n\t\t#Limit time steps to save GPU Memory usage\n\t\tmax_time_steps = self._limit_time()\n\t\t#Adjust time resolution for upsampling\n\t\tbatches = self._adjust_time_resolution(batches, self.local_condition, max_time_steps)\n\n\t\t#time lengths\n\t\tinput_lengths = np.asarray([len(x[0]) for x in batches], np.int32)\n\t\tmax_input_length = max(input_lengths)\n\n\t\t#Since all inputs/targets will have the same lengths for all GPUs, we can simply treat all GPUs batches as one big batch and stack all data. (fixed length)\n\t\tinputs = self._prepare_inputs([x[0] for x in batches], max_input_length)\n\t\ttargets = self._prepare_targets([x[0] for x in batches], max_input_length)\n\t\tlocal_condition_features = self._prepare_local_conditions(self.local_condition, [x[1] for x in batches])\n\t\tglobal_condition_features = self._prepare_global_conditions(self.global_condition, [x[2] for x in batches])\n\n\t\t#Create final batches\n\t\tnew_batches = (inputs, targets, input_lengths)\n\t\tif local_condition_features is not None:\n\t\t\tnew_batches += (local_condition_features, )\n\t\tif global_condition_features is not None:\n\t\t\tnew_batches += (global_condition_features, )\n\n\t\treturn new_batches\n\n\tdef _prepare_inputs(self, inputs, maxlen):\n\t\tif is_mulaw_quantize(self._hparams.input_type):\n\t\t\t#[batch_size, time_steps, quantize_channels]\n\t\t\tx_batch = np.stack([_pad_inputs(np_utils.to_categorical(\n\t\t\t\tx, num_classes=self._hparams.quantize_channels), maxlen) for x in inputs]).astype(np.float32)\n\t\telse:\n\t\t\t#[batch_size, time_steps, 1]\n\t\t\tx_batch = np.stack([_pad_inputs(x.reshape(-1, 1), maxlen) for x in inputs]).astype(np.float32)\n\t\tassert len(x_batch.shape) == 3\n\t\t#Convert to channels first [batch_size, quantize_channels (or 1), time_steps]\n\t\tx_batch = np.transpose(x_batch, (0, 2, 1))\n\t\treturn x_batch\n\n\tdef _prepare_targets(self, targets, maxlen):\n\t\t#[batch_size, time_steps]\n\t\tif is_mulaw_quantize(self._hparams.input_type):\n\t\t\ty_batch = np.stack([_pad_targets(x, maxlen) for x in targets]).astype(np.int32)\n\t\telse:\n\t\t\ty_batch = np.stack([_pad_targets(x, maxlen) for x in targets]).astype(np.float32)\n\t\tassert len(y_batch.shape) == 2\n\t\t#Add extra axis (make 3 dimension)\n\t\ty_batch = np.expand_dims(y_batch, axis=-1)\n\t\treturn y_batch\n\n\tdef 
_prepare_local_conditions(self, local_condition, c_features):\n\t\tif local_condition:\n\t\t\tmaxlen = max([len(x) for x in c_features])\n\t\t\t#[-max, max] or [0,max]\n\t\t\tT2_output_range = (-self._hparams.max_abs_value, self._hparams.max_abs_value) if self._hparams.symmetric_mels else (0, self._hparams.max_abs_value)\n\n\t\t\tif self._hparams.clip_for_wavenet:\n\t\t\t\tc_features = [np.clip(x, T2_output_range[0], T2_output_range[1]) for x in c_features]\n\t\t\t\t\n\t\t\tc_batch = np.stack([_pad_inputs(x, maxlen, _pad=T2_output_range[0]) for x in c_features]).astype(np.float32)\n\t\t\tassert len(c_batch.shape) == 3\n\t\t\t#[batch_size, c_channels, time_steps]\n\t\t\tc_batch = np.transpose(c_batch, (0, 2, 1))\n\n\t\t\tif self._hparams.normalize_for_wavenet:\n\t\t\t\t#rerange to [0, 1]\n\t\t\t\tc_batch = _interp(c_batch, T2_output_range).astype(np.float32)\n\n\t\telse:\n\t\t\tc_batch = None\n\n\t\treturn c_batch\n\n\tdef _prepare_global_conditions(self, global_condition, g_features):\n\t\tif global_condition:\n\t\t\tg_batch = np.array(g_features).astype(np.int32).reshape(-1, 1)\n\n\t\telse:\n\t\t\tg_batch = None\n\n\t\treturn g_batch\n\n\tdef _check_conditions(self):\n\t\tlocal_condition = self._hparams.cin_channels > 0\n\t\tglobal_condition = self._hparams.gin_channels > 0\n\t\treturn local_condition, global_condition\n\n\tdef _limit_time(self):\n\t\t'''Limit time resolution to save GPU memory.\n\t\t'''\n\t\tif self._hparams.max_time_sec is not None:\n\t\t\treturn int(self._hparams.max_time_sec * self._hparams.sample_rate)\n\n\t\telif self._hparams.max_time_steps is not None:\n\t\t\treturn self._hparams.max_time_steps\n\n\t\telse:\n\t\t\treturn None\n\n\tdef _adjust_time_resolution(self, batch, local_condition, max_time_steps):\n\t\t'''Adjust time resolution between audio and local condition\n\t\t'''\n\t\tif local_condition:\n\t\t\tnew_batch = []\n\t\t\tfor b in batch:\n\t\t\t\tx, c, g, l = b\n\t\t\t\tself._assert_ready_for_upsample(x, c)\n\t\t\t\tif max_time_steps is not None:\n\t\t\t\t\tmax_steps = _ensure_divisible(max_time_steps, audio.get_hop_size(self._hparams), True)\n\t\t\t\t\tif len(x) > max_time_steps:\n\t\t\t\t\t\tmax_time_frames = max_steps // audio.get_hop_size(self._hparams)\n\t\t\t\t\t\tstart = np.random.randint(0, len(c) - max_time_frames)\n\t\t\t\t\t\ttime_start = start * audio.get_hop_size(self._hparams)\n\t\t\t\t\t\tx = x[time_start: time_start + max_time_frames * audio.get_hop_size(self._hparams)]\n\t\t\t\t\t\tc = c[start: start + max_time_frames, :]\n\t\t\t\t\t\tself._assert_ready_for_upsample(x, c)\n\n\t\t\t\tnew_batch.append((x, c, g, l))\n\t\t\treturn new_batch\n\n\t\telse:\n\t\t\tnew_batch = []\n\t\t\tfor b in batch:\n\t\t\t\tx, c, g, l = b\n\t\t\t\tx = audio.trim_silence(x, hparams)\n\t\t\t\tif max_time_steps is not None and len(x) > max_time_steps:\n\t\t\t\t\tstart = np.random.randint(0, len(c) - max_time_steps)\n\t\t\t\t\tx = x[start: start + max_time_steps]\n\t\t\t\tnew_batch.append((x, c, g, l))\n\t\t\treturn new_batch\n\n\tdef _assert_ready_for_upsample(self, x, c):\n\t\tassert len(x) % len(c) == 0 and len(x) // len(c) == audio.get_hop_size(self._hparams)\n\n\ndef _pad_inputs(x, maxlen, _pad=0):\n\treturn np.pad(x, [(0, maxlen - len(x)), (0, 0)], mode='constant', constant_values=_pad)\n\ndef _pad_targets(x, maxlen, _pad=0):\n\treturn np.pad(x, (0, maxlen - len(x)), mode='constant', constant_values=_pad)\n\ndef _round_up(x, multiple):\n\tremainder = x % multiple\n\treturn x if remainder == 0 else x + multiple - remainder\n\ndef _round_down(x, 
multiple):\n\tremainder = x % multiple\n\treturn x if remainder == 0 else x - remainder\n\ndef _ensure_divisible(length, divisible_by=256, lower=True):\n\tif length % divisible_by == 0:\n\t\treturn length\n\tif lower:\n\t\treturn length - length % divisible_by\n\telse:\n\t\treturn length + (divisible_by - length % divisible_by)\n\ndef _interp(feats, in_range):\n\t#rescales from [-max, max] (or [0, max]) to [0, 1]\n\treturn (feats - in_range[0]) / (in_range[1] - in_range[0])\n"
] | [
[
"tensorflow.device",
"numpy.expand_dims",
"tensorflow.FIFOQueue",
"numpy.clip",
"sklearn.model_selection.train_test_split",
"numpy.random.shuffle",
"tensorflow.placeholder",
"numpy.concatenate",
"numpy.transpose",
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"1.12",
"1.4",
"1.13",
"1.5",
"1.7",
"0.12",
"1.0",
"1.2"
]
}
] |
Juanlu001/numpy | [
"1e494f1e283340d545b1c7c15dded04a4aaae939"
] | [
"numpy/core/tests/test_regression.py"
] | [
"from __future__ import division, absolute_import, print_function\n\nimport copy\nimport pickle\nimport sys\nimport platform\nimport gc\nimport warnings\nimport tempfile\nfrom os import path\nfrom io import BytesIO\nfrom itertools import chain\n\nimport numpy as np\nfrom numpy.testing import (\n run_module_suite, assert_, assert_equal, IS_PYPY,\n assert_almost_equal, assert_array_equal, assert_array_almost_equal,\n assert_raises, assert_warns, dec, suppress_warnings,\n _assert_valid_refcount, HAS_REFCOUNT,\n )\nfrom numpy.compat import asbytes, asunicode, long\n\n\nclass TestRegression(object):\n def test_invalid_round(self):\n # Ticket #3\n v = 4.7599999999999998\n assert_array_equal(np.array([v]), np.array(v))\n\n def test_mem_empty(self):\n # Ticket #7\n np.empty((1,), dtype=[('x', np.int64)])\n\n def test_pickle_transposed(self):\n # Ticket #16\n a = np.transpose(np.array([[2, 9], [7, 0], [3, 8]]))\n f = BytesIO()\n pickle.dump(a, f)\n f.seek(0)\n b = pickle.load(f)\n f.close()\n assert_array_equal(a, b)\n\n def test_typeNA(self):\n # Ticket #31\n assert_equal(np.typeNA[np.int64], 'Int64')\n assert_equal(np.typeNA[np.uint64], 'UInt64')\n\n def test_dtype_names(self):\n # Ticket #35\n # Should succeed\n np.dtype([(('name', 'label'), np.int32, 3)])\n\n def test_reduce(self):\n # Ticket #40\n assert_almost_equal(np.add.reduce([1., .5], dtype=None), 1.5)\n\n def test_zeros_order(self):\n # Ticket #43\n np.zeros([3], int, 'C')\n np.zeros([3], order='C')\n np.zeros([3], int, order='C')\n\n def test_asarray_with_order(self):\n # Check that nothing is done when order='F' and array C/F-contiguous\n a = np.ones(2)\n assert_(a is np.asarray(a, order='F'))\n\n def test_ravel_with_order(self):\n # Check that ravel works when order='F' and array C/F-contiguous\n a = np.ones(2)\n assert_(not a.ravel('F').flags.owndata)\n\n def test_sort_bigendian(self):\n # Ticket #47\n a = np.linspace(0, 10, 11)\n c = a.astype(np.dtype('<f8'))\n c.sort()\n assert_array_almost_equal(c, a)\n\n def test_negative_nd_indexing(self):\n # Ticket #49\n c = np.arange(125).reshape((5, 5, 5))\n origidx = np.array([-1, 0, 1])\n idx = np.array(origidx)\n c[idx]\n assert_array_equal(idx, origidx)\n\n def test_char_dump(self):\n # Ticket #50\n f = BytesIO()\n ca = np.char.array(np.arange(1000, 1010), itemsize=4)\n ca.dump(f)\n f.seek(0)\n ca = np.load(f)\n f.close()\n\n def test_noncontiguous_fill(self):\n # Ticket #58.\n a = np.zeros((5, 3))\n b = a[:, :2,]\n\n def rs():\n b.shape = (10,)\n\n assert_raises(AttributeError, rs)\n\n def test_bool(self):\n # Ticket #60\n np.bool_(1) # Should succeed\n\n def test_indexing1(self):\n # Ticket #64\n descr = [('x', [('y', [('z', 'c16', (2,)),]),]),]\n buffer = ((([6j, 4j],),),)\n h = np.array(buffer, dtype=descr)\n h['x']['y']['z']\n\n def test_indexing2(self):\n # Ticket #65\n descr = [('x', 'i4', (2,))]\n buffer = ([3, 2],)\n h = np.array(buffer, dtype=descr)\n h['x']\n\n def test_round(self):\n # Ticket #67\n x = np.array([1+2j])\n assert_almost_equal(x**(-1), [1/(1+2j)])\n\n def test_scalar_compare(self):\n # Trac Ticket #72\n # https://github.com/numpy/numpy/issues/565\n a = np.array(['test', 'auto'])\n assert_array_equal(a == 'auto', np.array([False, True]))\n assert_(a[1] == 'auto')\n assert_(a[0] != 'auto')\n b = np.linspace(0, 10, 11)\n # This should return true for now, but will eventually raise an error:\n with suppress_warnings() as sup:\n sup.filter(FutureWarning)\n assert_(b != 'auto')\n assert_(b[0] != 'auto')\n\n def test_unicode_swapping(self):\n # Ticket #79\n ulen = 1\n 
ucs_value = u'\\U0010FFFF'\n ua = np.array([[[ucs_value*ulen]*2]*3]*4, dtype='U%s' % ulen)\n ua.newbyteorder() # Should succeed.\n\n def test_object_array_fill(self):\n # Ticket #86\n x = np.zeros(1, 'O')\n x.fill([])\n\n def test_mem_dtype_align(self):\n # Ticket #93\n assert_raises(TypeError, np.dtype,\n {'names':['a'], 'formats':['foo']}, align=1)\n\n @dec.knownfailureif((sys.version_info[0] >= 3) or\n (sys.platform == \"win32\" and\n platform.architecture()[0] == \"64bit\"),\n \"numpy.intp('0xff', 16) not supported on Py3, \"\n \"as it does not inherit from Python int\")\n def test_intp(self):\n # Ticket #99\n i_width = np.int_(0).nbytes*2 - 1\n np.intp('0x' + 'f'*i_width, 16)\n assert_raises(OverflowError, np.intp, '0x' + 'f'*(i_width+1), 16)\n assert_raises(ValueError, np.intp, '0x1', 32)\n assert_equal(255, np.intp('0xFF', 16))\n assert_equal(1024, np.intp(1024))\n\n def test_endian_bool_indexing(self):\n # Ticket #105\n a = np.arange(10., dtype='>f8')\n b = np.arange(10., dtype='<f8')\n xa = np.where((a > 2) & (a < 6))\n xb = np.where((b > 2) & (b < 6))\n ya = ((a > 2) & (a < 6))\n yb = ((b > 2) & (b < 6))\n assert_array_almost_equal(xa, ya.nonzero())\n assert_array_almost_equal(xb, yb.nonzero())\n assert_(np.all(a[ya] > 0.5))\n assert_(np.all(b[yb] > 0.5))\n\n def test_endian_where(self):\n # GitHub issue #369\n net = np.zeros(3, dtype='>f4')\n net[1] = 0.00458849\n net[2] = 0.605202\n max_net = net.max()\n test = np.where(net <= 0., max_net, net)\n correct = np.array([ 0.60520202, 0.00458849, 0.60520202])\n assert_array_almost_equal(test, correct)\n\n def test_endian_recarray(self):\n # Ticket #2185\n dt = np.dtype([\n ('head', '>u4'),\n ('data', '>u4', 2),\n ])\n buf = np.recarray(1, dtype=dt)\n buf[0]['head'] = 1\n buf[0]['data'][:] = [1, 1]\n\n h = buf[0]['head']\n d = buf[0]['data'][0]\n buf[0]['head'] = h\n buf[0]['data'][0] = d\n assert_(buf[0]['head'] == 1)\n\n def test_mem_dot(self):\n # Ticket #106\n x = np.random.randn(0, 1)\n y = np.random.randn(10, 1)\n # Dummy array to detect bad memory access:\n _z = np.ones(10)\n _dummy = np.empty((0, 10))\n z = np.lib.stride_tricks.as_strided(_z, _dummy.shape, _dummy.strides)\n np.dot(x, np.transpose(y), out=z)\n assert_equal(_z, np.ones(10))\n # Do the same for the built-in dot:\n np.core.multiarray.dot(x, np.transpose(y), out=z)\n assert_equal(_z, np.ones(10))\n\n def test_arange_endian(self):\n # Ticket #111\n ref = np.arange(10)\n x = np.arange(10, dtype='<f8')\n assert_array_equal(ref, x)\n x = np.arange(10, dtype='>f8')\n assert_array_equal(ref, x)\n\n def test_argmax(self):\n # Ticket #119\n a = np.random.normal(0, 1, (4, 5, 6, 7, 8))\n for i in range(a.ndim):\n a.argmax(i) # Should succeed\n\n def test_mem_divmod(self):\n # Ticket #126\n for i in range(10):\n divmod(np.array([i])[0], 10)\n\n def test_hstack_invalid_dims(self):\n # Ticket #128\n x = np.arange(9).reshape((3, 3))\n y = np.array([0, 0, 0])\n assert_raises(ValueError, np.hstack, (x, y))\n\n def test_squeeze_type(self):\n # Ticket #133\n a = np.array([3])\n b = np.array(3)\n assert_(type(a.squeeze()) is np.ndarray)\n assert_(type(b.squeeze()) is np.ndarray)\n\n def test_add_identity(self):\n # Ticket #143\n assert_equal(0, np.add.identity)\n\n def test_numpy_float_python_long_addition(self):\n # Check that numpy float and python longs can be added correctly.\n a = np.float_(23.) + 2**135\n assert_equal(a, 23. 
+ 2**135)\n\n def test_binary_repr_0(self):\n # Ticket #151\n assert_equal('0', np.binary_repr(0))\n\n def test_rec_iterate(self):\n # Ticket #160\n descr = np.dtype([('i', int), ('f', float), ('s', '|S3')])\n x = np.rec.array([(1, 1.1, '1.0'),\n (2, 2.2, '2.0')], dtype=descr)\n x[0].tolist()\n [i for i in x[0]]\n\n def test_unicode_string_comparison(self):\n # Ticket #190\n a = np.array('hello', np.unicode_)\n b = np.array('world')\n a == b\n\n def test_tobytes_FORTRANORDER_discontiguous(self):\n # Fix in r2836\n # Create non-contiguous Fortran ordered array\n x = np.array(np.random.rand(3, 3), order='F')[:, :2]\n assert_array_almost_equal(x.ravel(), np.frombuffer(x.tobytes()))\n\n def test_flat_assignment(self):\n # Correct behaviour of ticket #194\n x = np.empty((3, 1))\n x.flat = np.arange(3)\n assert_array_almost_equal(x, [[0], [1], [2]])\n x.flat = np.arange(3, dtype=float)\n assert_array_almost_equal(x, [[0], [1], [2]])\n\n def test_broadcast_flat_assignment(self):\n # Ticket #194\n x = np.empty((3, 1))\n\n def bfa():\n x[:] = np.arange(3)\n\n def bfb():\n x[:] = np.arange(3, dtype=float)\n\n assert_raises(ValueError, bfa)\n assert_raises(ValueError, bfb)\n\n def test_nonarray_assignment(self):\n # See also Issue gh-2870, test for non-array assignment\n # and equivalent unsafe casted array assignment\n a = np.arange(10)\n b = np.ones(10, dtype=bool)\n r = np.arange(10)\n\n def assign(a, b, c):\n a[b] = c\n\n assert_raises(ValueError, assign, a, b, np.nan)\n a[b] = np.array(np.nan) # but not this.\n assert_raises(ValueError, assign, a, r, np.nan)\n a[r] = np.array(np.nan)\n\n def test_unpickle_dtype_with_object(self):\n # Implemented in r2840\n dt = np.dtype([('x', int), ('y', np.object_), ('z', 'O')])\n f = BytesIO()\n pickle.dump(dt, f)\n f.seek(0)\n dt_ = pickle.load(f)\n f.close()\n assert_equal(dt, dt_)\n\n def test_mem_array_creation_invalid_specification(self):\n # Ticket #196\n dt = np.dtype([('x', int), ('y', np.object_)])\n # Wrong way\n assert_raises(ValueError, np.array, [1, 'object'], dt)\n # Correct way\n np.array([(1, 'object')], dt)\n\n def test_recarray_single_element(self):\n # Ticket #202\n a = np.array([1, 2, 3], dtype=np.int32)\n b = a.copy()\n r = np.rec.array(a, shape=1, formats=['3i4'], names=['d'])\n assert_array_equal(a, b)\n assert_equal(a, r[0][0])\n\n def test_zero_sized_array_indexing(self):\n # Ticket #205\n tmp = np.array([])\n\n def index_tmp():\n tmp[np.array(10)]\n\n assert_raises(IndexError, index_tmp)\n\n def test_chararray_rstrip(self):\n # Ticket #222\n x = np.chararray((1,), 5)\n x[0] = b'a '\n x = x.rstrip()\n assert_equal(x[0], b'a')\n\n def test_object_array_shape(self):\n # Ticket #239\n assert_equal(np.array([[1, 2], 3, 4], dtype=object).shape, (3,))\n assert_equal(np.array([[1, 2], [3, 4]], dtype=object).shape, (2, 2))\n assert_equal(np.array([(1, 2), (3, 4)], dtype=object).shape, (2, 2))\n assert_equal(np.array([], dtype=object).shape, (0,))\n assert_equal(np.array([[], [], []], dtype=object).shape, (3, 0))\n assert_equal(np.array([[3, 4], [5, 6], None], dtype=object).shape, (3,))\n\n def test_mem_around(self):\n # Ticket #243\n x = np.zeros((1,))\n y = [0]\n decimal = 6\n np.around(abs(x-y), decimal) <= 10.0**(-decimal)\n\n def test_character_array_strip(self):\n # Ticket #246\n x = np.char.array((\"x\", \"x \", \"x \"))\n for c in x:\n assert_equal(c, \"x\")\n\n def test_lexsort(self):\n # Lexsort memory error\n v = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10])\n assert_equal(np.lexsort(v), 0)\n\n def test_lexsort_invalid_sequence(self):\n 
# Issue gh-4123\n class BuggySequence(object):\n def __len__(self):\n return 4\n\n def __getitem__(self, key):\n raise KeyError\n\n assert_raises(KeyError, np.lexsort, BuggySequence())\n\n def test_pickle_py2_bytes_encoding(self):\n # Check that arrays and scalars pickled on Py2 are\n # unpickleable on Py3 using encoding='bytes'\n\n test_data = [\n # (original, py2_pickle)\n (np.unicode_('\\u6f2c'),\n b\"cnumpy.core.multiarray\\nscalar\\np0\\n(cnumpy\\ndtype\\np1\\n\"\n b\"(S'U1'\\np2\\nI0\\nI1\\ntp3\\nRp4\\n(I3\\nS'<'\\np5\\nNNNI4\\nI4\\n\"\n b\"I0\\ntp6\\nbS',o\\\\x00\\\\x00'\\np7\\ntp8\\nRp9\\n.\"),\n\n (np.array([9e123], dtype=np.float64),\n b\"cnumpy.core.multiarray\\n_reconstruct\\np0\\n(cnumpy\\nndarray\\n\"\n b\"p1\\n(I0\\ntp2\\nS'b'\\np3\\ntp4\\nRp5\\n(I1\\n(I1\\ntp6\\ncnumpy\\ndtype\\n\"\n b\"p7\\n(S'f8'\\np8\\nI0\\nI1\\ntp9\\nRp10\\n(I3\\nS'<'\\np11\\nNNNI-1\\nI-1\\n\"\n b\"I0\\ntp12\\nbI00\\nS'O\\\\x81\\\\xb7Z\\\\xaa:\\\\xabY'\\np13\\ntp14\\nb.\"),\n\n (np.array([(9e123,)], dtype=[('name', float)]),\n b\"cnumpy.core.multiarray\\n_reconstruct\\np0\\n(cnumpy\\nndarray\\np1\\n\"\n b\"(I0\\ntp2\\nS'b'\\np3\\ntp4\\nRp5\\n(I1\\n(I1\\ntp6\\ncnumpy\\ndtype\\np7\\n\"\n b\"(S'V8'\\np8\\nI0\\nI1\\ntp9\\nRp10\\n(I3\\nS'|'\\np11\\nN(S'name'\\np12\\ntp13\\n\"\n b\"(dp14\\ng12\\n(g7\\n(S'f8'\\np15\\nI0\\nI1\\ntp16\\nRp17\\n(I3\\nS'<'\\np18\\nNNNI-1\\n\"\n b\"I-1\\nI0\\ntp19\\nbI0\\ntp20\\nsI8\\nI1\\nI0\\ntp21\\n\"\n b\"bI00\\nS'O\\\\x81\\\\xb7Z\\\\xaa:\\\\xabY'\\np22\\ntp23\\nb.\"),\n ]\n\n if sys.version_info[:2] >= (3, 4):\n # encoding='bytes' was added in Py3.4\n for original, data in test_data:\n result = pickle.loads(data, encoding='bytes')\n assert_equal(result, original)\n\n if isinstance(result, np.ndarray) and result.dtype.names:\n for name in result.dtype.names:\n assert_(isinstance(name, str))\n\n def test_pickle_dtype(self):\n # Ticket #251\n pickle.dumps(float)\n\n def test_swap_real(self):\n # Ticket #265\n assert_equal(np.arange(4, dtype='>c8').imag.max(), 0.0)\n assert_equal(np.arange(4, dtype='<c8').imag.max(), 0.0)\n assert_equal(np.arange(4, dtype='>c8').real.max(), 3.0)\n assert_equal(np.arange(4, dtype='<c8').real.max(), 3.0)\n\n def test_object_array_from_list(self):\n # Ticket #270\n assert_(np.array([1, 'A', None]).shape == (3,))\n\n def test_multiple_assign(self):\n # Ticket #273\n a = np.zeros((3, 1), int)\n a[[1, 2]] = 1\n\n def test_empty_array_type(self):\n assert_equal(np.array([]).dtype, np.zeros(0).dtype)\n\n def test_void_copyswap(self):\n dt = np.dtype([('one', '<i4'), ('two', '<i4')])\n x = np.array((1, 2), dtype=dt)\n x = x.byteswap()\n assert_(x['one'] > 1 and x['two'] > 2)\n\n def test_method_args(self):\n # Make sure methods and functions have same default axis\n # keyword and arguments\n funcs1 = ['argmax', 'argmin', 'sum', ('product', 'prod'),\n ('sometrue', 'any'),\n ('alltrue', 'all'), 'cumsum', ('cumproduct', 'cumprod'),\n 'ptp', 'cumprod', 'prod', 'std', 'var', 'mean',\n 'round', 'min', 'max', 'argsort', 'sort']\n funcs2 = ['compress', 'take', 'repeat']\n\n for func in funcs1:\n arr = np.random.rand(8, 7)\n arr2 = arr.copy()\n if isinstance(func, tuple):\n func_meth = func[1]\n func = func[0]\n else:\n func_meth = func\n res1 = getattr(arr, func_meth)()\n res2 = getattr(np, func)(arr2)\n if res1 is None:\n res1 = arr\n\n if res1.dtype.kind in 'uib':\n assert_((res1 == res2).all(), func)\n else:\n assert_(abs(res1-res2).max() < 1e-8, func)\n\n for func in funcs2:\n arr1 = np.random.rand(8, 7)\n arr2 = np.random.rand(8, 7)\n res1 = None\n if func == 
'compress':\n arr1 = arr1.ravel()\n res1 = getattr(arr2, func)(arr1)\n else:\n arr2 = (15*arr2).astype(int).ravel()\n if res1 is None:\n res1 = getattr(arr1, func)(arr2)\n res2 = getattr(np, func)(arr1, arr2)\n assert_(abs(res1-res2).max() < 1e-8, func)\n\n def test_mem_lexsort_strings(self):\n # Ticket #298\n lst = ['abc', 'cde', 'fgh']\n np.lexsort((lst,))\n\n def test_fancy_index(self):\n # Ticket #302\n x = np.array([1, 2])[np.array([0])]\n assert_equal(x.shape, (1,))\n\n def test_recarray_copy(self):\n # Ticket #312\n dt = [('x', np.int16), ('y', np.float64)]\n ra = np.array([(1, 2.3)], dtype=dt)\n rb = np.rec.array(ra, dtype=dt)\n rb['x'] = 2.\n assert_(ra['x'] != rb['x'])\n\n def test_rec_fromarray(self):\n # Ticket #322\n x1 = np.array([[1, 2], [3, 4], [5, 6]])\n x2 = np.array(['a', 'dd', 'xyz'])\n x3 = np.array([1.1, 2, 3])\n np.rec.fromarrays([x1, x2, x3], formats=\"(2,)i4,a3,f8\")\n\n def test_object_array_assign(self):\n x = np.empty((2, 2), object)\n x.flat[2] = (1, 2, 3)\n assert_equal(x.flat[2], (1, 2, 3))\n\n def test_ndmin_float64(self):\n # Ticket #324\n x = np.array([1, 2, 3], dtype=np.float64)\n assert_equal(np.array(x, dtype=np.float32, ndmin=2).ndim, 2)\n assert_equal(np.array(x, dtype=np.float64, ndmin=2).ndim, 2)\n\n def test_ndmin_order(self):\n # Issue #465 and related checks\n assert_(np.array([1, 2], order='C', ndmin=3).flags.c_contiguous)\n assert_(np.array([1, 2], order='F', ndmin=3).flags.f_contiguous)\n assert_(np.array(np.ones((2, 2), order='F'), ndmin=3).flags.f_contiguous)\n assert_(np.array(np.ones((2, 2), order='C'), ndmin=3).flags.c_contiguous)\n\n def test_mem_axis_minimization(self):\n # Ticket #327\n data = np.arange(5)\n data = np.add.outer(data, data)\n\n def test_mem_float_imag(self):\n # Ticket #330\n np.float64(1.0).imag\n\n def test_dtype_tuple(self):\n # Ticket #334\n assert_(np.dtype('i4') == np.dtype(('i4', ())))\n\n def test_dtype_posttuple(self):\n # Ticket #335\n np.dtype([('col1', '()i4')])\n\n def test_numeric_carray_compare(self):\n # Ticket #341\n assert_equal(np.array(['X'], 'c'), b'X')\n\n def test_string_array_size(self):\n # Ticket #342\n assert_raises(ValueError,\n np.array, [['X'], ['X', 'X', 'X']], '|S1')\n\n def test_dtype_repr(self):\n # Ticket #344\n dt1 = np.dtype(('uint32', 2))\n dt2 = np.dtype(('uint32', (2,)))\n assert_equal(dt1.__repr__(), dt2.__repr__())\n\n def test_reshape_order(self):\n # Make sure reshape order works.\n a = np.arange(6).reshape(2, 3, order='F')\n assert_equal(a, [[0, 2, 4], [1, 3, 5]])\n a = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])\n b = a[:, 1]\n assert_equal(b.reshape(2, 2, order='F'), [[2, 6], [4, 8]])\n\n def test_reshape_zero_strides(self):\n # Issue #380, test reshaping of zero strided arrays\n a = np.ones(1)\n a = np.lib.stride_tricks.as_strided(a, shape=(5,), strides=(0,))\n assert_(a.reshape(5, 1).strides[0] == 0)\n\n def test_reshape_zero_size(self):\n # GitHub Issue #2700, setting shape failed for 0-sized arrays\n a = np.ones((0, 2))\n a.shape = (-1, 2)\n\n # Cannot test if NPY_RELAXED_STRIDES_CHECKING changes the strides.\n # With NPY_RELAXED_STRIDES_CHECKING the test becomes superfluous.\n @dec.skipif(np.ones(1).strides[0] == np.iinfo(np.intp).max)\n def test_reshape_trailing_ones_strides(self):\n # GitHub issue gh-2949, bad strides for trailing ones of new shape\n a = np.zeros(12, dtype=np.int32)[::2] # not contiguous\n strides_c = (16, 8, 8, 8)\n strides_f = (8, 24, 48, 48)\n assert_equal(a.reshape(3, 2, 1, 1).strides, strides_c)\n assert_equal(a.reshape(3, 2, 1, 1, 
order='F').strides, strides_f)\n assert_equal(np.array(0, dtype=np.int32).reshape(1, 1).strides, (4, 4))\n\n def test_repeat_discont(self):\n # Ticket #352\n a = np.arange(12).reshape(4, 3)[:, 2]\n assert_equal(a.repeat(3), [2, 2, 2, 5, 5, 5, 8, 8, 8, 11, 11, 11])\n\n def test_array_index(self):\n # Make sure optimization is not called in this case.\n a = np.array([1, 2, 3])\n a2 = np.array([[1, 2, 3]])\n assert_equal(a[np.where(a == 3)], a2[np.where(a2 == 3)])\n\n def test_object_argmax(self):\n a = np.array([1, 2, 3], dtype=object)\n assert_(a.argmax() == 2)\n\n def test_recarray_fields(self):\n # Ticket #372\n dt0 = np.dtype([('f0', 'i4'), ('f1', 'i4')])\n dt1 = np.dtype([('f0', 'i8'), ('f1', 'i8')])\n for a in [np.array([(1, 2), (3, 4)], \"i4,i4\"),\n np.rec.array([(1, 2), (3, 4)], \"i4,i4\"),\n np.rec.array([(1, 2), (3, 4)]),\n np.rec.fromarrays([(1, 2), (3, 4)], \"i4,i4\"),\n np.rec.fromarrays([(1, 2), (3, 4)])]:\n assert_(a.dtype in [dt0, dt1])\n\n def test_random_shuffle(self):\n # Ticket #374\n a = np.arange(5).reshape((5, 1))\n b = a.copy()\n np.random.shuffle(b)\n assert_equal(np.sort(b, axis=0), a)\n\n def test_refcount_vdot(self):\n # Changeset #3443\n _assert_valid_refcount(np.vdot)\n\n def test_startswith(self):\n ca = np.char.array(['Hi', 'There'])\n assert_equal(ca.startswith('H'), [True, False])\n\n def test_noncommutative_reduce_accumulate(self):\n # Ticket #413\n tosubtract = np.arange(5)\n todivide = np.array([2.0, 0.5, 0.25])\n assert_equal(np.subtract.reduce(tosubtract), -10)\n assert_equal(np.divide.reduce(todivide), 16.0)\n assert_array_equal(np.subtract.accumulate(tosubtract),\n np.array([0, -1, -3, -6, -10]))\n assert_array_equal(np.divide.accumulate(todivide),\n np.array([2., 4., 16.]))\n\n def test_convolve_empty(self):\n # Convolve should raise an error for empty input array.\n assert_raises(ValueError, np.convolve, [], [1])\n assert_raises(ValueError, np.convolve, [1], [])\n\n def test_multidim_byteswap(self):\n # Ticket #449\n r = np.array([(1, (0, 1, 2))], dtype=\"i2,3i2\")\n assert_array_equal(r.byteswap(),\n np.array([(256, (0, 256, 512))], r.dtype))\n\n def test_string_NULL(self):\n # Changeset 3557\n assert_equal(np.array(\"a\\x00\\x0b\\x0c\\x00\").item(),\n 'a\\x00\\x0b\\x0c')\n\n def test_junk_in_string_fields_of_recarray(self):\n # Ticket #483\n r = np.array([[b'abc']], dtype=[('var1', '|S20')])\n assert_(asbytes(r['var1'][0][0]) == b'abc')\n\n def test_take_output(self):\n # Ensure that 'take' honours output parameter.\n x = np.arange(12).reshape((3, 4))\n a = np.take(x, [0, 2], axis=1)\n b = np.zeros_like(a)\n np.take(x, [0, 2], axis=1, out=b)\n assert_array_equal(a, b)\n\n def test_take_object_fail(self):\n # Issue gh-3001\n d = 123.\n a = np.array([d, 1], dtype=object)\n if HAS_REFCOUNT:\n ref_d = sys.getrefcount(d)\n try:\n a.take([0, 100])\n except IndexError:\n pass\n if HAS_REFCOUNT:\n assert_(ref_d == sys.getrefcount(d))\n\n def test_array_str_64bit(self):\n # Ticket #501\n s = np.array([1, np.nan], dtype=np.float64)\n with np.errstate(all='raise'):\n np.array_str(s) # Should succeed\n\n def test_frompyfunc_endian(self):\n # Ticket #503\n from math import radians\n uradians = np.frompyfunc(radians, 1, 1)\n big_endian = np.array([83.4, 83.5], dtype='>f8')\n little_endian = np.array([83.4, 83.5], dtype='<f8')\n assert_almost_equal(uradians(big_endian).astype(float),\n uradians(little_endian).astype(float))\n\n def test_mem_string_arr(self):\n # Ticket #514\n s = \"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\"\n t = []\n np.hstack((t, s))\n\n def 
test_arr_transpose(self):\n # Ticket #516\n x = np.random.rand(*(2,)*16)\n x.transpose(list(range(16))) # Should succeed\n\n def test_string_mergesort(self):\n # Ticket #540\n x = np.array(['a']*32)\n assert_array_equal(x.argsort(kind='m'), np.arange(32))\n\n def test_argmax_byteorder(self):\n # Ticket #546\n a = np.arange(3, dtype='>f')\n assert_(a[a.argmax()] == a.max())\n\n def test_rand_seed(self):\n # Ticket #555\n for l in np.arange(4):\n np.random.seed(l)\n\n def test_mem_deallocation_leak(self):\n # Ticket #562\n a = np.zeros(5, dtype=float)\n b = np.array(a, dtype=float)\n del a, b\n\n def test_mem_on_invalid_dtype(self):\n \"Ticket #583\"\n assert_raises(ValueError, np.fromiter, [['12', ''], ['13', '']], str)\n\n def test_dot_negative_stride(self):\n # Ticket #588\n x = np.array([[1, 5, 25, 125., 625]])\n y = np.array([[20.], [160.], [640.], [1280.], [1024.]])\n z = y[::-1].copy()\n y2 = y[::-1]\n assert_equal(np.dot(x, z), np.dot(x, y2))\n\n def test_object_casting(self):\n # This used to trigger the object-type version of\n # the bitwise_or operation, because float64 -> object\n # casting succeeds\n def rs():\n x = np.ones([484, 286])\n y = np.zeros([484, 286])\n x |= y\n\n assert_raises(TypeError, rs)\n\n def test_unicode_scalar(self):\n # Ticket #600\n x = np.array([\"DROND\", \"DROND1\"], dtype=\"U6\")\n el = x[1]\n new = pickle.loads(pickle.dumps(el))\n assert_equal(new, el)\n\n def test_arange_non_native_dtype(self):\n # Ticket #616\n for T in ('>f4', '<f4'):\n dt = np.dtype(T)\n assert_equal(np.arange(0, dtype=dt).dtype, dt)\n assert_equal(np.arange(0.5, dtype=dt).dtype, dt)\n assert_equal(np.arange(5, dtype=dt).dtype, dt)\n\n def test_bool_flat_indexing_invalid_nr_elements(self):\n s = np.ones(10, dtype=float)\n x = np.array((15,), dtype=float)\n\n def ia(x, s, v):\n x[(s > 0)] = v\n\n assert_raises(IndexError, ia, x, s, np.zeros(9, dtype=float))\n assert_raises(IndexError, ia, x, s, np.zeros(11, dtype=float))\n\n # Old special case (different code path):\n assert_raises(ValueError, ia, x.flat, s, np.zeros(9, dtype=float))\n assert_raises(ValueError, ia, x.flat, s, np.zeros(11, dtype=float))\n\n def test_mem_scalar_indexing(self):\n # Ticket #603\n x = np.array([0], dtype=float)\n index = np.array(0, dtype=np.int32)\n x[index]\n\n def test_binary_repr_0_width(self):\n assert_equal(np.binary_repr(0, width=3), '000')\n\n def test_fromstring(self):\n assert_equal(np.fromstring(\"12:09:09\", dtype=int, sep=\":\"),\n [12, 9, 9])\n\n def test_searchsorted_variable_length(self):\n x = np.array(['a', 'aa', 'b'])\n y = np.array(['d', 'e'])\n assert_equal(x.searchsorted(y), [3, 3])\n\n def test_string_argsort_with_zeros(self):\n # Check argsort for strings containing zeros.\n x = np.frombuffer(b\"\\x00\\x02\\x00\\x01\", dtype=\"|S2\")\n assert_array_equal(x.argsort(kind='m'), np.array([1, 0]))\n assert_array_equal(x.argsort(kind='q'), np.array([1, 0]))\n\n def test_string_sort_with_zeros(self):\n # Check sort for strings containing zeros.\n x = np.frombuffer(b\"\\x00\\x02\\x00\\x01\", dtype=\"|S2\")\n y = np.frombuffer(b\"\\x00\\x01\\x00\\x02\", dtype=\"|S2\")\n assert_array_equal(np.sort(x, kind=\"q\"), y)\n\n def test_copy_detection_zero_dim(self):\n # Ticket #658\n np.indices((0, 3, 4)).T.reshape(-1, 3)\n\n def test_flat_byteorder(self):\n # Ticket #657\n x = np.arange(10)\n assert_array_equal(x.astype('>i4'), x.astype('<i4').flat[:])\n assert_array_equal(x.astype('>i4').flat[:], x.astype('<i4'))\n\n def test_uint64_from_negative(self):\n assert_equal(np.uint64(-2), 
np.uint64(18446744073709551614))\n\n def test_sign_bit(self):\n x = np.array([0, -0.0, 0])\n assert_equal(str(np.abs(x)), '[0. 0. 0.]')\n\n def test_flat_index_byteswap(self):\n for dt in (np.dtype('<i4'), np.dtype('>i4')):\n x = np.array([-1, 0, 1], dtype=dt)\n assert_equal(x.flat[0].dtype, x[0].dtype)\n\n def test_copy_detection_corner_case(self):\n # Ticket #658\n np.indices((0, 3, 4)).T.reshape(-1, 3)\n\n # Cannot test if NPY_RELAXED_STRIDES_CHECKING changes the strides.\n # With NPY_RELAXED_STRIDES_CHECKING the test becomes superfluous,\n # 0-sized reshape itself is tested elsewhere.\n @dec.skipif(np.ones(1).strides[0] == np.iinfo(np.intp).max)\n def test_copy_detection_corner_case2(self):\n # Ticket #771: strides are not set correctly when reshaping 0-sized\n # arrays\n b = np.indices((0, 3, 4)).T.reshape(-1, 3)\n assert_equal(b.strides, (3 * b.itemsize, b.itemsize))\n\n def test_object_array_refcounting(self):\n # Ticket #633\n if not hasattr(sys, 'getrefcount'):\n return\n\n # NB. this is probably CPython-specific\n\n cnt = sys.getrefcount\n\n a = object()\n b = object()\n c = object()\n\n cnt0_a = cnt(a)\n cnt0_b = cnt(b)\n cnt0_c = cnt(c)\n\n # -- 0d -> 1-d broadcast slice assignment\n\n arr = np.zeros(5, dtype=np.object_)\n\n arr[:] = a\n assert_equal(cnt(a), cnt0_a + 5)\n\n arr[:] = b\n assert_equal(cnt(a), cnt0_a)\n assert_equal(cnt(b), cnt0_b + 5)\n\n arr[:2] = c\n assert_equal(cnt(b), cnt0_b + 3)\n assert_equal(cnt(c), cnt0_c + 2)\n\n del arr\n\n # -- 1-d -> 2-d broadcast slice assignment\n\n arr = np.zeros((5, 2), dtype=np.object_)\n arr0 = np.zeros(2, dtype=np.object_)\n\n arr0[0] = a\n assert_(cnt(a) == cnt0_a + 1)\n arr0[1] = b\n assert_(cnt(b) == cnt0_b + 1)\n\n arr[:, :] = arr0\n assert_(cnt(a) == cnt0_a + 6)\n assert_(cnt(b) == cnt0_b + 6)\n\n arr[:, 0] = None\n assert_(cnt(a) == cnt0_a + 1)\n\n del arr, arr0\n\n # -- 2-d copying + flattening\n\n arr = np.zeros((5, 2), dtype=np.object_)\n\n arr[:, 0] = a\n arr[:, 1] = b\n assert_(cnt(a) == cnt0_a + 5)\n assert_(cnt(b) == cnt0_b + 5)\n\n arr2 = arr.copy()\n assert_(cnt(a) == cnt0_a + 10)\n assert_(cnt(b) == cnt0_b + 10)\n\n arr2 = arr[:, 0].copy()\n assert_(cnt(a) == cnt0_a + 10)\n assert_(cnt(b) == cnt0_b + 5)\n\n arr2 = arr.flatten()\n assert_(cnt(a) == cnt0_a + 10)\n assert_(cnt(b) == cnt0_b + 10)\n\n del arr, arr2\n\n # -- concatenate, repeat, take, choose\n\n arr1 = np.zeros((5, 1), dtype=np.object_)\n arr2 = np.zeros((5, 1), dtype=np.object_)\n\n arr1[...] = a\n arr2[...] 
= b\n assert_(cnt(a) == cnt0_a + 5)\n assert_(cnt(b) == cnt0_b + 5)\n\n tmp = np.concatenate((arr1, arr2))\n assert_(cnt(a) == cnt0_a + 5 + 5)\n assert_(cnt(b) == cnt0_b + 5 + 5)\n\n tmp = arr1.repeat(3, axis=0)\n assert_(cnt(a) == cnt0_a + 5 + 3*5)\n\n tmp = arr1.take([1, 2, 3], axis=0)\n assert_(cnt(a) == cnt0_a + 5 + 3)\n\n x = np.array([[0], [1], [0], [1], [1]], int)\n tmp = x.choose(arr1, arr2)\n assert_(cnt(a) == cnt0_a + 5 + 2)\n assert_(cnt(b) == cnt0_b + 5 + 3)\n\n del tmp # Avoid pyflakes unused variable warning\n\n def test_mem_custom_float_to_array(self):\n # Ticket 702\n class MyFloat(object):\n def __float__(self):\n return 1.0\n\n tmp = np.atleast_1d([MyFloat()])\n tmp.astype(float) # Should succeed\n\n def test_object_array_refcount_self_assign(self):\n # Ticket #711\n class VictimObject(object):\n deleted = False\n\n def __del__(self):\n self.deleted = True\n\n d = VictimObject()\n arr = np.zeros(5, dtype=np.object_)\n arr[:] = d\n del d\n arr[:] = arr # refcount of 'd' might hit zero here\n assert_(not arr[0].deleted)\n arr[:] = arr # trying to induce a segfault by doing it again...\n assert_(not arr[0].deleted)\n\n def test_mem_fromiter_invalid_dtype_string(self):\n x = [1, 2, 3]\n assert_raises(ValueError,\n np.fromiter, [xi for xi in x], dtype='S')\n\n def test_reduce_big_object_array(self):\n # Ticket #713\n oldsize = np.setbufsize(10*16)\n a = np.array([None]*161, object)\n assert_(not np.any(a))\n np.setbufsize(oldsize)\n\n def test_mem_0d_array_index(self):\n # Ticket #714\n np.zeros(10)[np.array(0)]\n\n def test_floats_from_string(self):\n # Ticket #640, floats from string\n fsingle = np.single('1.234')\n fdouble = np.double('1.234')\n flongdouble = np.longdouble('1.234')\n assert_almost_equal(fsingle, 1.234)\n assert_almost_equal(fdouble, 1.234)\n assert_almost_equal(flongdouble, 1.234)\n\n def test_nonnative_endian_fill(self):\n # Non-native endian arrays were incorrectly filled with scalars\n # before r5034.\n if sys.byteorder == 'little':\n dtype = np.dtype('>i4')\n else:\n dtype = np.dtype('<i4')\n x = np.empty([1], dtype=dtype)\n x.fill(1)\n assert_equal(x, np.array([1], dtype=dtype))\n\n def test_dot_alignment_sse2(self):\n # Test for ticket #551, changeset r5140\n x = np.zeros((30, 40))\n y = pickle.loads(pickle.dumps(x))\n # y is now typically not aligned on a 8-byte boundary\n z = np.ones((1, y.shape[0]))\n # This shouldn't cause a segmentation fault:\n np.dot(z, y)\n\n def test_astype_copy(self):\n # Ticket #788, changeset r5155\n # The test data file was generated by scipy.io.savemat.\n # The dtype is float64, but the isbuiltin attribute is 0.\n data_dir = path.join(path.dirname(__file__), 'data')\n filename = path.join(data_dir, \"astype_copy.pkl\")\n if sys.version_info[0] >= 3:\n f = open(filename, 'rb')\n xp = pickle.load(f, encoding='latin1')\n f.close()\n else:\n f = open(filename)\n xp = pickle.load(f)\n f.close()\n xpd = xp.astype(np.float64)\n assert_((xp.__array_interface__['data'][0] !=\n xpd.__array_interface__['data'][0]))\n\n def test_compress_small_type(self):\n # Ticket #789, changeset 5217.\n # compress with out argument segfaulted if cannot cast safely\n import numpy as np\n a = np.array([[1, 2], [3, 4]])\n b = np.zeros((2, 1), dtype=np.single)\n try:\n a.compress([True, False], axis=1, out=b)\n raise AssertionError(\"compress with an out which cannot be \"\n \"safely casted should not return \"\n \"successfully\")\n except TypeError:\n pass\n\n def test_attributes(self):\n # Ticket #791\n class TestArray(np.ndarray):\n def 
__new__(cls, data, info):\n result = np.array(data)\n result = result.view(cls)\n result.info = info\n return result\n\n def __array_finalize__(self, obj):\n self.info = getattr(obj, 'info', '')\n\n dat = TestArray([[1, 2, 3, 4], [5, 6, 7, 8]], 'jubba')\n assert_(dat.info == 'jubba')\n dat.resize((4, 2))\n assert_(dat.info == 'jubba')\n dat.sort()\n assert_(dat.info == 'jubba')\n dat.fill(2)\n assert_(dat.info == 'jubba')\n dat.put([2, 3, 4], [6, 3, 4])\n assert_(dat.info == 'jubba')\n dat.setfield(4, np.int32, 0)\n assert_(dat.info == 'jubba')\n dat.setflags()\n assert_(dat.info == 'jubba')\n assert_(dat.all(1).info == 'jubba')\n assert_(dat.any(1).info == 'jubba')\n assert_(dat.argmax(1).info == 'jubba')\n assert_(dat.argmin(1).info == 'jubba')\n assert_(dat.argsort(1).info == 'jubba')\n assert_(dat.astype(TestArray).info == 'jubba')\n assert_(dat.byteswap().info == 'jubba')\n assert_(dat.clip(2, 7).info == 'jubba')\n assert_(dat.compress([0, 1, 1]).info == 'jubba')\n assert_(dat.conj().info == 'jubba')\n assert_(dat.conjugate().info == 'jubba')\n assert_(dat.copy().info == 'jubba')\n dat2 = TestArray([2, 3, 1, 0], 'jubba')\n choices = [[0, 1, 2, 3], [10, 11, 12, 13],\n [20, 21, 22, 23], [30, 31, 32, 33]]\n assert_(dat2.choose(choices).info == 'jubba')\n assert_(dat.cumprod(1).info == 'jubba')\n assert_(dat.cumsum(1).info == 'jubba')\n assert_(dat.diagonal().info == 'jubba')\n assert_(dat.flatten().info == 'jubba')\n assert_(dat.getfield(np.int32, 0).info == 'jubba')\n assert_(dat.imag.info == 'jubba')\n assert_(dat.max(1).info == 'jubba')\n assert_(dat.mean(1).info == 'jubba')\n assert_(dat.min(1).info == 'jubba')\n assert_(dat.newbyteorder().info == 'jubba')\n assert_(dat.prod(1).info == 'jubba')\n assert_(dat.ptp(1).info == 'jubba')\n assert_(dat.ravel().info == 'jubba')\n assert_(dat.real.info == 'jubba')\n assert_(dat.repeat(2).info == 'jubba')\n assert_(dat.reshape((2, 4)).info == 'jubba')\n assert_(dat.round().info == 'jubba')\n assert_(dat.squeeze().info == 'jubba')\n assert_(dat.std(1).info == 'jubba')\n assert_(dat.sum(1).info == 'jubba')\n assert_(dat.swapaxes(0, 1).info == 'jubba')\n assert_(dat.take([2, 3, 5]).info == 'jubba')\n assert_(dat.transpose().info == 'jubba')\n assert_(dat.T.info == 'jubba')\n assert_(dat.var(1).info == 'jubba')\n assert_(dat.view(TestArray).info == 'jubba')\n # These methods do not preserve subclasses\n assert_(type(dat.nonzero()[0]) is np.ndarray)\n assert_(type(dat.nonzero()[1]) is np.ndarray)\n\n def test_recarray_tolist(self):\n # Ticket #793, changeset r5215\n # Comparisons fail for NaN, so we can't use random memory\n # for the test.\n buf = np.zeros(40, dtype=np.int8)\n a = np.recarray(2, formats=\"i4,f8,f8\", names=\"id,x,y\", buf=buf)\n b = a.tolist()\n assert_( a[0].tolist() == b[0])\n assert_( a[1].tolist() == b[1])\n\n def test_nonscalar_item_method(self):\n # Make sure that .item() fails graciously when it should\n a = np.arange(5)\n assert_raises(ValueError, a.item)\n\n def test_char_array_creation(self):\n a = np.array('123', dtype='c')\n b = np.array([b'1', b'2', b'3'])\n assert_equal(a, b)\n\n def test_unaligned_unicode_access(self):\n # Ticket #825\n for i in range(1, 9):\n msg = 'unicode offset: %d chars' % i\n t = np.dtype([('a', 'S%d' % i), ('b', 'U2')])\n x = np.array([(b'a', u'b')], dtype=t)\n if sys.version_info[0] >= 3:\n assert_equal(str(x), \"[(b'a', 'b')]\", err_msg=msg)\n else:\n assert_equal(str(x), \"[('a', u'b')]\", err_msg=msg)\n\n def test_sign_for_complex_nan(self):\n # Ticket 794.\n with 
np.errstate(invalid='ignore'):\n C = np.array([-np.inf, -2+1j, 0, 2-1j, np.inf, np.nan])\n have = np.sign(C)\n want = np.array([-1+0j, -1+0j, 0+0j, 1+0j, 1+0j, np.nan])\n assert_equal(have, want)\n\n def test_for_equal_names(self):\n # Ticket #674\n dt = np.dtype([('foo', float), ('bar', float)])\n a = np.zeros(10, dt)\n b = list(a.dtype.names)\n b[0] = \"notfoo\"\n a.dtype.names = b\n assert_(a.dtype.names[0] == \"notfoo\")\n assert_(a.dtype.names[1] == \"bar\")\n\n def test_for_object_scalar_creation(self):\n # Ticket #816\n a = np.object_()\n b = np.object_(3)\n b2 = np.object_(3.0)\n c = np.object_([4, 5])\n d = np.object_([None, {}, []])\n assert_(a is None)\n assert_(type(b) is int)\n assert_(type(b2) is float)\n assert_(type(c) is np.ndarray)\n assert_(c.dtype == object)\n assert_(d.dtype == object)\n\n def test_array_resize_method_system_error(self):\n # Ticket #840 - order should be an invalid keyword.\n x = np.array([[0, 1], [2, 3]])\n assert_raises(TypeError, x.resize, (2, 2), order='C')\n\n def test_for_zero_length_in_choose(self):\n \"Ticket #882\"\n a = np.array(1)\n assert_raises(ValueError, lambda x: x.choose([]), a)\n\n def test_array_ndmin_overflow(self):\n \"Ticket #947.\"\n assert_raises(ValueError, lambda: np.array([1], ndmin=33))\n\n def test_void_scalar_with_titles(self):\n # No ticket\n data = [('john', 4), ('mary', 5)]\n dtype1 = [(('source:yy', 'name'), 'O'), (('source:xx', 'id'), int)]\n arr = np.array(data, dtype=dtype1)\n assert_(arr[0][0] == 'john')\n assert_(arr[0][1] == 4)\n\n def test_void_scalar_constructor(self):\n #Issue #1550\n\n #Create test string data, construct void scalar from data and assert\n #that void scalar contains original data.\n test_string = np.array(\"test\")\n test_string_void_scalar = np.core.multiarray.scalar(\n np.dtype((\"V\", test_string.dtype.itemsize)), test_string.tobytes())\n\n assert_(test_string_void_scalar.view(test_string.dtype) == test_string)\n\n #Create record scalar, construct from data and assert that\n #reconstructed scalar is correct.\n test_record = np.ones((), \"i,i\")\n test_record_void_scalar = np.core.multiarray.scalar(\n test_record.dtype, test_record.tobytes())\n\n assert_(test_record_void_scalar == test_record)\n\n #Test pickle and unpickle of void and record scalars\n assert_(pickle.loads(pickle.dumps(test_string)) == test_string)\n assert_(pickle.loads(pickle.dumps(test_record)) == test_record)\n\n def test_blasdot_uninitialized_memory(self):\n # Ticket #950\n for m in [0, 1, 2]:\n for n in [0, 1, 2]:\n for k in range(3):\n # Try to ensure that x->data contains non-zero floats\n x = np.array([123456789e199], dtype=np.float64)\n if IS_PYPY:\n x.resize((m, 0), refcheck=False)\n else:\n x.resize((m, 0))\n y = np.array([123456789e199], dtype=np.float64)\n if IS_PYPY:\n y.resize((0, n), refcheck=False)\n else:\n y.resize((0, n))\n\n # `dot` should just return zero (m, n) matrix\n z = np.dot(x, y)\n assert_(np.all(z == 0))\n assert_(z.shape == (m, n))\n\n def test_zeros(self):\n # Regression test for #1061.\n # Set a size which cannot fit into a 64 bits signed integer\n sz = 2 ** 64\n good = 'Maximum allowed dimension exceeded'\n try:\n np.empty(sz)\n except ValueError as e:\n if not str(e) == good:\n self.fail(\"Got msg '%s', expected '%s'\" % (e, good))\n except Exception as e:\n self.fail(\"Got exception of type %s instead of ValueError\" % type(e))\n\n def test_huge_arange(self):\n # Regression test for #1062.\n # Set a size which cannot fit into a 64 bits signed integer\n sz = 2 ** 64\n good = 'Maximum 
allowed size exceeded'\n try:\n np.arange(sz)\n assert_(np.size == sz)\n except ValueError as e:\n if not str(e) == good:\n self.fail(\"Got msg '%s', expected '%s'\" % (e, good))\n except Exception as e:\n self.fail(\"Got exception of type %s instead of ValueError\" % type(e))\n\n def test_fromiter_bytes(self):\n # Ticket #1058\n a = np.fromiter(list(range(10)), dtype='b')\n b = np.fromiter(list(range(10)), dtype='B')\n assert_(np.alltrue(a == np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])))\n assert_(np.alltrue(b == np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])))\n\n def test_array_from_sequence_scalar_array(self):\n # Ticket #1078: segfaults when creating an array with a sequence of\n # 0d arrays.\n a = np.array((np.ones(2), np.array(2)))\n assert_equal(a.shape, (2,))\n assert_equal(a.dtype, np.dtype(object))\n assert_equal(a[0], np.ones(2))\n assert_equal(a[1], np.array(2))\n\n a = np.array(((1,), np.array(1)))\n assert_equal(a.shape, (2,))\n assert_equal(a.dtype, np.dtype(object))\n assert_equal(a[0], (1,))\n assert_equal(a[1], np.array(1))\n\n def test_array_from_sequence_scalar_array2(self):\n # Ticket #1081: weird array with strange input...\n t = np.array([np.array([]), np.array(0, object)])\n assert_equal(t.shape, (2,))\n assert_equal(t.dtype, np.dtype(object))\n\n def test_array_too_big(self):\n # Ticket #1080.\n assert_raises(ValueError, np.zeros, [975]*7, np.int8)\n assert_raises(ValueError, np.zeros, [26244]*5, np.int8)\n\n def test_dtype_keyerrors_(self):\n # Ticket #1106.\n dt = np.dtype([('f1', np.uint)])\n assert_raises(KeyError, dt.__getitem__, \"f2\")\n assert_raises(IndexError, dt.__getitem__, 1)\n assert_raises(ValueError, dt.__getitem__, 0.0)\n\n def test_lexsort_buffer_length(self):\n # Ticket #1217, don't segfault.\n a = np.ones(100, dtype=np.int8)\n b = np.ones(100, dtype=np.int32)\n i = np.lexsort((a[::-1], b))\n assert_equal(i, np.arange(100, dtype=int))\n\n def test_object_array_to_fixed_string(self):\n # Ticket #1235.\n a = np.array(['abcdefgh', 'ijklmnop'], dtype=np.object_)\n b = np.array(a, dtype=(np.str_, 8))\n assert_equal(a, b)\n c = np.array(a, dtype=(np.str_, 5))\n assert_equal(c, np.array(['abcde', 'ijklm']))\n d = np.array(a, dtype=(np.str_, 12))\n assert_equal(a, d)\n e = np.empty((2, ), dtype=(np.str_, 8))\n e[:] = a[:]\n assert_equal(a, e)\n\n def test_unicode_to_string_cast(self):\n # Ticket #1240.\n a = np.array([[u'abc', u'\\u03a3'],\n [u'asdf', u'erw']],\n dtype='U')\n assert_raises(UnicodeEncodeError, np.array, a, 'S4')\n\n def test_mixed_string_unicode_array_creation(self):\n a = np.array(['1234', u'123'])\n assert_(a.itemsize == 16)\n a = np.array([u'123', '1234'])\n assert_(a.itemsize == 16)\n a = np.array(['1234', u'123', '12345'])\n assert_(a.itemsize == 20)\n a = np.array([u'123', '1234', u'12345'])\n assert_(a.itemsize == 20)\n a = np.array([u'123', '1234', u'1234'])\n assert_(a.itemsize == 16)\n\n def test_misaligned_objects_segfault(self):\n # Ticket #1198 and #1267\n a1 = np.zeros((10,), dtype='O,c')\n a2 = np.array(['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j'], 'S10')\n a1['f0'] = a2\n repr(a1)\n np.argmax(a1['f0'])\n a1['f0'][1] = \"FOO\"\n a1['f0'] = \"FOO\"\n np.array(a1['f0'], dtype='S')\n np.nonzero(a1['f0'])\n a1.sort()\n copy.deepcopy(a1)\n\n def test_misaligned_scalars_segfault(self):\n # Ticket #1267\n s1 = np.array(('a', 'Foo'), dtype='c,O')\n s2 = np.array(('b', 'Bar'), dtype='c,O')\n s1['f1'] = s2['f1']\n s1['f1'] = 'Baz'\n\n def test_misaligned_dot_product_objects(self):\n # Ticket #1267\n # This didn't require a fix, but 
it's worth testing anyway, because\n # it may fail if .dot stops enforcing the arrays to be BEHAVED\n a = np.array([[(1, 'a'), (0, 'a')], [(0, 'a'), (1, 'a')]], dtype='O,c')\n b = np.array([[(4, 'a'), (1, 'a')], [(2, 'a'), (2, 'a')]], dtype='O,c')\n np.dot(a['f0'], b['f0'])\n\n def test_byteswap_complex_scalar(self):\n # Ticket #1259 and gh-441\n for dtype in [np.dtype('<'+t) for t in np.typecodes['Complex']]:\n z = np.array([2.2-1.1j], dtype)\n x = z[0] # always native-endian\n y = x.byteswap()\n if x.dtype.byteorder == z.dtype.byteorder:\n # little-endian machine\n assert_equal(x, np.frombuffer(y.tobytes(), dtype=dtype.newbyteorder()))\n else:\n # big-endian machine\n assert_equal(x, np.frombuffer(y.tobytes(), dtype=dtype))\n # double check real and imaginary parts:\n assert_equal(x.real, y.real.byteswap())\n assert_equal(x.imag, y.imag.byteswap())\n\n def test_structured_arrays_with_objects1(self):\n # Ticket #1299\n stra = 'aaaa'\n strb = 'bbbb'\n x = np.array([[(0, stra), (1, strb)]], 'i8,O')\n x[x.nonzero()] = x.ravel()[:1]\n assert_(x[0, 1] == x[0, 0])\n\n @dec.skipif(not HAS_REFCOUNT, \"python has no sys.getrefcount\")\n def test_structured_arrays_with_objects2(self):\n # Ticket #1299 second test\n stra = 'aaaa'\n strb = 'bbbb'\n numb = sys.getrefcount(strb)\n numa = sys.getrefcount(stra)\n x = np.array([[(0, stra), (1, strb)]], 'i8,O')\n x[x.nonzero()] = x.ravel()[:1]\n assert_(sys.getrefcount(strb) == numb)\n assert_(sys.getrefcount(stra) == numa + 2)\n\n def test_duplicate_title_and_name(self):\n # Ticket #1254\n dtspec = [(('a', 'a'), 'i'), ('b', 'i')]\n assert_raises(ValueError, np.dtype, dtspec)\n\n def test_signed_integer_division_overflow(self):\n # Ticket #1317.\n def test_type(t):\n min = np.array([np.iinfo(t).min])\n min //= -1\n\n with np.errstate(divide=\"ignore\"):\n for t in (np.int8, np.int16, np.int32, np.int64, int, np.long):\n test_type(t)\n\n def test_buffer_hashlib(self):\n try:\n from hashlib import md5\n except ImportError:\n from md5 import new as md5\n\n x = np.array([1, 2, 3], dtype=np.dtype('<i4'))\n assert_equal(md5(x).hexdigest(), '2a1dd1e1e59d0a384c26951e316cd7e6')\n\n def test_0d_string_scalar(self):\n # Bug #1436; the following should succeed\n np.asarray('x', '>c')\n\n def test_log1p_compiler_shenanigans(self):\n # Check if log1p is behaving on 32 bit intel systems.\n assert_(np.isfinite(np.log1p(np.exp2(-53))))\n\n def test_fromiter_comparison(self):\n a = np.fromiter(list(range(10)), dtype='b')\n b = np.fromiter(list(range(10)), dtype='B')\n assert_(np.alltrue(a == np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])))\n assert_(np.alltrue(b == np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])))\n\n def test_fromstring_crash(self):\n # Ticket #1345: the following should not cause a crash\n np.fromstring(b'aa, aa, 1.0', sep=',')\n\n def test_ticket_1539(self):\n dtypes = [x for x in np.typeDict.values()\n if (issubclass(x, np.number)\n and not issubclass(x, np.timedelta64))]\n a = np.array([], np.bool_) # not x[0] because it is unordered\n failures = []\n\n for x in dtypes:\n b = a.astype(x)\n for y in dtypes:\n c = a.astype(y)\n try:\n np.dot(b, c)\n except TypeError:\n failures.append((x, y))\n if failures:\n raise AssertionError(\"Failures: %r\" % failures)\n\n def test_ticket_1538(self):\n x = np.finfo(np.float32)\n for name in 'eps epsneg max min resolution tiny'.split():\n assert_equal(type(getattr(x, name)), np.float32,\n err_msg=name)\n\n def test_ticket_1434(self):\n # Check that the out= argument in var and std has an effect\n data = np.array(((1, 2, 3), (4, 
5, 6), (7, 8, 9)))\n out = np.zeros((3,))\n\n ret = data.var(axis=1, out=out)\n assert_(ret is out)\n assert_array_equal(ret, data.var(axis=1))\n\n ret = data.std(axis=1, out=out)\n assert_(ret is out)\n assert_array_equal(ret, data.std(axis=1))\n\n def test_complex_nan_maximum(self):\n cnan = complex(0, np.nan)\n assert_equal(np.maximum(1, cnan), cnan)\n\n def test_subclass_int_tuple_assignment(self):\n # ticket #1563\n class Subclass(np.ndarray):\n def __new__(cls, i):\n return np.ones((i,)).view(cls)\n\n x = Subclass(5)\n x[(0,)] = 2 # shouldn't raise an exception\n assert_equal(x[0], 2)\n\n def test_ufunc_no_unnecessary_views(self):\n # ticket #1548\n class Subclass(np.ndarray):\n pass\n x = np.array([1, 2, 3]).view(Subclass)\n y = np.add(x, x, x)\n assert_equal(id(x), id(y))\n\n @dec.skipif(not HAS_REFCOUNT, \"python has no sys.getrefcount\")\n def test_take_refcount(self):\n # ticket #939\n a = np.arange(16, dtype=float)\n a.shape = (4, 4)\n lut = np.ones((5 + 3, 4), float)\n rgba = np.empty(shape=a.shape + (4,), dtype=lut.dtype)\n c1 = sys.getrefcount(rgba)\n try:\n lut.take(a, axis=0, mode='clip', out=rgba)\n except TypeError:\n pass\n c2 = sys.getrefcount(rgba)\n assert_equal(c1, c2)\n\n def test_fromfile_tofile_seeks(self):\n # On Python 3, tofile/fromfile used to get (#1610) the Python\n # file handle out of sync\n f0 = tempfile.NamedTemporaryFile()\n f = f0.file\n f.write(np.arange(255, dtype='u1').tobytes())\n\n f.seek(20)\n ret = np.fromfile(f, count=4, dtype='u1')\n assert_equal(ret, np.array([20, 21, 22, 23], dtype='u1'))\n assert_equal(f.tell(), 24)\n\n f.seek(40)\n np.array([1, 2, 3], dtype='u1').tofile(f)\n assert_equal(f.tell(), 43)\n\n f.seek(40)\n data = f.read(3)\n assert_equal(data, b\"\\x01\\x02\\x03\")\n\n f.seek(80)\n f.read(4)\n data = np.fromfile(f, dtype='u1', count=4)\n assert_equal(data, np.array([84, 85, 86, 87], dtype='u1'))\n\n f.close()\n\n def test_complex_scalar_warning(self):\n for tp in [np.csingle, np.cdouble, np.clongdouble]:\n x = tp(1+2j)\n assert_warns(np.ComplexWarning, float, x)\n with suppress_warnings() as sup:\n sup.filter(np.ComplexWarning)\n assert_equal(float(x), float(x.real))\n\n def test_complex_scalar_complex_cast(self):\n for tp in [np.csingle, np.cdouble, np.clongdouble]:\n x = tp(1+2j)\n assert_equal(complex(x), 1+2j)\n\n def test_complex_boolean_cast(self):\n # Ticket #2218\n for tp in [np.csingle, np.cdouble, np.clongdouble]:\n x = np.array([0, 0+0.5j, 0.5+0j], dtype=tp)\n assert_equal(x.astype(bool), np.array([0, 1, 1], dtype=bool))\n assert_(np.any(x))\n assert_(np.all(x[1:]))\n\n def test_uint_int_conversion(self):\n x = 2**64 - 1\n assert_equal(int(np.uint64(x)), x)\n\n def test_duplicate_field_names_assign(self):\n ra = np.fromiter(((i*3, i*2) for i in range(10)), dtype='i8,f8')\n ra.dtype.names = ('f1', 'f2')\n repr(ra) # should not cause a segmentation fault\n assert_raises(ValueError, setattr, ra.dtype, 'names', ('f1', 'f1'))\n\n def test_eq_string_and_object_array(self):\n # From e-mail thread \"__eq__ with str and object\" (Keith Goodman)\n a1 = np.array(['a', 'b'], dtype=object)\n a2 = np.array(['a', 'c'])\n assert_array_equal(a1 == a2, [True, False])\n assert_array_equal(a2 == a1, [True, False])\n\n def test_nonzero_byteswap(self):\n a = np.array([0x80000000, 0x00000080, 0], dtype=np.uint32)\n a.dtype = np.float32\n assert_equal(a.nonzero()[0], [1])\n a = a.byteswap().newbyteorder()\n assert_equal(a.nonzero()[0], [1]) # [0] if nonzero() ignores swap\n\n def test_find_common_type_boolean(self):\n # Ticket #1695\n 
assert_(np.find_common_type([], ['?', '?']) == '?')\n\n def test_empty_mul(self):\n a = np.array([1.])\n a[1:1] *= 2\n assert_equal(a, [1.])\n\n def test_array_side_effect(self):\n # The second use of itemsize was throwing an exception because in\n # ctors.c, discover_itemsize was calling PyObject_Length without\n # checking the return code. This failed to get the length of the\n # number 2, and the exception hung around until something checked\n # PyErr_Occurred() and returned an error.\n assert_equal(np.dtype('S10').itemsize, 10)\n np.array([['abc', 2], ['long ', '0123456789']], dtype=np.string_)\n assert_equal(np.dtype('S10').itemsize, 10)\n\n def test_any_float(self):\n # all and any for floats\n a = np.array([0.1, 0.9])\n assert_(np.any(a))\n assert_(np.all(a))\n\n def test_large_float_sum(self):\n a = np.arange(10000, dtype='f')\n assert_equal(a.sum(dtype='d'), a.astype('d').sum())\n\n def test_ufunc_casting_out(self):\n a = np.array(1.0, dtype=np.float32)\n b = np.array(1.0, dtype=np.float64)\n c = np.array(1.0, dtype=np.float32)\n np.add(a, b, out=c)\n assert_equal(c, 2.0)\n\n def test_array_scalar_contiguous(self):\n # Array scalars are both C and Fortran contiguous\n assert_(np.array(1.0).flags.c_contiguous)\n assert_(np.array(1.0).flags.f_contiguous)\n assert_(np.array(np.float32(1.0)).flags.c_contiguous)\n assert_(np.array(np.float32(1.0)).flags.f_contiguous)\n\n def test_squeeze_contiguous(self):\n # Similar to GitHub issue #387\n a = np.zeros((1, 2)).squeeze()\n b = np.zeros((2, 2, 2), order='F')[:, :, ::2].squeeze()\n assert_(a.flags.c_contiguous)\n assert_(a.flags.f_contiguous)\n assert_(b.flags.f_contiguous)\n\n def test_reduce_contiguous(self):\n # GitHub issue #387\n a = np.add.reduce(np.zeros((2, 1, 2)), (0, 1))\n b = np.add.reduce(np.zeros((2, 1, 2)), 1)\n assert_(a.flags.c_contiguous)\n assert_(a.flags.f_contiguous)\n assert_(b.flags.c_contiguous)\n\n def test_object_array_self_reference(self):\n # Object arrays with references to themselves can cause problems\n a = np.array(0, dtype=object)\n a[()] = a\n assert_raises(TypeError, int, a)\n assert_raises(TypeError, long, a)\n assert_raises(TypeError, float, a)\n assert_raises(TypeError, oct, a)\n assert_raises(TypeError, hex, a)\n\n # Test the same for a circular reference.\n b = np.array(a, dtype=object)\n a[()] = b\n assert_raises(TypeError, int, a)\n # NumPy has no tp_traverse currently, so circular references\n # cannot be detected. So resolve it:\n a[()] = 0\n\n # This was causing a to become like the above\n a = np.array(0, dtype=object)\n a[...] 
+= 1\n assert_equal(a, 1)\n\n def test_object_array_self_copy(self):\n # An object array being copied into itself DECREF'ed before INCREF'ing\n # causing segmentation faults (gh-3787)\n a = np.array(object(), dtype=object)\n np.copyto(a, a)\n if HAS_REFCOUNT:\n assert_(sys.getrefcount(a[()]) == 2)\n a[()].__class__ # will segfault if object was deleted\n\n def test_zerosize_accumulate(self):\n \"Ticket #1733\"\n x = np.array([[42, 0]], dtype=np.uint32)\n assert_equal(np.add.accumulate(x[:-1, 0]), [])\n\n def test_objectarray_setfield(self):\n # Setfield should not overwrite Object fields with non-Object data\n x = np.array([1, 2, 3], dtype=object)\n assert_raises(TypeError, x.setfield, 4, np.int32, 0)\n\n def test_setting_rank0_string(self):\n \"Ticket #1736\"\n s1 = b\"hello1\"\n s2 = b\"hello2\"\n a = np.zeros((), dtype=\"S10\")\n a[()] = s1\n assert_equal(a, np.array(s1))\n a[()] = np.array(s2)\n assert_equal(a, np.array(s2))\n\n a = np.zeros((), dtype='f4')\n a[()] = 3\n assert_equal(a, np.array(3))\n a[()] = np.array(4)\n assert_equal(a, np.array(4))\n\n def test_string_astype(self):\n \"Ticket #1748\"\n s1 = b'black'\n s2 = b'white'\n s3 = b'other'\n a = np.array([[s1], [s2], [s3]])\n assert_equal(a.dtype, np.dtype('S5'))\n b = a.astype(np.dtype('S0'))\n assert_equal(b.dtype, np.dtype('S5'))\n\n def test_ticket_1756(self):\n # Ticket #1756\n s = b'0123456789abcdef'\n a = np.array([s]*5)\n for i in range(1, 17):\n a1 = np.array(a, \"|S%d\" % i)\n a2 = np.array([s[:i]]*5)\n assert_equal(a1, a2)\n\n def test_fields_strides(self):\n \"gh-2355\"\n r = np.frombuffer(b'abcdefghijklmnop'*4*3, dtype='i4,(2,3)u2')\n assert_equal(r[0:3:2]['f1'], r['f1'][0:3:2])\n assert_equal(r[0:3:2]['f1'][0], r[0:3:2][0]['f1'])\n assert_equal(r[0:3:2]['f1'][0][()], r[0:3:2][0]['f1'][()])\n assert_equal(r[0:3:2]['f1'][0].strides, r[0:3:2][0]['f1'].strides)\n\n def test_alignment_update(self):\n # Check that alignment flag is updated on stride setting\n a = np.arange(10)\n assert_(a.flags.aligned)\n a.strides = 3\n assert_(not a.flags.aligned)\n\n def test_ticket_1770(self):\n \"Should not segfault on python 3k\"\n import numpy as np\n try:\n a = np.zeros((1,), dtype=[('f1', 'f')])\n a['f1'] = 1\n a['f2'] = 1\n except ValueError:\n pass\n except Exception:\n raise AssertionError\n\n def test_ticket_1608(self):\n \"x.flat shouldn't modify data\"\n x = np.array([[1, 2], [3, 4]]).T\n np.array(x.flat)\n assert_equal(x, [[1, 3], [2, 4]])\n\n def test_pickle_string_overwrite(self):\n import re\n\n data = np.array([1], dtype='b')\n blob = pickle.dumps(data, protocol=1)\n data = pickle.loads(blob)\n\n # Check that loads does not clobber interned strings\n s = re.sub(\"a(.)\", \"\\x01\\\\1\", \"a_\")\n assert_equal(s[0], \"\\x01\")\n data[0] = 0xbb\n s = re.sub(\"a(.)\", \"\\x01\\\\1\", \"a_\")\n assert_equal(s[0], \"\\x01\")\n\n def test_pickle_bytes_overwrite(self):\n if sys.version_info[0] >= 3:\n data = np.array([1], dtype='b')\n data = pickle.loads(pickle.dumps(data))\n data[0] = 0xdd\n bytestring = \"\\x01 \".encode('ascii')\n assert_equal(bytestring[0:1], '\\x01'.encode('ascii'))\n\n def test_pickle_py2_array_latin1_hack(self):\n # Check that unpickling hacks in Py3 that support\n # encoding='latin1' work correctly.\n\n # Python2 output for pickle.dumps(numpy.array([129], dtype='b'))\n data = (b\"cnumpy.core.multiarray\\n_reconstruct\\np0\\n(cnumpy\\nndarray\\np1\\n(I0\\n\"\n b\"tp2\\nS'b'\\np3\\ntp4\\nRp5\\n(I1\\n(I1\\ntp6\\ncnumpy\\ndtype\\np7\\n(S'i1'\\np8\\n\"\n 
b\"I0\\nI1\\ntp9\\nRp10\\n(I3\\nS'|'\\np11\\nNNNI-1\\nI-1\\nI0\\ntp12\\nbI00\\nS'\\\\x81'\\n\"\n b\"p13\\ntp14\\nb.\")\n if sys.version_info[0] >= 3:\n # This should work:\n result = pickle.loads(data, encoding='latin1')\n assert_array_equal(result, np.array([129], dtype='b'))\n # Should not segfault:\n assert_raises(Exception, pickle.loads, data, encoding='koi8-r')\n\n def test_pickle_py2_scalar_latin1_hack(self):\n # Check that scalar unpickling hack in Py3 that supports\n # encoding='latin1' work correctly.\n\n # Python2 output for pickle.dumps(...)\n datas = [\n # (original, python2_pickle, koi8r_validity)\n (np.unicode_('\\u6bd2'),\n (b\"cnumpy.core.multiarray\\nscalar\\np0\\n(cnumpy\\ndtype\\np1\\n\"\n b\"(S'U1'\\np2\\nI0\\nI1\\ntp3\\nRp4\\n(I3\\nS'<'\\np5\\nNNNI4\\nI4\\nI0\\n\"\n b\"tp6\\nbS'\\\\xd2k\\\\x00\\\\x00'\\np7\\ntp8\\nRp9\\n.\"),\n 'invalid'),\n\n (np.float64(9e123),\n (b\"cnumpy.core.multiarray\\nscalar\\np0\\n(cnumpy\\ndtype\\np1\\n(S'f8'\\n\"\n b\"p2\\nI0\\nI1\\ntp3\\nRp4\\n(I3\\nS'<'\\np5\\nNNNI-1\\nI-1\\nI0\\ntp6\\n\"\n b\"bS'O\\\\x81\\\\xb7Z\\\\xaa:\\\\xabY'\\np7\\ntp8\\nRp9\\n.\"),\n 'invalid'),\n\n (np.bytes_(b'\\x9c'), # different 8-bit code point in KOI8-R vs latin1\n (b\"cnumpy.core.multiarray\\nscalar\\np0\\n(cnumpy\\ndtype\\np1\\n(S'S1'\\np2\\n\"\n b\"I0\\nI1\\ntp3\\nRp4\\n(I3\\nS'|'\\np5\\nNNNI1\\nI1\\nI0\\ntp6\\nbS'\\\\x9c'\\np7\\n\"\n b\"tp8\\nRp9\\n.\"),\n 'different'),\n ]\n if sys.version_info[0] >= 3:\n for original, data, koi8r_validity in datas:\n result = pickle.loads(data, encoding='latin1')\n assert_equal(result, original)\n\n # Decoding under non-latin1 encoding (e.g.) KOI8-R can\n # produce bad results, but should not segfault.\n if koi8r_validity == 'different':\n # Unicode code points happen to lie within latin1,\n # but are different in koi8-r, resulting to silent\n # bogus results\n result = pickle.loads(data, encoding='koi8-r')\n assert_(result != original)\n elif koi8r_validity == 'invalid':\n # Unicode code points outside latin1, so results\n # to an encoding exception\n assert_raises(ValueError, pickle.loads, data, encoding='koi8-r')\n else:\n raise ValueError(koi8r_validity)\n\n def test_structured_type_to_object(self):\n a_rec = np.array([(0, 1), (3, 2)], dtype='i4,i8')\n a_obj = np.empty((2,), dtype=object)\n a_obj[0] = (0, 1)\n a_obj[1] = (3, 2)\n # astype records -> object\n assert_equal(a_rec.astype(object), a_obj)\n # '=' records -> object\n b = np.empty_like(a_obj)\n b[...] = a_rec\n assert_equal(b, a_obj)\n # '=' object -> records\n b = np.empty_like(a_rec)\n b[...] = a_obj\n assert_equal(b, a_rec)\n\n def test_assign_obj_listoflists(self):\n # Ticket # 1870\n # The inner list should get assigned to the object elements\n a = np.zeros(4, dtype=object)\n b = a.copy()\n a[0] = [1]\n a[1] = [2]\n a[2] = [3]\n a[3] = [4]\n b[...] = [[1], [2], [3], [4]]\n assert_equal(a, b)\n # The first dimension should get broadcast\n a = np.zeros((2, 2), dtype=object)\n a[...] 
= [[1, 2]]\n assert_equal(a, [[1, 2], [1, 2]])\n\n def test_memoryleak(self):\n # Ticket #1917 - ensure that array data doesn't leak\n for i in range(1000):\n # 100MB times 1000 would give 100GB of memory usage if it leaks\n a = np.empty((100000000,), dtype='i1')\n del a\n\n @dec.skipif(not HAS_REFCOUNT, \"python has no sys.getrefcount\")\n def test_ufunc_reduce_memoryleak(self):\n a = np.arange(6)\n acnt = sys.getrefcount(a)\n np.add.reduce(a)\n assert_equal(sys.getrefcount(a), acnt)\n\n def test_search_sorted_invalid_arguments(self):\n # Ticket #2021, should not segfault.\n x = np.arange(0, 4, dtype='datetime64[D]')\n assert_raises(TypeError, x.searchsorted, 1)\n\n def test_string_truncation(self):\n # Ticket #1990 - Data can be truncated in creation of an array from a\n # mixed sequence of numeric values and strings\n for val in [True, 1234, 123.4, complex(1, 234)]:\n for tostr in [asunicode, asbytes]:\n b = np.array([val, tostr('xx')])\n assert_equal(tostr(b[0]), tostr(val))\n b = np.array([tostr('xx'), val])\n assert_equal(tostr(b[1]), tostr(val))\n\n # test also with longer strings\n b = np.array([val, tostr('xxxxxxxxxx')])\n assert_equal(tostr(b[0]), tostr(val))\n b = np.array([tostr('xxxxxxxxxx'), val])\n assert_equal(tostr(b[1]), tostr(val))\n\n def test_string_truncation_ucs2(self):\n # Ticket #2081. Python compiled with two byte unicode\n # can lead to truncation if itemsize is not properly\n # adjusted for NumPy's four byte unicode.\n if sys.version_info[0] >= 3:\n a = np.array(['abcd'])\n else:\n a = np.array([u'abcd'])\n assert_equal(a.dtype.itemsize, 16)\n\n def test_unique_stable(self):\n # Ticket #2063 must always choose stable sort for argsort to\n # get consistent results\n v = np.array(([0]*5 + [1]*6 + [2]*6)*4)\n res = np.unique(v, return_index=True)\n tgt = (np.array([0, 1, 2]), np.array([ 0, 5, 11]))\n assert_equal(res, tgt)\n\n def test_unicode_alloc_dealloc_match(self):\n # Ticket #1578, the mismatch only showed up when running\n # python-debug for python versions >= 2.7, and then as\n # a core dump and error message.\n a = np.array(['abc'], dtype=np.unicode)[0]\n del a\n\n def test_refcount_error_in_clip(self):\n # Ticket #1588\n a = np.zeros((2,), dtype='>i2').clip(min=0)\n x = a + a\n # This used to segfault:\n y = str(x)\n # Check the final string:\n assert_(y == \"[0 0]\")\n\n def test_searchsorted_wrong_dtype(self):\n # Ticket #2189, it used to segfault, so we check that it raises the\n # proper exception.\n a = np.array([('a', 1)], dtype='S1, int')\n assert_raises(TypeError, np.searchsorted, a, 1.2)\n # Ticket #2066, similar problem:\n dtype = np.format_parser(['i4', 'i4'], [], [])\n a = np.recarray((2, ), dtype)\n assert_raises(TypeError, np.searchsorted, a, 1)\n\n def test_complex64_alignment(self):\n # Issue gh-2668 (trac 2076), segfault on sparc due to misalignment\n dtt = np.complex64\n arr = np.arange(10, dtype=dtt)\n # 2D array\n arr2 = np.reshape(arr, (2, 5))\n # Fortran write followed by (C or F) read caused bus error\n data_str = arr2.tobytes('F')\n data_back = np.ndarray(arr2.shape,\n arr2.dtype,\n buffer=data_str,\n order='F')\n assert_array_equal(arr2, data_back)\n\n def test_structured_count_nonzero(self):\n arr = np.array([0, 1]).astype('i4, (2)i4')[:1]\n count = np.count_nonzero(arr)\n assert_equal(count, 0)\n\n def test_copymodule_preserves_f_contiguity(self):\n a = np.empty((2, 2), order='F')\n b = copy.copy(a)\n c = copy.deepcopy(a)\n assert_(b.flags.fortran)\n assert_(b.flags.f_contiguous)\n assert_(c.flags.fortran)\n 
assert_(c.flags.f_contiguous)\n\n def test_fortran_order_buffer(self):\n import numpy as np\n a = np.array([['Hello', 'Foob']], dtype='U5', order='F')\n arr = np.ndarray(shape=[1, 2, 5], dtype='U1', buffer=a)\n arr2 = np.array([[[u'H', u'e', u'l', u'l', u'o'],\n [u'F', u'o', u'o', u'b', u'']]])\n assert_array_equal(arr, arr2)\n\n def test_assign_from_sequence_error(self):\n # Ticket #4024.\n arr = np.array([1, 2, 3])\n assert_raises(ValueError, arr.__setitem__, slice(None), [9, 9])\n arr.__setitem__(slice(None), [9])\n assert_equal(arr, [9, 9, 9])\n\n def test_format_on_flex_array_element(self):\n # Ticket #4369.\n dt = np.dtype([('date', '<M8[D]'), ('val', '<f8')])\n arr = np.array([('2000-01-01', 1)], dt)\n formatted = '{0}'.format(arr[0])\n assert_equal(formatted, str(arr[0]))\n\n def test_deepcopy_on_0d_array(self):\n # Ticket #3311.\n arr = np.array(3)\n arr_cp = copy.deepcopy(arr)\n\n assert_equal(arr, arr_cp)\n assert_equal(arr.shape, arr_cp.shape)\n assert_equal(int(arr), int(arr_cp))\n assert_(arr is not arr_cp)\n assert_(isinstance(arr_cp, type(arr)))\n\n def test_deepcopy_F_order_object_array(self):\n # Ticket #6456.\n a = {'a': 1}\n b = {'b': 2}\n arr = np.array([[a, b], [a, b]], order='F')\n arr_cp = copy.deepcopy(arr)\n\n assert_equal(arr, arr_cp)\n assert_(arr is not arr_cp)\n # Ensure that we have actually copied the item.\n assert_(arr[0, 1] is not arr_cp[1, 1])\n # Ensure we are allowed to have references to the same object.\n assert_(arr[0, 1] is arr[1, 1])\n # Check the references hold for the copied objects.\n assert_(arr_cp[0, 1] is arr_cp[1, 1])\n\n def test_deepcopy_empty_object_array(self):\n # Ticket #8536.\n # Deepcopy should succeed\n a = np.array([], dtype=object)\n b = copy.deepcopy(a)\n assert_(a.shape == b.shape)\n\n def test_bool_subscript_crash(self):\n # gh-4494\n c = np.rec.array([(1, 2, 3), (4, 5, 6)])\n masked = c[np.array([True, False])]\n base = masked.base\n del masked, c\n base.dtype\n\n def test_richcompare_crash(self):\n # gh-4613\n import operator as op\n\n # dummy class where __array__ throws exception\n class Foo(object):\n __array_priority__ = 1002\n\n def __array__(self, *args, **kwargs):\n raise Exception()\n\n rhs = Foo()\n lhs = np.array(1)\n for f in [op.lt, op.le, op.gt, op.ge]:\n if sys.version_info[0] >= 3:\n assert_raises(TypeError, f, lhs, rhs)\n elif not sys.py3kwarning:\n # With -3 switch in python 2, DeprecationWarning is raised\n # which we are not interested in\n f(lhs, rhs)\n assert_(not op.eq(lhs, rhs))\n assert_(op.ne(lhs, rhs))\n\n def test_richcompare_scalar_and_subclass(self):\n # gh-4709\n class Foo(np.ndarray):\n def __eq__(self, other):\n return \"OK\"\n\n x = np.array([1, 2, 3]).view(Foo)\n assert_equal(10 == x, \"OK\")\n assert_equal(np.int32(10) == x, \"OK\")\n assert_equal(np.array([10]) == x, \"OK\")\n\n def test_pickle_empty_string(self):\n # gh-3926\n\n import pickle\n test_string = np.string_('')\n assert_equal(pickle.loads(pickle.dumps(test_string)), test_string)\n\n def test_frompyfunc_many_args(self):\n # gh-5672\n\n def passer(*args):\n pass\n\n assert_raises(ValueError, np.frompyfunc, passer, 32, 1)\n\n def test_repeat_broadcasting(self):\n # gh-5743\n a = np.arange(60).reshape(3, 4, 5)\n for axis in chain(range(-a.ndim, a.ndim), [None]):\n assert_equal(a.repeat(2, axis=axis), a.repeat([2], axis=axis))\n\n def test_frompyfunc_nout_0(self):\n # gh-2014\n\n def f(x):\n x[0], x[-1] = x[-1], x[0]\n\n uf = np.frompyfunc(f, 1, 0)\n a = np.array([[1, 2, 3], [4, 5], [6, 7, 8, 9]])\n assert_equal(uf(a), ())\n 
assert_array_equal(a, [[3, 2, 1], [5, 4], [9, 7, 8, 6]])\n\n @dec.skipif(not HAS_REFCOUNT, \"python has no sys.getrefcount\")\n def test_leak_in_structured_dtype_comparison(self):\n # gh-6250\n recordtype = np.dtype([('a', np.float64),\n ('b', np.int32),\n ('d', (str, 5))])\n\n # Simple case\n a = np.zeros(2, dtype=recordtype)\n for i in range(100):\n a == a\n assert_(sys.getrefcount(a) < 10)\n\n # The case in the bug report.\n before = sys.getrefcount(a)\n u, v = a[0], a[1]\n u == v\n del u, v\n gc.collect()\n after = sys.getrefcount(a)\n assert_equal(before, after)\n\n def test_empty_percentile(self):\n # gh-6530 / gh-6553\n assert_array_equal(np.percentile(np.arange(10), []), np.array([]))\n\n def test_void_compare_segfault(self):\n # gh-6922. The following should not segfault\n a = np.ones(3, dtype=[('object', 'O'), ('int', '<i2')])\n a.sort()\n\n def test_reshape_size_overflow(self):\n # gh-7455\n a = np.ones(20)[::2]\n if np.dtype(np.intp).itemsize == 8:\n # 64 bit. The following are the prime factors of 2**63 + 5,\n # plus a leading 2, so when multiplied together as int64,\n # the result overflows to a total size of 10.\n new_shape = (2, 13, 419, 691, 823, 2977518503)\n else:\n # 32 bit. The following are the prime factors of 2**31 + 5,\n # plus a leading 2, so when multiplied together as int32,\n # the result overflows to a total size of 10.\n new_shape = (2, 7, 7, 43826197)\n assert_raises(ValueError, a.reshape, new_shape)\n\n def test_invalid_structured_dtypes(self):\n # gh-2865\n # mapping python objects to other dtypes\n assert_raises(ValueError, np.dtype, ('O', [('name', 'i8')]))\n assert_raises(ValueError, np.dtype, ('i8', [('name', 'O')]))\n assert_raises(ValueError, np.dtype,\n ('i8', [('name', [('name', 'O')])]))\n assert_raises(ValueError, np.dtype, ([('a', 'i4'), ('b', 'i4')], 'O'))\n assert_raises(ValueError, np.dtype, ('i8', 'O'))\n # wrong number/type of tuple elements in dict\n assert_raises(ValueError, np.dtype,\n ('i', {'name': ('i', 0, 'title', 'oops')}))\n assert_raises(ValueError, np.dtype,\n ('i', {'name': ('i', 'wrongtype', 'title')}))\n # disallowed as of 1.13\n assert_raises(ValueError, np.dtype,\n ([('a', 'O'), ('b', 'O')], [('c', 'O'), ('d', 'O')]))\n # allowed as a special case due to existing use, see gh-2798\n a = np.ones(1, dtype=('O', [('name', 'O')]))\n assert_equal(a[0], 1)\n\n def test_correct_hash_dict(self):\n # gh-8887 - __hash__ would be None despite tp_hash being set\n all_types = set(np.typeDict.values()) - {np.void}\n for t in all_types:\n val = t()\n\n try:\n hash(val)\n except TypeError as e:\n assert_equal(t.__hash__, None)\n else:\n assert_(t.__hash__ != None)\n\n def test_scalar_copy(self):\n scalar_types = set(np.sctypeDict.values())\n values = {\n np.void: b\"a\",\n np.bytes_: b\"a\",\n np.unicode_: \"a\",\n np.datetime64: \"2017-08-25\",\n }\n for sctype in scalar_types:\n item = sctype(values.get(sctype, 1))\n item2 = copy.copy(item)\n assert_equal(item, item2)\n\n\nif __name__ == \"__main__\":\n run_module_suite()\n"
] | [
[
"numpy.setbufsize",
"numpy.all",
"numpy.binary_repr",
"numpy.where",
"numpy.unique",
"numpy.frombuffer",
"numpy.zeros",
"numpy.testing.assert_raises",
"numpy.chararray",
"numpy.find_common_type",
"numpy.array",
"numpy.fromfile",
"numpy.indices",
"numpy.random.shuffle",
"numpy.testing.assert_array_equal",
"numpy.float_",
"numpy.add",
"numpy.object_",
"numpy.asarray",
"numpy.ndarray",
"numpy.concatenate",
"numpy.iinfo",
"numpy.copyto",
"numpy.reshape",
"numpy.testing.suppress_warnings",
"numpy.frompyfunc",
"numpy.argmax",
"numpy.float32",
"numpy.rec.array",
"numpy.random.rand",
"numpy.testing.assert_",
"numpy.divide.reduce",
"numpy.errstate",
"numpy.testing.assert_warns",
"numpy.add.accumulate",
"numpy.testing.run_module_suite",
"numpy.maximum",
"numpy.rec.fromarrays",
"numpy.ones",
"numpy.recarray",
"numpy.empty",
"numpy.take",
"numpy.linspace",
"numpy.divide.accumulate",
"numpy.longdouble",
"numpy.typeDict.values",
"numpy.zeros_like",
"numpy.bool_",
"numpy.double",
"numpy.testing.assert_equal",
"numpy.hstack",
"numpy.add.reduce",
"numpy.lexsort",
"numpy.array_str",
"numpy.count_nonzero",
"numpy.unicode_",
"numpy.testing.assert_array_almost_equal",
"numpy.format_parser",
"numpy.nonzero",
"numpy.char.array",
"numpy.transpose",
"numpy.single",
"numpy.uint64",
"numpy.testing.dec.skipif",
"numpy.dot",
"numpy.string_",
"numpy.exp2",
"numpy.subtract.accumulate",
"numpy.dtype",
"numpy.lib.stride_tricks.as_strided",
"numpy.random.randn",
"numpy.any",
"numpy.testing._assert_valid_refcount",
"numpy.arange",
"numpy.empty_like",
"numpy.sctypeDict.values",
"numpy.finfo",
"numpy.testing.assert_almost_equal",
"numpy.bytes_",
"numpy.load",
"numpy.int_",
"numpy.subtract.reduce",
"numpy.abs",
"numpy.random.seed",
"numpy.intp",
"numpy.add.outer",
"numpy.int32",
"numpy.sort",
"numpy.compat.asbytes",
"numpy.sign",
"numpy.random.normal",
"numpy.fromstring",
"numpy.float64"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
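The numpy regression-test entry above exercises the Python 3 unpickling path for arrays and scalars that were pickled under Python 2, where passing encoding='latin1' keeps the raw 8-bit payload intact instead of corrupting it (or segfaulting, as the tickets describe). A minimal, self-contained sketch of that usage, not taken from the corpus entry; the array value and dtype are illustrative:

import pickle
import numpy as np

arr = np.arange(5, dtype='b')          # small int8 array, values chosen arbitrarily
data = pickle.dumps(arr, protocol=2)   # protocol 2 matches what Python 2 emitted
# encoding='latin1' is the setting the regression tests rely on for genuine
# Python 2 streams; for this Python 3 round trip it is simply a harmless no-op.
restored = pickle.loads(data, encoding='latin1')
assert np.array_equal(restored, arr)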
Talendar/qdeep | [
"7228edc9cc7d7e6c6bc59e93a3eb726fda15704d"
] | [
"qdeep/dqn/agent.py"
] | [
"\"\"\" DQN agent implementation.\n\nHeavily based on: https://github.com/deepmind/acme/blob/master/acme/agents/tf/dqn/agent.py\n\"\"\"\n\nimport copy\nfrom typing import Optional, List, Dict\n\nimport numpy as np\nimport reverb\nimport sonnet as snt\nimport tensorflow as tf\nimport trfl\nfrom acme import datasets\nfrom acme import specs\nfrom acme.adders import reverb as adders\nfrom acme.agents import agent\nfrom acme.agents.tf import actors\nfrom acme.tf import utils as tf2_utils\nfrom acme.utils import loggers\n\nfrom qdeep.dqn import learning\n\n\nclass DQNAgent(agent.Agent):\n \"\"\" DQN agent.\n\n This implements a single-process DQN agent. This is a simple Q-learning\n algorithm that inserts N-step transitions into a replay buffer, and\n periodically updates its policy by sampling these transitions using\n prioritization.\n\n Args:\n environment_spec: description of the actions, observations, etc.\n network: the online Q network (the one being optimized)\n batch_size: batch size for updates.\n prefetch_size: size to prefetch from replay.\n target_update_period: number of learner steps to perform before\n updating the target networks.\n samples_per_insert: number of samples to take from replay for every\n insert that is made.\n min_replay_size: minimum replay size before updating. This and all\n following arguments are related to dataset construction and will be\n ignored if a dataset argument is passed.\n max_replay_size: maximum replay size.\n importance_sampling_exponent: power to which importance weights are\n raised before normalizing.\n priority_exponent: exponent used in prioritized sampling.\n n_step: number of steps to squash into a single transition.\n epsilon: probability of taking a random action; ignored if a policy\n network is given.\n learning_rate: learning rate for the q-network update.\n discount: discount to use for TD updates.\n logger: logger object to be used by learner.\n max_gradient_norm: used for gradient clipping.\n expert_data: List of dictionaries containing the expert data to be added\n to the agent's replay memory. Each dictionary represents and episode\n and must have two keys: \"first\" and \"mid\". The \"first\" key's value\n must be a `TimeStep` object of the type `StepType.FIRST`. The \"mid\"\n key's value, on the other hand, must be a list containing tuples\n with, respectively, an action and a `TimeStep` object.\n \"\"\"\n\n def __init__(\n self,\n environment_spec: specs.EnvironmentSpec,\n network: snt.Module,\n batch_size: int = 32,\n prefetch_size: int = 4,\n target_update_period: int = 100,\n samples_per_insert: float = 32.0,\n min_replay_size: int = 1000,\n max_replay_size: int = 100000,\n importance_sampling_exponent: float = 0.2,\n priority_exponent: float = 0.6,\n n_step: int = 5,\n epsilon: Optional[float] = 0.05,\n learning_rate: float = 1e-3,\n discount: float = 0.99,\n logger: loggers.Logger = None,\n max_gradient_norm: Optional[float] = None,\n expert_data: List[Dict] = None,\n ) -> None:\n \"\"\" Initialize the agent. \"\"\"\n\n # Create a replay server to add data to. 
This uses no limiter behavior\n # in order to allow the Agent interface to handle it.\n replay_table = reverb.Table(\n name=adders.DEFAULT_PRIORITY_TABLE,\n sampler=reverb.selectors.Prioritized(priority_exponent),\n remover=reverb.selectors.Fifo(),\n max_size=max_replay_size,\n rate_limiter=reverb.rate_limiters.MinSize(1),\n signature=adders.NStepTransitionAdder.signature(environment_spec))\n self._server = reverb.Server([replay_table], port=None)\n\n # The adder is used to insert observations into replay.\n address = f'localhost:{self._server.port}'\n adder = adders.NStepTransitionAdder(\n client=reverb.Client(address),\n n_step=n_step,\n discount=discount)\n\n # Adding expert data to the replay memory:\n if expert_data is not None:\n for d in expert_data:\n adder.add_first(d[\"first\"])\n for (action, next_ts) in d[\"mid\"]:\n adder.add(np.int32(action), next_ts)\n\n # The dataset provides an interface to sample from replay.\n replay_client = reverb.TFClient(address)\n dataset = datasets.make_reverb_dataset(\n server_address=address,\n batch_size=batch_size,\n prefetch_size=prefetch_size)\n\n # Creating the epsilon greedy policy network:\n epsilon = tf.Variable(epsilon)\n policy_network = snt.Sequential([\n network,\n lambda q: trfl.epsilon_greedy(q, epsilon=epsilon).sample(),\n ])\n\n # Create a target network.\n target_network = copy.deepcopy(network)\n\n # Ensure that we create the variables before proceeding (maybe not\n # needed).\n tf2_utils.create_variables(network, [environment_spec.observations])\n tf2_utils.create_variables(target_network,\n [environment_spec.observations])\n\n # Create the actor which defines how we take actions.\n actor = actors.FeedForwardActor(policy_network, adder)\n\n # The learner updates the parameters (and initializes them).\n learner = learning.DQNLearner(\n network=network,\n target_network=target_network,\n discount=discount,\n importance_sampling_exponent=importance_sampling_exponent,\n learning_rate=learning_rate,\n target_update_period=target_update_period,\n dataset=dataset,\n replay_client=replay_client,\n max_gradient_norm=max_gradient_norm,\n logger=logger,\n )\n\n super().__init__(\n actor=actor,\n learner=learner,\n min_observations=max(batch_size, min_replay_size),\n observations_per_step=float(batch_size) / samples_per_insert)\n"
] | [
[
"numpy.int32",
"tensorflow.Variable"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"2.7",
"2.6",
"1.4",
"2.2",
"2.3",
"2.4",
"2.9",
"1.5",
"1.7",
"2.5",
"0.12",
"1.0",
"2.8",
"1.2",
"2.10"
]
}
] |
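The DQNAgent docstring in the qdeep entry above describes the expected layout of expert_data: one dict per episode, with a TimeStep of type StepType.FIRST under "first" and a list of (action, TimeStep) tuples under "mid". A hypothetical sketch of building that structure with dm_env's TimeStep helpers (Acme environments use dm_env time steps); the observations, actions and rewards below are invented, and the commented constructor call assumes an environment_spec and network already exist:

import numpy as np
import dm_env

obs0 = np.zeros(4, dtype=np.float32)
obs1 = np.ones(4, dtype=np.float32)

episode = {
    "first": dm_env.restart(obs0),  # StepType.FIRST, as the docstring requires
    "mid": [
        (1, dm_env.transition(reward=0.0, observation=obs1)),
        (0, dm_env.termination(reward=1.0, observation=obs1)),
    ],
}
expert_data = [episode]
# agent = DQNAgent(environment_spec=spec, network=network, expert_data=expert_data)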
sflender/autogluon | [
"058398b61d1b2011f56a9dce149b0989adbbb04a"
] | [
"core/src/autogluon/core/models/ensemble/bagged_ensemble_model.py"
] | [
"import copy\nimport logging\nimport os\nimport time\nfrom collections import Counter\nfrom statistics import mean\nfrom functools import reduce\n\nimport numpy as np\nimport pandas as pd\n\nfrom ...constants import MULTICLASS, REGRESSION, SOFTCLASS, REFIT_FULL_SUFFIX\nfrom ...utils.exceptions import TimeLimitExceeded\nfrom ...utils.loaders import load_pkl\nfrom ...utils.savers import save_pkl\nfrom ...utils.utils import generate_kfold, _compute_fi_with_stddev\n\nfrom ..abstract.abstract_model import AbstractModel\n\nlogger = logging.getLogger(__name__)\n\n\n# TODO: Add metadata object with info like score on each model, train time on each model, etc.\nclass BaggedEnsembleModel(AbstractModel):\n \"\"\"\n Bagged ensemble meta-model which fits a given model multiple times across different splits of the training data.\n \"\"\"\n _oof_filename = 'oof.pkl'\n\n def __init__(self, model_base: AbstractModel, random_state=0, **kwargs):\n self.model_base = model_base\n self._child_type = type(self.model_base)\n self.models = []\n self._oof_pred_proba = None\n self._oof_pred_model_repeats = None\n self._n_repeats = 0 # Number of n_repeats with at least 1 model fit, if kfold=5 and 8 models have been fit, _n_repeats is 2\n self._n_repeats_finished = 0 # Number of n_repeats finished, if kfold=5 and 8 models have been fit, _n_repeats_finished is 1\n self._k_fold_end = 0 # Number of models fit in current n_repeat (0 if completed), if kfold=5 and 8 models have been fit, _k_fold_end is 3\n self._k = None # k models per n_repeat, equivalent to kfold value\n self._k_per_n_repeat = [] # k-fold used for each n_repeat. == [5, 10, 3] if first kfold was 5, second was 10, and third was 3\n self._random_state = random_state\n self.low_memory = True\n self.bagged_mode = None\n\n try:\n feature_metadata = self.model_base.feature_metadata\n except:\n feature_metadata = None\n\n eval_metric = kwargs.pop('eval_metric', self.model_base.eval_metric)\n stopping_metric = kwargs.pop('stopping_metric', self.model_base.stopping_metric)\n\n super().__init__(problem_type=self.model_base.problem_type, eval_metric=eval_metric, stopping_metric=stopping_metric, feature_metadata=feature_metadata, **kwargs)\n\n def _set_default_params(self):\n default_params = {'save_bag_folds': True}\n for param, val in default_params.items():\n self._set_default_param_value(param, val)\n super()._set_default_params()\n\n def is_valid(self):\n return self.is_fit() and (self._n_repeats == self._n_repeats_finished)\n\n def can_infer(self):\n return self.is_fit() and self.params.get('save_bag_folds', True)\n\n def is_stratified(self):\n if self.problem_type == REGRESSION or self.problem_type == SOFTCLASS:\n return False\n else:\n return True\n\n def is_fit(self):\n return len(self.models) != 0\n\n # TODO: This assumes bagged ensemble has a complete k_fold and no partial k_fold models, this is likely fine but will act incorrectly if called when only a partial k_fold has been completed\n # Solving this is memory intensive, requires all oof_pred_probas from all n_repeats, so its probably not worth it.\n @property\n def oof_pred_proba(self):\n # TODO: Require is_valid == True (add option param to ignore is_valid)\n return self._oof_pred_proba_func(self._oof_pred_proba, self._oof_pred_model_repeats)\n\n @staticmethod\n def _oof_pred_proba_func(oof_pred_proba, oof_pred_model_repeats):\n oof_pred_model_repeats_without_0 = np.where(oof_pred_model_repeats == 0, 1, oof_pred_model_repeats)\n if oof_pred_proba.ndim == 2:\n oof_pred_model_repeats_without_0 = 
oof_pred_model_repeats_without_0[:, None]\n return oof_pred_proba / oof_pred_model_repeats_without_0\n\n def preprocess(self, X, preprocess_nonadaptive=True, model=None, **kwargs):\n if preprocess_nonadaptive:\n if model is None:\n if not self.models:\n return X\n model = self.models[0]\n model = self.load_child(model)\n return model.preprocess(X, preprocess_stateful=False)\n else:\n return X\n\n def _fit(self, X_train, y_train, k_fold=5, k_fold_start=0, k_fold_end=None, n_repeats=1, n_repeat_start=0, time_limit=None, **kwargs):\n if k_fold < 1:\n k_fold = 1\n if k_fold_end is None:\n k_fold_end = k_fold\n\n if self._oof_pred_proba is None and (k_fold_start != 0 or n_repeat_start != 0):\n self._load_oof()\n if n_repeat_start != self._n_repeats_finished:\n raise ValueError(f'n_repeat_start must equal self._n_repeats_finished, values: ({n_repeat_start}, {self._n_repeats_finished})')\n if n_repeats <= n_repeat_start:\n raise ValueError(f'n_repeats must be greater than n_repeat_start, values: ({n_repeats}, {n_repeat_start})')\n if k_fold_start != self._k_fold_end:\n raise ValueError(f'k_fold_start must equal previous k_fold_end, values: ({k_fold_start}, {self._k_fold_end})')\n if k_fold_start >= k_fold_end:\n # TODO: Remove this limitation if n_repeats > 1\n raise ValueError(f'k_fold_end must be greater than k_fold_start, values: ({k_fold_end}, {k_fold_start})')\n if (n_repeats - n_repeat_start) > 1 and k_fold_end != k_fold:\n # TODO: Remove this limitation\n raise ValueError(f'k_fold_end must equal k_fold when (n_repeats - n_repeat_start) > 1, values: ({k_fold_end}, {k_fold})')\n if self._k is not None and self._k != k_fold:\n raise ValueError(f'k_fold must equal previously fit k_fold value for the current n_repeat, values: (({k_fold}, {self._k})')\n fold_start = n_repeat_start * k_fold + k_fold_start\n fold_end = (n_repeats - 1) * k_fold + k_fold_end\n time_start = time.time()\n\n model_base = self._get_model_base()\n if self.features is not None:\n model_base.features = self.features\n model_base.feature_metadata = self.feature_metadata # TODO: Don't pass this here\n\n if self.model_base is not None:\n self.save_model_base(self.model_base)\n self.model_base = None\n\n if k_fold == 1:\n if self._n_repeats != 0:\n raise ValueError(f'n_repeats must equal 0 when fitting a single model with k_fold < 2, values: ({self._n_repeats}, {k_fold})')\n model_base.set_contexts(path_context=self.path + model_base.name + os.path.sep)\n time_start_fit = time.time()\n model_base.fit(X_train=X_train, y_train=y_train, time_limit=time_limit, **kwargs)\n model_base.fit_time = time.time() - time_start_fit\n model_base.predict_time = None\n self._oof_pred_proba = model_base.predict_proba(X=X_train) # TODO: Cheater value, will be overfit to valid set\n self._oof_pred_model_repeats = np.ones(shape=len(X_train), dtype=np.uint8)\n self._n_repeats = 1\n self._n_repeats_finished = 1\n self._k_per_n_repeat = [1]\n self.bagged_mode = False\n model_base.reduce_memory_size(remove_fit=True, remove_info=False, requires_save=True)\n if not self.params.get('save_bag_folds', True):\n model_base.model = None\n if self.low_memory:\n self.save_child(model_base, verbose=False)\n self.models = [model_base.name]\n else:\n self.models = [model_base]\n self._add_child_times_to_bag(model=model_base)\n return\n\n # TODO: Preprocess data here instead of repeatedly\n kfolds = generate_kfold(X=X_train, y=y_train, n_splits=k_fold, stratified=self.is_stratified(), random_state=self._random_state, n_repeats=n_repeats)\n\n oof_pred_proba, 
oof_pred_model_repeats = self._construct_empty_oof(X=X_train, y=y_train)\n\n models = []\n folds_to_fit = fold_end - fold_start\n for j in range(n_repeat_start, n_repeats): # For each n_repeat\n cur_repeat_count = j - n_repeat_start\n fold_start_n_repeat = fold_start + cur_repeat_count * k_fold\n fold_end_n_repeat = min(fold_start_n_repeat + k_fold, fold_end)\n # TODO: Consider moving model fit inner for loop to a function to simply this code\n for i in range(fold_start_n_repeat, fold_end_n_repeat): # For each fold\n folds_finished = i - fold_start\n folds_left = fold_end - i\n fold = kfolds[i]\n time_elapsed = time.time() - time_start\n if time_limit is not None:\n time_left = time_limit - time_elapsed\n required_time_per_fold = time_left / folds_left\n time_limit_fold = required_time_per_fold * 0.8\n if folds_finished > 0:\n expected_time_required = time_elapsed * folds_to_fit / folds_finished\n expected_remaining_time_required = expected_time_required * folds_left / folds_to_fit\n if expected_remaining_time_required > time_left:\n raise TimeLimitExceeded\n if time_left <= 0:\n raise TimeLimitExceeded\n else:\n time_limit_fold = None\n\n time_start_fold = time.time()\n train_index, val_index = fold\n X_train_fold, X_val_fold = X_train.iloc[train_index, :], X_train.iloc[val_index, :]\n y_train_fold, y_val_fold = y_train.iloc[train_index], y_train.iloc[val_index]\n fold_model = copy.deepcopy(model_base)\n fold_model.name = f'{fold_model.name}_F{i+1}'\n fold_model.set_contexts(self.path + fold_model.name + os.path.sep)\n fold_model.fit(X_train=X_train_fold, y_train=y_train_fold, X_val=X_val_fold, y_val=y_val_fold, time_limit=time_limit_fold, **kwargs)\n time_train_end_fold = time.time()\n if time_limit is not None: # Check to avoid unnecessarily predicting and saving a model when an Exception is going to be raised later\n if i != (fold_end - 1):\n time_elapsed = time.time() - time_start\n time_left = time_limit - time_elapsed\n expected_time_required = time_elapsed * folds_to_fit / (folds_finished + 1)\n expected_remaining_time_required = expected_time_required * (folds_left - 1) / folds_to_fit\n if expected_remaining_time_required > time_left:\n raise TimeLimitExceeded\n pred_proba = fold_model.predict_proba(X_val_fold)\n time_predict_end_fold = time.time()\n fold_model.fit_time = time_train_end_fold - time_start_fold\n fold_model.predict_time = time_predict_end_fold - time_train_end_fold\n fold_model.val_score = fold_model.score_with_y_pred_proba(y=y_val_fold, y_pred_proba=pred_proba)\n fold_model.reduce_memory_size(remove_fit=True, remove_info=False, requires_save=True)\n if not self.params.get('save_bag_folds', True):\n fold_model.model = None\n if self.low_memory:\n self.save_child(fold_model, verbose=False)\n models.append(fold_model.name)\n else:\n models.append(fold_model)\n oof_pred_proba[val_index] += pred_proba\n oof_pred_model_repeats[val_index] += 1\n self._add_child_times_to_bag(model=fold_model)\n if (fold_end_n_repeat != fold_end) or (k_fold == k_fold_end):\n self._k_per_n_repeat.append(k_fold)\n self.models += models\n\n self.bagged_mode = True\n\n if self._oof_pred_proba is None:\n self._oof_pred_proba = oof_pred_proba\n self._oof_pred_model_repeats = oof_pred_model_repeats\n else:\n self._oof_pred_proba += oof_pred_proba\n self._oof_pred_model_repeats += oof_pred_model_repeats\n\n self._n_repeats = n_repeats\n if k_fold == k_fold_end:\n self._k = None\n self._k_fold_end = 0\n self._n_repeats_finished = self._n_repeats\n else:\n self._k = k_fold\n self._k_fold_end = 
k_fold_end\n self._n_repeats_finished = self._n_repeats - 1\n\n def predict_proba(self, X, normalize=None, **kwargs):\n model = self.load_child(self.models[0])\n X = self.preprocess(X, model=model, **kwargs)\n pred_proba = model.predict_proba(X=X, preprocess_nonadaptive=False, normalize=normalize)\n for model in self.models[1:]:\n model = self.load_child(model)\n pred_proba += model.predict_proba(X=X, preprocess_nonadaptive=False, normalize=normalize)\n pred_proba = pred_proba / len(self.models)\n\n return pred_proba\n\n def _predict_proba(self, X, normalize=False, **kwargs):\n return self.predict_proba(X=X, normalize=normalize, **kwargs)\n\n def score_with_oof(self, y):\n self._load_oof()\n valid_indices = self._oof_pred_model_repeats > 0\n y = y[valid_indices]\n y_pred_proba = self.oof_pred_proba[valid_indices]\n\n return self.score_with_y_pred_proba(y=y, y_pred_proba=y_pred_proba)\n\n # TODO: Augment to generate OOF after shuffling each column in X (Batching), this is the fastest way.\n # TODO: v0.1 Reduce logging clutter during OOF importance calculation (Currently logs separately for each child)\n # Generates OOF predictions from pre-trained bagged models, assuming X and y are in the same row order as used in .fit(X, y)\n def compute_feature_importance(self, X, y, features=None, is_oof=True, time_limit=None, silent=False, **kwargs) -> pd.DataFrame:\n if features is None:\n features = self.load_child(model=self.models[0]).features\n if not is_oof:\n return super().compute_feature_importance(X, y, features=features, time_limit=time_limit, silent=silent, **kwargs)\n fi_fold_list = []\n model_index = 0\n num_children = len(self.models)\n if time_limit is not None:\n time_limit_per_child = time_limit / num_children\n else:\n time_limit_per_child = None\n if not silent:\n logging_message = f'Computing feature importance via permutation shuffling for {len(features)} features using out-of-fold (OOF) data aggregated across {num_children} child models...'\n if time_limit is not None:\n logging_message = f'{logging_message} Time limit: {time_limit}s...'\n logger.log(20, logging_message)\n\n time_start = time.time()\n early_stop = False\n children_completed = 0\n log_final_suffix = ''\n for n_repeat, k in enumerate(self._k_per_n_repeat):\n if is_oof:\n if not self.bagged_mode:\n raise AssertionError('Model trained with no validation data cannot get feature importances on training data, please specify new test data to compute feature importances (model=%s)' % self.name)\n kfolds = generate_kfold(X=X, y=y, n_splits=k, stratified=self.is_stratified(), random_state=self._random_state, n_repeats=n_repeat + 1)\n cur_kfolds = kfolds[n_repeat * k:(n_repeat+1) * k]\n else:\n cur_kfolds = [(None, list(range(len(X))))]*k\n for i, fold in enumerate(cur_kfolds):\n _, test_index = fold\n model = self.load_child(self.models[model_index + i])\n fi_fold = model.compute_feature_importance(X=X.iloc[test_index, :], y=y.iloc[test_index], features=features, time_limit=time_limit_per_child,\n silent=silent, log_prefix='\\t', importance_as_list=True, **kwargs)\n fi_fold_list.append(fi_fold)\n\n children_completed += 1\n if time_limit is not None and children_completed != num_children:\n time_now = time.time()\n time_left = time_limit - (time_now - time_start)\n time_child_average = (time_now - time_start) / children_completed\n if time_left < (time_child_average * 1.1):\n log_final_suffix = f' (Early stopping due to lack of time...)'\n early_stop = True\n break\n if early_stop:\n break\n model_index += k\n # TODO: DON'T 
THROW AWAY SAMPLES! USE LARGER N\n fi_list_dict = dict()\n for val in fi_fold_list:\n val = val['importance'].to_dict() # TODO: Don't throw away stddev information of children\n for key in val:\n if key not in fi_list_dict:\n fi_list_dict[key] = []\n fi_list_dict[key] += val[key]\n fi_df = _compute_fi_with_stddev(fi_list_dict)\n\n if not silent:\n logger.log(20, f'\\t{round(time.time() - time_start, 2)}s\\t= Actual runtime (Completed {children_completed} of {num_children} children){log_final_suffix}')\n\n return fi_df\n\n def load_child(self, model, verbose=False) -> AbstractModel:\n if isinstance(model, str):\n child_path = self.create_contexts(self.path + model + os.path.sep)\n return self._child_type.load(path=child_path, verbose=verbose)\n else:\n return model\n\n def save_child(self, model, verbose=False):\n child = self.load_child(model)\n child.set_contexts(self.path + child.name + os.path.sep)\n child.save(verbose=verbose)\n\n # TODO: Multiply epochs/n_iterations by some value (such as 1.1) to account for having more training data than bagged models\n def convert_to_refit_full_template(self):\n init_args = self._get_init_args()\n init_args['hyperparameters']['save_bag_folds'] = True # refit full models must save folds\n model_base_name_orig = init_args['model_base'].name\n init_args['model_base'] = self.convert_to_refitfull_template_child()\n model_base_name_new = init_args['model_base'].name\n if model_base_name_orig in init_args['name'] and model_base_name_orig != model_base_name_new:\n init_args['name'] = init_args['name'].replace(model_base_name_orig, model_base_name_new, 1)\n else:\n init_args['name'] = init_args['name'] + '_FULL'\n\n model_full_template = self.__class__(**init_args)\n return model_full_template\n\n def convert_to_refitfull_template_child(self):\n compressed_params = self._get_compressed_params()\n child_compressed = copy.deepcopy(self._get_model_base())\n child_compressed.feature_metadata = self.feature_metadata # TODO: Don't pass this here\n child_compressed.params = compressed_params\n child_compressed.name = child_compressed.name + REFIT_FULL_SUFFIX\n child_compressed.set_contexts(self.path_root + child_compressed.name + os.path.sep)\n return child_compressed\n\n def _get_init_args(self):\n init_args = dict(\n model_base=self._get_model_base(),\n random_state=self._random_state,\n )\n init_args.update(super()._get_init_args())\n init_args.pop('problem_type')\n init_args.pop('feature_metadata')\n return init_args\n\n def _get_compressed_params(self, model_params_list=None):\n if model_params_list is None:\n model_params_list = [\n self.load_child(child).get_trained_params()\n for child in self.models\n ]\n\n model_params_compressed = dict()\n for param in model_params_list[0].keys():\n model_param_vals = [model_params[param] for model_params in model_params_list]\n if all(isinstance(val, bool) for val in model_param_vals):\n counter = Counter(model_param_vals)\n compressed_val = counter.most_common(1)[0][0]\n elif all(isinstance(val, int) for val in model_param_vals):\n compressed_val = round(mean(model_param_vals))\n elif all(isinstance(val, float) for val in model_param_vals):\n compressed_val = mean(model_param_vals)\n else:\n try:\n counter = Counter(model_param_vals)\n compressed_val = counter.most_common(1)[0][0]\n except TypeError:\n compressed_val = model_param_vals[0]\n model_params_compressed[param] = compressed_val\n return model_params_compressed\n\n def _get_compressed_params_trained(self):\n model_params_list = [\n 
self.load_child(child).params_trained\n for child in self.models\n ]\n return self._get_compressed_params(model_params_list=model_params_list)\n\n def _get_model_base(self):\n if self.model_base is None:\n return self.load_model_base()\n else:\n return self.model_base\n\n def _add_child_times_to_bag(self, model):\n if self.fit_time is None:\n self.fit_time = model.fit_time\n else:\n self.fit_time += model.fit_time\n\n if self.predict_time is None:\n self.predict_time = model.predict_time\n else:\n self.predict_time += model.predict_time\n\n @classmethod\n def load(cls, path: str, reset_paths=True, low_memory=True, load_oof=False, verbose=True):\n model = super().load(path=path, reset_paths=reset_paths, verbose=verbose)\n if not low_memory:\n model.persist_child_models(reset_paths=reset_paths)\n if load_oof:\n model._load_oof()\n return model\n\n @classmethod\n def load_oof(cls, path, verbose=True):\n try:\n oof = load_pkl.load(path=path + 'utils' + os.path.sep + cls._oof_filename, verbose=verbose)\n oof_pred_proba = oof['_oof_pred_proba']\n oof_pred_model_repeats = oof['_oof_pred_model_repeats']\n except FileNotFoundError:\n model = cls.load(path=path, reset_paths=True, verbose=verbose)\n model._load_oof()\n oof_pred_proba = model._oof_pred_proba\n oof_pred_model_repeats = model._oof_pred_model_repeats\n return cls._oof_pred_proba_func(oof_pred_proba=oof_pred_proba, oof_pred_model_repeats=oof_pred_model_repeats)\n\n def _load_oof(self):\n if self._oof_pred_proba is not None:\n pass\n else:\n oof = load_pkl.load(path=self.path + 'utils' + os.path.sep + self._oof_filename)\n self._oof_pred_proba = oof['_oof_pred_proba']\n self._oof_pred_model_repeats = oof['_oof_pred_model_repeats']\n\n def persist_child_models(self, reset_paths=True):\n for i, model_name in enumerate(self.models):\n if isinstance(model_name, str):\n child_path = self.create_contexts(self.path + model_name + os.path.sep)\n child_model = self._child_type.load(path=child_path, reset_paths=reset_paths, verbose=True)\n self.models[i] = child_model\n\n def load_model_base(self):\n return load_pkl.load(path=self.path + 'utils' + os.path.sep + 'model_template.pkl')\n\n def save_model_base(self, model_base):\n save_pkl.save(path=self.path + 'utils' + os.path.sep + 'model_template.pkl', object=model_base)\n\n def save(self, path=None, verbose=True, save_oof=True, save_children=False) -> str:\n if path is None:\n path = self.path\n\n if save_children:\n model_names = []\n for child in self.models:\n child = self.load_child(child)\n child.set_contexts(path + child.name + os.path.sep)\n child.save(verbose=False)\n model_names.append(child.name)\n self.models = model_names\n\n if save_oof and self._oof_pred_proba is not None:\n save_pkl.save(path=path + 'utils' + os.path.sep + self._oof_filename, object={\n '_oof_pred_proba': self._oof_pred_proba,\n '_oof_pred_model_repeats': self._oof_pred_model_repeats,\n })\n self._oof_pred_proba = None\n self._oof_pred_model_repeats = None\n\n return super().save(path=path, verbose=verbose)\n\n # If `remove_fit_stack=True`, variables will be removed that are required to fit more folds and to fit new stacker models which use this model as a base model.\n # This includes OOF variables.\n def reduce_memory_size(self, remove_fit_stack=False, remove_fit=True, remove_info=False, requires_save=True, reduce_children=False, **kwargs):\n super().reduce_memory_size(remove_fit=remove_fit, remove_info=remove_info, requires_save=requires_save, **kwargs)\n if remove_fit_stack:\n try:\n os.remove(self.path + 'utils' 
+ os.path.sep + self._oof_filename)\n except FileNotFoundError:\n pass\n if requires_save:\n self._oof_pred_proba = None\n self._oof_pred_model_repeats = None\n try:\n os.remove(self.path + 'utils' + os.path.sep + 'model_template.pkl')\n except FileNotFoundError:\n pass\n if requires_save:\n self.model_base = None\n try:\n os.rmdir(self.path + 'utils')\n except OSError:\n pass\n if reduce_children:\n for model in self.models:\n model = self.load_child(model)\n model.reduce_memory_size(remove_fit=remove_fit, remove_info=remove_info, requires_save=requires_save, **kwargs)\n if requires_save and self.low_memory:\n self.save_child(model=model)\n\n def _get_model_names(self):\n model_names = []\n for model in self.models:\n if isinstance(model, str):\n model_names.append(model)\n else:\n model_names.append(model.name)\n return model_names\n\n def get_info(self):\n info = super().get_info()\n children_info = self._get_child_info()\n child_memory_sizes = [child['memory_size'] for child in children_info.values()]\n sum_memory_size_child = sum(child_memory_sizes)\n if child_memory_sizes:\n max_memory_size_child = max(child_memory_sizes)\n else:\n max_memory_size_child = 0\n if self.low_memory:\n max_memory_size = info['memory_size'] + sum_memory_size_child\n min_memory_size = info['memory_size'] + max_memory_size_child\n else:\n max_memory_size = info['memory_size']\n min_memory_size = info['memory_size'] - sum_memory_size_child + max_memory_size_child\n\n bagged_info = dict(\n child_model_type=self._child_type.__name__,\n num_child_models=len(self.models),\n child_model_names=self._get_model_names(),\n _n_repeats=self._n_repeats,\n # _n_repeats_finished=self._n_repeats_finished, # commented out because these are too technical\n # _k_fold_end=self._k_fold_end,\n # _k=self._k,\n _k_per_n_repeat=self._k_per_n_repeat,\n _random_state=self._random_state,\n low_memory=self.low_memory, # If True, then model will attempt to use at most min_memory_size memory by having at most one child in memory. 
If False, model will use max_memory_size memory.\n bagged_mode=self.bagged_mode,\n max_memory_size=max_memory_size, # Memory used when all children are loaded into memory at once.\n min_memory_size=min_memory_size, # Memory used when only the largest child is loaded into memory.\n child_hyperparameters=self._get_model_base().params,\n child_hyperparameters_fit = self._get_compressed_params_trained(),\n child_ag_args_fit = self._get_model_base().params_aux,\n )\n info['bagged_info'] = bagged_info\n info['children_info'] = children_info\n\n child_features_full = list(set().union(*[child['features'] for child in children_info.values()]))\n info['features'] = child_features_full\n info['num_features'] = len(child_features_full)\n\n return info\n\n def get_memory_size(self):\n models = self.models\n self.models = None\n memory_size = super().get_memory_size()\n self.models = models\n return memory_size\n\n def _get_child_info(self):\n child_info_dict = dict()\n for model in self.models:\n if isinstance(model, str):\n child_path = self.create_contexts(self.path + model + os.path.sep)\n child_info_dict[model] = self._child_type.load_info(child_path)\n else:\n child_info_dict[model.name] = model.get_info()\n return child_info_dict\n\n def _construct_empty_oof(self, X, y):\n if self.problem_type == MULTICLASS:\n oof_pred_proba = np.zeros(shape=(len(X), len(y.unique())), dtype=np.float32)\n elif self.problem_type == SOFTCLASS:\n oof_pred_proba = np.zeros(shape=y.shape, dtype=np.float32)\n else:\n oof_pred_proba = np.zeros(shape=len(X), dtype=np.float32)\n oof_pred_model_repeats = np.zeros(shape=len(X), dtype=np.uint8)\n return oof_pred_proba, oof_pred_model_repeats\n\n def _preprocess_fit_resources(self, silent=False, **kwargs):\n \"\"\"Pass along to child models to avoid altering up-front\"\"\"\n return kwargs\n\n # TODO: Currently double disk usage, saving model in HPO and also saving model in bag\n def _hyperparameter_tune(self, X_train, y_train, k_fold, scheduler_options, preprocess_kwargs=None, **kwargs):\n if len(self.models) != 0:\n raise ValueError('self.models must be empty to call hyperparameter_tune, value: %s' % self.models)\n\n self.model_base.feature_metadata = self.feature_metadata # TODO: Move this\n\n # TODO: Preprocess data here instead of repeatedly\n if preprocess_kwargs is None:\n preprocess_kwargs = dict()\n X_train = self.preprocess(X=X_train, preprocess=False, fit=True, **preprocess_kwargs)\n kfolds = generate_kfold(X=X_train, y=y_train, n_splits=k_fold, stratified=self.is_stratified(), random_state=self._random_state, n_repeats=1)\n\n train_index, test_index = kfolds[0]\n X_train_fold, X_val_fold = X_train.iloc[train_index, :], X_train.iloc[test_index, :]\n y_train_fold, y_val_fold = y_train.iloc[train_index], y_train.iloc[test_index]\n orig_time = scheduler_options[1]['time_out']\n scheduler_options[1]['time_out'] = orig_time * 0.8 # TODO: Scheduler doesn't early stop on final model, this is a safety net. 
Scheduler should be updated to early stop\n hpo_models, hpo_model_performances, hpo_results = self.model_base.hyperparameter_tune(X_train=X_train_fold, y_train=y_train_fold, X_val=X_val_fold, y_val=y_val_fold, scheduler_options=scheduler_options, **kwargs)\n scheduler_options[1]['time_out'] = orig_time\n\n bags = {}\n bags_performance = {}\n for i, (model_name, model_path) in enumerate(hpo_models.items()):\n child: AbstractModel = self._child_type.load(path=model_path)\n y_pred_proba = child.predict_proba(X_val_fold)\n\n # TODO: Create new Ensemble Here\n bag = copy.deepcopy(self)\n bag.name = bag.name + os.path.sep + str(i)\n bag.set_contexts(self.path_root + bag.name + os.path.sep)\n\n oof_pred_proba, oof_pred_model_repeats = self._construct_empty_oof(X=X_train, y=y_train)\n oof_pred_proba[test_index] += y_pred_proba\n oof_pred_model_repeats[test_index] += 1\n\n bag.model_base = None\n child.set_contexts(bag.path + child.name + os.path.sep)\n bag.save_model_base(child.convert_to_template())\n\n bag._k = k_fold\n bag._k_fold_end = 1\n bag._n_repeats = 1\n bag._oof_pred_proba = oof_pred_proba\n bag._oof_pred_model_repeats = oof_pred_model_repeats\n child.name = child.name + '_fold_0'\n child.set_contexts(bag.path + child.name + os.path.sep)\n if not self.params.get('save_bag_folds', True):\n child.model = None\n if bag.low_memory:\n bag.save_child(child, verbose=False)\n bag.models.append(child.name)\n else:\n bag.models.append(child)\n bag.val_score = child.val_score\n bag._add_child_times_to_bag(model=child)\n\n bag.save()\n bags[bag.name] = bag.path\n bags_performance[bag.name] = bag.val_score\n\n # TODO: hpo_results likely not correct because no renames\n return bags, bags_performance, hpo_results\n"
] | [
[
"numpy.where",
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
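The BaggedEnsembleModel entry above accumulates out-of-fold (OOF) predictions by summing per-fold outputs into _oof_pred_proba and counting, per row, how many validation folds contributed in _oof_pred_model_repeats; _oof_pred_proba_func then divides the two, substituting 1 for zero counts so rows that were never validated do not produce NaNs. A toy, framework-free illustration of that division (the array values are invented, this is not AutoGluon API):

import numpy as np

oof_pred_proba = np.array([[0.9, 0.1],
                           [1.2, 0.8],
                           [0.0, 0.0]])          # per-fold predictions summed per row
oof_pred_model_repeats = np.array([1, 2, 0])     # last row never landed in a validation fold

safe_repeats = np.where(oof_pred_model_repeats == 0, 1, oof_pred_model_repeats)
averaged = oof_pred_proba / safe_repeats[:, None]
print(averaged)   # last row stays all-zero instead of dividing by zero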
baidut/PatchVQ | [
"040486b6342dfd36695f1daea0b5c4d77d728a23"
] | [
"fastiqa/browser.py"
] | [
"\"\"\"\nWhat you should know about browser:\n\n* (Browser() << KonIQ) + (Browser() << CLIVE) -- close the first one will close all\n* sync mode: show results of different methods. (qmap comparison, or one showing qmap, one showing the results, very flexible)\n\n# Browser(methods=['PaQ2PiQ-BM', 'PaQ2PiQ-RM']) << KonIQ\n\n# Browser() << KoNViD\n\"\"\"\n\nfrom .bunch import IqaDataBunch\nfrom cached_property import cached_property\n# IqaData, Rois0123Label, cached_property\nfrom pathlib import Path\nimport os, io\nfrom tkinter import *\nfrom tkinter import messagebox\nfrom PIL import Image, ImageTk # put it after tkinter to overwrite tkinter.Image\nimport numpy as np # np.roll\nimport logging\n\n\"\"\"\n# %% browse one database\nfrom fastiqa.bunches.iqa.im2mos import *\nfrom fastiqa.iqa import *\ndls = Im2MOS(bs=2)\ndls.bs\ndls.df\ndls.get_df()\n# %%\ndls2 = dls << CLIVE\ndls2.bs\ndls.bs\n\n# %%\n# dls.show_batch()\ndls.bs\n\n\n# %%\nself = IqaDataBunch() << CLIVE\nself.df\n# %%\nfrom fastiqa.iqa import *\nfrom fastiqa.browser import *\nself = Browser() << LIVE_FB_IQA # CLIVE\nself\n\npropobj = getattr(self.__class__, 'index', None)\npropobj\n# %%\nself.df\n\nself.reload()\nprint(self._df_view)\nself.df\n# %%\n\n# NOTE: exit to run next browser\nBrowser(KonIQ)\nBrowser(FLIVE)\nBrowser(FLIVE640)\n\n# %% browse multiple database at the same time\nfrom fastiqa.gui import *; Browser(FLIVE640) + Browser(CLIVE) + Browser(KonIQ)\n\n\n# %%\nfrom fastiqa.browser import *\nfrom fastiqa.iqa import *\n# Browser() << KonIQ\n(Browser() << KonIQ) + (Browser() << CLIVE)\n# %%\na.label_types\na.label_col\n# Browser() << CLIVE\n#\n\n\n# %%\n\n\nfrom fastiqa.vqa import *\n\n\n\n# Browser << KonIQ << CLIVE\nVidBrowser() << KoNViD\n# %%\n\"\"\"\n\nclass Browser(IqaDataBunch):\n # TODO label_types: also show predictions\n pred = None\n fn = None\n img = None\n tk_img = None\n canvas = None\n _index = 0\n percent = 1 # 100%\n cfg_rectangle = {}\n hide_scores = False\n opt_bbox_width = [4, 0, 1]\n out_dir = Path('')\n _df_view = None\n width = None\n height = None\n # label_types = None # 'mos', # 'mos', 'PaQ2PiQ', 'NIQE'\n label_range = None # map the mos to (0, 100)\n roi_col = [[\"left\", \"top\", \"right\", \"bottom\"]]\n\n @cached_property\n def label_cols(self):\n return self.label_col if isinstance( self.label_col, (list, tuple) ) else [self.label_col]\n\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n\n def get_df(self):\n df = super().get_df()\n self.opt_label_col = self.label_cols\n self.roi_col = np.array(self.roi_col).reshape(-1,4)\n self.opt_roi_col_idx = list(range(len(self.roi_col)))\n # if self.width is not None: # and not isinstance(self.label_col, (list, tuple)):\n if len(self.opt_label_col) == 1: # no roi location\n df['left'] = 0\n df['top'] = 0\n if self.width is not None:\n df['right'] = self.width\n df['bottom'] = self.height\n else:\n df['bottom'] = df['height']\n df['right'] = df['width']\n if self.label_range is not None:\n print('scores are mapped to (0, 100) for browsing')\n min, max = self.label_range\n for col in self.opt_label_col:\n df[col] = (df[col] - min )*100/(max-min)\n\n if self.pred is not None:\n print('sort by pred error')\n if len(self.pred) != len(df): # only valid set\n df = df[df.is_valid]\n\n df['pred'] = self.pred\n assert len(self.pred) == len(df), 'number of predictions does not match with number of actual values'\n assert len(df[df['pred'].isna()]) == 0, \"self.pred = df['output'].tolist()\"\n df['pred_err'] = df['pred'] - df[self.opt_label_col[0]]\n 
df = df.sort_values(by='pred_err', ignore_index=True) # pred > target, pred < target\n return df\n\n def __add__(self, other):\n other.window = Toplevel(master=self.window)\n other.load_frame()\n return self\n\n def load_frame(self):\n self.reload()\n self.frame = Frame(self.window, width=500, height=400, bd=1)\n self.frame.pack()\n self.frame.bind(\"<Key>\", self.on_key) # canvas covered by image don't response to key press...\n self.frame.bind(\"<Left>\", self.prev)\n self.frame.bind(\"<Right>\", self.next)\n self.frame.bind(\"<Up>\", self.prev_mode)\n self.frame.bind(\"<Down>\", self.next_mode)\n self.frame.bind(\"<Escape>\", self.exit)\n self.canvas = Canvas(self.frame)\n # self.canvas.bind(\"<Button-1>\", self.callback)\n self.frame.focus_set()\n self.window.protocol(\"WM_DELETE_WINDOW\", self.exit)\n self.show()\n\n @cached_property\n def window(self):\n return Tk()\n\n def _repr_html_(self):\n self.load_frame()\n return self.window.mainloop()\n\n @property\n def index(self):\n return self._index\n\n @index.setter # __setattr__ conflict\n def index(self, value):\n logging.debug('index:', value)\n self._index = int(value) % len(self._df_view)\n\n def show(self):\n # suffix\n # zscore? prefix\n #\n def add_bbox(roi_col_idx):\n #x1, x2 = self._df_view['left' + suffix][self.index], self._df_view['right' + suffix][self.index]\n # y1, y2 = self._df_view['top' + suffix][self.index], self._df_view['bottom' + suffix][self.index]\n roi_col = self.roi_col[roi_col_idx]\n x1, y1, x2, y2 = self._df_view.loc[self.index, roi_col].tolist()\n\n color = 'lightgreen' if roi_col_idx == self.opt_roi_col_idx[0] else 'yellow'\n self.canvas.create_rectangle(x1, y1, x2, y2, outline=color, width=self.opt_bbox_width[0], **self.cfg_rectangle)\n\n if not self.hide_scores:\n # TODO self.label_cols[0] mos or zscore (add score_mode)\n # show all predictions? mos, zscore, pred\n # assert type(self.label_col) != list\n s = f\"{self._df_view[self.label_cols[roi_col_idx]][self.index]:.1f}\"\n # if len(self.opt_roi_col_idx)==1 and self.pred is not None:\n if roi_col_idx==0 and self.pred is not None: # image score\n s = f\"Actual: {s} / Predication: {self.pred[self.index]:.1f}\" # load from the table!!!!\n text = self.canvas.create_text((x1, y1), anchor=NW, text=s)\n r = self.canvas.create_rectangle(self.canvas.bbox(text), fill=color, outline=color)\n self.canvas.tag_lower(r, text)\n\n self.fn = self._df_view[self.fn_col][self.index]\n file = self.path / self.folder / (str(self.fn)+self.fn_suffix) # some database (e.g. 
KoNViD, AVA) contain fn typed int, convert it first\n self.img = self.open_image(file)\n width, height = self.img.size\n # PIL image\n self.tk_img = ImageTk.PhotoImage(self.img)\n # tk_img = ImageTk.PhotoImage(im)\n # self.canvas.itemconfig(self.image_on_canvas, image=tk_img)\n # then it will be optimized, showing nothing\n\n self.canvas.delete(\"all\")\n self.canvas.config(width=width, height=height)\n\n self.canvas.create_image(0, 0, image=self.tk_img, anchor=NW)\n\n # only for Rois0123Label\n # if isinstance(self.label, Rois0123Label):\n for idx in self.opt_roi_col_idx:\n add_bbox(idx)\n # add_bbox('_image')\n # add_bbox('_patch_1')\n # add_bbox('_patch_2')\n # add_bbox('_patch_3')\n\n # self.image_on_canvas =\n # self.canvas.itemconfig(self.image_on_canvas, image=self.tk_img)\n #\n # self.canvas.coords(self.patch1_on_canvas,\n # self._df_view.left_patch_1[self.index],\n # self._df_view.top_patch_1[self.index],\n # self._df_view.right_patch_1[self.index],\n # self._df_view.bottom_patch_1[self.index],\n # )\n\n self.canvas.pack()\n fn = self._df_view[self.fn_col][self.index]\n self.window.title(f'[{width}x{height}]({self.index + 1}/{len(self._df_view)}: {self.percent * 100:.2f}%) {fn}')\n\n # some API to custom your browser\n def open_image(self, file):\n \"\"\"\n\n :param file:\n :return: a PIL image\n \"\"\"\n return Image.open(file) # \"../data/FLIVE/EE371R/cj23478+019.jpg\"\n # if self.apply_img_proc: im = self.img_proc(im)\n\n\n def prev(self, event=None):\n self.index -= 1\n self.show()\n\n def next(self, event=None):\n self.index += 1\n self.show()\n\n def prev_mode(self, event=None):\n self.opt_roi_col_idx = np.roll(self.opt_roi_col_idx, -1)\n self.show()\n\n def next_mode(self, event=None):\n self.opt_roi_col_idx = np.roll(self.opt_roi_col_idx, 1)\n self.show()\n\n # def reset(self, event):\n # self.valid_mos = None\n\n def exit(self, event=None):\n self.window.destroy()\n\n def filter(self, func):\n df = self._df_view[func(self._df_view)]\n if len(df) == 0:\n messagebox.showwarning(\"Warning\", \"No image found!\")\n else:\n self.percent = len(df) / len(self._df_view)\n self._df_view = df.reset_index() # otherwise index 0 will be dropped\n self._index = 0\n self.show()\n return self\n\n def save_image(self):\n # self.grab_image(self.canvas).save(self.fn)\n # https://stackoverflow.com/questions/41940945/saving-canvas-from-tkinter-to-file?rq=1\n ps = self.canvas.postscript(colormode='color')\n img = Image.open(io.BytesIO(ps.encode('utf-8')))\n img.save(self.out_dir / self.fn.rsplit('/', 1)[1], 'jpeg')\n\n def reload(self):\n self._df_view = self.df\n\n def on_key(self, event):\n self.frame.focus_set()\n # print(\"pressed\", repr(event.char))\n if event.char in [str(n) for n in range(10)]:\n self.reload()\n col_name = self.opt_label_col[0]\n # there might not be valid data\n self.filter(lambda x: x[col_name] // 10 == int(event.char))\n\n elif event.char is ' ':\n self.reload()\n self.show()\n elif event.char is 's': # save capture\n self.save_image()\n\n elif event.char is 'h': # hide score\n self.hide_scores = not self.hide_scores\n self.show()\n elif event.char is 'w': # i\n self.opt_bbox_width = np.roll(self.opt_bbox_width, 1)\n self.show()\n else:\n pass\n # print(self.index)\n\n\n # https://stackoverflow.com/questions/9886274/how-can-i-convert-canvas-content-to-an-image\n # def grab_image(self, widget):\n # x = self.window.winfo_rootx() + widget.winfo_x()\n # y = self.window.winfo_rooty() + widget.winfo_y()\n # x1 = x + widget.winfo_width()\n # y1 = y + 
widget.winfo_height()\n # return ImageGrab.grab().crop((x, y, x1, y1))\n # # .save(filename)\n\n def callback(self, event):\n self.frame.focus_set()\n print(\"clicked at\", event.x, event.y)\n print(self._df_view[self.fn_col][self.index])\n\n\n\nclass VidBrowser(Browser):\n def open_image(self, file):\n \"\"\"\n\n :param file:\n :return: a PIL image\n \"\"\"\n file = file/'image_00001.jpg'\n return Image.open(file) # \"../data/FLIVE/EE371R/cj23478+019.jpg\"\n # if self.apply_img_proc: im = self.img_proc(im)\n\n\"\"\"\nWontFix\n* support different backend: tkinter or matplotlib\n\nReference\n=========\n\nhttps://effbot.org/tkinterbook/tkinter-events-and-bindings.htm\n\nMatplotlib backbone\n===================\n\nhttps://matplotlib.org/gallery/animation/image_slices_viewer.html\n\n\nPySimpleGUI\n============\n\nPySimpleGUI is a wrapper for Tkinter and Qt (others on the way). The amount of code required to implement custom GUIs is much shorter using PySimpleGUI than if the same GUI were written directly using Tkinter or Qt.\n\nsudo apt-get install python-tk\nsudo apt-get install python3-tk\n\nhttps://github.com/PySimpleGUI/PySimpleGUI\n\nnot working here, cannot switch images\n\nTkinter\n========\n\nsudo apt-get install python3.6-tk\n\n\nwont support python 2\nfor browser only, support python 2\n\nimport sys\nif sys.version_info[0] == 3:\n # for Python3\n from tkinter import *\n # print(TclVersion)\nelse:\n # for Python2\n from Tkinter import *\n\n\"\"\"\n"
] | [
[
"numpy.array",
"numpy.roll"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
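When label_range is set, the Browser entry above rescales quality scores linearly onto (0, 100) before drawing them on the canvas. A standalone sketch of that mapping with an invented column name and range:

import pandas as pd

df = pd.DataFrame({"mos": [1.0, 3.0, 5.0]})
lo, hi = 1.0, 5.0                           # hypothetical label_range
df["mos"] = (df["mos"] - lo) * 100 / (hi - lo)
print(df["mos"].tolist())                   # [0.0, 50.0, 100.0]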
SamPaskewitz/statsrat | [
"3f970f1731b7ec2e22c36a49375619e6afb802a8"
] | [
"statsrat/exemplar/atn_update.py"
] | [
"import numpy as np\n\ndef null(sim, x, y, y_psb, rtrv, y_hat, y_lrn, x_ex, y_ex, n_x, n_y, ex_seen_yet, ex_counts, n_ex, sim_pars):\n '''\n Don't update attention (it remains constant).\n '''\n return 0\nnull.par_names = []\n\ndef gradient_ngsec(sim, x, y, y_psb, rtrv, y_hat, y_lrn, x_ex, y_ex, n_x, n_y, ex_seen_yet, ex_counts, n_ex, sim_pars):\n '''\n Gradient descent on total squared error (assuming separate attention weights for each exemplar)\n when rtrv = normalized_sim_ex_counts and sim = Gaussian.\n \n Notes\n -----\n I have double checked that the math is correct (SP, 4/14/2021).\n '''\n delta = y - y_hat\n # use loops to keep things simple for now\n update = sim_pars['atn_lrate_par']*sim_pars['decay_rate']*np.ones((n_ex, n_x))\n for m in range(n_ex):\n for n in range(n_x):\n sq_dist = (x[n] - x_ex[m, n])**2\n error_factor = np.sum(delta*(y_hat - y_ex[m, :]))\n update[m, n] *= rtrv[m]*sq_dist*error_factor\n return update\ngradient_ngsec.par_names = ['atn_lrate_par']\n\ndef gradient_ngsec_common(sim, x, y, y_psb, rtrv, y_hat, y_lrn, x_ex, y_ex, n_x, n_y, ex_seen_yet, ex_counts, n_ex, sim_pars):\n '''\n Gradient descent on total squared error (assuming common attention weights across exemplars)\n when rtrv = normalized_sim_ex_counts and sim = Gaussian.\n '''\n delta = y - y_hat\n # use loops to keep things simple for now\n update = -sim_pars['atn_lrate_par']*sim_pars['decay_rate']*np.ones((n_ex, n_x))\n for n in range(n_x):\n sq_dist = (x[n] - x_ex[:, n])**2\n rwsd = np.sum(rtrv*sq_dist) # retrieval weighted sum of sq_dist\n foo = y_ex*(rtrv*(sq_dist - rwsd)).reshape((n_ex, 1))\n ex_factor = np.sum(foo, axis = 0)\n update[:, n] *= np.sum(delta*ex_factor)\n return update\ngradient_ngsec_common.par_names = ['atn_lrate_par']\n\ndef gradient_ngsec_both(sim, x, y, y_psb, rtrv, y_hat, y_lrn, x_ex, y_ex, n_x, n_y, ex_seen_yet, ex_counts, n_ex, sim_pars):\n '''\n Gradient descent on total squared error when rtrv = normalized_sim_ex_counts and sim = Gaussian.\n Attention weights have two parts: one that is common across exemplars (for each cue) and one\n that is unique to each exemplar/cue.\n '''\n delta = y - y_hat\n # update for common part of weights\n update_c = -sim_pars['atn_lrate_par']*sim_pars['decay_rate']*np.ones((n_ex, n_x))\n for n in range(n_x):\n sq_dist = (x[n] - x_ex[:, n])**2\n rwsd = np.sum(rtrv*sq_dist) # retrieval weighted sum of sq_dist\n foo = y_ex*(rtrv*(sq_dist - rwsd)).reshape((n_ex, 1))\n ex_factor = np.sum(foo, axis = 0)\n update_c[:, n] *= np.sum(delta*ex_factor)\n \n # update for separate part of weights\n update_s = sim_pars['atn_lrate_par']*sim_pars['decay_rate']*np.ones((n_ex, n_x))\n for m in range(n_ex):\n for n in range(n_x):\n sq_dist = (x[n] - x_ex[m, n])**2\n error_factor = np.sum(delta*(y_hat - y_ex[m, :]))\n update_s[m, n] *= rtrv[m]*sq_dist*error_factor\n \n return update_c + update_s\ngradient_ngsec_both.par_names = ['atn_lrate_par']\n\ndef gradient_norm_cityblock_common(sim, x, y, y_psb, rtrv, y_hat, y_lrn, x_ex, y_ex, n_x, n_y, ex_seen_yet, ex_counts, n_ex, sim_pars):\n '''\n Gradient descent on total squared error (assuming common attention weights across exemplars)\n when rtrv = normalized_sim_ex_counts and sim = city_block (based on L1 distance).\n '''\n delta = y - y_hat\n # use loops to keep things simple for now\n update = -sim_pars['atn_lrate_par']*sim_pars['decay_rate']*np.ones((n_ex, n_x))\n for n in range(n_x):\n abs_dif = np.abs(x[n] - x_ex[:, n])\n rwsd = np.sum(rtrv*abs_dif) # retrieval weighted sum of sq_dist\n foo = 
y_ex*(rtrv*(abs_dif - rwsd)).reshape((n_ex, 1))\n ex_factor = np.sum(foo, axis = 0)\n update[:, n] *= np.sum(delta*ex_factor)\n return update\ngradient_norm_cityblock_common.par_names = ['atn_lrate_par']\n\ndef heuristic(sim, x, y, y_psb, rtrv, y_hat, y_lrn, x_ex, y_ex, n_x, n_y, ex_seen_yet, ex_counts, n_ex, sim_pars):\n '''\n Heuristic designed to adjust attention toward relevant stimuli.\n Each exemplar has a separate set of attention weights.\n Only the current exemplar's weights are adjusted.\n '''\n current = sim == 1 # assume that current exemplar has a similarity of 1, and no others do\n update = np.zeros((n_ex, n_x))\n for m in range(n_ex):\n if ex_seen_yet[m]:\n sq_y_dist = np.sum((y_ex[m, :] - y)**2)\n for n in range(n_x):\n sq_x_dist = (x_ex[m, n] - x[n])**2\n update[current, n] += sim_pars['atn_lrate_par']*sq_x_dist*sq_y_dist\n return update\nheuristic.par_names = ['atn_lrate_par']"
] | [
[
"numpy.abs",
"numpy.zeros",
"numpy.sum",
"numpy.ones"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
MohitChaudhari7/Machine-Learning-Using-Python | [
"ff368791133a54df098490d283daf30547b10e8e"
] | [
"Classification/logistic_regression.py"
] | [
"# Logistic Regression\n\n# Importing the libraries\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\n# Importing the dataset\ndataset = pd.read_csv('../Datasets/advertising.csv')\nind = dataset.iloc[:, [0, 2]].values #independent variables(daily time spent on the site and income)\ndep = dataset.iloc[:, -1].values #dependent variables\n\n# Splitting the dataset into the Training set and Test set\nfrom sklearn.model_selection import train_test_split\nind_train, ind_test, dep_train, dep_test = train_test_split(ind, dep, test_size = 0.2, random_state = 0)\n\n# Feature Scaling ,we do not scale the dep variable as it gives only 1 or 0\nfrom sklearn.preprocessing import StandardScaler\nsc = StandardScaler()\nind_train = sc.fit_transform(ind_train) #we fit the data to training set and not the test set\nind_test = sc.transform(ind_test)\n\n# Training the Logistic Regression model on the Training set\nfrom sklearn.linear_model import LogisticRegression\nclassifier = LogisticRegression(random_state = 0)\nclassifier.fit(ind_train, dep_train) #we train the classifier\n\ndep_pred = classifier.predict(ind_test) #we predict the test set results\n\n# read about plotting of contours here \"https://matplotlib.org/3.1.1/gallery/images_contours_and_fields/contour_image.html#sphx-glr-gallery-images-contours-and-fields-contour-image-py\"\n# Plotting the Training set results\nfrom matplotlib.colors import ListedColormap\nx, y = ind_train, dep_train\nX, Y = np.meshgrid(np.arange(start = x[:, 0].min() - 0.5, stop = x[:, 0].max() + 0.5, step = 0.01),\n np.arange(start = x[:, 1].min() - 0.5, stop = x[:, 1].max() + 0.5, step = 0.01))\nplt.xlim(X.min(), X.max())\nplt.ylim(Y.min(), Y.max())\nplt.contourf(X, Y, classifier.predict(np.array([X.ravel(), Y.ravel()]).T).reshape(X.shape),\n alpha = 0.5, cmap = ListedColormap(('red', 'blue')))\n#plotting the data points\nun_y =np.unique(y)\nfor i, j in enumerate(un_y):\n plt.scatter(x[y == j, 0], x[y == j, 1],c = ListedColormap(('red', 'blue'))(i), label = j)\nplt.title('Training Set Results')\nplt.xlabel('Daily time spent on the site')\nplt.ylabel('Income')\nplt.legend()\nplt.show()\n\n# Plotting the Test set results\nx, y = ind_test, dep_test\nX, Y = np.meshgrid(np.arange(start = x[:, 0].min() - 0.5, stop = x[:, 0].max() + 0.5, step = 0.01),\n np.arange(start = x[:, 1].min() - 0.5, stop = x[:, 1].max() + 0.5, step = 0.01))\nplt.xlim(X.min(), X.max())\nplt.ylim(Y.min(), Y.max())\nplt.contourf(X, Y, classifier.predict(np.array([X.ravel(), Y.ravel()]).T).reshape(X.shape),\n alpha = 0.5, cmap = ListedColormap(('red', 'blue')))\n#plotting the data points\nun_y =np.unique(y)\nfor i, j in enumerate(un_y):\n plt.scatter(x[y == j, 0], x[y == j, 1],c = ListedColormap(('red', 'blue'))(i), label = j)\nplt.title('Test Set results')\nplt.xlabel('Daily time spent on the site')\nplt.ylabel('Income')\nplt.legend()\nplt.show()\n\n# Confusion Matrix(this matrix contains the amount of datapoints those are in correct region and those are in incorrect region)\nfrom sklearn.metrics import confusion_matrix\nprint(confusion_matrix(dep_test, dep_pred))\n"
] | [
[
"matplotlib.pyplot.legend",
"pandas.read_csv",
"sklearn.linear_model.LogisticRegression",
"matplotlib.pyplot.title",
"numpy.unique",
"sklearn.model_selection.train_test_split",
"sklearn.metrics.confusion_matrix",
"matplotlib.colors.ListedColormap",
"matplotlib.pyplot.xlabel",
"sklearn.preprocessing.StandardScaler",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylabel"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
vmarcin/FIT-projects | [
"69e3e0f1f271aefd3135f92a681738a4f1a24395"
] | [
"sui/dicewars/ai/dt/wpm_c.py"
] | [
"import numpy\nimport logging\n\nfrom ..utils import probability_of_successful_attack, sigmoid\nfrom ..utils import possible_attacks\n\nfrom dicewars.client.ai_driver import BattleCommand, EndTurnCommand\n\n\nclass AI:\n \"\"\"Agent using Win Probability Maximization (WPM) using logarithms\n of player scores and dice\n\n This agent estimates win probability given the current state of the game.\n As a feature to describe the state, a vector of logarithms of players' dice\n and scores is used. The agent choses such moves, that will have the highest\n improvement in the estimated probability.\n \"\"\"\n def __init__(self, player_name, board, players_order):\n \"\"\"\n Parameters\n ----------\n game : Game\n\n Attributes\n ----------\n players_order : list of int\n Names of players in the order they are playing, with the agent being first\n weights : dict of numpy.array\n Weights for estimating win probability\n largest_region: list of int\n Names of areas in the largest region\n \"\"\"\n self.player_name = player_name\n self.logger = logging.getLogger('AI')\n self.players = board.nb_players_alive()\n self.largest_region = []\n\n self.players_order = players_order\n while self.player_name != self.players_order[0]:\n self.players_order.append(self.players_order.pop(0))\n\n self.weights = {\n 2: numpy.array([1.30214778, 2.25563871, -1.30214778, -2.25563871]),\n 3: numpy.array([1.03427841, 0.50262886, -0.78619448, -0.31264667,\n -0.74070513, -0.3344083]),\n 4: numpy.array([1.04279419, 0.25416893, -0.64830571, -0.15321224,\n -0.64217824, -0.11354054, -0.59113493, -0.19902261]),\n 5: numpy.array([0.88792394, 0.23898045, -0.50630318, -0.10684734,\n -0.48406202, -0.12877724, -0.48004353, -0.17429738,\n -0.51195613, -0.12572176]),\n 6: numpy.array([0.84452717, 0.20915755, -0.4275969, -0.12319906,\n -0.438397, -0.11476484, -0.44610219, -0.10640943,\n -0.42926595, -0.15994294, -0.40215393, -0.12508173]),\n 7: numpy.array([0.77043331, 0.22744643, -0.34448306, -0.16104125,\n -0.34304867, -0.16545059, -0.36316993, -0.14238659,\n -0.37359036, -0.13535348, -0.34917492, -0.13725688,\n -0.36908313, -0.11803061]),\n 8: numpy.array([0.71518557, 0.2580538, -0.3303392, -0.13374949,\n -0.3288953, -0.16076534, -0.31261043, -0.14316612,\n -0.31785557, -0.16003507, -0.31410674, -0.16487769,\n -0.33290964, -0.12624279, -0.33843017, -0.14888412]),\n }[self.players]\n numpy.warnings.filterwarnings('ignore')\n\n def ai_turn(self, board, nb_moves_this_turn, nb_turns_this_game, time_left):\n \"\"\"AI agent's turn\n\n This agent estimates probability to win the game from the feature vector associated\n with the outcome of the move and chooses such that has highest improvement in the\n probability.\n \"\"\"\n self.board = board\n self.logger.debug(\"Looking for possible turns.\")\n turns = self.possible_turns()\n if turns and turns[0][0] != 'end':\n turn = turns[0]\n area_name = turn[0]\n self.logger.debug(\"Possible turn: {}\".format(turn))\n atk_area = self.board.get_area(turn[0])\n atk_power = atk_area.get_dice()\n\n if turn[2] >= -0.05 or atk_power == 8:\n return BattleCommand(turn[0], turn[1])\n\n if turns and turns[0][0] == 'end':\n for i in range(1, len(turns)):\n area_name = turns[i][0]\n atk_area = self.board.get_area(area_name)\n atk_power = atk_area.get_dice()\n if atk_power == 8:\n return BattleCommand(area_name, turns[i][1])\n\n self.logger.debug(\"Don't want to attack anymore.\")\n return EndTurnCommand()\n\n def get_features(self, end_turn=False):\n \"\"\"Get features associated with a move\n\n Parameters\n 
----------\n end_turn : bool\n The move is ending the turn\n\n Returns\n -------\n list of int\n \"\"\"\n features = []\n for p in self.players_order:\n score = numpy.log(self.get_score_by_player(p) + 1)\n if end_turn and p == self.player_name:\n dice = numpy.log(self.board.get_player_dice(p) + self.get_score_by_player(p) + 1)\n else:\n dice = numpy.log(self.board.get_player_dice(p) + 1)\n features.append(score)\n features.append(dice)\n return features\n\n def possible_turns(self):\n \"\"\"Get list of possible turns with the associated improvement\n in estimated win probability. The list is sorted in descending order\n with respect to the improvement.\n \"\"\"\n turns = []\n name = self.player_name\n\n features = self.get_features()\n wp_start = numpy.log(sigmoid(numpy.dot(numpy.array(features), self.weights)))\n\n end_features = self.get_features(end_turn=True)\n wp_end = numpy.log(sigmoid(numpy.dot(numpy.array(end_features), self.weights)))\n improvement = wp_end - wp_start\n\n turns.append(['end', 0, improvement])\n\n for source, target in possible_attacks(self.board, self.player_name):\n area_name = source.get_name()\n atk_power = source.get_dice()\n def_power = target.get_dice()\n opponent_name = target.get_owner_name()\n # check whether the attack would expand the largest region\n increase_score = False\n if area_name in self.largest_region:\n increase_score = True\n else:\n for n in target.get_adjacent_areas():\n if n in self.largest_region:\n increase_score = True\n break\n\n a_dice = self.board.get_player_dice(name)\n a_score = self.get_score_by_player(name)\n if increase_score:\n a_score += 1\n\n atk_dice = {\n \"current\": a_dice,\n \"win\": a_dice + a_score,\n \"loss\": a_dice + a_score - atk_power + 1,\n }\n\n d_dice = self.board.get_player_dice(opponent_name)\n def_dice = {\n \"loss\": d_dice,\n \"win\": d_dice - def_power,\n }\n\n atk_prob = probability_of_successful_attack(self.board, area_name, target.get_name())\n opponent_idx = self.players_order.index(opponent_name) * 2 + 1\n win_features = [d for d in features]\n win_features[1] = numpy.log(atk_dice[\"win\"] + 1)\n win_features[opponent_idx] = numpy.log(def_dice[\"win\"] + 1)\n\n loss_features = [d for d in features]\n loss_features[1] = numpy.log(atk_dice[\"loss\"] + 1)\n loss_features[opponent_idx] = numpy.log(def_dice[\"loss\"] + 1)\n\n wp_win = sigmoid(numpy.dot(numpy.array(win_features), self.weights))\n wp_loss = sigmoid(numpy.dot(numpy.array(loss_features), self.weights))\n\n wp_win = sigmoid(numpy.dot(numpy.array(win_features), self.weights))\n wp_loss = sigmoid(numpy.dot(numpy.array(loss_features), self.weights))\n total_prob = (wp_win * atk_prob) + (wp_loss * (1.0 - atk_prob))\n wp_atk = numpy.log(total_prob)\n\n improvement = wp_atk - wp_start\n turns.append([area_name, target.get_name(), improvement])\n\n return sorted(turns, key=lambda turn: turn[2], reverse=True)\n\n def get_score_by_player(self, player_name, skip_area=None):\n \"\"\"Get score of a player\n\n Parameters\n ----------\n player_name : int\n skip_area : int\n Name of an area to be excluded from the calculation\n\n Returns\n -------\n int\n score of the player\n \"\"\"\n players_regions = self.board.get_players_regions(self.player_name, skip_area=skip_area)\n max_region_size = max(len(region) for region in players_regions)\n\n return max_region_size\n\n def get_largest_region(self):\n \"\"\"Get size of the largest region, including the areas within\n\n Attributes\n ----------\n largest_region : list of int\n Names of areas in the largest 
region\n\n Returns\n -------\n int\n Number of areas in the largest region\n \"\"\"\n self.largest_region = []\n\n players_regions = self.board.get_players_regions(self.player_name)\n max_region_size = max(len(region) for region in players_regions)\n max_sized_regions = [region for region in players_regions if len(region) == max_region_size]\n\n for region in max_sized_regions:\n for area in region:\n self.largest_region.append(area)\n return max_region_size\n"
] | [
[
"numpy.log",
"numpy.array",
"numpy.warnings.filterwarnings"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
XYHC-MMDA/Multi-modal-Multi-task-DA | [
"ed8297eb489d50c580795713cccb72bc958f406f",
"ed8297eb489d50c580795713cccb72bc958f406f"
] | [
"nuscenes/eval/common/loaders.py",
"scripts/old/old_acc_curve.py"
] | [
"# nuScenes dev-kit.\n# Code written by Oscar Beijbom, 2019.\n\nimport json\nfrom typing import Dict, Tuple\n\nimport numpy as np\nimport tqdm\nfrom pyquaternion import Quaternion\n\nfrom nuscenes import NuScenes\nfrom nuscenes.eval.common.data_classes import EvalBoxes\nfrom nuscenes.eval.detection.data_classes import DetectionBox\nfrom nuscenes.eval.detection.utils import category_to_detection_name\nfrom nuscenes.eval.tracking.data_classes import TrackingBox\nfrom nuscenes.eval.tracking.utils import category_to_tracking_name\nfrom nuscenes.utils.data_classes import Box\nfrom nuscenes.utils.geometry_utils import points_in_box\nfrom nuscenes.utils.splits import create_splits_scenes\n\n\ndef load_prediction(result_path: str, max_boxes_per_sample: int, box_cls, verbose: bool = False) \\\n -> Tuple[EvalBoxes, Dict]:\n \"\"\"\n Loads object predictions from file.\n :param result_path: Path to the .json result file provided by the user.\n :param max_boxes_per_sample: Maximim number of boxes allowed per sample.\n :param box_cls: Type of box to load, e.g. DetectionBox or TrackingBox.\n :param verbose: Whether to print messages to stdout.\n :return: The deserialized results and meta data.\n \"\"\"\n\n # Load from file and check that the format is correct.\n with open(result_path) as f:\n data = json.load(f)\n assert 'results' in data, 'Error: No field `results` in result file. Please note that the result format changed.' \\\n 'See https://www.nuscenes.org/object-detection for more information.'\n\n # Deserialize results and get meta data.\n all_results = EvalBoxes.deserialize(data['results'], box_cls)\n meta = data['meta']\n if verbose:\n print(\"Loaded results from {}. Found detections for {} samples.\"\n .format(result_path, len(all_results.sample_tokens)))\n\n # Check that each sample has no more than x predicted boxes.\n for sample_token in all_results.sample_tokens:\n assert len(all_results.boxes[sample_token]) <= max_boxes_per_sample, \\\n \"Error: Only <= %d boxes per sample allowed!\" % max_boxes_per_sample\n\n return all_results, meta\n\n\ndef load_merge_from_pkl(nusc: NuScenes, pkl_path: str, box_cls, verbose: bool = False) -> EvalBoxes:\n # Init.\n if box_cls == DetectionBox:\n attribute_map = {a['token']: a['name'] for a in nusc.attribute}\n\n if verbose:\n print('Loading annotations for {} split from nuScenes version: {}'.format(pkl_path, nusc.version))\n\n import mmcv\n infos = mmcv.load(pkl_path)['infos']\n samples = []\n for info in infos:\n samples.append(nusc.get('sample', info['token']))\n all_annotations = EvalBoxes()\n\n # Load annotations and filter predictions and annotations.\n merge_map = dict(car='vehicle',\n truck='vehicle',\n bus='vehicle',\n trailer='vehicle',\n construction_vehicle='vehicle',\n pedestrian='pedestrian',\n motorcycle='bike',\n bicycle='bike',\n traffic_cone='traffic_boundary',\n barrier='traffic_boundary')\n for sample in tqdm.tqdm(samples, leave=verbose):\n sample_token = sample['token']\n cam_token = sample['data']['CAM_FRONT']\n _, boxes_cam, _ = nusc.get_sample_data(cam_token)\n sample_annotation_tokens = [box.token for box in boxes_cam]\n\n # sample = nusc.get('sample', sample_token)\n # sample_annotation_tokens = sample['anns']\n\n sample_boxes = []\n for sample_annotation_token in sample_annotation_tokens:\n\n sample_annotation = nusc.get('sample_annotation', sample_annotation_token)\n if box_cls == DetectionBox:\n # Get label name in detection task and filter unused labels.\n detection_name = 
category_to_detection_name(sample_annotation['category_name'])\n if detection_name is None:\n continue\n detection_name = merge_map[detection_name]\n\n # Get attribute_name.\n attr_tokens = sample_annotation['attribute_tokens']\n attr_count = len(attr_tokens)\n if attr_count == 0:\n attribute_name = ''\n elif attr_count == 1:\n attribute_name = attribute_map[attr_tokens[0]]\n else:\n raise Exception('Error: GT annotations must not have more than one attribute!')\n\n sample_boxes.append(\n box_cls(\n sample_token=sample_token,\n translation=sample_annotation['translation'],\n size=sample_annotation['size'],\n rotation=sample_annotation['rotation'],\n velocity=nusc.box_velocity(sample_annotation['token'])[:2],\n num_pts=sample_annotation['num_lidar_pts'] + sample_annotation['num_radar_pts'],\n detection_name=detection_name,\n detection_score=-1.0, # GT samples do not have a score.\n attribute_name=attribute_name\n )\n )\n else:\n raise NotImplementedError('Error: Invalid box_cls %s!' % box_cls)\n\n all_annotations.add_boxes(sample_token, sample_boxes)\n\n if verbose:\n print(\"Loaded ground truth annotations for {} samples.\".format(len(all_annotations.sample_tokens)))\n\n return all_annotations\n\n\ndef load_pkl_front_cam(nusc: NuScenes, pkl_path: str, box_cls, verbose: bool = False) -> EvalBoxes:\n # Init.\n if box_cls == DetectionBox:\n attribute_map = {a['token']: a['name'] for a in nusc.attribute}\n\n if verbose:\n print('Loading annotations for {} split from nuScenes version: {}'.format(pkl_path, nusc.version))\n\n import mmcv\n infos = mmcv.load(pkl_path)['infos']\n samples = []\n for info in infos:\n samples.append(nusc.get('sample', info['token']))\n\n all_annotations = EvalBoxes()\n\n # Load annotations and filter predictions and annotations.\n for sample in tqdm.tqdm(samples, leave=verbose):\n sample_token = sample['token']\n cam_token = sample['data']['CAM_FRONT']\n _, boxes_cam, _ = nusc.get_sample_data(cam_token)\n sample_annotation_tokens = [box.token for box in boxes_cam]\n\n # sample = nusc.get('sample', sample_token)\n # sample_annotation_tokens = sample['anns']\n\n sample_boxes = []\n for sample_annotation_token in sample_annotation_tokens:\n\n sample_annotation = nusc.get('sample_annotation', sample_annotation_token)\n if box_cls == DetectionBox:\n # Get label name in detection task and filter unused labels.\n detection_name = category_to_detection_name(sample_annotation['category_name'])\n if detection_name is None:\n continue\n\n # Get attribute_name.\n attr_tokens = sample_annotation['attribute_tokens']\n attr_count = len(attr_tokens)\n if attr_count == 0:\n attribute_name = ''\n elif attr_count == 1:\n attribute_name = attribute_map[attr_tokens[0]]\n else:\n raise Exception('Error: GT annotations must not have more than one attribute!')\n\n sample_boxes.append(\n box_cls(\n sample_token=sample_token,\n translation=sample_annotation['translation'],\n size=sample_annotation['size'],\n rotation=sample_annotation['rotation'],\n velocity=nusc.box_velocity(sample_annotation['token'])[:2],\n num_pts=sample_annotation['num_lidar_pts'] + sample_annotation['num_radar_pts'],\n detection_name=detection_name,\n detection_score=-1.0, # GT samples do not have a score.\n attribute_name=attribute_name\n )\n )\n else:\n raise NotImplementedError('Error: Invalid box_cls %s!' 
% box_cls)\n\n all_annotations.add_boxes(sample_token, sample_boxes)\n\n if verbose:\n print(\"Loaded ground truth annotations for {} samples.\".format(len(all_annotations.sample_tokens)))\n\n return all_annotations\n\n\ndef load_gt_front_cam(nusc: NuScenes, eval_split: str, box_cls, verbose: bool = False) -> EvalBoxes:\n # Init.\n if box_cls == DetectionBox:\n attribute_map = {a['token']: a['name'] for a in nusc.attribute}\n\n if verbose:\n print('Loading annotations for {} split from nuScenes version: {}'.format(eval_split, nusc.version))\n\n # Only keep samples from this split.\n splits = create_splits_scenes()\n\n # Check compatibility of split with nusc_version.\n version = nusc.version\n if eval_split in {'train', 'val', 'train_detect', 'train_track'}:\n assert version.endswith('trainval'), \\\n 'Error: Requested split {} which is not compatible with NuScenes version {}'.format(eval_split, version)\n elif eval_split in {'mini_train', 'mini_val'}:\n assert version.endswith('mini'), \\\n 'Error: Requested split {} which is not compatible with NuScenes version {}'.format(eval_split, version)\n elif eval_split == 'test':\n assert version.endswith('test'), \\\n 'Error: Requested split {} which is not compatible with NuScenes version {}'.format(eval_split, version)\n else:\n raise ValueError('Error: Requested split {} which this function cannot map to the correct NuScenes version.'\n .format(eval_split))\n\n if eval_split == 'test':\n # Check that you aren't trying to cheat :).\n assert len(nusc.sample_annotation) > 0, \\\n 'Error: You are trying to evaluate on the test set but you do not have the annotations!'\n\n samples = []\n for sample in nusc.sample:\n scene_record = nusc.get('scene', sample['scene_token'])\n if scene_record['name'] in splits[eval_split]:\n samples.append(sample)\n\n all_annotations = EvalBoxes()\n\n # Load annotations and filter predictions and annotations.\n tracking_id_set = set()\n for sample in tqdm.tqdm(samples, leave=verbose):\n sample_token = sample['token']\n cam_token = sample['data']['CAM_FRONT']\n _, boxes_cam, _ = nusc.get_sample_data(cam_token)\n sample_annotation_tokens = [box.token for box in boxes_cam]\n\n # sample = nusc.get('sample', sample_token)\n # sample_annotation_tokens = sample['anns']\n\n sample_boxes = []\n for sample_annotation_token in sample_annotation_tokens:\n\n sample_annotation = nusc.get('sample_annotation', sample_annotation_token)\n if box_cls == DetectionBox:\n # Get label name in detection task and filter unused labels.\n detection_name = category_to_detection_name(sample_annotation['category_name'])\n if detection_name is None:\n continue\n\n # Get attribute_name.\n attr_tokens = sample_annotation['attribute_tokens']\n attr_count = len(attr_tokens)\n if attr_count == 0:\n attribute_name = ''\n elif attr_count == 1:\n attribute_name = attribute_map[attr_tokens[0]]\n else:\n raise Exception('Error: GT annotations must not have more than one attribute!')\n\n sample_boxes.append(\n box_cls(\n sample_token=sample_token,\n translation=sample_annotation['translation'],\n size=sample_annotation['size'],\n rotation=sample_annotation['rotation'],\n velocity=nusc.box_velocity(sample_annotation['token'])[:2],\n num_pts=sample_annotation['num_lidar_pts'] + sample_annotation['num_radar_pts'],\n detection_name=detection_name,\n detection_score=-1.0, # GT samples do not have a score.\n attribute_name=attribute_name\n )\n )\n elif box_cls == TrackingBox:\n # Use nuScenes token as tracking id.\n tracking_id = 
sample_annotation['instance_token']\n tracking_id_set.add(tracking_id)\n\n # Get label name in detection task and filter unused labels.\n tracking_name = category_to_tracking_name(sample_annotation['category_name'])\n if tracking_name is None:\n continue\n\n sample_boxes.append(\n box_cls(\n sample_token=sample_token,\n translation=sample_annotation['translation'],\n size=sample_annotation['size'],\n rotation=sample_annotation['rotation'],\n velocity=nusc.box_velocity(sample_annotation['token'])[:2],\n num_pts=sample_annotation['num_lidar_pts'] + sample_annotation['num_radar_pts'],\n tracking_id=tracking_id,\n tracking_name=tracking_name,\n tracking_score=-1.0 # GT samples do not have a score.\n )\n )\n else:\n raise NotImplementedError('Error: Invalid box_cls %s!' % box_cls)\n\n all_annotations.add_boxes(sample_token, sample_boxes)\n\n if verbose:\n print(\"Loaded ground truth annotations for {} samples.\".format(len(all_annotations.sample_tokens)))\n\n return all_annotations\n\n\ndef load_gt(nusc: NuScenes, eval_split: str, box_cls, verbose: bool = False) -> EvalBoxes:\n \"\"\"\n Loads ground truth boxes from DB.\n :param nusc: A NuScenes instance.\n :param eval_split: The evaluation split for which we load GT boxes.\n :param box_cls: Type of box to load, e.g. DetectionBox or TrackingBox.\n :param verbose: Whether to print messages to stdout.\n :return: The GT boxes.\n \"\"\"\n # Init.\n if box_cls == DetectionBox:\n attribute_map = {a['token']: a['name'] for a in nusc.attribute}\n\n if verbose:\n print('Loading annotations for {} split from nuScenes version: {}'.format(eval_split, nusc.version))\n # Read out all sample_tokens in DB.\n sample_tokens_all = [s['token'] for s in nusc.sample]\n assert len(sample_tokens_all) > 0, \"Error: Database has no samples!\"\n\n # Only keep samples from this split.\n splits = create_splits_scenes()\n\n # Check compatibility of split with nusc_version.\n version = nusc.version\n if eval_split in {'train', 'val', 'train_detect', 'train_track'}:\n assert version.endswith('trainval'), \\\n 'Error: Requested split {} which is not compatible with NuScenes version {}'.format(eval_split, version)\n elif eval_split in {'mini_train', 'mini_val'}:\n assert version.endswith('mini'), \\\n 'Error: Requested split {} which is not compatible with NuScenes version {}'.format(eval_split, version)\n elif eval_split == 'test':\n assert version.endswith('test'), \\\n 'Error: Requested split {} which is not compatible with NuScenes version {}'.format(eval_split, version)\n else:\n raise ValueError('Error: Requested split {} which this function cannot map to the correct NuScenes version.'\n .format(eval_split))\n\n if eval_split == 'test':\n # Check that you aren't trying to cheat :).\n assert len(nusc.sample_annotation) > 0, \\\n 'Error: You are trying to evaluate on the test set but you do not have the annotations!'\n\n sample_tokens = []\n for sample_token in sample_tokens_all:\n scene_token = nusc.get('sample', sample_token)['scene_token']\n scene_record = nusc.get('scene', scene_token)\n if scene_record['name'] in splits[eval_split]:\n sample_tokens.append(sample_token)\n\n all_annotations = EvalBoxes()\n\n # Load annotations and filter predictions and annotations.\n tracking_id_set = set()\n for sample_token in tqdm.tqdm(sample_tokens, leave=verbose):\n\n sample = nusc.get('sample', sample_token)\n sample_annotation_tokens = sample['anns']\n\n sample_boxes = []\n for sample_annotation_token in sample_annotation_tokens:\n\n sample_annotation = 
nusc.get('sample_annotation', sample_annotation_token)\n if box_cls == DetectionBox:\n # Get label name in detection task and filter unused labels.\n detection_name = category_to_detection_name(sample_annotation['category_name'])\n if detection_name is None:\n continue\n\n # Get attribute_name.\n attr_tokens = sample_annotation['attribute_tokens']\n attr_count = len(attr_tokens)\n if attr_count == 0:\n attribute_name = ''\n elif attr_count == 1:\n attribute_name = attribute_map[attr_tokens[0]]\n else:\n raise Exception('Error: GT annotations must not have more than one attribute!')\n\n sample_boxes.append(\n box_cls(\n sample_token=sample_token,\n translation=sample_annotation['translation'],\n size=sample_annotation['size'],\n rotation=sample_annotation['rotation'],\n velocity=nusc.box_velocity(sample_annotation['token'])[:2],\n num_pts=sample_annotation['num_lidar_pts'] + sample_annotation['num_radar_pts'],\n detection_name=detection_name,\n detection_score=-1.0, # GT samples do not have a score.\n attribute_name=attribute_name\n )\n )\n elif box_cls == TrackingBox:\n # Use nuScenes token as tracking id.\n tracking_id = sample_annotation['instance_token']\n tracking_id_set.add(tracking_id)\n\n # Get label name in detection task and filter unused labels.\n tracking_name = category_to_tracking_name(sample_annotation['category_name'])\n if tracking_name is None:\n continue\n\n sample_boxes.append(\n box_cls(\n sample_token=sample_token,\n translation=sample_annotation['translation'],\n size=sample_annotation['size'],\n rotation=sample_annotation['rotation'],\n velocity=nusc.box_velocity(sample_annotation['token'])[:2],\n num_pts=sample_annotation['num_lidar_pts'] + sample_annotation['num_radar_pts'],\n tracking_id=tracking_id,\n tracking_name=tracking_name,\n tracking_score=-1.0 # GT samples do not have a score.\n )\n )\n else:\n raise NotImplementedError('Error: Invalid box_cls %s!' 
% box_cls)\n\n all_annotations.add_boxes(sample_token, sample_boxes)\n\n if verbose:\n print(\"Loaded ground truth annotations for {} samples.\".format(len(all_annotations.sample_tokens)))\n\n return all_annotations\n\n\ndef add_center_dist(nusc: NuScenes,\n eval_boxes: EvalBoxes):\n \"\"\"\n Adds the cylindrical (xy) center distance from ego vehicle to each box.\n :param nusc: The NuScenes instance.\n :param eval_boxes: A set of boxes, either GT or predictions.\n :return: eval_boxes augmented with center distances.\n \"\"\"\n for sample_token in eval_boxes.sample_tokens:\n sample_rec = nusc.get('sample', sample_token)\n sd_record = nusc.get('sample_data', sample_rec['data']['LIDAR_TOP'])\n pose_record = nusc.get('ego_pose', sd_record['ego_pose_token'])\n cs_record = nusc.get('calibrated_sensor', sd_record['calibrated_sensor_token'])\n lidar2ego_rotation = cs_record['rotation']\n lidar2ego_translation = cs_record['translation']\n ego2global_rotation = pose_record['rotation']\n ego2global_translation = pose_record['translation']\n\n for box in eval_boxes[sample_token]:\n # Both boxes and ego pose are given in global coord system, so distance can be calculated directly.\n # Note that the z component of the ego pose is 0.\n center_ego = np.array(box.translation) - np.array(ego2global_translation)\n center_ego_tmp = np.dot(Quaternion(ego2global_rotation).inverse.rotation_matrix, center_ego)\n center_lidar_tmp = center_ego_tmp - np.array(lidar2ego_translation)\n center_lidar = np.dot(Quaternion(lidar2ego_rotation).inverse.rotation_matrix, center_lidar_tmp)\n \n if isinstance(box, DetectionBox) or isinstance(box, TrackingBox):\n box.ego_translation = tuple(center_ego)\n box.lidar_translation = tuple(center_lidar) \n else:\n raise NotImplementedError\n\n return eval_boxes\n\n\ndef filter_eval_boxes(nusc: NuScenes,\n eval_boxes: EvalBoxes,\n max_dist: Dict[str, float],\n verbose: bool = False) -> EvalBoxes:\n \"\"\"\n Applies filtering to boxes. Distance, bike-racks and points per box.\n :param nusc: An instance of the NuScenes class.\n :param eval_boxes: An instance of the EvalBoxes class.\n :param max_dist: Maps the detection name to the eval distance threshold for that class.\n :param verbose: Whether to print to stdout.\n \"\"\"\n # Retrieve box type for detectipn/tracking boxes.\n class_field = _get_box_class_field(eval_boxes)\n\n # Accumulators for number of filtered boxes.\n total, dist_filter, point_filter, bike_rack_filter = 0, 0, 0, 0\n for ind, sample_token in enumerate(eval_boxes.sample_tokens):\n\n # Filter on distance first.\n total += len(eval_boxes[sample_token])\n eval_boxes.boxes[sample_token] = [box for box in eval_boxes[sample_token] if\n box.ego_dist < max_dist[box.__getattribute__(class_field)]]\n dist_filter += len(eval_boxes[sample_token])\n\n # Then remove boxes with zero points in them. 
Eval boxes have -1 points by default.\n eval_boxes.boxes[sample_token] = [box for box in eval_boxes[sample_token] if not box.num_pts == 0]\n point_filter += len(eval_boxes[sample_token])\n\n # Perform bike-rack filtering.\n sample_anns = nusc.get('sample', sample_token)['anns']\n bikerack_recs = [nusc.get('sample_annotation', ann) for ann in sample_anns if\n nusc.get('sample_annotation', ann)['category_name'] == 'static_object.bicycle_rack']\n bikerack_boxes = [Box(rec['translation'], rec['size'], Quaternion(rec['rotation'])) for rec in bikerack_recs]\n filtered_boxes = []\n for box in eval_boxes[sample_token]:\n if box.__getattribute__(class_field) in ['bicycle', 'motorcycle']:\n in_a_bikerack = False\n for bikerack_box in bikerack_boxes:\n if np.sum(points_in_box(bikerack_box, np.expand_dims(np.array(box.translation), axis=1))) > 0:\n in_a_bikerack = True\n if not in_a_bikerack:\n filtered_boxes.append(box)\n else:\n filtered_boxes.append(box)\n\n eval_boxes.boxes[sample_token] = filtered_boxes\n bike_rack_filter += len(eval_boxes.boxes[sample_token])\n\n if verbose:\n print(\"=> Original number of boxes: %d\" % total)\n print(\"=> After distance based filtering: %d\" % dist_filter)\n print(\"=> After LIDAR points based filtering: %d\" % point_filter)\n print(\"=> After bike rack filtering: %d\" % bike_rack_filter)\n\n return eval_boxes\n\n\ndef filter_half_boxes(nusc: NuScenes,\n eval_boxes: EvalBoxes,\n max_dist: Dict[str, float],\n verbose: bool = False) -> EvalBoxes:\n \"\"\"\n Applies filtering to boxes. Distance, bike-racks and points per box.\n :param nusc: An instance of the NuScenes class.\n :param eval_boxes: An instance of the EvalBoxes class.\n :param max_dist: Maps the detection name to the eval distance threshold for that class.\n :param verbose: Whether to print to stdout.\n \"\"\"\n # Retrieve box type for detectipn/tracking boxes.\n class_field = _get_box_class_field(eval_boxes)\n\n # Accumulators for number of filtered boxes.\n total, dist_filter, point_filter, bike_rack_filter = 0, 0, 0, 0\n for ind, sample_token in enumerate(eval_boxes.sample_tokens):\n\n # Filter on distance first.\n total += len(eval_boxes[sample_token])\n eval_boxes.boxes[sample_token] = [box for box in eval_boxes[sample_token] if\n box.lidar_translation[1] > 0 and\n box.ego_dist < max_dist[box.__getattribute__(class_field)]]\n dist_filter += len(eval_boxes[sample_token])\n\n # Then remove boxes with zero points in them. 
Eval boxes have -1 points by default.\n eval_boxes.boxes[sample_token] = [box for box in eval_boxes[sample_token] if not box.num_pts == 0]\n point_filter += len(eval_boxes[sample_token])\n\n # Perform bike-rack filtering.\n sample_anns = nusc.get('sample', sample_token)['anns']\n bikerack_recs = [nusc.get('sample_annotation', ann) for ann in sample_anns if\n nusc.get('sample_annotation', ann)['category_name'] == 'static_object.bicycle_rack']\n bikerack_boxes = [Box(rec['translation'], rec['size'], Quaternion(rec['rotation'])) for rec in bikerack_recs]\n filtered_boxes = []\n for box in eval_boxes[sample_token]:\n if box.__getattribute__(class_field) in ['bicycle', 'motorcycle']:\n in_a_bikerack = False\n for bikerack_box in bikerack_boxes:\n if np.sum(points_in_box(bikerack_box, np.expand_dims(np.array(box.translation), axis=1))) > 0:\n in_a_bikerack = True\n if not in_a_bikerack:\n filtered_boxes.append(box)\n else:\n filtered_boxes.append(box)\n\n eval_boxes.boxes[sample_token] = filtered_boxes\n bike_rack_filter += len(eval_boxes.boxes[sample_token])\n\n if verbose:\n print(\"=> Original number of boxes: %d\" % total)\n print(\"=> After distance based filtering: %d\" % dist_filter)\n print(\"=> After LIDAR points based filtering: %d\" % point_filter)\n print(\"=> After bike rack filtering: %d\" % bike_rack_filter)\n\n return eval_boxes\n\n\ndef _get_box_class_field(eval_boxes: EvalBoxes) -> str:\n \"\"\"\n Retrieve the name of the class field in the boxes.\n This parses through all boxes until it finds a valid box.\n If there are no valid boxes, this function throws an exception.\n :param eval_boxes: The EvalBoxes used for evaluation.\n :return: The name of the class field in the boxes, e.g. detection_name or tracking_name.\n \"\"\"\n assert len(eval_boxes.boxes) > 0\n box = None\n for val in eval_boxes.boxes.values():\n if len(val) > 0:\n box = val[0]\n break\n if isinstance(box, DetectionBox):\n class_field = 'detection_name'\n elif isinstance(box, TrackingBox):\n class_field = 'tracking_name'\n else:\n raise Exception('Error: Invalid box type: %s' % box)\n\n return class_field\n",
"import matplotlib.pyplot as plt\nimport numpy as np\n\nplt_colors = ['c', 'r', 'g', 'b', 'y', 'k', 'm', '#2A0134', '#FF00FF', '#800000']\nplt_markers = ['*', '.', 'o', '^', 'v', '<', '>', '1', '2', '3', '4', 's', 'p', ',']\nfont = {\n 'size': 18\n}\n\nsrc_domain, tgt_domain = 'usa', 'sng'\nsub_dir = 'baseline2_usa'\ntgt_test_file = f'../checkpoints/fusion_consis/xmuda/{sub_dir}/filter.txt' # to add: tgt_val, src_val\n\naccf = open(tgt_test_file, 'r')\nlines = accf.readlines()\naccf.close()\ntgt_seg = []\nfor line in lines:\n if line.startswith('overall_iou'):\n iou = float(line.split()[-1])\n tgt_seg.append(iou)\ntgt_seg = tgt_seg[:24]\nx_range = np.arange(len(tgt_seg)) + 1\n\n\nif __name__ == '__main__':\n print(f'{sub_dir}: {max(tgt_seg)}')\n\n plt.xlabel('epoch', font)\n plt.ylabel('Seg_mIOU', font)\n tgt_seg = np.array(tgt_seg)\n plt.plot(x_range, tgt_seg, label='target_val', color='b', linewidth=0.7)\n plt.legend(loc='best', prop=font)\n plt.xticks(range(0, 25))\n plt.ylim(bottom=0, top=0.7)\n plt.show()\n"
] | [
[
"numpy.array"
],
[
"matplotlib.pyplot.legend",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.xlabel",
"numpy.array",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylabel"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
wesleygas/urban-piriquito | [
"a07dacb8ecf1da40e15d7085ef4502a87e6894bf"
] | [
"interface.py"
] | [
"try:\n # Python2\n import Tkinter as tk\nexcept ImportError:\n # Python3\n import tkinter\n# needs Python25 or higher\n\nfrom functools import partial\nfrom signalTeste import *\nimport matplotlib.pyplot as plt\n#matplotlib.use('TkAgg')\n\nnum ='12'\nsig = signalMeu()\ndef numero():\n global num, sig\n def on_key_press(event):\n global num, sig\n num = repr(event.char)\n num = num[1:-1]\n print(int(num))\n tempo, sinal = sig.geraNum(int(num))\n sig.playSig(sinal)\n plt.close(\"all\")\n plt.plot(tempo[0:500],sinal[0:500])\n plt.show(block=False)\n \n \n def click(btn):\n global num, sig\n # test the button command click\n if(btn == \"exit\"):\n boot.quit()\n else:\n print(int(btn))\n tempo, sinal = sig.geraNum(int(btn), duration=2)\n sig.playSig(sinal)\n plt.close(\"all\")\n plt.plot(tempo[0:500],sinal[0:500])\n #tempo, sinal = sig.calcFFT(sinal, 48000)\n #plt.plot(tempo,sinal)\n plt.show(block=False)\n \n \n \n num = btn\n #return num\n\n boot = tkinter.Tk()\n\n boot['bg'] = 'green'\n # create a labeled frame for the keypad buttons\n # relief='groove' and labelanchor='nw' are default\n lf = tkinter.LabelFrame(boot, bd=8)\n lf.pack(padx=15, pady=15)\n # typical calculator button layout\n btn_list = [\n '1', '2', '3',\n '4', '5', '6',\n '7', '8', '9',\n '','0', 'exit']\n # create and position all buttons with a for-loop\n # r, c used for row, column grid values\n r = 1\n c = 0\n n = 0\n # list(range()) needed for Python3\n btn = list(range(len(btn_list)))\n for label in btn_list:\n # partial takes care of function and argument\n cmd = partial(click, label)\n # create the button\n btn[n] = tkinter.Button(lf, text=label, width=10, height=5, command=cmd)\n # position the button\n btn[n].grid(row=r, column=c)\n # increment button index\n n += 1\n # update row/column position\n c += 1\n if c == 3:\n c = 0\n r += 1\n\n frame = tkinter.Frame(boot, width=100, height=100)\n frame.bind(\"<KeyRelease-1>\", on_key_press)\n frame.bind(\"<KeyRelease-2>\", on_key_press)\n frame.bind(\"<KeyRelease-3>\", on_key_press)\n frame.bind(\"<KeyRelease-4>\", on_key_press)\n frame.bind(\"<KeyRelease-5>\", on_key_press)\n frame.bind(\"<KeyRelease-6>\", on_key_press)\n frame.bind(\"<KeyRelease-7>\", on_key_press)\n frame.bind(\"<KeyRelease-8>\", on_key_press)\n frame.bind(\"<KeyRelease-9>\", on_key_press)\n frame.bind(\"<KeyRelease-0>\", on_key_press)\n frame.pack()\n frame.focus_set()\n\n\n\n tk = boot\n tk.resizable(width=False, height=False)\n tk.mainloop()\n\n\n\n\nif __name__ == '__main__':\n numero()"
] | [
[
"matplotlib.pyplot.plot",
"matplotlib.pyplot.show",
"matplotlib.pyplot.close"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
fatiando-bot/verde | [
"5d008b0866c1792183ccd02e7ddc0e917f7ed6e1",
"5d008b0866c1792183ccd02e7ddc0e917f7ed6e1"
] | [
"tutorials/model_evaluation.py",
"verde/vector.py"
] | [
"\"\"\"\n.. _model_evaluation:\n\nEvaluating Performance\n======================\n\nThe Green's functions based interpolations in Verde are all linear regressions under the\nhood. This means that we can use some of the same tactics from\n:mod:`sklearn.model_selection` to evaluate our interpolator's performance. Once we have\na quantified measure of the quality of a given fitted gridder, we can use it to tune the\ngridder's parameters, like ``damping`` for a :class:`~verde.Spline` (see\n:ref:`model_selection`).\n\nVerde provides adaptations of common scikit-learn tools to work better with spatial\ndata. Let's use these tools to evaluate the performance of a :class:`~verde.Spline` on\nour sample air temperature data.\n\"\"\"\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport cartopy.crs as ccrs\nimport pyproj\nimport verde as vd\n\ndata = vd.datasets.fetch_texas_wind()\n\n# Use Mercator projection because Spline is a Cartesian gridder\nprojection = pyproj.Proj(proj=\"merc\", lat_ts=data.latitude.mean())\nproj_coords = projection(data.longitude.values, data.latitude.values)\n\nregion = vd.get_region((data.longitude, data.latitude))\n# For this data, we'll generate a grid with 15 arc-minute spacing\nspacing = 15 / 60\n\n########################################################################################\n# Splitting the data\n# ------------------\n#\n# We can't evaluate a gridder on the data that went into fitting it. The true test of a\n# model is if it can correctly predict data that it hasn't seen before. scikit-learn has\n# the :func:`sklearn.model_selection.train_test_split` function to separate a dataset\n# into two parts: one for fitting the model (called *training* data) and a separate one\n# for evaluating the model (called *testing* data). Using it with spatial data would\n# involve some tedious array conversions so Verde implements\n# :func:`verde.train_test_split` which does the same thing but takes coordinates and\n# data arrays instead.\n#\n# The split is done randomly so we specify a seed for the random number generator to\n# guarantee that we'll get the same result every time we run this example. You probably\n# don't want to do that for real data. We'll keep 30% of the data to use for testing\n# (``test_size=0.3``).\n\ntrain, test = vd.train_test_split(\n proj_coords, data.air_temperature_c, test_size=0.3, random_state=0\n)\n\n########################################################################################\n# The returned ``train`` and ``test`` variables are tuples containing coordinates, data,\n# and (optionally) weights arrays. 
Since we're not using weights, the third element of\n# the tuple will be ``None``:\nprint(train)\n\n\n########################################################################################\n#\nprint(test)\n\n########################################################################################\n# Let's plot these two datasets with different colors:\n\nplt.figure(figsize=(8, 6))\nax = plt.axes()\nax.set_title(\"Air temperature measurements for Texas\")\nax.plot(train[0][0], train[0][1], \".r\", label=\"train\")\nax.plot(test[0][0], test[0][1], \".b\", label=\"test\")\nax.legend()\nax.set_aspect(\"equal\")\nplt.tight_layout()\nplt.show()\n\n########################################################################################\n# We can pass the training dataset to the :meth:`~verde.base.BaseGridder.fit` method of\n# most gridders using Python's argument expansion using the ``*`` symbol.\n\nspline = vd.Spline()\nspline.fit(*train)\n\n########################################################################################\n# Let's plot the gridded result to see what it looks like. First, we'll create a\n# geographic grid:\ngrid = spline.grid(\n region=region,\n spacing=spacing,\n projection=projection,\n dims=[\"latitude\", \"longitude\"],\n data_names=[\"temperature\"],\n)\nprint(grid)\n\n########################################################################################\n# Then, we'll mask out grid points that are too far from any given data point and plot\n# the grid:\nmask = vd.distance_mask(\n (data.longitude, data.latitude),\n maxdist=3 * spacing * 111e3,\n coordinates=vd.grid_coordinates(region, spacing=spacing),\n projection=projection,\n)\ngrid = grid.where(mask)\n\nplt.figure(figsize=(8, 6))\nax = plt.axes(projection=ccrs.Mercator())\nax.set_title(\"Gridded temperature\")\npc = grid.temperature.plot.pcolormesh(\n ax=ax,\n cmap=\"plasma\",\n transform=ccrs.PlateCarree(),\n add_colorbar=False,\n add_labels=False,\n)\nplt.colorbar(pc).set_label(\"C\")\nax.plot(data.longitude, data.latitude, \".k\", markersize=1, transform=ccrs.PlateCarree())\nvd.datasets.setup_texas_wind_map(ax)\nplt.tight_layout()\nplt.show()\n\n########################################################################################\n# Scoring\n# --------\n#\n# Gridders in Verde implement the :meth:`~verde.base.BaseGridder.score` method that\n# calculates the `R² coefficient of determination\n# <https://en.wikipedia.org/wiki/Coefficient_of_determination>`__\n# for a given comparison dataset (``test`` in our case). The R² score is at most 1,\n# meaning a perfect prediction, but has no lower bound.\n\nscore = spline.score(*test)\nprint(\"R² score:\", score)\n\n########################################################################################\n# That's a good score meaning that our gridder is able to accurately predict data that\n# wasn't used in the gridding algorithm.\n#\n# .. caution::\n#\n# Once caveat for this score is that it is highly dependent on the particular split\n# that we made. 
Changing the random number generator seed in\n# :func:`verde.train_test_split` will result in a different score.\n\n# Use 1 as a seed instead of 0\ntrain_other, test_other = vd.train_test_split(\n proj_coords, data.air_temperature_c, test_size=0.3, random_state=1\n)\n\nprint(\"R² score with seed 1:\", vd.Spline().fit(*train_other).score(*test_other))\n\n########################################################################################\n# Cross-validation\n# ----------------\n#\n# A more robust way of scoring the gridders is to use function\n# :func:`verde.cross_val_score`, which (by default) uses a `k-fold cross-validation\n# <https://en.wikipedia.org/wiki/Cross-validation_(statistics)#k-fold_cross-validation>`__\n# by default. It will split the data *k* times and return the score on each *fold*. We\n# can then take a mean of these scores.\n\nscores = vd.cross_val_score(vd.Spline(), proj_coords, data.air_temperature_c)\nprint(\"k-fold scores:\", scores)\nprint(\"Mean score:\", np.mean(scores))\n\n########################################################################################\n# You can also use most cross-validation splitter classes from\n# :mod:`sklearn.model_selection` by specifying the ``cv`` argument. For example, if we\n# want to shuffle then split the data *n* times\n# (:class:`sklearn.model_selection.ShuffleSplit`):\n\nfrom sklearn.model_selection import ShuffleSplit\n\nshuffle = ShuffleSplit(n_splits=10, test_size=0.3, random_state=0)\n\nscores = vd.cross_val_score(\n vd.Spline(), proj_coords, data.air_temperature_c, cv=shuffle\n)\nprint(\"shuffle scores:\", scores)\nprint(\"Mean score:\", np.mean(scores))\n\n########################################################################################\n# **That is not a very good score** so clearly the default arguments for\n# :class:`~verde.Spline` aren't suitable for this dataset. We could try different\n# combinations manually until we get a good score. A better way is to do this\n# automatically. In :ref:`model_selection` we'll go over how to do that.\n",
"\"\"\"\nClasses for dealing with vector data.\n\"\"\"\nimport numpy as np\nfrom sklearn.utils.validation import check_is_fitted\n\nfrom .base import n_1d_arrays, check_fit_input, least_squares, BaseGridder\nfrom .spline import warn_weighted_exact_solution\nfrom .utils import parse_engine\nfrom .coordinates import get_region\n\ntry:\n import numba\n from numba import jit\nexcept ImportError:\n numba = None\n from .utils import dummy_jit as jit\n\n\n# Default arguments for numba.jit\nJIT_ARGS = dict(nopython=True, target=\"cpu\", fastmath=True, parallel=True)\n\n\nclass Vector(BaseGridder):\n \"\"\"\n Fit an estimator to each component of multi-component vector data.\n\n Provides a convenient way of fitting and gridding vector data using scalar gridders\n and estimators.\n\n Each data component provided to :meth:`~verde.Vector.fit` is fitted to a separated\n estimator. Methods like :meth:`~verde.Vector.grid` and :meth:`~verde.Vector.predict`\n will operate on the multiple components simultaneously.\n\n .. warning::\n\n Never pass code like this as input to this class: ``[vd.Trend(1)]*3``. This\n creates 3 references to the **same instance** of ``Trend``, which means that\n they will all get the same coefficients after fitting. Use a list comprehension\n instead: ``[vd.Trend(1) for i in range(3)]``.\n\n Parameters\n ----------\n components : tuple or list\n A tuple or list of the estimator/gridder instances used for each component. The\n estimators will be applied for each data component in the same order that they\n are given here.\n\n Attributes\n ----------\n components : tuple\n Tuple of the fitted estimators on each component of the data.\n region_ : tuple\n The boundaries (``[W, E, S, N]``) of the data used to fit the interpolator. Used\n as the default region for the :meth:`~verde.Vector.grid` and\n :meth:`~verde.Vector.scatter` methods.\n\n See also\n --------\n verde.Chain : Chain filtering operations to fit on each subsequent output.\n\n \"\"\"\n\n def __init__(self, components):\n super().__init__()\n self.components = components\n\n def fit(self, coordinates, data, weights=None):\n \"\"\"\n Fit the estimators to the given multi-component data.\n\n The data region is captured and used as default for the\n :meth:`~verde.Vector.grid` and :meth:`~verde.Vector.scatter` methods.\n\n All input arrays must have the same shape. If weights are given, there\n must be a separate array for each component of the data.\n\n Parameters\n ----------\n coordinates : tuple of arrays\n Arrays with the coordinates of each data point. Should be in the\n following order: (easting, northing, vertical, ...). Only easting\n and northing will be used, all subsequent coordinates will be\n ignored.\n data : tuple of array\n The data values of each component at each data point. Must be a\n tuple.\n weights : None or tuple of array\n If not None, then the weights assigned to each data point of each\n data component. Typically, this should be 1 over the data\n uncertainty squared.\n\n Returns\n -------\n self\n Returns this estimator instance for chaining operations.\n\n \"\"\"\n if not isinstance(data, tuple):\n raise ValueError(\n \"Data must be a tuple of arrays. {} given.\".format(type(data))\n )\n if weights is not None and not isinstance(weights, tuple):\n raise ValueError(\n \"Weights must be a tuple of arrays. 
{} given.\".format(type(weights))\n )\n coordinates, data, weights = check_fit_input(coordinates, data, weights)\n self.region_ = get_region(coordinates[:2])\n for estimator, data_comp, weight_comp in zip(self.components, data, weights):\n estimator.fit(coordinates, data_comp, weight_comp)\n return self\n\n def predict(self, coordinates):\n \"\"\"\n Evaluate each data component on a set of points.\n\n Requires a fitted estimator (see :meth:`~verde.Vector.fit`).\n\n Parameters\n ----------\n coordinates : tuple of arrays\n Arrays with the coordinates of each data point. Should be in the\n following order: (easting, northing, vertical, ...). Only easting\n and northing will be used, all subsequent coordinates will be\n ignored.\n\n Returns\n -------\n data : tuple of array\n The values for each vector component evaluated on the given points. The\n order of components will be the same as was provided to\n :meth:`~verde.Vector.fit`.\n\n \"\"\"\n check_is_fitted(self, [\"region_\"])\n return tuple(comp.predict(coordinates) for comp in self.components)\n\n\nclass VectorSpline2D(BaseGridder):\n r\"\"\"\n Elastically coupled interpolation of 2-component vector data.\n\n This gridder assumes Cartesian coordinates.\n\n Uses the Green's functions based on elastic deformation from [SandwellWessel2016]_.\n The interpolation is done by estimating point forces that generate an elastic\n deformation that fits the observed vector data. The deformation equations are based\n on a 2D elastic sheet with a constant Poisson's ratio. The data can then be\n predicted at any desired location.\n\n The east and north data components are coupled through the elastic deformation\n equations. This coupling is controlled by the Poisson's ratio, which is usually\n between -1 and 1. The special case of Poisson's ratio -1 leads to an uncoupled\n interpolation, meaning that the east and north components don't interfere with each\n other.\n\n The point forces are traditionally placed under each data point. The force locations\n are set the first time :meth:`~verde.VectorSpline2D.fit` is called. Subsequent calls\n will fit using the same force locations as the first call. This configuration\n results in an exact prediction at the data points but can be unstable.\n\n [SandwellWessel2016]_ stabilize the solution using Singular Value Decomposition but\n we use ridge regression instead. The regularization can be controlled using the\n *damping* argument. Alternatively, you can specify the position of the forces\n manually using the *force_coords* argument. Regularization or forces not coinciding\n with data points will result in a least-squares estimate, not an exact solution.\n Note that the least-squares solution is required for data weights to have any\n effect.\n\n Before fitting, the Jacobian (design, sensitivity, feature, etc) matrix for the\n spline is normalized using :class:`sklearn.preprocessing.StandardScaler` without\n centering the mean so that the transformation can be undone in the estimated forces.\n\n Parameters\n ----------\n poisson : float\n The Poisson's ratio for the elastic deformation Green's functions. Default is\n 0.5. A value of -1 will lead to uncoupled interpolation of the east and north\n data components.\n mindist : float\n A minimum distance between the point forces and data points. Needed because the\n Green's functions are singular when forces and data points coincide. Acts as a\n fudge factor. 
A good rule of thumb is to use the average spacing between data\n points.\n damping : None or float\n The positive damping regularization parameter. Controls how much smoothness is\n imposed on the estimated forces. If None, no regularization is used.\n force_coords : None or tuple of arrays\n The easting and northing coordinates of the point forces. If None (default),\n then will be set to the data coordinates the first time\n :meth:`~verde.VectorSpline2D.fit` is called.\n engine : str\n Computation engine for the Jacobian matrix and predictions. Can be ``'auto'``,\n ``'numba'``, or ``'numpy'``. If ``'auto'``, will use numba if it is installed or\n numpy otherwise. The numba version is multi-threaded and usually faster, which\n makes fitting and predicting faster.\n\n Attributes\n ----------\n force_ : array\n The estimated forces that fit the observed data.\n region_ : tuple\n The boundaries (``[W, E, S, N]``) of the data used to fit the\n interpolator. Used as the default region for the\n :meth:`~verde.VectorSpline2D.grid` and :meth:`~verde.VectorSpline2D.scatter`\n methods.\n\n \"\"\"\n\n def __init__(\n self, poisson=0.5, mindist=10e3, damping=None, force_coords=None, engine=\"auto\"\n ):\n super().__init__()\n self.poisson = poisson\n self.mindist = mindist\n self.damping = damping\n self.force_coords = force_coords\n self.engine = engine\n\n def fit(self, coordinates, data, weights=None):\n \"\"\"\n Fit the gridder to the given 2-component vector data.\n\n The data region is captured and used as default for the\n :meth:`~verde.VectorSpline2D.grid` and :meth:`~verde.VectorSpline2D.scatter`\n methods.\n\n All input arrays must have the same shape.\n\n Parameters\n ----------\n coordinates : tuple of arrays\n Arrays with the coordinates of each data point. Should be in the\n following order: (easting, northing, vertical, ...). Only easting\n and northing will be used, all subsequent coordinates will be\n ignored.\n data : tuple of array\n A tuple ``(east_component, north_component)`` of arrays with the\n vector data values at each point.\n weights : None or tuple array\n If not None, then the weights assigned to each data point. Must be\n one array per data component. Typically, this should be 1 over the\n data uncertainty squared.\n\n Returns\n -------\n self\n Returns this estimator instance for chaining operations.\n\n \"\"\"\n coordinates, data, weights = check_fit_input(\n coordinates, data, weights, unpack=False\n )\n if len(data) != 2:\n raise ValueError(\n \"Need two data components. Only {} given.\".format(len(data))\n )\n # Capture the data region to use as a default when gridding.\n self.region_ = get_region(coordinates[:2])\n if any(w is not None for w in weights):\n weights = np.concatenate([i.ravel() for i in weights])\n else:\n weights = None\n warn_weighted_exact_solution(self, weights)\n data = np.concatenate([i.ravel() for i in data])\n if self.force_coords is None:\n self.force_coords = tuple(i.copy() for i in n_1d_arrays(coordinates, n=2))\n jacobian = self.jacobian(coordinates[:2], self.force_coords)\n self.force_ = least_squares(jacobian, data, weights, self.damping)\n return self\n\n def predict(self, coordinates):\n \"\"\"\n Evaluate the fitted gridder on the given set of points.\n\n Requires a fitted estimator (see :meth:`~verde.VectorSpline2D.fit`).\n\n Parameters\n ----------\n coordinates : tuple of arrays\n Arrays with the coordinates of each data point. Should be in the\n following order: (easting, northing, vertical, ...). 
Only easting\n and northing will be used, all subsequent coordinates will be\n ignored.\n\n Returns\n -------\n data : tuple of arrays\n A tuple ``(east_component, north_component)`` of arrays with the\n predicted vector data values at each point.\n\n \"\"\"\n check_is_fitted(self, [\"force_\"])\n force_east, force_north = self.force_coords\n east, north = n_1d_arrays(coordinates, n=2)\n cast = np.broadcast(*coordinates[:2])\n npoints = cast.size\n components = (\n np.empty(npoints, dtype=east.dtype),\n np.empty(npoints, dtype=east.dtype),\n )\n if parse_engine(self.engine) == \"numba\":\n components = predict_2d_numba(\n east,\n north,\n force_east,\n force_north,\n self.mindist,\n self.poisson,\n self.force_,\n components[0],\n components[1],\n )\n else:\n components = predict_2d_numpy(\n east,\n north,\n force_east,\n force_north,\n self.mindist,\n self.poisson,\n self.force_,\n components[0],\n components[1],\n )\n return tuple(comp.reshape(cast.shape) for comp in components)\n\n def jacobian(self, coordinates, force_coords, dtype=\"float64\"):\n \"\"\"\n Make the Jacobian matrix for the 2D coupled elastic deformation.\n\n The Jacobian is segmented into 4 parts, each relating a force component to a\n data component [SandwellWessel2016]_::\n\n | J_ee J_ne |*|f_e| = |d_e|\n | J_ne J_nn | |f_n| |d_n|\n\n The forces and data are assumed to be stacked into 1D arrays with the east\n component on top of the north component.\n\n Parameters\n ----------\n coordinates : tuple of arrays\n Arrays with the coordinates of each data point. Should be in the\n following order: (easting, northing, vertical, ...). Only easting and\n northing will be used, all subsequent coordinates will be ignored.\n force_coords : tuple of arrays\n Arrays with the coordinates for the forces. 
Should be in the same order as\n the coordinate arrays.\n dtype : str or numpy dtype\n The type of the Jacobian array.\n\n Returns\n -------\n jacobian : 2D array\n The (n_data*2, n_forces*2) Jacobian matrix.\n\n \"\"\"\n force_east, force_north = n_1d_arrays(force_coords, n=2)\n east, north = n_1d_arrays(coordinates, n=2)\n jac = np.empty((east.size * 2, force_east.size * 2), dtype=dtype)\n if parse_engine(self.engine) == \"numba\":\n jac = jacobian_2d_numba(\n east, north, force_east, force_north, self.mindist, self.poisson, jac\n )\n else:\n jac = jacobian_2d_numpy(\n east, north, force_east, force_north, self.mindist, self.poisson, jac\n )\n return jac\n\n\ndef greens_func_2d(east, north, mindist, poisson):\n \"Calculate the Green's functions for the 2D elastic case.\"\n distance = np.sqrt(east ** 2 + north ** 2)\n # The mindist factor helps avoid singular matrices when the force and\n # computation point are too close\n distance += mindist\n # Pre-compute common terms for the Green's functions of each component\n ln_r = (3 - poisson) * np.log(distance)\n over_r2 = (1 + poisson) / distance ** 2\n green_ee = ln_r + over_r2 * north ** 2\n green_nn = ln_r + over_r2 * east ** 2\n green_ne = -over_r2 * east * north\n return green_ee, green_nn, green_ne\n\n\ndef predict_2d_numpy(\n east, north, force_east, force_north, mindist, poisson, forces, vec_east, vec_north\n):\n \"Calculate the predicted data using numpy.\"\n vec_east[:] = 0\n vec_north[:] = 0\n nforces = forces.size // 2\n for j in range(nforces):\n green_ee, green_nn, green_ne = greens_func_2d(\n east - force_east[j], north - force_north[j], mindist, poisson\n )\n vec_east += green_ee * forces[j] + green_ne * forces[j + nforces]\n vec_north += green_ne * forces[j] + green_nn * forces[j + nforces]\n return vec_east, vec_north\n\n\ndef jacobian_2d_numpy(east, north, force_east, force_north, mindist, poisson, jac):\n \"Calculate the Jacobian matrix using numpy broadcasting.\"\n npoints = east.size\n nforces = force_east.size\n # Reshaping the data coordinates to a column vector will automatically build a\n # Green's functions matrix between each data point and force.\n green_ee, green_nn, green_ne = greens_func_2d(\n east.reshape((npoints, 1)) - force_east,\n north.reshape((npoints, 1)) - force_north,\n mindist,\n poisson,\n )\n jac[:npoints, :nforces] = green_ee\n jac[npoints:, nforces:] = green_nn\n jac[:npoints, nforces:] = green_ne\n jac[npoints:, :nforces] = green_ne # J is symmetric\n return jac\n\n\n@jit(**JIT_ARGS)\ndef predict_2d_numba(\n east, north, force_east, force_north, mindist, poisson, forces, vec_east, vec_north\n):\n \"Calculate the predicted data using numba to speed things up.\"\n nforces = forces.size // 2\n for i in numba.prange(east.size): # pylint: disable=not-an-iterable\n vec_east[i] = 0\n vec_north[i] = 0\n for j in range(nforces):\n green_ee, green_nn, green_ne = GREENS_FUNC_2D_JIT(\n east[i] - force_east[j], north[i] - force_north[j], mindist, poisson\n )\n vec_east[i] += green_ee * forces[j] + green_ne * forces[j + nforces]\n vec_north[i] += green_ne * forces[j] + green_nn * forces[j + nforces]\n return vec_east, vec_north\n\n\n@jit(**JIT_ARGS)\ndef jacobian_2d_numba(east, north, force_east, force_north, mindist, poisson, jac):\n \"Calculate the Jacobian matrix using numba to speed things up.\"\n nforces = force_east.size\n npoints = east.size\n for i in numba.prange(npoints): # pylint: disable=not-an-iterable\n for j in range(nforces):\n green_ee, green_nn, green_ne = GREENS_FUNC_2D_JIT(\n east[i] - 
force_east[j], north[i] - force_north[j], mindist, poisson\n )\n jac[i, j] = green_ee\n jac[i + npoints, j + nforces] = green_nn\n jac[i, j + nforces] = green_ne\n jac[i + npoints, j] = green_ne # J is symmetric\n return jac\n\n\n# JIT compile the Greens functions for use in numba functions\nGREENS_FUNC_2D_JIT = jit(**JIT_ARGS)(greens_func_2d)\n"
] | [
[
"matplotlib.pyplot.tight_layout",
"sklearn.model_selection.ShuffleSplit",
"matplotlib.pyplot.axes",
"matplotlib.pyplot.colorbar",
"numpy.mean",
"matplotlib.pyplot.show",
"matplotlib.pyplot.figure"
],
[
"numpy.log",
"sklearn.utils.validation.check_is_fitted",
"numpy.sqrt",
"numpy.broadcast",
"numpy.empty"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
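The row above stores Verde's VectorSpline2D gridder, whose docstring states that a Poisson's ratio of -1 decouples the east and north components. Below is a minimal, self-contained sketch (NumPy only, not part of the stored repository) that repeats the formulas from the greens_func_2d function in that source and checks the decoupling claim numerically:

import numpy as np

def greens_func_2d(east, north, mindist, poisson):
    # Same formulas as in the stored source, repeated here so the check runs standalone.
    distance = np.sqrt(east ** 2 + north ** 2) + mindist
    ln_r = (3 - poisson) * np.log(distance)
    over_r2 = (1 + poisson) / distance ** 2
    green_ee = ln_r + over_r2 * north ** 2
    green_nn = ln_r + over_r2 * east ** 2
    green_ne = -over_r2 * east * north
    return green_ee, green_nn, green_ne

east = np.array([1.0, -2.0, 0.5])
north = np.array([0.5, 1.5, -1.0])
_, _, ne_coupled = greens_func_2d(east, north, mindist=1e-3, poisson=0.5)
_, _, ne_uncoupled = greens_func_2d(east, north, mindist=1e-3, poisson=-1.0)
print(np.any(ne_coupled != 0))         # True: east and north components interact
print(np.allclose(ne_uncoupled, 0.0))  # True: poisson = -1 removes the coupling term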
liangfok/oh-distro | [
"eeee1d832164adce667e56667dafc64a8d7b8cee"
] | [
"software/config/terrain/drc_rehearsal_F.py"
] | [
"import numpy as np\n\nblockName = 'cinderblock'\nblockSize = np.array([15 + 5/8.0, 15 + 3/8.0, 5 + 5/8.0]) * 0.0254 # meters\nblockTiltAngle = 15 # degrees\n\n\n# F=sloping up forward (+x), B=sloping up backward (-x),\n# R=sloping up rightward (-y), L=sloping up leftward (+y)\n# last row is closest to robot (robot is on bottom looking up)\n# column order is left-to-right on robot (+y to -y)\nblockTypes = [\n [ 'N', 'N', 'N', 'N' ],\n [ 'F', 'R', 'B', 'L' ],\n [ 'R', 'B', 'L', 'F' ],\n [ 'N', 'N', 'N', 'N' ],\n [ 'L', 'F', 'R', 'B' ],\n [ 'F', 'R', 'B', 'L' ]\n]\nblockTypes.reverse()\n\n# 0=ground level, 1=one cinderblock offset, etc\nblockLevels = [\n [ -0.9, -0.9, -0.9, -0.9 ],\n [ 0, 0, 0, 0 ],\n [ 0, 0, 0, 0 ],\n [ 1, 1, 1, 1 ],\n [ 0, 0, 0, 0 ],\n [ 0, 0, 0, 0 ]\n]\nblockLevels.reverse()\n\n# map between block types and (pitch,yaw) angles (degrees)\nblockAngleMap = { 'F': (15,180), 'B': (15,0), 'R': (15,90), 'L': (15,270), 'N': (0,0) }\n\n# TODO: this is just an example\n# which foot, block (row,col), offset (x,y), support\n# (row,col) refer to which block\n# (x,y) are offsets wrt the block center, in meters\n# support is an enum indicating foot support type\n# 0=heel-toe, 1=midfoot-toe, 2=heel-midfoot\nfootstepData = [\n [ 'right', (0,1), (-0.05, 0.08), 0 ],\n [ 'left', (0,0), (0.14, -0.11), 2 ],\n [ 'right', (1,1), (-0.02, 0.12), 0 ],\n [ 'left', (1,0), (0.04, -0.07), 0 ],\n [ 'right', (2,1), (-0.05, 0.11), 0 ],\n [ 'left', (2,0), (0.0, -0.08), 0 ],\n [ 'right', (2,1), (0.06, 0.11), 0 ],\n [ 'left', (3,0), (-0.03, -0.11), 0 ],\n [ 'right', (3,1), (0.03, 0.10), 0 ],\n [ 'left', (4,0), (-0.02, -0.10), 0 ],\n [ 'right', (4,1), (0.14, 0.07), 2 ],\n [ 'left', (5,0), (-0.02, -0.12), 0 ],\n [ 'right', (5,1), (0.05, 0.07), 0 ],\n]\n\nnumSteps = [7, -1]\n\n# where to stand, relative to front of first block\nstartingPosition = np.array([-0.39, 0.4, 0])\nstartingYaw = 0 # degrees\n\n# whether to lock lateral footstep offset\nforceZeroLateralFootstepOffset = False\n\nblockColor = [0.4, 0.6, 0.4]\nblockColorMatched = [0.5, 0.8, 0.5]\n"
] | [
[
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
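The drc_rehearsal_F.py row above is a terrain configuration: blockTypes encodes each cinderblock's slope direction as a letter, and blockAngleMap translates those letters into (pitch, yaw) degrees. A small illustrative sketch (not part of the repository; values copied from the configuration, only the expansion logic is new) of turning the letter grid into angle grids:

# Values copied from the configuration above.
blockAngleMap = {'F': (15, 180), 'B': (15, 0), 'R': (15, 90), 'L': (15, 270), 'N': (0, 0)}
blockTypes = [
    ['N', 'N', 'N', 'N'],
    ['F', 'R', 'B', 'L'],
]

# Expand the letter grid into per-block pitch and yaw grids.
pitch_grid = [[blockAngleMap[t][0] for t in row] for row in blockTypes]
yaw_grid = [[blockAngleMap[t][1] for t in row] for row in blockTypes]
print(pitch_grid)  # [[0, 0, 0, 0], [15, 15, 15, 15]]
print(yaw_grid)    # [[0, 0, 0, 0], [180, 90, 0, 270]]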
andrewsris/preProcessing | [
"70ec54e3f254faacea737f4cbb36a1294ce59417"
] | [
"stainNorm_Macenko.py"
] | [
"\"\"\"\nStain normalization based on the method of:\n\nM. Macenko et al., ‘A method for normalizing histology slides for quantitative analysis’, in 2009 IEEE International Symposium on Biomedical Imaging: From Nano to Macro, 2009, pp. 1107–1110.\n\nUses the spams package:\n\nhttp://spams-devel.gforge.inria.fr/index.html\n\nUse with python via e.g https://anaconda.org/conda-forge/python-spams\n\"\"\"\n\nfrom __future__ import division\n\nimport numpy as np\nimport stain_utils as ut\n\n\ndef get_stain_matrix(I, beta=0.15, alpha=1):\n \"\"\"\n Get stain matrix (2x3)\n :param I:\n :param beta:\n :param alpha:\n :return:\n \"\"\"\n OD = ut.RGB_to_OD(I).reshape((-1, 3))\n OD = (OD[(OD > beta).any(axis=1), :])\n _, V = np.linalg.eigh(np.cov(OD, rowvar=False))\n V = V[:, [2, 1]]\n if V[0, 0] < 0: V[:, 0] *= -1\n if V[0, 1] < 0: V[:, 1] *= -1\n That = np.dot(OD, V)\n phi = np.arctan2(That[:, 1], That[:, 0])\n minPhi = np.percentile(phi, alpha)\n maxPhi = np.percentile(phi, 100 - alpha)\n v1 = np.dot(V, np.array([np.cos(minPhi), np.sin(minPhi)]))\n v2 = np.dot(V, np.array([np.cos(maxPhi), np.sin(maxPhi)]))\n if v1[0] > v2[0]:\n HE = np.array([v1, v2])\n else:\n HE = np.array([v2, v1])\n return ut.normalize_rows(HE)\n\n\n###\n\nclass Normalizer(object):\n \"\"\"\n A stain normalization object\n \"\"\"\n\n def __init__(self):\n self.stain_matrix_target = None\n self.target_concentrations = None\n\n def fit(self, target):\n target = ut.standardize_brightness(target)\n self.stain_matrix_target = get_stain_matrix(target)\n self.target_concentrations = ut.get_concentrations(target, self.stain_matrix_target)\n\n def target_stains(self):\n return ut.OD_to_RGB(self.stain_matrix_target)\n\n def transform(self, I):\n I = ut.standardize_brightness(I)\n stain_matrix_source = get_stain_matrix(I)\n source_concentrations = ut.get_concentrations(I, stain_matrix_source)\n maxC_source = np.percentile(source_concentrations, 99, axis=0).reshape((1, 2))\n maxC_target = np.percentile(self.target_concentrations, 99, axis=0).reshape((1, 2))\n source_concentrations *= (maxC_target / maxC_source)\n return (255 * np.exp(-1 * np.dot(source_concentrations, self.stain_matrix_target).reshape(I.shape))).astype(\n np.uint8)\n\n def hematoxylin(self, I):\n I = ut.standardize_brightness(I)\n h, w, c = I.shape\n stain_matrix_source = get_stain_matrix(I)\n source_concentrations = ut.get_concentrations(I, stain_matrix_source)\n H = source_concentrations[:, 0].reshape(h, w)\n H = np.exp(-1 * H)\n return H\n"
] | [
[
"numpy.dot",
"numpy.cos",
"numpy.percentile",
"numpy.sin",
"numpy.arctan2",
"numpy.cov",
"numpy.exp",
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
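The stainNorm_Macenko.py row above exposes a three-method Normalizer (fit, transform, hematoxylin). A hypothetical usage sketch, assuming the companion stain_utils module and its spams dependency are importable, and with placeholder file names standing in for real RGB uint8 H&E tiles:

from skimage import io               # any RGB uint8 image reader would do here
import stainNorm_Macenko as macenko

target = io.imread("target_tile.png")   # reference H&E tile (placeholder path)
source = io.imread("source_tile.png")   # tile to be normalized (placeholder path)

normalizer = macenko.Normalizer()
normalizer.fit(target)                   # estimate the target stain matrix and concentrations
matched = normalizer.transform(source)   # source tile recolored to the target's stains
h_map = normalizer.hematoxylin(source)   # per-pixel hematoxylin concentration, shape (h, w)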
fjczx/OpenPCDet-lazurite | [
"e3f17ab17b2c295e1786e34c6feb86adffe84b49",
"e3f17ab17b2c295e1786e34c6feb86adffe84b49"
] | [
"lazurite/h5_merge.py",
"tools/test_reverse.py"
] | [
"# -*- coding: utf-8 -*-\r\n# @Time : 4/3/2022 6:28 PM\r\n# @Author : Lazurite\r\n# @Email : [email protected]\r\n# @File : h5_create.py\r\n# @Software: PyCharm\r\nimport os\r\nimport tqdm\r\nimport h5py\r\nimport numpy as np\r\n\r\nh5_paths = [\"../data/nuscenes/v1.0-trainval/samples.h5\", \"../data/nuscenes/v1.0-trainval/sweeps.h5\"]\r\nh5_files = [h5py.File(path, \"r\") for path in h5_paths]\r\n\r\nh5_merge = h5py.File(\"../data/nuscenes/v1.0-trainval/samples_sweeps.h5\", \"w\")\r\nn_bin_data = h5_files[0][\"samples_data\"].shape[0] + h5_files[1][\"sweeps_data\"].shape[0]\r\n\r\nprint(\"Creating h5 file...\")\r\nprint(\"Number of bins:\", n_bin_data)\r\n\r\n\r\nnp_dt = h5py.special_dtype(vlen=np.dtype('float32'))\r\ndset = h5_merge.create_dataset(\"data\", shape=(n_bin_data, ), dtype=np_dt)\r\nstr_dt = h5py.special_dtype(vlen=str)\r\nname_map = h5_merge.create_dataset(\"name\", (n_bin_data), dtype=str_dt)\r\n\r\n\r\npbar = tqdm.tqdm(total=n_bin_data)\r\nlen_samples = h5_files[0][\"samples_data\"].shape[0]\r\nfor i in range(len_samples):\r\n dset[i] = h5_files[0][\"samples_data\"][i]\r\n name_map[i] = h5_files[0][\"samples_name\"][i]\r\n pbar.update(1)\r\nfor i in range(len_samples, n_bin_data):\r\n dset[i] = h5_files[1][\"sweeps_data\"][i - len_samples]\r\n name_map[i] = h5_files[1][\"sweeps_name\"][i - len_samples]\r\n pbar.update(1)\r\nh5_merge.close()\r\nprint(\"Done!\")\r\n",
"import argparse\nimport datetime\nimport glob\nimport os\nimport re\nimport time\nfrom pathlib import Path\n\nimport numpy as np\nimport torch\nfrom tensorboardX import SummaryWriter\n\nfrom eval_utils import eval_utils\nfrom pcdet.config import cfg, cfg_from_list, cfg_from_yaml_file, log_config_to_file\nfrom pcdet.datasets import build_dataloader\nfrom pcdet.models import build_network\nfrom pcdet.utils import common_utils\n\n\ndef parse_config():\n parser = argparse.ArgumentParser(description='arg parser')\n parser.add_argument('--cfg_file', type=str, default=None, help='specify the config for training')\n\n parser.add_argument('--batch_size', type=int, default=None, required=False, help='batch size for training')\n parser.add_argument('--workers', type=int, default=4, help='number of workers for dataloader')\n parser.add_argument('--extra_tag', type=str, default='default', help='extra tag for this experiment')\n parser.add_argument('--ckpt', type=str, default=None, help='checkpoint to start from')\n parser.add_argument('--launcher', choices=['none', 'pytorch', 'slurm'], default='none')\n parser.add_argument('--tcp_port', type=int, default=18888, help='tcp port for distrbuted training')\n parser.add_argument('--local_rank', type=int, default=0, help='local rank for distributed training')\n parser.add_argument('--set', dest='set_cfgs', default=None, nargs=argparse.REMAINDER,\n help='set extra config keys if needed')\n\n parser.add_argument('--max_waiting_mins', type=int, default=30, help='max waiting minutes')\n parser.add_argument('--start_epoch', type=int, default=0, help='')\n parser.add_argument('--eval_tag', type=str, default='default', help='eval tag for this experiment')\n parser.add_argument('--eval_all', action='store_true', default=False, help='whether to evaluate all checkpoints')\n parser.add_argument('--ckpt_dir', type=str, default=None, help='specify a ckpt directory to be evaluated if needed')\n parser.add_argument('--save_to_file', action='store_true', default=False, help='')\n\n args = parser.parse_args()\n\n cfg_from_yaml_file(args.cfg_file, cfg)\n cfg.TAG = Path(args.cfg_file).stem\n cfg.EXP_GROUP_PATH = '/'.join(args.cfg_file.split('/')[1:-1]) # remove 'cfgs' and 'xxxx.yaml'\n\n np.random.seed(1024)\n\n if args.set_cfgs is not None:\n cfg_from_list(args.set_cfgs, cfg)\n\n return args, cfg\n\n\ndef eval_single_ckpt(model, test_loader, args, eval_output_dir, logger, epoch_id, dist_test=False):\n # load checkpoint\n model.load_params_from_file(filename=args.ckpt, logger=logger, to_cpu=dist_test)\n model.cuda()\n\n # start evaluation\n eval_utils.eval_one_epoch(\n cfg, model, test_loader, epoch_id, logger, dist_test=dist_test,\n result_dir=eval_output_dir, save_to_file=args.save_to_file\n )\n\n\ndef get_no_evaluated_ckpt(ckpt_dir, ckpt_record_file, args):\n ckpt_list = glob.glob(os.path.join(ckpt_dir, '*checkpoint_epoch_*.pth'))\n ckpt_list.sort(key=os.path.getmtime, reverse=True)\n evaluated_ckpt_list = [float(x.strip()) for x in open(ckpt_record_file, 'r').readlines()]\n\n for cur_ckpt in ckpt_list:\n num_list = re.findall('checkpoint_epoch_(.*).pth', cur_ckpt)\n if num_list.__len__() == 0:\n continue\n\n epoch_id = num_list[-1]\n if 'optim' in epoch_id:\n continue\n if float(epoch_id) not in evaluated_ckpt_list and int(float(epoch_id)) >= args.start_epoch:\n return epoch_id, cur_ckpt\n return -1, None\n\n\ndef repeat_eval_ckpt(model, test_loader, args, eval_output_dir, logger, ckpt_dir, dist_test=False):\n # evaluated ckpt record\n ckpt_record_file = eval_output_dir / 
('eval_list_%s.txt' % cfg.DATA_CONFIG.DATA_SPLIT['test'])\n with open(ckpt_record_file, 'a'):\n pass\n\n # tensorboard log\n if cfg.LOCAL_RANK == 0:\n tb_log = SummaryWriter(log_dir=str(eval_output_dir / ('tensorboard_%s' % cfg.DATA_CONFIG.DATA_SPLIT['test'])))\n total_time = 0\n first_eval = True\n\n while True:\n # check whether there is checkpoint which is not evaluated\n cur_epoch_id, cur_ckpt = get_no_evaluated_ckpt(ckpt_dir, ckpt_record_file, args)\n if cur_epoch_id == -1 or int(float(cur_epoch_id)) < args.start_epoch:\n wait_second = 30\n if cfg.LOCAL_RANK == 0:\n print('Wait %s seconds for next check (progress: %.1f / %d minutes): %s \\r'\n % (wait_second, total_time * 1.0 / 60, args.max_waiting_mins, ckpt_dir), end='', flush=True)\n time.sleep(wait_second)\n total_time += 30\n if total_time > args.max_waiting_mins * 60 and (first_eval is False):\n break\n continue\n\n total_time = 0\n first_eval = False\n\n model.load_params_from_file(filename=cur_ckpt, logger=logger, to_cpu=dist_test)\n model.cuda()\n\n # start evaluation\n cur_result_dir = eval_output_dir / ('epoch_%s' % cur_epoch_id) / cfg.DATA_CONFIG.DATA_SPLIT['test']\n tb_dict = eval_utils.eval_one_epoch(\n cfg, model, test_loader, cur_epoch_id, logger, dist_test=dist_test,\n result_dir=cur_result_dir, save_to_file=args.save_to_file\n )\n\n if cfg.LOCAL_RANK == 0:\n for key, val in tb_dict.items():\n tb_log.add_scalar(key, val, cur_epoch_id)\n\n # record this epoch which has been evaluated\n with open(ckpt_record_file, 'a') as f:\n print('%s' % cur_epoch_id, file=f)\n logger.info('Epoch %s has been evaluated' % cur_epoch_id)\n\n\ndef main():\n args, cfg = parse_config()\n if args.launcher == 'none':\n dist_test = False\n total_gpus = 1\n else:\n total_gpus, cfg.LOCAL_RANK = getattr(common_utils, 'init_dist_%s' % args.launcher)(\n args.tcp_port, args.local_rank, backend='nccl'\n )\n dist_test = True\n\n if args.batch_size is None:\n args.batch_size = cfg.OPTIMIZATION.BATCH_SIZE_PER_GPU\n else:\n assert args.batch_size % total_gpus == 0, 'Batch size should match the number of gpus'\n args.batch_size = args.batch_size // total_gpus\n\n output_dir = cfg.ROOT_DIR / 'output' / cfg.EXP_GROUP_PATH / cfg.TAG / args.extra_tag\n output_dir.mkdir(parents=True, exist_ok=True)\n\n eval_output_dir = output_dir / 'eval'\n\n if not args.eval_all:\n num_list = re.findall(r'\\d+', args.ckpt) if args.ckpt is not None else []\n epoch_id = num_list[-1] if num_list.__len__() > 0 else 'no_number'\n eval_output_dir = eval_output_dir / ('epoch_%s' % epoch_id) / cfg.DATA_CONFIG.DATA_SPLIT['test']\n else:\n eval_output_dir = eval_output_dir / 'eval_all_default'\n\n if args.eval_tag is not None:\n eval_output_dir = eval_output_dir / args.eval_tag\n\n eval_output_dir.mkdir(parents=True, exist_ok=True)\n log_file = eval_output_dir / ('log_eval_%s.txt' % datetime.datetime.now().strftime('%Y%m%d-%H%M%S'))\n logger = common_utils.create_logger(log_file, rank=cfg.LOCAL_RANK)\n\n # log to file\n logger.info('**********************Start logging**********************')\n gpu_list = os.environ['CUDA_VISIBLE_DEVICES'] if 'CUDA_VISIBLE_DEVICES' in os.environ.keys() else 'ALL'\n logger.info('CUDA_VISIBLE_DEVICES=%s' % gpu_list)\n\n if dist_test:\n logger.info('total_batch_size: %d' % (total_gpus * args.batch_size))\n for key, val in vars(args).items():\n logger.info('{:16} {}'.format(key, val))\n log_config_to_file(cfg, logger=logger)\n\n ckpt_dir = args.ckpt_dir if args.ckpt_dir is not None else output_dir / 'ckpt'\n\n test_set, test_loader, sampler = 
build_dataloader(\n dataset_cfg=cfg.DATA_CONFIG,\n class_names=cfg.CLASS_NAMES,\n batch_size=args.batch_size,\n dist=dist_test, workers=args.workers, logger=logger, training=False\n )\n\n model = build_network(model_cfg=cfg.MODEL, num_class=len(cfg.CLASS_NAMES), dataset=test_set)\n with torch.no_grad():\n if args.eval_all:\n repeat_eval_ckpt(model, test_loader, args, eval_output_dir, logger, ckpt_dir, dist_test=dist_test)\n else:\n eval_single_ckpt(model, test_loader, args, eval_output_dir, logger, epoch_id, dist_test=dist_test)\n\n\nif __name__ == '__main__':\n main()\n"
] | [
[
"numpy.dtype"
],
[
"torch.no_grad",
"numpy.random.seed"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
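The h5_merge.py row above writes a single HDF5 file with two aligned variable-length datasets: "data" (float32 point clouds) and "name" (the matching sample/sweep names). A minimal read-back sketch (not part of the repository), using the same output path the script writes to:

import h5py
import numpy as np

with h5py.File("../data/nuscenes/v1.0-trainval/samples_sweeps.h5", "r") as f:
    names = f["name"]
    data = f["data"]
    # Each entry of "data" is a flattened variable-length float32 array;
    # the entry at the same index in "name" identifies the sample or sweep.
    first_cloud = np.asarray(data[0], dtype=np.float32)
    print(names[0], first_cloud.shape)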
saroad2/knapsack_solver | [
"7247e464019a1afcaea0b7e5e76bdc729f4b0a51"
] | [
"src/knapsack_solver/__main__.py"
] | [
"from pathlib import Path\n\nimport click\nimport numpy as np\n\nfrom knapsack_solver.knapsack_problem import KnapsackProblem\nfrom knapsack_solver.plots_util import save_plots\nfrom knapsack_solver.weight import Weight\n\n\[email protected]()\[email protected](\"output-directory\", type=click.Path(file_okay=False))\[email protected](\"--mean-mass\", type=float, default=5.0)\[email protected](\"--mass-std\", type=float, default=1.0)\[email protected](\"--mean-value\", type=float, default=5.0)\[email protected](\"--value-std\", type=float, default=1.0)\[email protected](\"--number-of-weights\", type=int, default=100)\[email protected](\"--generation-size\", type=int, default=50)\[email protected](\"--max-mass\", type=float, default=25)\[email protected](\"--max-iterations\", type=int, default=50)\[email protected](\"--mutation-rate\", type=float, default=0.1)\[email protected](\"--crossover-rate\", type=float, default=0.3)\ndef knapsack_solver_cli(\n output_directory,\n mean_mass,\n mass_std,\n mean_value,\n value_std,\n number_of_weights,\n max_mass,\n max_iterations,\n generation_size,\n mutation_rate,\n crossover_rate,\n):\n output_directory = Path(output_directory)\n output_directory.mkdir(parents=True, exist_ok=True)\n weights = [\n Weight.random(\n identification=i,\n mean_mass=mean_mass,\n mass_std=mass_std,\n mean_value=mean_value,\n value_std=value_std,\n )\n for i in range(1, number_of_weights + 1)\n ]\n problem = KnapsackProblem(\n weights=weights,\n max_mass=max_mass,\n generation_size=generation_size,\n mutation_rate=mutation_rate,\n crossover_rate=crossover_rate,\n )\n history = []\n generation = problem.create_generation()\n history.append(generation)\n with click.progressbar(np.arange(1, max_iterations + 1), show_pos=True) as bar:\n for i in bar:\n generation = problem.create_next_generation(generation, identification=i)\n history.append(generation)\n bar.label = f\"Best sack value: {generation.max_value():.2f}\"\n\n best_sack = generation.best_sack()\n click.echo(f\"Best sack: {best_sack}\")\n for weight in best_sack:\n click.echo(f\"\\t{weight}\")\n\n save_plots(\n output_directory=output_directory, weights=problem.weights, history=history\n )\n\n\nif __name__ == \"__main__\":\n knapsack_solver_cli()\n"
] | [
[
"numpy.arange"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
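The knapsack_solver row above defines a click command whose single argument is the output directory. A hypothetical programmatic invocation using click's test runner (assuming the package is installed so knapsack_solver.__main__ is importable; the directory name and option values are illustrative):

from click.testing import CliRunner
from knapsack_solver.__main__ import knapsack_solver_cli

runner = CliRunner()
result = runner.invoke(
    knapsack_solver_cli,
    ["ga_results", "--number-of-weights", "50", "--max-iterations", "20"],
)
print(result.exit_code)  # 0 on success
print(result.output)     # echoed summary of the best sack found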
kateyose/nrn-7.6.7 | [
"603da174f660370abb425917cc5c64c3db03dcec"
] | [
"share/lib/python/neuron/crxd/rxd.py"
] | [
"from neuron import h, nrn, nrn_dll_sym \nfrom . import species, node, section1d, region\nfrom .nodelist import NodeList\nimport weakref\nimport numpy\nimport ctypes\nimport atexit\nfrom . import options\nfrom .rxdException import RxDException\nfrom . import initializer \nimport collections\nimport os\nfrom distutils import sysconfig\nimport uuid\nimport sys\nimport itertools\nfrom numpy.ctypeslib import ndpointer\nimport re\nimport platform\n# aliases to avoid repeatedly doing multiple hash-table lookups\n_numpy_array = numpy.array\n_numpy_zeros = numpy.zeros\n_species_get_all_species = species._get_all_species\n_node_get_states = node._get_states\n_section1d_transfer_to_legacy = section1d._transfer_to_legacy\n_ctypes_c_int = ctypes.c_int\n_weakref_ref = weakref.ref\n\n_external_solver = None\n_external_solver_initialized = False\n_windows_dll_files = []\n_windows_dll = []\n\n\n\nmake_time_ptr = nrn_dll_sym('make_time_ptr')\nmake_time_ptr.argtypes = [ctypes.py_object, ctypes.py_object]\nmake_time_ptr(h._ref_dt, h._ref_t)\n\n_double_ptr = ctypes.POINTER(ctypes.c_double)\n_int_ptr = ctypes.POINTER(_ctypes_c_int)\n_long_ptr = ctypes.POINTER(ctypes.c_long)\n\n\nfptr_prototype = ctypes.CFUNCTYPE(None)\nset_nonvint_block = nrn_dll_sym('set_nonvint_block')\nset_nonvint_block(nrn_dll_sym('rxd_nonvint_block'))\n\nset_setup = nrn_dll_sym('set_setup')\nset_setup.argtypes = [fptr_prototype]\nset_initialize = nrn_dll_sym('set_initialize')\nset_initialize.argtypes = [fptr_prototype]\n\nscatter_concentrations = nrn_dll_sym('scatter_concentrations')\n\n# Transfer extracellular concentrations to NEURON\n_fih_transfer_ecs = h.FInitializeHandler(1, scatter_concentrations)\n\n\nrxd_set_no_diffusion = nrn_dll_sym('rxd_set_no_diffusion')\n\nsetup_solver = nrn_dll_sym('setup_solver')\nsetup_solver.argtypes = [ndpointer(ctypes.c_double), ctypes.c_int, numpy.ctypeslib.ndpointer(numpy.int_, flags='contiguous'), ctypes.c_int, ctypes.py_object, ctypes.py_object]\n\n#states = None\n_set_num_threads = nrn_dll_sym('set_num_threads')\n_set_num_threads.argtypes = [ctypes.c_int]\n_get_num_threads = nrn_dll_sym('get_num_threads')\n_get_num_threads.restype = ctypes.c_int\n\n\nclear_rates = nrn_dll_sym('clear_rates')\nregister_rate = nrn_dll_sym('register_rate')\nregister_rate.argtypes = [ \n ctypes.c_int, #num species\n ctypes.c_int, #num regions\n ctypes.c_int, #num seg\n numpy.ctypeslib.ndpointer(ctypes.c_int, flags='contiguous'), #species ids\n ctypes.c_int, numpy.ctypeslib.ndpointer(ctypes.c_int, flags='contiguous'), #num ecs species\n numpy.ctypeslib.ndpointer(ctypes.c_int, flags='contiguous'), #ecs species ids\n ctypes.c_int, #num multicompartment reactions\n numpy.ctypeslib.ndpointer(ctypes.c_double, flags='contiguous'), #multicompartment multipliers\n ] #Reaction rate function\n\nsetup_currents = nrn_dll_sym('setup_currents')\nsetup_currents.argtypes = [\n ctypes.c_int, #number of membrane currents\n ctypes.c_int, #number induced currents\n ctypes.c_int, #number of nodes with membrane currents\n _int_ptr, #number of species involved in each membrane current\n _int_ptr, #charges of the species involved in each membrane current\n _int_ptr, #node indices\n _int_ptr, #node indices\n _double_ptr, #scaling (areas) of the fluxes\n _int_ptr, #charges for each species in each reation\n ctypes.POINTER(ctypes.py_object), #hoc pointers\n _int_ptr, #maps for membrane fluxes\n _int_ptr #maps for ecs fluxes\n]\n \n\nset_reaction_indices = nrn_dll_sym('set_reaction_indices')\nset_reaction_indices.argtypes = [ctypes.c_int, _int_ptr, 
_int_ptr, _int_ptr, \n _int_ptr,_int_ptr,_double_ptr, ctypes.c_int, _int_ptr, _int_ptr, _int_ptr,\n _int_ptr]\n\necs_register_reaction = nrn_dll_sym('ecs_register_reaction')\necs_register_reaction.argtype = [ctypes.c_int, ctypes.c_int, _int_ptr, fptr_prototype]\n\nset_euler_matrix = nrn_dll_sym('rxd_set_euler_matrix')\nset_euler_matrix.argtypes = [\n ctypes.c_int,\n ctypes.c_int,\n _long_ptr,\n _long_ptr,\n _double_ptr,\n numpy.ctypeslib.ndpointer(numpy.int_, flags='contiguous'),\n ctypes.c_int,\n numpy.ctypeslib.ndpointer(numpy.double, flags='contiguous'),\n]\nrxd_setup_curr_ptrs = nrn_dll_sym('rxd_setup_curr_ptrs')\nrxd_setup_curr_ptrs.argtypes = [\n ctypes.c_int,\n _int_ptr,\n numpy.ctypeslib.ndpointer(numpy.double, flags='contiguous'),\n ctypes.POINTER(ctypes.py_object),\n]\n\nrxd_setup_conc_ptrs = nrn_dll_sym('rxd_setup_conc_ptrs')\nrxd_setup_conc_ptrs.argtypes = [\n ctypes.c_int,\n _int_ptr,\n ctypes.POINTER(ctypes.py_object)\n]\n\n_c_headers = \"\"\"#include <math.h>\n/*Some functions supported by numpy that aren't included in math.h\n * names and arguments match the wrappers used in rxdmath.py\n */\ndouble factorial(const double);\ndouble degrees(const double);\nvoid radians(const double, double*);\ndouble log1p(const double);\n\"\"\"\n\ndef _list_to_cint_array(data):\n if data is None or len(data) == 0:\n return None\n else:\n return (ctypes.c_int * len(data))(*tuple(data))\n\ndef _list_to_cdouble_array(data):\n if data is None or len(data) == 0:\n return None\n else:\n return (ctypes.c_double * len(data))(*tuple(data))\n\ndef _list_to_clong_array(data):\n if data is None or len(data) == 0:\n return None\n else:\n return (ctypes.c_long * len(data))(*tuple(data))\n\ndef _list_to_pyobject_array(data):\n if data is None or len(data) == 0:\n return None\n else:\n return (ctypes.py_object * len(data))(*tuple(data))\n\ndef byeworld():\n # needed to prevent a seg-fault error at shutdown in at least some\n # combinations of NEURON and Python, which I think is due to objects\n # getting deleted out-of-order\n global _react_matrix_solver\n try:\n del _react_matrix_solver\n except NameError:\n # # if it already didn't exist, that's fine\n pass\n _windows_remove_dlls()\n \natexit.register(byeworld)\n\n# Faraday's constant (store to reduce number of lookups)\nFARADAY = h.FARADAY\n\n# converting from mM um^3 to molecules\n# = 6.02214129e23 * 1000. 
/ 1.e18 / 1000\n# = avogadro * (L / m^3) * (m^3 / um^3) * (mM / M)\n# value for avogardro's constant from NIST webpage, accessed 25 April 2012:\n# http://physics.nist.gov/cgi-bin/cuu/Value?na\n_conversion_factor = 602214.129\n\n\n_cvode_object = h.CVode()\n\nlast_diam_change_cnt = None\nlast_structure_change_cnt = None\n\n_linmodadd_c = None\n_diffusion_matrix = None\n_curr_scales = None\n_curr_ptrs = None\n_curr_indices = None\n\n_all_reactions = []\n\n_zero_volume_indices = numpy.ndarray(0, dtype=numpy.int_)\n_nonzero_volume_indices = []\n\nnrn_tree_solve = nrn_dll_sym('nrn_tree_solve')\nnrn_tree_solve.restype = None\n\n_dptr = _double_ptr\n\n_dimensions = collections.defaultdict(lambda: 1)\n_default_dx = 0.25\n_default_method = 'deterministic'\n\n#CRxD\n_diffusion_d = None\n_diffusion_a = None\n_diffusion_b = None\n_diffusion_p = None\n_cur_node_indices = None\n_diffusion_a_ptr, _diffusion_b_ptr, _diffusion_p_ptr = None, None, None\n\ndef set_solve_type(domain=None, dimension=None, dx=None, nsubseg=None, method=None):\n \"\"\"Specify the numerical discretization and solver options.\n \n domain -- a section or Python iterable of sections\"\"\"\n setting_default = False\n if domain is None:\n domain = h.allsec()\n setting_default = True\n elif isinstance(domain, nrn.Section):\n domain = [domain]\n \n # NOTE: These attributes are set on a per-nrn.Section basis; they cannot \n # assume Section1D objects exist because they might be specified before\n # those objects are created\n \n # domain is now always an iterable (or invalid)\n if method is not None:\n raise RxDException('using set_solve_type to specify method is not yet implemented')\n if dimension is not None:\n if dimension not in (1, 3):\n raise RxDException('invalid option to set_solve_type: dimension must be 1 or 3')\n factory = lambda: dimension\n if setting_default:\n _dimensions.default_factory = factory\n for sec in domain:\n _dimensions[sec] = dimension \n if dx is not None:\n raise RxDException('using set_solve_type to specify dx is not yet implemented')\n if nsubseg is not None:\n raise RxDException('using set_solve_type to specify nsubseg is not yet implemented')\n \n\ndef _unregister_reaction(r):\n global _all_reactions\n for i, r2 in enumerate(_all_reactions):\n if r2() == r:\n del _all_reactions[i]\n break\n\ndef _register_reaction(r):\n # TODO: should we search to make sure that (a weakref to) r hasn't already been added?\n global _all_reactions, _external_solver_initialized\n _all_reactions.append(_weakref_ref(r))\n _external_solver_initialized = False\n \ndef _after_advance():\n global last_diam_change_cnt\n last_diam_change_cnt = _diam_change_count.value\n \ndef re_init():\n \"\"\"reinitializes all rxd concentrations to match HOC values, updates matrices\"\"\"\n global _external_solver_initialized\n h.define_shape()\n \n if not species._has_3d:\n # TODO: if we do have 3D, make sure that we do the necessary parts of this\n \n # update current pointers\n section1d._purge_cptrs()\n for sr in list(_species_get_all_species().values()):\n s = sr()\n if s is not None:\n s._register_cptrs()\n \n # update matrix equations\n _setup_matrices()\n for sr in list(_species_get_all_species().values()):\n s = sr()\n if s is not None: s.re_init()\n # TODO: is this safe? 
\n _cvode_object.re_init()\n\n _external_solver_initialized = False\n \ndef _invalidate_matrices():\n # TODO: make a separate variable for this?\n global _diffusion_matrix, _external_solver_initialized, last_structure_change_cnt\n _diffusion_matrix = None\n last_structure_change_cnt = None\n _external_solver_initialized = False\n\n_rxd_offset = None\n\ndef _atolscale(y):\n real_index_lookup = {item: index for index, item in enumerate(_nonzero_volume_indices)}\n for sr in list(_species_get_all_species().values()):\n s = sr()\n if s is not None:\n shifted_i = [real_index_lookup[i] + _rxd_offset for i in s.indices() if i in real_index_lookup]\n y[shifted_i] *= s._atolscale\n\ndef _ode_count(offset):\n global _rxd_offset, last_structure_change_cnt, _structure_change_count\n initializer._do_init()\n _rxd_offset = offset - len(_nonzero_volume_indices)\n if _diffusion_matrix is None or last_structure_change_cnt != _structure_change_count.value: _setup_matrices()\n last_structure_change_cnt = _structure_change_count.value\n return len(_nonzero_volume_indices)\n\ndef _ode_reinit(y):\n y[_rxd_offset : _rxd_offset + len(_nonzero_volume_indices)] = _node_get_states()[_nonzero_volume_indices]\n\ndef _ode_fun(t, y, ydot):\n initializer.assert_initialized()\n lo = _rxd_offset\n hi = lo + len(_nonzero_volume_indices)\n if lo == hi: return\n states = _node_get_states().copy()\n states[_nonzero_volume_indices] = y[lo : hi]\n\n # need to fill in the zero volume states with the correct concentration\n # this assumes that states at the zero volume indices is zero (although that\n # assumption could be easily removed)\n #matrix = _scipy_sparse_dok_matrix((len(_zero_volume_indices), len(states)))\n \"\"\"\n for i, row in enumerate(_zero_volume_indices):\n d = _diffusion_matrix[row, row]\n if d:\n nzj = _diffusion_matrix[row].nonzero()[1]\n print 'nzj:', nzj\n for j in nzj:\n matrix[i, j] = -_diffusion_matrix[row, j] / d\n states[_zero_volume_indices] = matrix * states\n \"\"\"\n if len(_zero_volume_indices):\n states[_zero_volume_indices] = _mat_for_zero_volume_nodes * states\n \"\"\"\n for i in _zero_volume_indices:\n v = _diffusion_matrix[i] * states\n d = _diffusion_matrix[i, i]\n if d:\n states[i] = -v / d\n \"\"\"\n # TODO: make this so that the section1d parts use cptrs (can't do this directly for 3D because sum, but could maybe move that into the C)\n # the old way: _section1d_transfer_to_legacy()\n# for sr in _species_get_all_species().values():\n# s = sr()\n# if s is not None: s._transfer_to_legacy()\n\n \n if ydot is not None:\n # diffusion_matrix = - jacobian \n ydot[lo : hi] = (_rxd_reaction(states) - _diffusion_matrix * states)[_nonzero_volume_indices]\n \n states[_zero_volume_indices] = 0\n\n_rxd_induced_currents = None\n_memb_cur_ptrs= []\ndef _setup_memb_currents():\n global _memb_cur_ptrs\n initializer._do_init()\n # setup membrane fluxes from our stuff\n # TODO: cache the memb_cur_ptrs, memb_cur_charges, memb_net_charges, memb_cur_mapped\n # because won't change very often\n # need this; think it's because of initialization of mod files\n if _curr_indices is None: return\n SPECIES_ABSENT = -1\n # TODO: change so that this is only called when there are in fact currents\n rxd_memb_scales = []\n _memb_cur_ptrs = []\n memb_cur_charges = []\n memb_net_charges = []\n memb_cur_mapped = []\n memb_cur_mapped_ecs = []\n for rptr in _all_reactions:\n r = rptr()\n if r and r._membrane_flux:\n scales = r._memb_scales\n rxd_memb_scales.extend(scales)\n _memb_cur_ptrs += r._cur_ptrs\n memb_cur_mapped += 
r._cur_mapped\n memb_cur_mapped_ecs += r._cur_mapped_ecs\n memb_cur_charges += [r._cur_charges] * len(scales)\n memb_net_charges += [r._net_charges] * len(scales)\n ecs_map = [SPECIES_ABSENT if i is None else i for i in list(itertools.chain.from_iterable(itertools.chain.from_iterable(memb_cur_mapped_ecs)))]\n ics_map = [SPECIES_ABSENT if i is None else i for i in list(itertools.chain.from_iterable(itertools.chain.from_iterable(memb_cur_mapped)))]\n if _memb_cur_ptrs:\n cur_counts = [len(x) for x in memb_cur_mapped]\n num_currents = numpy.array(cur_counts).sum()\n setup_currents(len(_memb_cur_ptrs),\n num_currents,\n len(_curr_indices), # num_currents == len(_curr_indices) if no Extracellular\n _list_to_cint_array(cur_counts),\n _list_to_cint_array(memb_net_charges),\n _list_to_cint_array(_curr_indices),\n _list_to_cint_array(_cur_node_indices),\n _list_to_cdouble_array(rxd_memb_scales),\n _list_to_cint_array(list(itertools.chain.from_iterable(memb_cur_charges))),\n _list_to_pyobject_array(list(itertools.chain.from_iterable(_memb_cur_ptrs))),\n _list_to_cint_array(ics_map),\n _list_to_cint_array(ecs_map))\n \ndef _currents(rhs):\n return\n if rxd_memb_flux:\n # TODO: remove the asserts when this is verified to work\n assert(len(rxd_memb_flux) == len(_cur_node_indices))\n assert(len(rxd_memb_flux) == len(memb_cur_ptrs))\n assert(len(rxd_memb_flux) == len(memb_cur_charges))\n assert(len(rxd_memb_flux) == len(memb_net_charges))\n for flux, cur_ptrs, cur_charges, net_charge, i, cur_maps in zip(rxd_memb_flux, memb_cur_ptrs, memb_cur_charges, memb_net_charges, _cur_node_indices, memb_cur_mapped):\n rhs[i] -= net_charge * flux\n #import sys\n #sys.exit()\n # TODO: remove this assert when more thoroughly tested\n assert(len(cur_ptrs) == len(cur_maps))\n for ptr, charge, cur_map_i in zip(cur_ptrs, cur_charges, cur_maps):\n # this has the opposite sign of the above because positive\n # currents lower the membrane potential\n cur = charge * flux\n ptr[0] += cur\n for c in cur_map_i:\n _rxd_induced_currents[c] += cur\n #for sign, c in zip([-1, 1], cur_maps):\n # if c is not None:\n # _rxd_induced_currents[c] += sign * cur\n\n_last_m = None\n_last_preconditioner = None\n_fixed_step_count = 0\n\n\ndef _rxd_reaction(states):\n # TODO: this probably shouldn't be here\n # TODO: this was included in the 3d, probably shouldn't be there either\n # TODO: if its None and there is 3D... 
should we do anything special?\n if _diffusion_matrix is None and not species._has_3d: _setup_matrices()\n\n b = _numpy_zeros(len(states))\n \n \n if _curr_ptr_vector is not None:\n _curr_ptr_vector.gather(_curr_ptr_storage_nrn)\n b[_curr_indices] = _curr_scales * (_curr_ptr_storage - _rxd_induced_currents) \n \n b[_curr_indices] = _curr_scales * [ptr[0] for ptr in _curr_ptrs]\n\n # TODO: store weak references to the r._evaluate in addition to r so no\n # repeated lookups\n #for rptr in _all_reactions:\n # r = rptr()\n # if r:\n # indices, mult, rate = r._evaluate(states)\n # we split this in parts to allow for multiplicities and to allow stochastic to make the same changes in different places\n # for i, m in zip(indices, mult):\n # b[i] += m * rate\n\n node._apply_node_fluxes(b)\n return b\n \n_last_preconditioner_dt = 0\n_last_dt = None\n_last_m = None\n_diffusion_d = None\n_diffusion_a = None\n_diffusion_b = None\n_diffusion_p = None\n_cur_node_indices = None\n\n_diffusion_a_ptr, _diffusion_b_ptr, _diffusion_p_ptr = None, None, None\n\ndef _setup():\n initializer._do_init()\n # TODO: this is when I should resetup matrices (structure changed event)\n global _last_dt, _external_solver_initialized\n _last_dt = None\n _external_solver_initialized = False\n \n # Using C-code for reactions\n options.use_reaction_contribution_to_jacobian = False\n\ndef _find_librxdmath():\n import glob\n base_path = os.path.join(h.neuronhome(), \"..\", \"..\", platform.machine(), \"lib\", \"librxdmath\")\n success = False \n for extension in ['', '.dll', '.so', '.dylib']:\n dll = base_path + extension\n try:\n success = os.path.exists(dll) \n except:\n pass\n if success: break\n if not success:\n if sys.platform.lower().startswith(\"win\"):\n dll = os.path.join(h.neuronhome(), 'bin', 'librxdmath.dll')\n success = os.path.exists(dll)\n if not success:\n raise RxDException('unable to connect to the librxdmath library')\n return dll\n \ndef _c_compile(formula):\n filename = 'rxddll' + str(uuid.uuid1())\n with open(filename + '.c', 'w') as f:\n f.write(formula)\n math_library = '-lm'\n fpic = '-fPIC'\n try:\n gcc = os.environ[\"CC\"]\n except:\n #when running on windows try and used the gcc included with NEURON\n if sys.platform.lower().startswith(\"win\"):\n math_library = ''\n fpic = ''\n gcc = os.path.join(h.neuronhome(),\"mingw\",\"mingw64\",\"bin\",\"x86_64-w64-mingw32-gcc.exe\")\n if not os.path.isfile(gcc):\n raise RxDException(\"unable to locate a C compiler. 
Please `set CC=<path to C compiler>`\")\n else:\n gcc = \"gcc\"\n #TODO: Check this works on non-Linux machines\n gcc_cmd = \"%s -I%s -I%s \" % (gcc, sysconfig.get_python_inc(), os.path.join(h.neuronhome(), \"..\", \"..\", \"include\", \"nrn\"))\n gcc_cmd += \"-shared %s %s.c %s \" % (fpic, filename, _find_librxdmath())\n gcc_cmd += \"-o %s.so %s\" % (filename, math_library)\n if sys.platform.lower().startswith(\"win\"):\n my_path = os.getenv('PATH')\n os.putenv('PATH', my_path + ';' + os.path.join(h.neuronhome(),\"mingw\",\"mingw64\",\"bin\"))\n os.system(gcc_cmd)\n os.putenv('PATH', my_path)\n else:\n os.system(gcc_cmd)\n #TODO: Find a better way of letting the system locate librxdmath.so.0\n rxdmath_dll = ctypes.cdll[_find_librxdmath()]\n dll = ctypes.cdll['./%s.so' % filename]\n reaction = dll.reaction\n reaction.argtypes = [ctypes.POINTER(ctypes.c_double), ctypes.POINTER(ctypes.c_double)] \n reaction.restype = ctypes.c_double\n os.remove(filename + '.c')\n if sys.platform.lower().startswith(\"win\"):\n #cannot remove dll that are in use\n _windows_dll.append(weakref.ref(dll))\n _windows_dll_files.append(filename + \".so\")\n else:\n os.remove(filename + '.so')\n return reaction\n\n\ndef _conductance(d):\n pass\n \ndef _ode_jacobian(dt, t, ypred, fpred):\n #print '_ode_jacobian: dt = %g, last_dt = %r' % (dt, _last_dt)\n lo = _rxd_offset\n hi = lo + len(_nonzero_volume_indices) \n _reaction_matrix_setup(dt, ypred[lo : hi])\n\n_curr_ptr_vector = None\n_curr_ptr_storage = None\n_curr_ptr_storage_nrn = None\npinverse = None\n_cur_map = None\n_h_ptrvector = h.PtrVector\n_h_vector = h.Vector\n\n_structure_change_count = nrn_dll_sym('structure_change_cnt', _ctypes_c_int)\n_diam_change_count = nrn_dll_sym('diam_change_cnt', _ctypes_c_int)\n\ndef _donothing(): pass\n\ndef _update_node_data(force=False):\n global last_diam_change_cnt, last_structure_change_cnt, _curr_indices, _curr_scales, _curr_ptrs, _cur_map\n global _curr_ptr_vector, _curr_ptr_storage, _curr_ptr_storage_nrn\n if last_diam_change_cnt != _diam_change_count.value or _structure_change_count.value != last_structure_change_cnt or force:\n _cur_map = {}\n last_diam_change_cnt = _diam_change_count.value\n last_structure_change_cnt = _structure_change_count.value\n #if not species._has_3d:\n # TODO: merge this with the 3d/hybrid case?\n nsegs_changed = 0\n for sr in list(_species_get_all_species().values()):\n s = sr()\n if s is not None: nsegs_changed += s._update_node_data()\n if nsegs_changed:\n section1d._purge_cptrs()\n for sr in list(_species_get_all_species().values()):\n s = sr()\n if s is not None:\n s._update_region_indices(True)\n s._register_cptrs()\n if species._has_1d and species._1d_submatrix_n():\n volumes = node._get_data()[0]\n _zero_volume_indices = (numpy.where(volumes == 0)[0]).astype(numpy.int_)\n setup_solver(_node_get_states(), len(_node_get_states()), _zero_volume_indices, len(_zero_volume_indices), h._ref_t, h._ref_dt)\n # TODO: separate compiling reactions -- so the indices can be updated without recompiling\n _compile_reactions()\n\n #end#if\n for rptr in _all_reactions:\n r = rptr()\n if r is not None: r._update_indices()\n _curr_indices = []\n _curr_scales = []\n _curr_ptrs = []\n for sr in list(_species_get_all_species().values()):\n s = sr()\n if s is not None: s._setup_currents(_curr_indices, _curr_scales, _curr_ptrs, _cur_map)\n \n num = len(_curr_ptrs)\n if num:\n _curr_ptr_vector = _h_ptrvector(num)\n _curr_ptr_vector.ptr_update_callback(_donothing)\n for i, ptr in enumerate(_curr_ptrs):\n 
_curr_ptr_vector.pset(i, ptr)\n \n _curr_ptr_storage_nrn = _h_vector(num)\n _curr_ptr_storage = _curr_ptr_storage_nrn.as_numpy()\n else:\n _curr_ptr_vector = None\n\n #_curr_scales = _numpy_array(_curr_scales) \n\n\ndef _matrix_to_rxd_sparse(m):\n \"\"\"precondition: assumes m a numpy array\"\"\"\n nonzero_i, nonzero_j = list(zip(*list(m.keys())))\n nonzero_values = numpy.ascontiguousarray(list(m.values()), dtype=numpy.float64)\n\n # number of rows\n n = m.shape[1]\n\n return n, len(nonzero_i), numpy.ascontiguousarray(nonzero_i, dtype=numpy.int_), numpy.ascontiguousarray(nonzero_j, dtype=numpy.int_), nonzero_values\n\n\n_euler_matrix = None\n\n# TODO: make sure this does the right thing when the diffusion constant changes between two neighboring nodes\ndef _setup_matrices():\n global _curr_ptrs\n global _cur_node_indices\n global _zero_volume_indices\n\n # TODO: this sometimes seems to get called twice. Figure out why and fix, if possible.\n\n # if the shape has changed update the nodes\n _update_node_data()\n\n n = len(_node_get_states())\n \n #TODO: Replace with ADI version \n \"\"\"\n if species._has_3d:\n _euler_matrix = _scipy_sparse_dok_matrix((n, n), dtype=float)\n\n for sr in list(_species_get_all_species().values()):\n s = sr()\n if s is not None: s._setup_matrices3d(_euler_matrix)\n\n _diffusion_matrix = -_euler_matrix\n\n _euler_matrix = _euler_matrix.tocsr()\n _update_node_data(True)\n\n # NOTE: if we also have 1D, this will be replaced with the correct values below\n _zero_volume_indices = []\n _nonzero_volume_indices = list(range(len(_node_get_states())))\n \n \"\"\"\n if species._has_1d:\n n = species._1d_submatrix_n()\n # TODO: initialization is slow. track down why\n \n _last_dt = None\n \n for sr in list(_species_get_all_species().values()):\n s = sr()\n if s is not None:\n s._assign_parents()\n \n _update_node_data(True)\n\n volumes = node._get_data()[0]\n _zero_volume_indices = (numpy.where(volumes == 0)[0]).astype(numpy.int_)\n _nonzero_volume_indices = volumes.nonzero()[0]\n\n # remove old linearmodeladdition\n _linmodadd_cur = None\n \n if n: \n # create sparse matrix for C in cy'+gy=b\n c_diagonal = numpy.zeros(n,dtype=ctypes.c_double)\n # most entries are 1 except those corresponding to the 0 and 1 ends\n \n # create the matrix G\n #if not species._has_3d:\n # # if we have both, then put the 1D stuff into the matrix that already exists for 3D\n _diffusion_matrix = [dict() for idx in range(n)] \n for sr in list(_species_get_all_species().values()):\n s = sr()\n if s is not None:\n s._setup_diffusion_matrix(_diffusion_matrix)\n s._setup_c_matrix(c_diagonal)\n #print '_diffusion_matrix.shape = %r, n = %r, species._has_3d = %r' % (_diffusion_matrix.shape, n, species._has_3d)\n euler_matrix_i, euler_matrix_j, euler_matrix_nonzero = [], [], []\n for i in range(n):\n mat_i = _diffusion_matrix[i]\n euler_matrix_i.extend(itertools.repeat(i,len(mat_i)))\n euler_matrix_j.extend(mat_i.keys())\n euler_matrix_nonzero.extend(mat_i.values())\n euler_matrix_nnonzero = len(euler_matrix_nonzero)\n assert(len(euler_matrix_i) == len(euler_matrix_j) == len(euler_matrix_nonzero))\n # modify C for cases where no diffusive coupling of 0, 1 ends\n # TODO: is there a better way to handle no diffusion?\n #for i in range(n):\n # if not _diffusion_matrix[i, i]:\n # _linmodadd_c[i, i] = 1\n\n \n # setup for induced membrane currents\n _cur_node_indices = []\n\n for rptr in _all_reactions:\n r = rptr()\n if r is not None:\n r._setup_membrane_fluxes(_cur_node_indices, _cur_map)\n \n 
#_cvode_object.re_init() \n\n #if species._has_3d:\n # _euler_matrix = -_diffusion_matrix\n\n\n #TODO: Replace this this to handle 1d/3d hybrid models\n \"\"\"\n if species._has_1d and species._has_3d:\n # TODO: add connections to matrix; for now: find them\n hybrid_neighbors = collections.defaultdict(lambda: [])\n hybrid_diams = {}\n dxs = set()\n for sr in list(_species_get_all_species().values()):\n s = sr()\n if s is not None:\n if s._nodes and s._secs:\n # have both 1D and 3D, so find the neighbors\n # for each of the 3D sections, find the parent sections\n for r in s._regions:\n dxs.add(r._dx)\n for sec in r._secs3d:\n parent_seg = sec.trueparentseg()\n parent_sec = None if not parent_seg else parent_seg.sec\n # are any of these a match with a 1d section?\n if s._has_region_section(r, parent_sec):\n # this section has a 1d section that is a parent\n index1d, indices3d = _get_node_indices(s, r, sec, h.section_orientation(sec=sec), parent_sec, h.parent_connection(sec=sec))\n hybrid_neighbors[index1d] += indices3d\n hybrid_diams[index1d] = parent_seg.diam\n else:\n for sec1d in r._secs1d:\n parent_1d_seg = sec1d.trueparentseg()\n parent_1d = None if not parent_seg else parent_seg.sec\n if parent_1d == sec:\n # it is the parent of a 1d section\n index1d, indices3d = _get_node_indices(s, r, sec, h.parent_connection(sec=sec1d), sec1d, sec1d.orientation())\n hybrid_neighbors[index1d] += indices3d\n hybrid_diams[index1d] = parent_1d_seg.diam\n break\n elif parent_1d == parent_sec:\n # it connects to the parent of a 1d section\n index1d, indices3d = _get_node_indices(s, r, sec, h.section_orientation(sec=sec), sec1d, sec1d.orientation())\n hybrid_neighbors[index1d] += indices3d\n hybrid_diams[index1d] = parent_1d_seg.diam\n break\n if len(dxs) > 1:\n raise RxDException('currently require a unique value for dx')\n dx = dxs.pop()\n diffs = node._diffs\n n = len(_node_get_states())\n # TODO: validate that we're doing the right thing at boundaries\n for index1d in list(hybrid_neighbors.keys()):\n neighbors3d = set(hybrid_neighbors[index1d])\n # NOTE: splitting the connection area equally across all the connecting nodes\n area = (numpy.pi * 0.25 * hybrid_diams[index1d] ** 2) / len(neighbors3d)\n for i in neighbors3d:\n d = diffs[i]\n vol = node._volumes[i]\n rate = d * area / (vol * dx / 2.)\n # make the connections on the 3d side\n _euler_matrix[i, i] -= rate\n _euler_matrix[i, index1d] += rate\n # make the connections on the 1d side (scale by vol because conserving mass not volume)\n _euler_matrix[index1d, index1d] -= rate * vol\n _euler_matrix[index1d, i] += rate * vol\n #print 'index1d row sum:', sum(_euler_matrix[index1d, j] for j in xrange(n))\n #print 'index1d col sum:', sum(_euler_matrix[j, index1d] for j in xrange(n))\n \"\"\"\n #CRxD\n if n and euler_matrix_nnonzero > 0:\n _update_node_data()\n section1d._transfer_to_legacy()\n set_euler_matrix(n, euler_matrix_nnonzero,\n _list_to_clong_array(euler_matrix_i),\n _list_to_clong_array(euler_matrix_j),\n _list_to_cdouble_array(euler_matrix_nonzero),\n _zero_volume_indices,\n len(_zero_volume_indices),\n c_diagonal)\n else:\n rxd_set_no_diffusion()\n setup_solver(_node_get_states(), len(_node_get_states()), _zero_volume_indices, len(_zero_volume_indices), h._ref_t, h._ref_dt)\n \n if _curr_indices is not None and len(_curr_indices) > 0:\n rxd_setup_curr_ptrs(len(_curr_indices), _list_to_cint_array(_curr_indices),\n numpy.concatenate(_curr_scales), _list_to_pyobject_array(_curr_ptrs))\n\n if section1d._all_cindices is not None and 
len(section1d._all_cindices) > 0:\n rxd_setup_conc_ptrs(len(section1d._all_cindices), \n _list_to_cint_array(section1d._all_cindices), \n _list_to_pyobject_array(section1d._all_cptrs))\n\n # we do this last because of performance issues with changing sparsity of csr matrices\n \"\"\"\n if _diffusion_matrix is not None:\n _diffusion_matrix = _diffusion_matrix.tocsr()\n if _euler_matrix is not None:\n _euler_matrix = _euler_matrix.tocsr()\n\n if species._has_1d:\n if species._has_3d:\n _diffusion_matrix = -_euler_matrix\n n = species._1d_submatrix_n()\n if n:\n matrix = _diffusion_matrix[_zero_volume_indices].tocsr()\n indptr = matrix.indptr\n matrixdata = matrix.data\n count = len(_zero_volume_indices)\n for row, i in enumerate(_zero_volume_indices):\n d = _diffusion_matrix[i, i]\n if d:\n matrixdata[indptr[row] : indptr[row + 1]] /= -d\n matrix[row, i] = 0\n else:\n matrixdata[indptr[row] : indptr[row + 1]] = 0\n global _mat_for_zero_volume_nodes\n _mat_for_zero_volume_nodes = matrix\n # TODO: _mat_for_zero_volume_nodes is used for CVode.\n # Figure out if/how it has to be changed for hybrid 1D/3D sims (probably just augment with identity? or change how its used to avoid multiplying by I)\n \n \"\"\"\n \n \"\"\"\n if pt1 in indices:\n ileft = indices[pt1]\n dleft = (d + diffs[ileft]) * 0.5\n left = dleft * areal / (vol * dx)\n euler_matrix[index, ileft] += left\n euler_matrix[index, index] -= left\n if pt2 in indices:\n iright = indices[pt2]\n dright = (d + diffs[iright]) * 0.5\n right = dright * arear / (vol * dx)\n euler_matrix[index, iright] += right\n euler_matrix[index, index] -= right\n\"\"\" \n \n\n\ndef _get_node_indices(species, region, sec3d, x3d, sec1d, x1d):\n # TODO: remove need for this assumption\n assert(x1d in (0, 1))\n disc_indices = region._indices_from_sec_x(sec3d, x3d)\n #print '%r(%g) connects to the 1d section %r(%g)' % (sec3d, x3d, sec1d, x1d)\n #print 'disc indices: %r' % disc_indices\n indices3d = []\n for node in species._nodes:\n if node._r == region:\n for i, j, k in disc_indices:\n if node._i == i and node._j == j and node._k == k:\n indices3d.append(node._index)\n #print 'found node %d with coordinates (%g, %g, %g)' % (node._index, node.x3d, node.y3d, node.z3d)\n # discard duplicates...\n # TODO: really, need to figure out all the 3d nodes connecting to a given 1d endpoint, then unique that\n indices3d = list(set(indices3d))\n #print '3d matrix indices: %r' % indices3d\n # TODO: remove the need for this assertion\n if x1d == sec1d.orientation():\n # TODO: make this whole thing more efficient\n # the parent node is the nonzero index on the first row before the diagonal\n first_row = min([node._index for node in species.nodes(region)(sec1d)])\n for j in range(first_row):\n if _euler_matrix[first_row, j] != 0:\n index_1d = j\n break\n else:\n raise RxDException('should never get here; could not find parent')\n elif x1d == 1 - sec1d.orientation():\n # the ending zero-volume node is the one after the last node\n # TODO: make this more efficient\n index_1d = max([node._index for node in species.nodes(region)(sec1d)]) + 1\n else:\n raise RxDException('should never get here; _get_node_indices apparently only partly converted to allow connecting to 1d in middle')\n #print '1d index is %d' % index_1d\n \n return index_1d, indices3d\n\ndef _compile_reactions():\n #clear all previous reactions (intracellular & extracellular) and the\n #supporting indexes\n #_windows_remove_dlls()\n clear_rates()\n \n regions_inv = dict() #regions -> reactions that occur there\n 
species_by_region = dict()\n all_species_involed = set()\n location_count = 0\n \n ecs_regions_inv = dict()\n ecs_species_by_region = dict()\n ecs_all_species_involed = set()\n ecs_mc_species_involved = set() \n from . import rate, multiCompartmentReaction\n\n #Find sets of sections that contain the same regions\n from .region import _c_region\n matched_regions = [] # the different combinations of regions that arise in different sections\n for nrnsec in list(section1d._rxd_sec_lookup.keys()):\n set_of_regions = set() # a set of the regions that occur in a given section\n for sec in section1d._rxd_sec_lookup[nrnsec]:\n if sec(): set_of_regions.add(sec()._region)\n if set_of_regions not in matched_regions:\n matched_regions.append(set_of_regions)\n region._c_region_lookup = dict()\n \n #create a c_region instance for each of the unique sets of regions\n c_region_list = []\n for sets in matched_regions:\n c_region_list.append(_c_region(sets))\n \n\n for rptr in _all_reactions:\n r = rptr()\n if not r:\n continue\n\n #Find all the species involved\n if isinstance(r,rate.Rate):\n if not r._species():\n continue\n sptrs = set(list(r._involved_species) + [r._species])\n else:\n sptrs = set(list(r._involved_species) + r._dests + r._sources)\n \n #Find all the regions involved\n if isinstance(r, multiCompartmentReaction.MultiCompartmentReaction):\n if not hasattr(r._mult, 'flatten'):\n r._update_indices()\n react_regions = [s()._extracellular()._region for s in r._sources + r._dests if isinstance(s(),species.SpeciesOnExtracellular)] + [s()._region() for s in r._sources + r._dests if not isinstance(s(),species.SpeciesOnExtracellular)]\n react_regions += [sptr()._region() for sptr in sptrs if isinstance(sptr(),species.SpeciesOnRegion)]\n #if regions are specified - use those\n elif hasattr(r,'_active_regions'):\n react_regions = r._active_regions\n #Otherwise use all the regions where the species are\n else:\n react_regions = set()\n nsp = 0\n for sp in sptrs:\n s = sp()\n nsp += 1\n if isinstance(s,species.SpeciesOnRegion):\n react_regions.add(s._region())\n elif isinstance(s,species.SpeciesOnExtracellular):\n react_regions.add(s._extracellular()._region)\n elif isinstance(s,species._ExtracellularSpecies):\n react_regions.add(s._region)\n elif None not in s._regions:\n [react_regions.add(reg) for reg in s._regions + s._extracellular_regions]\n react_regions = list(react_regions)\n #Only regions where ALL the species are present -- unless it is a membrane\n #from collections import Counter\n #from . 
import geometry as geo\n #react_regions = [reg for reg, count in Counter(react_regions).iteritems() if count == nsp or isinstance(reg.geometry,geo.ScalableBorder)]\n #Any intracellular regions\n if not all([isinstance(x, region.Extracellular) for x in react_regions]):\n species_involved = []\n for sp in sptrs:\n s = sp()\n if not isinstance(s, species.SpeciesOnExtracellular):\n all_species_involed.add(s)\n species_involved.append(s)\n for reg in react_regions:\n if isinstance(reg, region.Extracellular):\n continue\n if reg in regions_inv:\n regions_inv[reg].append(rptr)\n else:\n regions_inv[reg] = [rptr]\n if reg in species_by_region:\n species_by_region[reg] = species_by_region[reg].union(species_involved)\n else:\n species_by_region[reg] = set(species_involved)\n for sec in reg._secs:\n location_count += sec.nseg\n #Any extracellular regions\n if any([isinstance(x, region.Extracellular) for x in react_regions]):\n #MultiCompartment - so can have both extracellular and intracellular regions\n if isinstance(r, multiCompartmentReaction.MultiCompartmentReaction):\n for sp in sptrs:\n s = sp()\n if isinstance(s,species._ExtracellularSpecies):\n ecs_mc_species_involved.add(s)\n elif isinstance(s,species.SpeciesOnExtracellular):\n ecs_mc_species_involved.add(s._extracellular())\n for reg in react_regions:\n if reg in list(ecs_species_by_region.keys()):\n ecs_species_by_region[reg] = ecs_species_by_region[reg].union(ecs_mc_species_involved)\n else:\n ecs_species_by_region[reg] = set(ecs_mc_species_involved)\n #Otherwise - reaction can only have extracellular regions\n else:\n ecs_species_involved = []\n for sp in sptrs:\n s = sp()\n ecs_all_species_involed.add(s)\n ecs_species_involved.append(s)\n if any([isinstance(x, region.Region) for x in react_regions]):\n raise RxDException(\"Error: an %s cannot have both Extracellular and Intracellular regions. Use a MultiCompartmentReaction or specify the desired region with the 'region=' keyword argument\", rptr().__class__)\n for reg in react_regions:\n if not isinstance(reg, region.Extracellular):\n continue\n\n if reg in ecs_regions_inv:\n ecs_regions_inv[reg].append(rptr)\n else:\n ecs_regions_inv[reg] = [rptr]\n if reg in ecs_species_by_region:\n ecs_species_by_region[reg] = ecs_species_by_region[reg].union(ecs_species_involved)\n else:\n ecs_species_by_region[reg] = set(ecs_species_involved)\n #Create lists of indexes for intracellular reactions and rates\n nseg_by_region = [] # a list of the number of segments for each region\n # a table for location,species -> state index\n location_index = []\n for reg in regions_inv:\n rptr = weakref.ref(reg)\n for c_region in region._c_region_lookup[rptr]:\n for react in regions_inv[reg]:\n c_region.add_reaction(react,rptr)\n c_region.add_species(species_by_region[reg])\n if reg in ecs_species_by_region:\n c_region.add_ecs_species(ecs_species_by_region[reg])\n\n # now setup the reactions\n setup_solver(_node_get_states(), len(_node_get_states()), _zero_volume_indices, len(_zero_volume_indices), h._ref_t, h._ref_dt)\n #if there are no reactions\n if location_count == 0 and len(ecs_regions_inv) == 0:\n return None\n \n #Setup intracellular and multicompartment reactions\n if location_count > 0:\n from . 
import rate, multiCompartmentReaction\n for creg in c_region_list:\n creg._initalize()\n mc_mult_count = 0\n mc_mult_list = []\n species_ids_used = numpy.zeros((creg.num_species,creg.num_regions),bool)\n flux_ids_used = numpy.zeros((creg.num_species,creg.num_regions),bool)\n ecs_species_ids_used = numpy.zeros((creg.num_ecs_species,creg.num_regions),bool)\n fxn_string = _c_headers \n fxn_string += 'void reaction(double** species, double** rhs, double* mult, double** species_ecs, double** rhs_ecs, double** flux)\\n{'\n # declare the \"rate\" variable if any reactions (non-rates)\n for rprt in list(creg._react_regions.keys()):\n if not isinstance(rprt(),rate.Rate):\n fxn_string += '\\n\\tdouble rate;'\n break\n for rptr in list(creg._react_regions.keys()):\n r = rptr()\n if isinstance(r,rate.Rate):\n s = r._species()\n species_id = creg._species_ids.get(s._id)\n if isinstance(s,species.SpeciesOnRegion):\n region_ids = [creg._region_ids.get(s._region()._id)]\n else:\n region_ids = creg._react_regions[rptr]\n for region_id in region_ids:\n rate_str = re.sub(r'species\\[(\\d+)\\]\\[(\\d+)\\]',lambda m: \"species[%i][%i]\" % (creg._species_ids.get(int(m.groups()[0])), creg._region_ids.get(int(m.groups()[1]))), r._rate)\n rate_str = re.sub(r'species\\[(\\d+)\\]\\[\\]',lambda m: \"species[%i][%i]\" % (creg._species_ids.get(int(m.groups()[0])), region_id), rate_str)\n operator = '+=' if species_ids_used[species_id][region_id] else '='\n fxn_string += \"\\n\\trhs[%d][%d] %s %s;\" % (species_id, region_id, operator, rate_str)\n species_ids_used[species_id][region_id] = True\n elif isinstance(r, multiCompartmentReaction.MultiCompartmentReaction):\n #Lookup the region_id for the reaction\n for sptr in r._sources + r._dests:\n if isinstance(sptr(),species.SpeciesOnExtracellular):\n continue\n region_id = creg._region_ids.get(sptr()._region()._id)\n rate_str = re.sub(r'species\\[(\\d+)\\]\\[(\\d+)\\]',lambda m: \"species[%i][%i]\" % (creg._species_ids.get(int(m.groups()[0])), creg._region_ids.get(int(m.groups()[1]))), r._rate)\n rate_str = re.sub(r'species\\[(\\d+)\\]\\[\\]',lambda m: \"species[%i][%i]\" % (creg._species_ids.get(int(m.groups()[0])), region_id), rate_str)\n rate_str = re.sub(r'species_ecs\\[(\\d+)\\]',lambda m: \"species_ecs[%i][%i]\" % (int(m.groups()[0]), region_id), rate_str)\n \n fxn_string += \"\\n\\trate = %s;\" % rate_str\n \n for sptr in r._sources + r._dests:\n s = sptr()\n if isinstance(s,species.SpeciesOnExtracellular):\n species_id = s._extracellular()._grid_id\n operator = '+=' if ecs_species_ids_used[species_id][region_id] else '='\n fxn_string += \"\\n\\trhs_ecs[%d][%d] %s mult[%d] * rate;\" % (species_id, region_id, operator, mc_mult_count)\n ecs_species_ids_used[species_id][region_id] = True\n else:\n species_id = creg._species_ids.get(s._id)\n region_id = creg._region_ids.get(sptr()._region()._id)\n operator = '+=' if species_ids_used[species_id][region_id] else '='\n fxn_string += \"\\n\\trhs[%d][%d] %s mult[%d] * rate;\" % (species_id, region_id, operator, mc_mult_count)\n species_ids_used[species_id][region_id] = True\n if r._membrane_flux:\n operator = '+=' if flux_ids_used[species_id][region_id] else '='\n fxn_string += \"\\n\\tif(flux) flux[%d][%d] %s rate;\" % (species_id, region_id, operator)\n flux_ids_used[species_id][region_id] = True\n #TODO: Fix problem if the whole region isn't part of the same aggregate c_region\n mc_mult_count += 1\n mc_mult_list.extend(r._mult.flatten())\n else:\n for region_id in creg._react_regions[rptr]:\n \n rate_str = 
re.sub(r'species\\[(\\d+)\\]\\[(\\d+)\\]',lambda m: \"species[%i][%i]\" % (creg._species_ids.get(int(m.groups()[0])), creg._region_ids.get(int(m.groups()[1]))), r._rate)\n rate_str = re.sub(r'species\\[(\\d+)\\]\\[\\]',lambda m: \"species[%i][%i]\" % (creg._species_ids.get(int(m.groups()[0])), region_id), rate_str)\n fxn_string += \"\\n\\trate = %s;\" % rate_str\n summed_mults = collections.defaultdict(lambda: 0)\n for (mult, sp) in zip(r._mult, r._sources + r._dests):\n summed_mults[creg._species_ids.get(sp()._id)] += mult\n for idx in sorted(summed_mults.keys()):\n operator = '+=' if species_ids_used[idx][region_id] else '='\n species_ids_used[idx][region_id] = True\n fxn_string += \"\\n\\trhs[%d][%d] %s (%g) * rate;\" % (idx, region_id, operator, summed_mults[idx])\n \n fxn_string += \"\\n}\\n\"\n register_rate(creg.num_species, creg.num_regions, creg.num_segments, creg.get_state_index(),\n creg.num_ecs_species, creg.get_ecs_species_ids(), creg.get_ecs_index(),\n mc_mult_count, numpy.array(mc_mult_list, dtype=ctypes.c_double),\n _c_compile(fxn_string))\n\n \n #Setup extracellular reactions\n if len(ecs_regions_inv) > 0:\n grid_ids = []\n all_gids = set() \n fxn_string = _c_headers \n #TODO: find the nrn include path in python\n #It is necessary for a couple of function in python that are not in math.h\n fxn_string += 'void reaction(double* species_ecs, double* rhs)\\n{'\n # declare the \"rate\" variable if any reactions (non-rates)\n for rptr in [r for rlist in list(ecs_regions_inv.values()) for r in rlist]:\n if not isinstance(rptr(),rate.Rate):\n fxn_string += '\\n\\tdouble rate;'\n break\n #get a list of all grid_ids invovled\n for rptr in [r for rlist in list(ecs_regions_inv.values()) for r in rlist]:\n if isinstance(rptr(),rate.Rate):\n for sp in [rptr()._species] + rptr()._involved_species_ecs:\n s = sp()[reg]._extracellular() if isinstance(sp(), species.Species) else sp()\n all_gids.add(sp()._extracellular()._grid_id if isinstance(s, species.SpeciesOnExtracellular) else s._grid_id)\n else:\n for sp in rptr()._sources + rptr()._dests + rptr()._involved_species_ecs:\n s = sp()[reg]._extracellular() if isinstance(sp(), species.Species) else sp()\n all_gids.add(sp()._extracellular()._grid_id if isinstance(s, species.SpeciesOnExtracellular) else s._grid_id)\n all_gids = list(all_gids)\n for reg in ecs_regions_inv:\n for rptr in ecs_regions_inv[reg]:\n r = rptr()\n rate_str = re.sub(r'species_ecs\\[(\\d+)\\]',lambda m: \"species_ecs[%i]\" % [pid for pid,gid in enumerate(all_gids) if gid == int(m.groups()[0])][0], r._rate_ecs)\n if isinstance(r,rate.Rate):\n s = r._species()\n #Get underlying rxd._ExtracellularSpecies for the grid_id\n if isinstance(s, species.Species):\n s = s[reg]._extracellular()\n elif isinstance(s, species.SpeciesOnExtracellular):\n s = s._extracellular()\n if s._grid_id in grid_ids:\n operator = '+=' \n else:\n operator = '='\n grid_ids.append(s._grid_id)\n pid = [pid for pid,gid in enumerate(all_gids) if gid == s._grid_id][0]\n fxn_string += \"\\n\\trhs[%d] %s %s;\" % (pid, operator, rate_str)\n else:\n idx=0\n fxn_string += \"\\n\\trate = %s;\" % rate_str\n for sp in r._sources + r._dests:\n s = sp()\n #Get underlying rxd._ExtracellularSpecies for the grid_id\n if isinstance(s, species.Species):\n s = s[reg]._extracellular()\n elif isinstance(s, species.SpeciesOnExtracellular):\n s = s._extracellular()\n if s._grid_id in grid_ids:\n operator = '+=' \n else:\n operator = '='\n grid_ids.append(s._grid_id)\n pid = [pid for pid,gid in enumerate(all_gids) if gid == 
s._grid_id][0]\n fxn_string += \"\\n\\trhs[%d] %s (%s)*rate;\" % (pid, operator, r._mult[idx])\n idx += 1\n fxn_string += \"\\n}\\n\"\n ecs_register_reaction(0, len(all_gids), _list_to_cint_array(all_gids), _c_compile(fxn_string))\n\ndef _init():\n if len(species._all_species) == 0:\n return None\n initializer._do_init()\n # TODO: check about the 0<x<1 problem alluded to in the documentation\n h.define_shape()\n\n # if the shape has changed update the nodes\n _update_node_data()\n \n if species._has_1d:\n section1d._purge_cptrs()\n \n for sr in list(_species_get_all_species().values()):\n s = sr()\n if s is not None:\n # TODO: are there issues with hybrid or 3D here? (I don't think so, but here's a bookmark just in case)\n s._register_cptrs()\n s._finitialize()\n _setup_matrices()\n _compile_reactions()\n _setup_memb_currents()\n\ndef _init_concentration():\n if len(species._all_species) == 0:\n return None\n for sr in list(_species_get_all_species().values()):\n s = sr()\n if s is not None:\n # TODO: are there issues with hybrid or 3D here? (I don't think so, but here's a bookmark just in case)\n s._finitialize()\n\n\n\n_has_nbs_registered = False\n_nbs = None\ndo_setup_matrices_fptr = None\ndef _do_nbs_register():\n global _has_nbs_registered, _nbs, _fih, _fih2, _fih3, do_setup_matrices_fptr\n \n if not _has_nbs_registered:\n #from neuron import nonvint_block_supervisor as _nbs\n\n _has_nbs_registered = True\n #_nbs.register(_callbacks) not used by crxd\n \n #\n # register the initialization handler and the ion register handler\n #\n _fih = h.FInitializeHandler(_init_concentration)\n _fih3 = h.FInitializeHandler(3, _init)\n\n set_setup_matrices = nrn_dll_sym('set_setup_matrices')\n set_setup_matrices.argtypes = [fptr_prototype]\n do_setup_matrices_fptr = fptr_prototype(_setup_matrices)\n set_setup_matrices(do_setup_matrices_fptr)\n\n _fih2 = h.FInitializeHandler(3, initializer._do_ion_register)\n\n\n #\n # register scatter/gather mechanisms\n #\n _cvode_object.extra_scatter_gather(0, _after_advance)\n \n\n# register the Python callbacks\ndo_setup_fptr = fptr_prototype(_setup)\ndo_initialize_fptr = fptr_prototype(_init)\nset_setup(do_setup_fptr)\nset_initialize(do_initialize_fptr)\n\ndef _windows_remove_dlls():\n global _windows_dll_files, _windows_dll\n for (dll_ptr,filepath) in zip(_windows_dll,_windows_dll_files):\n dll = dll_ptr()\n if dll:\n handle = dll._handle\n del dll\n ctypes.windll.kernel32.FreeLibrary(handle)\n os.remove(filepath)\n _windows_dll_files = []\n _windows_dll = []\n \n \ndef nthread(n=None):\n if(n):\n _set_num_threads(n)\n return _get_num_threads()\n"
] | [
[
"numpy.ascontiguousarray",
"numpy.ndarray",
"numpy.concatenate",
"numpy.ctypeslib.ndpointer",
"numpy.array",
"numpy.zeros",
"numpy.where"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
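The rxd record above hands Python index lists to compiled C callbacks (e.g. `rxd_setup_conc_ptrs`) through helpers such as `_list_to_cint_array`, and its API list names `numpy.ctypeslib.ndpointer` and `numpy.ascontiguousarray`. The sketch below only illustrates that general ctypes/NumPy interop pattern; the helper name, the placeholder library, and the `argtypes` line are assumptions, not the module's actual implementation.

```python
import ctypes
import numpy as np

# Hypothetical helper mirroring the list-to-C-int-array pattern used in the
# record's code (the real _list_to_cint_array lives inside NEURON and may
# differ): copy a Python list into a contiguous C int buffer.
def list_to_cint_array(values):
    arr = np.ascontiguousarray(values, dtype=ctypes.c_int)
    # Return the buffer too so it is kept alive while the pointer is in use.
    return arr.ctypes.data_as(ctypes.POINTER(ctypes.c_int)), arr

# Declaring a C callback's argument types with numpy.ctypeslib.ndpointer,
# as the listed APIs suggest; 'libexample.so' is a placeholder name.
int_array_1d = np.ctypeslib.ndpointer(dtype=np.intc, ndim=1, flags='C_CONTIGUOUS')
# lib = ctypes.CDLL('libexample.so')
# lib.rxd_setup_conc_ptrs.argtypes = [ctypes.c_int, int_array_1d]
```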
alexlee-gk/visual_dynamics | [
"90227bb0d0aebb1989117b5c25ca311655ca7cc7",
"90227bb0d0aebb1989117b5c25ca311655ca7cc7"
] | [
"visual_dynamics/gui/gps_training_gui.py",
"visual_dynamics/utils/container.py"
] | [
"\"\"\"\nGPS Training GUI\n\nThe GPS Training GUI is used to interact with the GPS algorithm during training.\nIt contains the below seven functionalities:\n\nAction Panel contains buttons for stop, reset, go, fail\nAction Status Textbox displays action status\nAlgorithm Status Textbox displays algorithm status\nCost Plot displays costs after each iteration\nAlgorithm Output Textbox displays algorithm output after each iteration\n3D Trajectory Visualizer displays 3D trajectories after each iteration\nImage Visualizer displays images received from a rostopic\n\nFor more detailed documentation, visit: rll.berkeley.edu/gps/gui\n\"\"\"\nimport time\nimport threading\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.gridspec as gridspec\n\nfrom gps.gui.config import config\nfrom gps.gui.action_panel import Action, ActionPanel\nfrom gps.gui.textbox import Textbox\nfrom gps.gui.mean_plotter import MeanPlotter\nfrom gps.gui.plotter_3d import Plotter3D\nfrom gps.gui.image_visualizer import ImageVisualizer\nfrom gps.gui.util import buffered_axis_limits, load_data_from_npz\n\nfrom gps.proto.gps_pb2 import END_EFFECTOR_POINTS\n\n# Needed for typechecks\nfrom gps.algorithm.algorithm_badmm import AlgorithmBADMM\nfrom gps.algorithm.algorithm_mdgps import AlgorithmMDGPS\n\nclass GPSTrainingGUI(object):\n\n def __init__(self, hyperparams):\n self._hyperparams = hyperparams\n self._log_filename = self._hyperparams['log_filename']\n if 'target_filename' in self._hyperparams:\n self._target_filename = self._hyperparams['target_filename']\n else:\n self._target_filename = None\n\n # GPS Training Status.\n self.mode = config['initial_mode'] # Modes: run, wait, end, request, process.\n self.request = None # Requests: stop, reset, go, fail, None.\n self.err_msg = None\n self._colors = {\n 'run': 'cyan',\n 'wait': 'orange',\n 'end': 'red',\n\n 'stop': 'red',\n 'reset': 'yellow',\n 'go': 'green',\n 'fail': 'magenta',\n }\n self._first_update = True\n\n # Actions.\n actions_arr = [\n Action('stop', 'stop', self.request_stop, axis_pos=0),\n Action('reset', 'reset', self.request_reset, axis_pos=1),\n Action('go', 'go', self.request_go, axis_pos=2),\n Action('fail', 'fail', self.request_fail, axis_pos=3),\n ]\n\n # Setup figure.\n plt.ion()\n plt.rcParams['toolbar'] = 'None'\n for key in plt.rcParams:\n if key.startswith('keymap.'):\n plt.rcParams[key] = ''\n\n self._fig = plt.figure(figsize=config['figsize'])\n self._fig.subplots_adjust(left=0.01, bottom=0.01, right=0.99, top=0.99,\n wspace=0, hspace=0)\n\n # Assign GUI component locations.\n self._gs = gridspec.GridSpec(16, 8)\n self._gs_action_panel = self._gs[0:2, 0:8]\n self._gs_action_output = self._gs[2:3, 0:4]\n self._gs_status_output = self._gs[3:4, 0:4]\n self._gs_cost_plotter = self._gs[2:4, 4:8]\n self._gs_algthm_output = self._gs[4:8, 0:8]\n if config['image_on']:\n self._gs_traj_visualizer = self._gs[8:16, 0:4]\n self._gs_image_visualizer = self._gs[8:16, 4:8]\n else:\n self._gs_traj_visualizer = self._gs[8:16, 0:8]\n\n # Create GUI components.\n self._action_panel = ActionPanel(self._fig, self._gs_action_panel, 1, 4, actions_arr)\n self._action_output = Textbox(self._fig, self._gs_action_output, border_on=True)\n self._status_output = Textbox(self._fig, self._gs_status_output, border_on=False)\n self._algthm_output = Textbox(self._fig, self._gs_algthm_output,\n max_display_size=config['algthm_output_max_display_size'],\n log_filename=self._log_filename,\n fontsize=config['algthm_output_fontsize'],\n font_family='monospace')\n 
self._cost_plotter = MeanPlotter(self._fig, self._gs_cost_plotter,\n color='blue', label='mean cost')\n self._traj_visualizer = Plotter3D(self._fig, self._gs_traj_visualizer,\n num_plots=self._hyperparams['conditions'])\n if config['image_on']:\n self._image_visualizer = ImageVisualizer(self._fig,\n self._gs_image_visualizer, cropsize=config['image_size'],\n rostopic=config['image_topic'], show_overlay_buttons=True)\n\n # Setup GUI components.\n self._algthm_output.log_text('\\n')\n self.set_output_text(self._hyperparams['info'])\n if config['initial_mode'] == 'run':\n self.run_mode()\n else:\n self.wait_mode()\n\n # Setup 3D Trajectory Visualizer plot titles and legends\n for m in range(self._hyperparams['conditions']):\n self._traj_visualizer.set_title(m, 'Condition %d' % (m))\n self._traj_visualizer.add_legend(linestyle='-', marker='None',\n color='green', label='Trajectory Samples')\n self._traj_visualizer.add_legend(linestyle='-', marker='None',\n color='blue', label='Policy Samples')\n self._traj_visualizer.add_legend(linestyle='None', marker='x',\n color=(0.5, 0, 0), label='LG Controller Means')\n self._traj_visualizer.add_legend(linestyle='-', marker='None',\n color='red', label='LG Controller Distributions')\n\n self._fig.canvas.draw()\n\n # Display calculating thread\n def display_calculating(delay, run_event):\n while True:\n if not run_event.is_set():\n run_event.wait()\n if run_event.is_set():\n self.set_status_text('Calculating.')\n time.sleep(delay)\n if run_event.is_set():\n self.set_status_text('Calculating..')\n time.sleep(delay)\n if run_event.is_set():\n self.set_status_text('Calculating...')\n time.sleep(delay)\n\n self._calculating_run = threading.Event()\n self._calculating_thread = threading.Thread(target=display_calculating,\n args=(1, self._calculating_run))\n self._calculating_thread.daemon = True\n self._calculating_thread.start()\n\n # GPS Training functions\n def request_stop(self, event=None):\n self.request_mode('stop')\n\n def request_reset(self, event=None):\n self.request_mode('reset')\n\n def request_go(self, event=None):\n self.request_mode('go')\n\n def request_fail(self, event=None):\n self.request_mode('fail')\n\n def request_mode(self, request):\n \"\"\"\n Sets the request mode (stop, reset, go, fail). 
The request is read by\n gps_main before sampling, and the appropriate action is taken.\n \"\"\"\n self.mode = 'request'\n self.request = request\n self.set_action_text(self.request + ' requested')\n self.set_action_bgcolor(self._colors[self.request], alpha=0.2)\n\n def process_mode(self):\n \"\"\"\n Completes the current request, after it is first read by gps_main.\n Displays visual confirmation that the request was processed,\n displays any error messages, and then switches into mode 'run' or 'wait'.\n \"\"\"\n self.mode = 'process'\n self.set_action_text(self.request + ' processed')\n self.set_action_bgcolor(self._colors[self.request], alpha=1.0)\n if self.err_msg:\n self.set_action_text(self.request + ' processed' + '\\nERROR: ' +\n self.err_msg)\n self.err_msg = None\n time.sleep(1.0)\n else:\n time.sleep(0.5)\n if self.request in ('stop', 'reset', 'fail'):\n self.wait_mode()\n elif self.request == 'go':\n self.run_mode()\n self.request = None\n\n def wait_mode(self):\n self.mode = 'wait'\n self.set_action_text('waiting')\n self.set_action_bgcolor(self._colors[self.mode], alpha=1.0)\n\n def run_mode(self):\n self.mode = 'run'\n self.set_action_text('running')\n self.set_action_bgcolor(self._colors[self.mode], alpha=1.0)\n\n def end_mode(self):\n self.mode = 'end'\n self.set_action_text('ended')\n self.set_action_bgcolor(self._colors[self.mode], alpha=1.0)\n\n def estop(self, event=None):\n self.set_action_text('estop: NOT IMPLEMENTED')\n\n # GUI functions\n def set_action_text(self, text):\n self._action_output.set_text(text)\n self._cost_plotter.draw_ticklabels() # redraw overflow ticklabels\n\n def set_action_bgcolor(self, color, alpha=1.0):\n self._action_output.set_bgcolor(color, alpha)\n self._cost_plotter.draw_ticklabels() # redraw overflow ticklabels\n\n def set_status_text(self, text):\n self._status_output.set_text(text)\n self._cost_plotter.draw_ticklabels() # redraw overflow ticklabels\n\n def set_output_text(self, text):\n self._algthm_output.set_text(text)\n self._cost_plotter.draw_ticklabels() # redraw overflow ticklabels\n\n def append_output_text(self, text):\n self._algthm_output.append_text(text)\n self._cost_plotter.draw_ticklabels() # redraw overflow ticklabels\n\n def start_display_calculating(self):\n self._calculating_run.set()\n\n def stop_display_calculating(self):\n self._calculating_run.clear()\n\n def set_image_overlays(self, condition):\n \"\"\"\n Sets up the image visualizer with what images to overlay if\n \"overlay_initial_image\" or \"overlay_target_image\" is pressed.\n \"\"\"\n if not config['image_on'] or not self._target_filename:\n return\n initial_image = load_data_from_npz(self._target_filename,\n config['image_overlay_actuator'], str(condition),\n 'initial', 'image', default=None)\n target_image = load_data_from_npz(self._target_filename,\n config['image_overlay_actuator'], str(condition),\n 'target', 'image', default=None)\n self._image_visualizer.set_initial_image(initial_image,\n alpha=config['image_overlay_alpha'])\n self._image_visualizer.set_target_image(target_image,\n alpha=config['image_overlay_alpha'])\n\n # Iteration update functions\n def update(self, itr, algorithm, agent, traj_sample_lists, pol_sample_lists):\n \"\"\"\n After each iteration, update the iteration data output, the cost plot,\n and the 3D trajectory visualizations (if end effector points exist).\n \"\"\"\n if self._first_update:\n self._output_column_titles(algorithm)\n self._first_update = False\n\n costs = [np.mean(np.sum(algorithm.prev[m].cs, axis=1)) for m in 
range(algorithm.M)]\n self._update_iteration_data(itr, algorithm, costs, pol_sample_lists)\n self._cost_plotter.update(costs, t=itr)\n if END_EFFECTOR_POINTS in agent.x_data_types:\n self._update_trajectory_visualizations(algorithm, agent,\n traj_sample_lists, pol_sample_lists)\n\n self._fig.canvas.draw()\n self._fig.canvas.flush_events() # Fixes bug in Qt4Agg backend\n\n def _output_column_titles(self, algorithm, policy_titles=False):\n \"\"\"\n Setup iteration data column titles: iteration, average cost, and for\n each condition the mean cost over samples, step size, linear Guassian\n controller entropies, and initial/final KL divergences for BADMM.\n \"\"\"\n self.set_output_text(self._hyperparams['experiment_name'])\n if isinstance(algorithm, AlgorithmMDGPS) or isinstance(algorithm, AlgorithmBADMM):\n condition_titles = '%3s | %8s %12s' % ('', '', '')\n itr_data_fields = '%3s | %8s %12s' % ('itr', 'avg_cost', 'avg_pol_cost')\n else:\n condition_titles = '%3s | %8s' % ('', '')\n itr_data_fields = '%3s | %8s' % ('itr', 'avg_cost')\n for m in range(algorithm.M):\n condition_titles += ' | %8s %9s %-7d' % ('', 'condition', m)\n itr_data_fields += ' | %8s %8s %8s' % (' cost ', ' step ', 'entropy ')\n if isinstance(algorithm, AlgorithmBADMM):\n condition_titles += ' %8s %8s %8s' % ('', '', '')\n itr_data_fields += ' %8s %8s %8s' % ('pol_cost', 'kl_div_i', 'kl_div_f')\n elif isinstance(algorithm, AlgorithmMDGPS):\n condition_titles += ' %8s' % ('')\n itr_data_fields += ' %8s' % ('pol_cost')\n self.append_output_text(condition_titles)\n self.append_output_text(itr_data_fields)\n\n def _update_iteration_data(self, itr, algorithm, costs, pol_sample_lists):\n \"\"\"\n Update iteration data information: iteration, average cost, and for\n each condition the mean cost over samples, step size, linear Guassian\n controller entropies, and initial/final KL divergences for BADMM.\n \"\"\"\n avg_cost = np.mean(costs)\n if pol_sample_lists is not None:\n test_idx = algorithm._hyperparams['test_conditions']\n # pol_sample_lists is a list of singletons\n samples = [sl[0] for sl in pol_sample_lists]\n pol_costs = [np.sum(algorithm.cost[idx].eval(s)[0])\n for s, idx in zip(samples, test_idx)]\n itr_data = '%3d | %8.2f %12.2f' % (itr, avg_cost, np.mean(pol_costs))\n else:\n itr_data = '%3d | %8.2f' % (itr, avg_cost)\n for m in range(algorithm.M):\n cost = costs[m]\n step = algorithm.prev[m].step_mult * algorithm.base_kl_step\n entropy = 2*np.sum(np.log(np.diagonal(algorithm.prev[m].traj_distr.chol_pol_covar,\n axis1=1, axis2=2)))\n itr_data += ' | %8.2f %8.2f %8.2f' % (cost, step, entropy)\n if isinstance(algorithm, AlgorithmBADMM):\n kl_div_i = algorithm.cur[m].pol_info.init_kl.mean()\n kl_div_f = algorithm.cur[m].pol_info.prev_kl.mean()\n itr_data += ' %8.2f %8.2f %8.2f' % (pol_costs[m], kl_div_i, kl_div_f)\n elif isinstance(algorithm, AlgorithmMDGPS):\n # TODO: Change for test/train better.\n if test_idx == algorithm._hyperparams['train_conditions']:\n itr_data += ' %8.2f' % (pol_costs[m])\n else:\n itr_data += ' %8s' % (\"N/A\")\n self.append_output_text(itr_data)\n\n def _update_trajectory_visualizations(self, algorithm, agent,\n traj_sample_lists, pol_sample_lists):\n \"\"\"\n Update 3D trajectory visualizations information: the trajectory samples,\n policy samples, and linear Gaussian controller means and covariances.\n \"\"\"\n xlim, ylim, zlim = self._calculate_3d_axis_limits(traj_sample_lists, pol_sample_lists)\n for m in range(algorithm.M):\n self._traj_visualizer.clear(m)\n 
self._traj_visualizer.set_lim(i=m, xlim=xlim, ylim=ylim, zlim=zlim)\n self._update_linear_gaussian_controller_plots(algorithm, agent, m)\n self._update_samples_plots(traj_sample_lists, m, 'green', 'Trajectory Samples')\n if pol_sample_lists:\n self._update_samples_plots(pol_sample_lists, m, 'blue', 'Policy Samples')\n self._traj_visualizer.draw() # this must be called explicitly\n\n def _calculate_3d_axis_limits(self, traj_sample_lists, pol_sample_lists):\n \"\"\"\n Calculate the 3D axis limits shared between trajectory plots,\n based on the minimum and maximum xyz values across all samples.\n \"\"\"\n all_eept = np.empty((0, 3))\n sample_lists = traj_sample_lists\n if pol_sample_lists:\n sample_lists += traj_sample_lists\n for sample_list in sample_lists:\n for sample in sample_list.get_samples():\n ee_pt = sample.get(END_EFFECTOR_POINTS)\n for i in range(ee_pt.shape[1]/3):\n ee_pt_i = ee_pt[:, 3*i+0:3*i+3]\n all_eept = np.r_[all_eept, ee_pt_i]\n min_xyz = np.amin(all_eept, axis=0)\n max_xyz = np.amax(all_eept, axis=0)\n xlim = buffered_axis_limits(min_xyz[0], max_xyz[0], buffer_factor=1.25)\n ylim = buffered_axis_limits(min_xyz[1], max_xyz[1], buffer_factor=1.25)\n zlim = buffered_axis_limits(min_xyz[2], max_xyz[2], buffer_factor=1.25)\n return xlim, ylim, zlim\n\n def _update_linear_gaussian_controller_plots(self, algorithm, agent, m):\n \"\"\"\n Update the linear Guassian controller plots with iteration data,\n for the mean and covariances of the end effector points.\n \"\"\"\n # Calculate mean and covariance for end effector points\n eept_idx = agent.get_idx_x(END_EFFECTOR_POINTS)\n start, end = eept_idx[0], eept_idx[-1]\n mu, sigma = algorithm.traj_opt.forward(algorithm.prev[m].traj_distr, algorithm.prev[m].traj_info)\n mu_eept, sigma_eept = mu[:, start:end+1], sigma[:, start:end+1, start:end+1]\n\n # Linear Gaussian Controller Distributions (Red)\n for i in range(mu_eept.shape[1]/3):\n mu, sigma = mu_eept[:, 3*i+0:3*i+3], sigma_eept[:, 3*i+0:3*i+3, 3*i+0:3*i+3]\n self._traj_visualizer.plot_3d_gaussian(i=m, mu=mu, sigma=sigma,\n edges=100, linestyle='-', linewidth=1.0, color='red',\n alpha=0.15, label='LG Controller Distributions')\n\n # Linear Gaussian Controller Means (Dark Red)\n for i in range(mu_eept.shape[1]/3):\n mu = mu_eept[:, 3*i+0:3*i+3]\n self._traj_visualizer.plot_3d_points(i=m, points=mu, linestyle='None',\n marker='x', markersize=5.0, markeredgewidth=1.0,\n color=(0.5, 0, 0), alpha=1.0, label='LG Controller Means')\n\n def _update_samples_plots(self, sample_lists, m, color, label):\n \"\"\"\n Update the samples plots with iteration data, for the trajectory samples\n and the policy samples.\n \"\"\"\n samples = sample_lists[m].get_samples()\n for sample in samples:\n ee_pt = sample.get(END_EFFECTOR_POINTS)\n for i in range(ee_pt.shape[1]/3):\n ee_pt_i = ee_pt[:, 3*i+0:3*i+3]\n self._traj_visualizer.plot_3d_points(m, ee_pt_i, color=color, label=label)\n\n def save_figure(self, filename):\n self._fig.savefig(filename)\n",
"from __future__ import division, print_function\n\nimport io\nimport os\nimport sys\n\nimport cv2\nimport h5py\nimport numpy as np\n\nfrom visual_dynamics.utils import config\nfrom visual_dynamics.utils import math_utils\n\n\ndef open_23(name, mode='r'):\n if sys.version_info.major == 3:\n return open(name, mode=mode)\n elif sys.version_info.major == 2:\n if mode == 'x':\n if os.path.exists(name):\n raise OSError(\"file %s exists\" % name)\n else:\n mode = 'w+'\n return io.open(name, mode=mode)\n else:\n raise ValueError(\"unknown major version %d\" % sys.version_info.major)\n\n\nclass DataContainer(object):\n def __init__(self, data_dir, mode='r'):\n self.data_dir = self._require_data_dir(data_dir, mode)\n self.mode = mode\n self.info_file = None\n self.hdf5_file = None\n\n info_fname = os.path.join(self.data_dir, 'info.yaml')\n self.info_file = open_23(info_fname, mode)\n try:\n self.info_dict = config.from_yaml(self.info_file) or dict() # store info entries here and dump it only when the container is closed\n except io.UnsupportedOperation: # file is probably empty\n self.info_dict = dict()\n self.data_shapes_dict = self.info_dict.get('data_shapes', None) or dict()\n self.datum_shapes_dict = self.info_dict.get('datum_shapes', None) or dict()\n data_fname = os.path.join(self.data_dir, 'data.h5')\n self.hdf5_file = h5py.File(data_fname, mode)\n\n def close(self):\n if self.info_file:\n if self.mode != 'r':\n try:\n self.add_info(data_shapes=self.data_shapes_dict)\n self.add_info(datum_shapes=self.datum_shapes_dict)\n config.to_yaml(self.info_dict, self.info_file)\n except io.UnsupportedOperation: # container is probably in read mode\n pass\n self.info_file.close()\n self.info_file = None\n if self.hdf5_file:\n self.hdf5_file.close()\n self.hdf5_file = None\n\n def __enter__(self):\n return self\n\n def __exit__(self, *args):\n self.close()\n\n def add_info(self, **info_dict):\n self.info_dict.update(**info_dict)\n\n def get_info(self, info_names):\n if isinstance(info_names, str):\n names = list([info_names])\n else:\n names = list(info_names)\n info = []\n for name in names:\n info.append(self.info_dict[name])\n if isinstance(info_names, str):\n info, = info\n return info\n\n def reserve(self, names, shape):\n if isinstance(names, str):\n names = list([names])\n else:\n names = list(names)\n try:\n shape = tuple(shape)\n except TypeError:\n shape = tuple((shape,))\n for name in names:\n if name in self.data_shapes_dict and self.data_shapes_dict[name] != shape:\n raise ValueError('unable to reserve for %s since it was already reserved with shape %s,'\n 'but shape %s was given' % (name, self.data_shapes_dict[name], shape))\n self.data_shapes_dict[name] = shape\n\n def add_datum(self, *inds, **datum_dict):\n for name, value in datum_dict.items():\n if name in self.datum_shapes_dict and self.datum_shapes_dict[name] != value.shape:\n raise ValueError('unable to add datum %s with shape %s since the shape %s was expected' %\n (name, value.shape, self.datum_shapes_dict[name]))\n self.datum_shapes_dict[name] = value.shape\n datum_size = self.get_data_size(name)\n shape = (datum_size, ) + value.shape\n dset = self.hdf5_file.require_dataset(name, shape, value.dtype, exact=True)\n datum_ind = self._get_datum_ind(*(inds + (name,)))\n dset[datum_ind] = value\n\n def get_datum(self, *inds_and_datum_names):\n inds, datum_names = inds_and_datum_names[:-1], inds_and_datum_names[-1]\n if isinstance(datum_names, str):\n names = list([datum_names])\n else:\n names = list(datum_names)\n datum = []\n for name 
in names:\n datum_ind = self._get_datum_ind(*(inds + (name,)))\n datum.append(self.hdf5_file[name][datum_ind][()])\n if isinstance(datum_names, str):\n datum, = datum\n return datum\n\n def get_datum_shape(self, name):\n shape = self.datum_shapes_dict.get(name, None)\n if shape is None:\n raise ValueError('shape for name %s does not exist' % name)\n return shape\n\n def get_data_shape(self, name):\n shape = self.data_shapes_dict.get(name, None)\n if shape is None:\n raise ValueError('shape is not reserved for name %s' % name)\n return shape\n\n def get_data_size(self, name):\n return np.prod(self.get_data_shape(name))\n\n def _get_canonical_inds(self, *inds_and_name):\n inds, name = inds_and_name[:-1], inds_and_name[-1]\n inds = list(inds)\n shape = self.get_data_shape(name)\n for i, ind in enumerate(inds):\n if ind < 0:\n inds[i] += shape[i]\n return tuple(inds)\n\n def _check_ind_range(self, *inds_and_name):\n inds, name = inds_and_name[:-1], inds_and_name[-1]\n shape = self.get_data_shape(name)\n if len(inds) != len(shape):\n raise IndexError('the number of indices does not match the number of dimensions of the data')\n for i, ind in enumerate(inds):\n if not (0 <= ind < shape[i]):\n raise IndexError('index at position %d is out of range for entry with name %s' % (i, name))\n\n def _get_datum_ind(self, *inds_and_name):\n inds, name = inds_and_name[:-1], inds_and_name[-1]\n inds = self._get_canonical_inds(*(inds + (name,)))\n self._check_ind_range(*(inds + (name,)))\n shape = self.get_data_shape(name)\n datum_ind = 0\n for ind, dim in zip(inds, shape):\n datum_ind *= dim\n datum_ind += ind\n assert 0 <= datum_ind < self.get_data_size(name)\n return datum_ind\n\n def _get_datum_inds(self, datum_ind, name):\n assert 0 <= datum_ind < self.get_data_size(name)\n shape = self.get_data_shape(name)\n ind = datum_ind\n inds = (-1,) * len(shape)\n for i, dim in reversed(list(enumerate(shape))):\n inds[i] = ind % dim\n ind //= dim\n assert datum_ind == self._get_datum_ind(*(inds + (name,)))\n return tuple(inds)\n\n def _require_data_dir(self, data_dir, mode):\n if 'r' in mode:\n if not os.path.exists(data_dir):\n raise IOError('data directory %s not found' % data_dir)\n elif 'a' in mode:\n if not os.path.exists(data_dir):\n os.makedirs(data_dir)\n elif 'w' in mode:\n if os.path.exists(data_dir):\n for f in os.listdir(data_dir):\n os.remove(os.path.join(data_dir, f))\n else:\n os.makedirs(data_dir)\n elif 'x' in mode:\n if os.path.exists(data_dir):\n raise OSError('data directory %s exists' % data_dir)\n else:\n os.makedirs(data_dir)\n else:\n raise ValueError('mode %s not recognized' % mode)\n return data_dir\n\n\nclass ImageDataContainer(DataContainer):\n def add_datum(self, *inds, **datum_dict):\n other_dict = dict([item for item in datum_dict.items() if not item[0].endswith('image')])\n super(ImageDataContainer, self).add_datum(*inds, **other_dict)\n image_dict = dict([item for item in datum_dict.items() if item[0].endswith('image')])\n for image_name, image in image_dict.items():\n if image_name in self.datum_shapes_dict and self.datum_shapes_dict[image_name] != image.shape:\n raise ValueError('unable to add datum %s with shape %s since the shape %s was expected' %\n (image_name, image.shape, self.datum_shapes_dict[image_name]))\n self.datum_shapes_dict[image_name] = image.shape\n image_fname = self._get_image_fname(*(inds + (image_name,)))\n if image.dtype == np.uint8:\n if image.ndim == 3 and image.shape[2] == 3:\n image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)\n else:\n image = 
math_utils.pack_image(image)\n cv2.imwrite(image_fname, image, [int(cv2.IMWRITE_JPEG_QUALITY), 100])\n\n def _get_image_fname(self, *inds_and_name, **kwargs):\n inds, name = inds_and_name[:-1], inds_and_name[-1]\n ext = kwargs.get('ext', '.jpg')\n inds = self._get_canonical_inds(*(inds + (name,)))\n self._check_ind_range(*(inds + (name,)))\n shape = self.get_data_shape(name)\n image_fmt = '%s'\n for dim in shape:\n image_fmt += '_%0{:d}d'.format(len(str(dim-1)))\n image_fmt += ext\n image_fname = image_fmt % ((name,) + inds)\n image_fname = os.path.join(self.data_dir, image_fname)\n return image_fname\n\n def get_datum(self, *inds_and_datum_names):\n inds, datum_names = inds_and_datum_names[:-1], inds_and_datum_names[-1]\n if isinstance(datum_names, str):\n names = list([datum_names])\n else:\n names = list(datum_names)\n other_names = [name for name in names if not name.endswith('image')]\n other_datum = super(ImageDataContainer, self).get_datum(*(inds + (other_names,)))\n image_names = [name for name in names if name.endswith('image')]\n image_datum = []\n for image_name in image_names:\n image_fname = self._get_image_fname(*(inds + (image_name,)))\n if not os.path.isfile(image_fname):\n raise IOError('image file %s does not exist' % image_fname)\n image = cv2.imread(image_fname)\n if not image_name.endswith('depth_image'):\n if image.ndim == 3 and image.shape[2] == 3:\n image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n else:\n\n image = math_utils.unpack_image(image)\n image_datum.append(image)\n # reorder items to follow the order of datum_names\n datum = []\n for datum_name in names:\n if 'image' in datum_name:\n datum.append(image_datum.pop(0))\n else:\n datum.append(other_datum.pop(0))\n if isinstance(datum_names, str):\n datum, = datum\n return datum\n\n\nclass MultiDataContainer(DataContainer):\n \"\"\"\n Light wrapper of multiple data containers to get basic information from a container while ensuring that all\n containers have the same information.\n \"\"\"\n def __init__(self, data_dirs, mode='r'):\n if mode != 'r':\n raise NotImplementedError\n self.containers = [ImageDataContainer(data_dir, mode=mode) for data_dir in data_dirs]\n\n def close(self):\n for container in self.containers:\n container.close()\n\n def add_info(self, **info_dict):\n raise NotImplementedError\n\n def get_info(self, info_names):\n info = None\n for container in self.containers:\n if info is None:\n info = container.get_info(info_names)\n else:\n other_info = container.get_info(info_names)\n try:\n equal = other_info == info\n except ValueError:\n equal = (np.asarray(other_info) == np.asarray(info)).all()\n if not equal:\n raise ValueError('infos are inconsistent across containers: %r, %r' % (info, other_info))\n return info\n\n def reserve(self, names, shape):\n raise NotImplementedError\n\n def add_datum(self, *inds, **datum_dict):\n raise NotImplementedError\n\n def get_datum(self, *inds_and_datum_names):\n raise NotImplementedError\n\n def get_datum_shape(self, name):\n shape = None\n for container in self.containers:\n if shape is None:\n shape = container.get_datum_shape(name)\n else:\n other_shape = container.get_datum_shape(name)\n if other_shape != shape:\n raise ValueError('shapes are inconsistent across containers: %r, %r' % (shape, other_shape))\n return shape\n\n def get_data_shape(self, name):\n raise NotImplementedError\n\n def get_data_size(self, name):\n size = 0\n for container in self.containers:\n size += container.get_data_size(name)\n return size\n"
] | [
[
"numpy.amax",
"numpy.amin",
"numpy.mean",
"matplotlib.gridspec.GridSpec",
"matplotlib.pyplot.ion",
"numpy.diagonal",
"numpy.sum",
"numpy.empty",
"matplotlib.pyplot.figure"
],
[
"numpy.asarray"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
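In the container code of the record above, `_get_datum_ind` flattens multi-dimensional indices into a single row-major offset and `_get_datum_inds` inverts it. As a reading aid only, here is a minimal sketch of that arithmetic checked against NumPy's `ravel_multi_index` / `unravel_index`; the `flat_index` name and the example shape are made up for illustration.

```python
import numpy as np

# Row-major flattening, written out the same way the container does it by hand.
def flat_index(inds, shape):
    idx = 0
    for ind, dim in zip(inds, shape):
        idx = idx * dim + ind
    return idx

shape = (10, 4, 3)
assert flat_index((2, 1, 2), shape) == np.ravel_multi_index((2, 1, 2), shape)
assert np.unravel_index(flat_index((2, 1, 2), shape), shape) == (2, 1, 2)
```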
milokhl/places-2017 | [
"97d4500aacc35e8e55f31918c3fda081d6345c60",
"97d4500aacc35e8e55f31918c3fda081d6345c60",
"97d4500aacc35e8e55f31918c3fda081d6345c60"
] | [
"model/tensorflow/vgg_slim.py",
"model/tensorflow/past_models/vgg16_train.py",
"model/tensorflow/cifar10_tutorial.py"
] | [
"# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Contains model definitions for versions of the Oxford VGG network.\nThese model definitions were introduced in the following technical report:\n Very Deep Convolutional Networks For Large-Scale Image Recognition\n Karen Simonyan and Andrew Zisserman\n arXiv technical report, 2015\n PDF: http://arxiv.org/pdf/1409.1556.pdf\n ILSVRC 2014 Slides: http://www.robots.ox.ac.uk/~karen/pdf/ILSVRC_2014.pdf\n CC-BY-4.0\nMore information can be obtained from the VGG website:\nwww.robots.ox.ac.uk/~vgg/research/very_deep/\nUsage:\n with slim.arg_scope(vgg.vgg_arg_scope()):\n outputs, end_points = vgg.vgg_a(inputs)\n with slim.arg_scope(vgg.vgg_arg_scope()):\n outputs, end_points = vgg.vgg_16(inputs)\n@@vgg_a\n@@vgg_16\n@@vgg_19\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom tensorflow.contrib import layers\nfrom tensorflow.contrib.framework.python.ops import arg_scope\nfrom tensorflow.contrib.layers.python.layers import layers as layers_lib\nfrom tensorflow.contrib.layers.python.layers import regularizers\nfrom tensorflow.contrib.layers.python.layers import utils\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import init_ops\nfrom tensorflow.python.ops import nn_ops\nfrom tensorflow.python.ops import variable_scope\n\n\ndef vgg_arg_scope(weight_decay=0.0005):\n \"\"\"Defines the VGG arg scope.\n Args:\n weight_decay: The l2 regularization coefficient.\n Returns:\n An arg_scope.\n \"\"\"\n with arg_scope(\n [layers.conv2d, layers_lib.fully_connected],\n activation_fn=nn_ops.relu,\n weights_regularizer=regularizers.l2_regularizer(weight_decay),\n biases_initializer=init_ops.zeros_initializer()):\n with arg_scope([layers.conv2d], padding='SAME') as arg_sc:\n return arg_sc\n\n\ndef vgg_a(inputs,\n num_classes=1000,\n is_training=True,\n dropout_keep_prob=0.5,\n spatial_squeeze=True,\n scope='vgg_a'):\n \"\"\"Oxford Net VGG 11-Layers version A Example.\n Note: All the fully_connected layers have been transformed to conv2d layers.\n To use in classification mode, resize input to 224x224.\n Args:\n inputs: a tensor of size [batch_size, height, width, channels].\n num_classes: number of predicted classes.\n is_training: whether or not the model is being trained.\n dropout_keep_prob: the probability that activations are kept in the dropout\n layers during training.\n spatial_squeeze: whether or not should squeeze the spatial dimensions of the\n outputs. 
Useful to remove unnecessary dimensions for classification.\n scope: Optional scope for the variables.\n Returns:\n the last op containing the log predictions and end_points dict.\n \"\"\"\n with variable_scope.variable_scope(scope, 'vgg_a', [inputs]) as sc:\n end_points_collection = sc.original_name_scope + '_end_points'\n # Collect outputs for conv2d, fully_connected and max_pool2d.\n with arg_scope(\n [layers.conv2d, layers_lib.max_pool2d],\n outputs_collections=end_points_collection):\n net = layers_lib.repeat(\n inputs, 1, layers.conv2d, 64, [3, 3], scope='conv1')\n net = layers_lib.max_pool2d(net, [2, 2], scope='pool1')\n net = layers_lib.repeat(net, 1, layers.conv2d, 128, [3, 3], scope='conv2')\n net = layers_lib.max_pool2d(net, [2, 2], scope='pool2')\n net = layers_lib.repeat(net, 2, layers.conv2d, 256, [3, 3], scope='conv3')\n net = layers_lib.max_pool2d(net, [2, 2], scope='pool3')\n net = layers_lib.repeat(net, 2, layers.conv2d, 512, [3, 3], scope='conv4')\n net = layers_lib.max_pool2d(net, [2, 2], scope='pool4')\n net = layers_lib.repeat(net, 2, layers.conv2d, 512, [3, 3], scope='conv5')\n net = layers_lib.max_pool2d(net, [2, 2], scope='pool5')\n # Use conv2d instead of fully_connected layers.\n net = layers.conv2d(net, 4096, [7, 7], padding='VALID', scope='fc6')\n net = layers_lib.dropout(\n net, dropout_keep_prob, is_training=is_training, scope='dropout6')\n net = layers.conv2d(net, 4096, [1, 1], scope='fc7')\n net = layers_lib.dropout(\n net, dropout_keep_prob, is_training=is_training, scope='dropout7')\n net = layers.conv2d(\n net,\n num_classes, [1, 1],\n activation_fn=None,\n normalizer_fn=None,\n scope='fc8')\n # Convert end_points_collection into a end_point dict.\n end_points = utils.convert_collection_to_dict(end_points_collection)\n if spatial_squeeze:\n net = array_ops.squeeze(net, [1, 2], name='fc8/squeezed')\n end_points[sc.name + '/fc8'] = net\n return net, end_points\n\n\nvgg_a.default_image_size = 224\n\n\ndef vgg_16(inputs,\n num_classes=1000,\n is_training=True,\n dropout_keep_prob=0.5,\n spatial_squeeze=True,\n scope='vgg_16'):\n \"\"\"Oxford Net VGG 16-Layers version D Example.\n Note: All the fully_connected layers have been transformed to conv2d layers.\n To use in classification mode, resize input to 224x224.\n Args:\n inputs: a tensor of size [batch_size, height, width, channels].\n num_classes: number of predicted classes.\n is_training: whether or not the model is being trained.\n dropout_keep_prob: the probability that activations are kept in the dropout\n layers during training.\n spatial_squeeze: whether or not should squeeze the spatial dimensions of the\n outputs. 
Useful to remove unnecessary dimensions for classification.\n scope: Optional scope for the variables.\n Returns:\n the last op containing the log predictions and end_points dict.\n \"\"\"\n with variable_scope.variable_scope(scope, 'vgg_16', [inputs]) as sc:\n end_points_collection = sc.original_name_scope + '_end_points'\n # Collect outputs for conv2d, fully_connected and max_pool2d.\n with arg_scope(\n [layers.conv2d, layers_lib.fully_connected, layers_lib.max_pool2d],\n outputs_collections=end_points_collection):\n net = layers_lib.repeat(inputs, 2, layers.conv2d, 64, (3, 3), scope='conv1')\n net = layers_lib.max_pool2d(net, [2, 2], scope='pool1')\n net = layers_lib.repeat(net, 2, layers.conv2d, 128, [3, 3], scope='conv2')\n net = layers_lib.max_pool2d(net, [2, 2], scope='pool2')\n net = layers_lib.repeat(net, 3, layers.conv2d, 256, [3, 3], scope='conv3')\n net = layers_lib.max_pool2d(net, [2, 2], scope='pool3')\n net = layers_lib.repeat(net, 3, layers.conv2d, 512, [3, 3], scope='conv4')\n net = layers_lib.max_pool2d(net, [2, 2], scope='pool4')\n net = layers_lib.repeat(net, 3, layers.conv2d, 512, [3, 3], scope='conv5')\n net = layers_lib.max_pool2d(net, [2, 2], scope='pool5')\n # Use conv2d instead of fully_connected layers.\n net = layers.conv2d(net, 4096, [7, 7], padding='VALID', scope='fc6')\n net = layers_lib.dropout(\n net, dropout_keep_prob, is_training=is_training, scope='dropout6')\n net = layers.conv2d(net, 4096, [1, 1], scope='fc7')\n net = layers_lib.dropout(\n net, dropout_keep_prob, is_training=is_training, scope='dropout7')\n net = layers.conv2d(\n net,\n num_classes, [1, 1],\n activation_fn=None,\n normalizer_fn=None,\n scope='fc8')\n # Convert end_points_collection into a end_point dict.\n end_points = utils.convert_collection_to_dict(end_points_collection)\n if spatial_squeeze:\n net = array_ops.squeeze(net, [1, 2], name='fc8/squeezed')\n end_points[sc.name + '/fc8'] = net\n return net, end_points\n\n\nvgg_16.default_image_size = 224\n\n\ndef vgg_19(inputs,\n num_classes=1000,\n is_training=True,\n dropout_keep_prob=0.5,\n spatial_squeeze=True,\n scope='vgg_19'):\n \"\"\"Oxford Net VGG 19-Layers version E Example.\n Note: All the fully_connected layers have been transformed to conv2d layers.\n To use in classification mode, resize input to 224x224.\n Args:\n inputs: a tensor of size [batch_size, height, width, channels].\n num_classes: number of predicted classes.\n is_training: whether or not the model is being trained.\n dropout_keep_prob: the probability that activations are kept in the dropout\n layers during training.\n spatial_squeeze: whether or not should squeeze the spatial dimensions of the\n outputs. 
Useful to remove unnecessary dimensions for classification.\n scope: Optional scope for the variables.\n Returns:\n the last op containing the log predictions and end_points dict.\n \"\"\"\n with variable_scope.variable_scope(scope, 'vgg_19', [inputs]) as sc:\n end_points_collection = sc.name + '_end_points'\n # Collect outputs for conv2d, fully_connected and max_pool2d.\n with arg_scope(\n [layers.conv2d, layers_lib.fully_connected, layers_lib.max_pool2d],\n outputs_collections=end_points_collection):\n net = layers_lib.repeat(inputs, 2, layers.conv2d, 64, [3, 3], scope='conv1')\n net = layers_lib.max_pool2d(net, [2, 2], scope='pool1')\n net = layers_lib.repeat(net, 2, layers.conv2d, 128, [3, 3], scope='conv2')\n net = layers_lib.max_pool2d(net, [2, 2], scope='pool2')\n net = layers_lib.repeat(net, 4, layers.conv2d, 256, [3, 3], scope='conv3')\n net = layers_lib.max_pool2d(net, [2, 2], scope='pool3')\n net = layers_lib.repeat(net, 4, layers.conv2d, 512, [3, 3], scope='conv4')\n net = layers_lib.max_pool2d(net, [2, 2], scope='pool4')\n net = layers_lib.repeat(net, 4, layers.conv2d, 512, [3, 3], scope='conv5')\n net = layers_lib.max_pool2d(net, [2, 2], scope='pool5')\n # Use conv2d instead of fully_connected layers.\n net = layers.conv2d(net, 4096, [7, 7], padding='VALID', scope='fc6')\n net = layers_lib.dropout(\n net, dropout_keep_prob, is_training=is_training, scope='dropout6')\n net = layers.conv2d(net, 4096, [1, 1], scope='fc7')\n net = layers_lib.dropout(\n net, dropout_keep_prob, is_training=is_training, scope='dropout7')\n net = layers.conv2d(\n net,\n num_classes, [1, 1],\n activation_fn=None,\n normalizer_fn=None,\n scope='fc8')\n # Convert end_points_collection into a end_point dict.\n end_points = utils.convert_collection_to_dict(end_points_collection)\n if spatial_squeeze:\n net = array_ops.squeeze(net, [1, 2], name='fc8/squeezed')\n end_points[sc.name + '/fc8'] = net\n return net, end_points\n\n\nvgg_19.default_image_size = 224\n\n# Alias\nvgg_d = vgg_16\nvgg_e = vgg_19",
"import os, datetime\nimport numpy as np\nimport tensorflow as tf\nfrom DataLoader import *\n\n# Dataset Parameters\nbatch_size = 200\nload_size = 256\nfine_size = 224\nc = 3\ndata_mean = np.asarray([0.45834960097,0.44674252445,0.41352266842])\n\n# Training Parameters\nlearning_rate = 0.001\ndropout = 0.5 # Dropout, probability to keep units\ntraining_iters = 100000\nstep_display = 50\nstep_save = 10000\npath_save = 'alexnet'\nstart_from = ''\n\nimport inspect\nimport os\n\nimport numpy as np\nimport tensorflow as tf\nimport time\n\nVGG_MEAN = [103.939, 116.779, 123.68]\n\nclass VGG16:\n def __init__(self, vgg16_npy_path=None):\n if vgg16_npy_path is None:\n path = inspect.getfile(Vgg16)\n path = os.path.abspath(os.path.join(path, os.pardir))\n path = os.path.join(path, \"vgg16.npy\")\n vgg16_npy_path = path\n print(path)\n\n self.data_dict = np.load(vgg16_npy_path, encoding='latin1').item()\n\n # Change output layer to have 100 classes instead of 1000.\n modified_fc8_weights = self.data_dict['fc8'][0][:,:100]\n modified_fc8_biases = self.data_dict['fc8'][1][0:100]\n self.data_dict['fc8'][0] = modified_fc8_weights\n self.data_dict['fc8'][1] = modified_fc8_biases\n print('weights:', self.data_dict['fc8'][0].shape)\n print('biases:', self.data_dict['fc8'][1].shape)\n\n print(\"Loaded in weights from .npy file.\")\n\n def build(self, rgb):\n \"\"\"\n load variable from npy to build the VGG\n :param rgb: rgb image [batch, height, width, 3] values scaled [0, 1]\n \"\"\"\n start_time = time.time()\n print(\"Started building model...\")\n rgb_scaled = rgb * 255.0\n\n # Convert RGB to BGR\n red, green, blue = tf.split(axis=3, num_or_size_splits=3, value=rgb_scaled)\n assert red.get_shape().as_list()[1:] == [224, 224, 1]\n assert green.get_shape().as_list()[1:] == [224, 224, 1]\n assert blue.get_shape().as_list()[1:] == [224, 224, 1]\n bgr = tf.concat(axis=3, values=[\n blue - VGG_MEAN[0],\n green - VGG_MEAN[1],\n red - VGG_MEAN[2],\n ])\n assert bgr.get_shape().as_list()[1:] == [224, 224, 3]\n\n self.conv1_1 = self.conv_layer(bgr, \"conv1_1\")\n self.conv1_2 = self.conv_layer(self.conv1_1, \"conv1_2\")\n self.pool1 = self.max_pool(self.conv1_2, 'pool1')\n\n self.conv2_1 = self.conv_layer(self.pool1, \"conv2_1\")\n self.conv2_2 = self.conv_layer(self.conv2_1, \"conv2_2\")\n self.pool2 = self.max_pool(self.conv2_2, 'pool2')\n\n self.conv3_1 = self.conv_layer(self.pool2, \"conv3_1\")\n self.conv3_2 = self.conv_layer(self.conv3_1, \"conv3_2\")\n self.conv3_3 = self.conv_layer(self.conv3_2, \"conv3_3\")\n self.pool3 = self.max_pool(self.conv3_3, 'pool3')\n\n self.conv4_1 = self.conv_layer(self.pool3, \"conv4_1\")\n self.conv4_2 = self.conv_layer(self.conv4_1, \"conv4_2\")\n self.conv4_3 = self.conv_layer(self.conv4_2, \"conv4_3\")\n self.pool4 = self.max_pool(self.conv4_3, 'pool4')\n\n self.conv5_1 = self.conv_layer(self.pool4, \"conv5_1\")\n self.conv5_2 = self.conv_layer(self.conv5_1, \"conv5_2\")\n self.conv5_3 = self.conv_layer(self.conv5_2, \"conv5_3\")\n self.pool5 = self.max_pool(self.conv5_3, 'pool5')\n\n self.fc6 = self.fc_layer(self.pool5, \"fc6\")\n assert self.fc6.get_shape().as_list()[1:] == [4096]\n self.relu6 = tf.nn.relu(self.fc6)\n\n self.fc7 = self.fc_layer(self.relu6, \"fc7\")\n self.relu7 = tf.nn.relu(self.fc7)\n\n self.fc8 = self.fc_layer(self.relu7, \"fc8\")\n self.prob = tf.nn.softmax(self.fc8, name=\"prob\")\n print((\"build model finished: %ds\" % (time.time() - start_time))) # self.data_dict = None\n return self.prob\n \n def forward(self, rgb):\n \"\"\" Feed inputs through 
the network. \"\"\"\n return self.build(rgb)\n\n def avg_pool(self, bottom, name):\n return tf.nn.avg_pool(bottom, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME', name=name)\n\n def max_pool(self, bottom, name):\n return tf.nn.max_pool(bottom, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME', name=name)\n\n def conv_layer(self, bottom, name):\n with tf.variable_scope(name):\n filt = self.get_conv_filter(name)\n conv = tf.nn.conv2d(bottom, filt, [1, 1, 1, 1], padding='SAME')\n conv_biases = self.get_bias(name)\n bias = tf.nn.bias_add(conv, conv_biases)\n relu = tf.nn.relu(bias)\n return relu\n\n def fc_layer(self, bottom, name):\n with tf.variable_scope(name):\n shape = bottom.get_shape().as_list()\n dim = 1\n for d in shape[1:]:\n dim *= d\n x = tf.reshape(bottom, [-1, dim])\n\n weights = self.get_fc_weight(name)\n biases = self.get_bias(name)\n\n # Fully connected layer. Note that the '+' operation automatically\n # broadcasts the biases.\n fc = tf.nn.bias_add(tf.matmul(x, weights), biases)\n return fc\n\n def get_conv_filter(self, name, trainable=True):\n if trainable:\n return tf.Variable(self.data_dict[name][0], name=\"filter_\" + name)\n else:\n return tf.constant(self.data_dict[name][0], name=\"filter_\" + name)\n\n def get_bias(self, name, trainable=True):\n if trainable:\n return tf.Variable(self.data_dict[name][1], name=\"biases_\" + name)\n else:\n return tf.constant(self.data_dict[name][1], name=\"biases_\" + name)\n\n def get_fc_weight(self, name, trainable=True):\n if trainable:\n return tf.Variable(self.data_dict[name][0], name=\"weights_\" + name)\n else:\n return tf.constant(self.data_dict[name][0], name=\"weights_\" + name)\n\n# Construct dataloader\nopt_data_train = {\n #'data_h5': 'miniplaces_256_train.h5',\n 'data_root': '../../data/images/', # MODIFY PATH ACCORDINGLY\n 'data_list': '../../data/train.txt', # MODIFY PATH ACCORDINGLY\n 'load_size': load_size,\n 'fine_size': fine_size,\n 'data_mean': data_mean,\n 'randomize': True\n }\nopt_data_val = {\n #'data_h5': 'miniplaces_256_val.h5',\n 'data_root': '../../data/images/', # MODIFY PATH ACCORDINGLY\n 'data_list': '../../data/val.txt', # MODIFY PATH ACCORDINGLY\n 'load_size': load_size,\n 'fine_size': fine_size,\n 'data_mean': data_mean,\n 'randomize': False\n }\n\nloader_train = DataLoaderDisk(**opt_data_train)\nloader_val = DataLoaderDisk(**opt_data_val)\n#loader_train = DataLoaderH5(**opt_data_train)\n#loader_val = DataLoaderH5(**opt_data_val)\n\n# tf Graph input\nx = tf.placeholder(tf.float32, [None, fine_size, fine_size, c])\ny = tf.placeholder(tf.int64, None)\nkeep_dropout = tf.placeholder(tf.float32)\n\n# Construct model\nvgg = VGG16(vgg16_npy_path='./vgg16.npy')\nlogits = vgg.forward(x) # returns the output layer of the network\nprint('logits:', logits)\n\n# Define loss and optimizer\nloss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(labels=y, logits=logits))\ntrain_optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(loss)\n\n# Evaluate model\naccuracy1 = tf.reduce_mean(tf.cast(tf.nn.in_top_k(logits, y, 1), tf.float32))\naccuracy5 = tf.reduce_mean(tf.cast(tf.nn.in_top_k(logits, y, 5), tf.float32))\n\n# define initialization\ninit = tf.global_variables_initializer()\n\n# define saver\nsaver = tf.train.Saver()\n\n# define summary writer\n#writer = tf.train.SummaryWriter('.', graph=tf.get_default_graph())\n\n# Launch the graph\nwith tf.Session() as sess:\n # Initialization\n if len(start_from)>1:\n saver.restore(sess, start_from)\n else:\n sess.run(init)\n 
\n step = 0\n\n while step < training_iters:\n # Load a batch of training data\n images_batch, labels_batch = loader_train.next_batch(batch_size)\n \n if step % step_display == 0:\n print('[%s]:' %(datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")))\n\n # Calculate batch loss and accuracy on training set\n l, acc1, acc5 = sess.run([loss, accuracy1, accuracy5], feed_dict={x: images_batch, y: labels_batch, keep_dropout: 1.}) \n print(\"-Iter \" + str(step) + \", Training Loss= \" + \\\n \"{:.4f}\".format(l) + \", Accuracy Top1 = \" + \\\n \"{:.2f}\".format(acc1) + \", Top5 = \" + \\\n \"{:.2f}\".format(acc5))\n\n # Calculate batch loss and accuracy on validation set\n images_batch_val, labels_batch_val = loader_val.next_batch(batch_size) \n l, acc1, acc5 = sess.run([loss, accuracy1, accuracy5], feed_dict={x: images_batch_val, y: labels_batch_val, keep_dropout: 1.}) \n print(\"-Iter \" + str(step) + \", Validation Loss= \" + \\\n \"{:.4f}\".format(l) + \", Accuracy Top1 = \" + \\\n \"{:.2f}\".format(acc1) + \", Top5 = \" + \\\n \"{:.2f}\".format(acc5))\n \n # Run optimization op (backprop)\n sess.run(train_optimizer, feed_dict={x: images_batch, y: labels_batch, keep_dropout: dropout})\n \n step += 1\n \n # Save model\n if step % step_save == 0:\n saver.save(sess, path_save, global_step=step)\n print(\"Model saved at Iter %d !\" %(step))\n \n print(\"Optimization Finished!\")\n\n # Evaluate on the whole validation set\n print('Evaluation on the whole validation set...')\n num_batch = loader_val.size()/batch_size\n acc1_total = 0.\n acc5_total = 0.\n loader_val.reset()\n for i in range(num_batch):\n images_batch, labels_batch = loader_val.next_batch(batch_size) \n acc1, acc5 = sess.run([accuracy1, accuracy5], feed_dict={x: images_batch, y: labels_batch, keep_dropout: 1.})\n acc1_total += acc1\n acc5_total += acc5\n print(\"Validation Accuracy Top1 = \" + \\\n \"{:.2f}\".format(acc1) + \", Top5 = \" + \\\n \"{:.2f}\".format(acc5))\n\n acc1_total /= num_batch\n acc5_total /= num_batch\n print('Evaluation Finished! Accuracy Top1 = ' + \"{:.4f}\".format(acc1_total) + \", Top5 = \" + \"{:.4f}\".format(acc5_total))\n",
"# -*- coding: utf-8 -*-\n\"\"\"\nTraining a classifier\n=====================\n\nThis is it. You have seen how to define neural networks, compute loss and make\nupdates to the weights of the network.\n\nNow you might be thinking,\n\nWhat about data?\n----------------\n\nGenerally, when you have to deal with image, text, audio or video data,\nyou can use standard python packages that load data into a numpy array.\nThen you can convert this array into a ``torch.*Tensor``.\n\n- For images, packages such as Pillow, OpenCV are useful.\n- For audio, packages such as scipy and librosa\n- For text, either raw Python or Cython based loading, or NLTK and\n SpaCy are useful.\n\nSpecifically for ``vision``, we have created a package called\n``torchvision``, that has data loaders for common datasets such as\nImagenet, CIFAR10, MNIST, etc. and data transformers for images, viz.,\n``torchvision.datasets`` and ``torch.utils.data.DataLoader``.\n\nThis provides a huge convenience and avoids writing boilerplate code.\n\nFor this tutorial, we will use the CIFAR10 dataset.\nIt has the classes: ‘airplane’, ‘automobile’, ‘bird’, ‘cat’, ‘deer’,\n‘dog’, ‘frog’, ‘horse’, ‘ship’, ‘truck’. The images in CIFAR-10 are of\nsize 3x32x32, i.e. 3-channel color images of 32x32 pixels in size.\n\n.. figure:: /_static/img/cifar10.png\n :alt: cifar10\n\n cifar10\n\n\nTraining an image classifier\n----------------------------\n\nWe will do the following steps in order:\n\n1. Load and normalizing the CIFAR10 training and test datasets using\n ``torchvision``\n2. Define a Convolution Neural Network\n3. Define a loss function\n4. Train the network on the training data\n5. Test the network on the test data\n\n1. Loading and normalizing CIFAR10\n^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n\nUsing ``torchvision``, it’s extremely easy to load CIFAR10.\n\"\"\"\nimport torch\nimport torchvision\nimport torchvision.transforms as transforms\n\n########################################################################\n# The output of torchvision datasets are PILImage images of range [0, 1].\n# We transform them to Tensors of normalized range [-1, 1]\n\ntransform = transforms.Compose(\n [transforms.ToTensor(),\n transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])\n\ntrainset = torchvision.datasets.CIFAR10(root='./data', train=True,\n download=True, transform=transform)\ntrainloader = torch.utils.data.DataLoader(trainset, batch_size=4,\n shuffle=True, num_workers=2)\n\ntestset = torchvision.datasets.CIFAR10(root='./data', train=False,\n download=True, transform=transform)\ntestloader = torch.utils.data.DataLoader(testset, batch_size=4,\n shuffle=False, num_workers=2)\n\nclasses = ('plane', 'car', 'bird', 'cat',\n 'deer', 'dog', 'frog', 'horse', 'ship', 'truck')\n\n########################################################################\n# Let us show some of the training images, for fun.\n\nimport matplotlib.pyplot as plt\nimport numpy as np\n\n# functions to show an image\n\n\ndef imshow(img):\n img = img / 2 + 0.5 # unnormalize\n npimg = img.numpy()\n plt.imshow(np.transpose(npimg, (1, 2, 0)))\n\n\n# get some random training images\ndataiter = iter(trainloader)\nimages, labels = dataiter.next()\n\n# show images\nimshow(torchvision.utils.make_grid(images))\n# print labels\nprint(' '.join('%5s' % classes[labels[j]] for j in range(4)))\n\n\n########################################################################\n# 2. 
Define a Convolution Neural Network\n# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n# Copy the neural network from the Neural Networks section before and modify it to\n# take 3-channel images (instead of 1-channel images as it was defined).\n\nfrom torch.autograd import Variable\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n\nclass Net(nn.Module):\n def __init__(self):\n super(Net, self).__init__()\n self.conv1 = nn.Conv2d(3, 6, 5)\n self.pool = nn.MaxPool2d(2, 2)\n self.conv2 = nn.Conv2d(6, 16, 5)\n self.fc1 = nn.Linear(16 * 5 * 5, 120)\n self.fc2 = nn.Linear(120, 84)\n self.fc3 = nn.Linear(84, 10)\n\n def forward(self, x):\n x = self.pool(F.relu(self.conv1(x)))\n x = self.pool(F.relu(self.conv2(x)))\n x = x.view(-1, 16 * 5 * 5)\n x = F.relu(self.fc1(x))\n x = F.relu(self.fc2(x))\n x = self.fc3(x)\n return x\n\n\nnet = Net()\n\n########################################################################\n# 3. Define a Loss function and optimizer\n# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n# Let's use a Classification Cross-Entropy loss and SGD with momentum\n\nimport torch.optim as optim\n\ncriterion = nn.CrossEntropyLoss()\noptimizer = optim.SGD(net.parameters(), lr=0.001, momentum=0.9)\n\n########################################################################\n# 4. Train the network\n# ^^^^^^^^^^^^^^^^^^^^\n#\n# This is when things start to get interesting.\n# We simply have to loop over our data iterator, and feed the inputs to the\n# network and optimize\n\nfor epoch in range(2): # loop over the dataset multiple times\n\n running_loss = 0.0\n for i, data in enumerate(trainloader, 0):\n # get the inputs\n inputs, labels = data\n\n print(type(inputs), type(labels))\n print(inputs.size(), labels.size())\n\n # wrap them in Variable\n inputs, labels = Variable(inputs), Variable(labels)\n\n # zero the parameter gradients\n optimizer.zero_grad()\n\n # forward + backward + optimize\n outputs = net(inputs)\n loss = criterion(outputs, labels)\n loss.backward()\n optimizer.step()\n\n # print statistics\n running_loss += loss.data[0]\n if i % 2000 == 1999: # print every 2000 mini-batches\n print('[%d, %5d] loss: %.3f' %\n (epoch + 1, i + 1, running_loss / 2000))\n running_loss = 0.0\n\nprint('Finished Training')\n\n########################################################################\n# 5. Test the network on the test data\n# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n#\n# We have trained the network for 2 passes over the training dataset.\n# But we need to check if the network has learnt anything at all.\n#\n# We will check this by predicting the class label that the neural network\n# outputs, and checking it against the ground-truth. If the prediction is\n# correct, we add the sample to the list of correct predictions.\n#\n# Okay, first step. 
Let us display an image from the test set to get familiar.\n\ndataiter = iter(testloader)\nimages, labels = dataiter.next()\n\n# print images\nimshow(torchvision.utils.make_grid(images))\nprint('GroundTruth: ', ' '.join('%5s' % classes[labels[j]] for j in range(4)))\n\n########################################################################\n# Okay, now let us see what the neural network thinks these examples above are:\n\noutputs = net(Variable(images))\n\n########################################################################\n# The outputs are energies for the 10 classes.\n# Higher the energy for a class, the more the network\n# thinks that the image is of the particular class.\n# So, let's get the index of the highest energy:\n_, predicted = torch.max(outputs.data, 1)\n\nprint('Predicted: ', ' '.join('%5s' % classes[predicted[j]]\n for j in range(4)))\n\n########################################################################\n# The results seem pretty good.\n#\n# Let us look at how the network performs on the whole dataset.\n\ncorrect = 0\ntotal = 0\nfor data in testloader:\n images, labels = data\n outputs = net(Variable(images))\n _, predicted = torch.max(outputs.data, 1)\n total += labels.size(0)\n correct += (predicted == labels).sum()\n\nprint('Accuracy of the network on the 10000 test images: %d %%' % (\n 100 * correct / total))\n\n########################################################################\n# That looks waaay better than chance, which is 10% accuracy (randomly picking\n# a class out of 10 classes).\n# Seems like the network learnt something.\n#\n# Hmmm, what are the classes that performed well, and the classes that did\n# not perform well:\n\nclass_correct = list(0. for i in range(10))\nclass_total = list(0. for i in range(10))\nfor data in testloader:\n images, labels = data\n outputs = net(Variable(images))\n _, predicted = torch.max(outputs.data, 1)\n c = (predicted == labels).squeeze()\n for i in range(4):\n label = labels[i]\n class_correct[label] += c[i]\n class_total[label] += 1\n\n\nfor i in range(10):\n print('Accuracy of %5s : %2d %%' % (\n classes[i], 100 * class_correct[i] / class_total[i]))\n\n########################################################################\n# Okay, so what next?\n#\n# How do we run these neural networks on the GPU?\n#\n# Training on GPU\n# ----------------\n# Just like how you transfer a Tensor on to the GPU, you transfer the neural\n# net onto the GPU.\n# This will recursively go over all modules and convert their parameters and\n# buffers to CUDA tensors:\n#\n# .. code:: python\n#\n# net.cuda()\n#\n#\n# Remember that you will have to send the inputs and targets at every step\n# to the GPU too:\n#\n# ::\n#\n# inputs, labels = Variable(inputs.cuda()), Variable(labels.cuda())\n#\n# Why dont I notice MASSIVE speedup compared to CPU? 
Because your network\n# is realllly small.\n#\n# **Exercise:** Try increasing the width of your network (argument 2 of\n# the first ``nn.Conv2d``, and argument 1 of the second ``nn.Conv2d`` –\n# they need to be the same number), see what kind of speedup you get.\n#\n# **Goals achieved**:\n#\n# - Understanding PyTorch's Tensor library and neural networks at a high level.\n# - Train a small neural network to classify images\n#\n# Where do I go next?\n# -------------------\n#\n# - :doc:`Train neural nets to play video games </intermediate/reinforcement_q_learning>`\n# - `Train a state-of-the-art ResNet network on imagenet`_\n# - `Train an face generator using Generative Adversarial Networks`_\n# - `Train a word-level language model using Recurrent LSTM networks`_\n# - `More examples`_\n# - `More tutorials`_\n# - `Discuss PyTorch on the Forums`_\n# - `Chat with other users on Slack`_\n#\n# .. _Train a state-of-the-art ResNet network on imagenet: https://github.com/pytorch/examples/tree/master/imagenet\n# .. _Train an face generator using Generative Adversarial Networks: https://github.com/pytorch/examples/tree/master/dcgan\n# .. _Train a word-level language model using Recurrent LSTM networks: https://github.com/pytorch/examples/tree/master/word_language_model\n# .. _More examples: https://github.com/pytorch/examples\n# .. _More tutorials: https://github.com/pytorch/tutorials\n# .. _Discuss PyTorch on the Forums: https://discuss.pytorch.org/\n# .. _Chat with other users on Slack: http://pytorch.slack.com/messages/beginner/\n"
] | [
[
"tensorflow.contrib.layers.python.layers.layers.max_pool2d",
"tensorflow.contrib.layers.python.layers.regularizers.l2_regularizer",
"tensorflow.contrib.layers.python.layers.layers.dropout",
"tensorflow.python.ops.init_ops.zeros_initializer",
"tensorflow.contrib.layers.python.layers.layers.repeat",
"tensorflow.python.ops.array_ops.squeeze",
"tensorflow.contrib.layers.conv2d",
"tensorflow.python.ops.variable_scope.variable_scope",
"tensorflow.contrib.layers.python.layers.utils.convert_collection_to_dict",
"tensorflow.contrib.framework.python.ops.arg_scope"
],
[
"tensorflow.concat",
"numpy.asarray",
"tensorflow.nn.max_pool",
"tensorflow.train.AdamOptimizer",
"tensorflow.nn.conv2d",
"tensorflow.Variable",
"tensorflow.Session",
"tensorflow.train.Saver",
"tensorflow.nn.in_top_k",
"numpy.load",
"tensorflow.matmul",
"tensorflow.placeholder",
"tensorflow.global_variables_initializer",
"tensorflow.nn.sparse_softmax_cross_entropy_with_logits",
"tensorflow.nn.avg_pool",
"tensorflow.split",
"tensorflow.nn.bias_add",
"tensorflow.nn.relu",
"tensorflow.nn.softmax",
"tensorflow.constant",
"tensorflow.reshape",
"tensorflow.variable_scope"
],
[
"torch.nn.CrossEntropyLoss",
"torch.max",
"torch.nn.Conv2d",
"torch.utils.data.DataLoader",
"torch.nn.MaxPool2d",
"torch.nn.Linear",
"numpy.transpose",
"torch.autograd.Variable"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.13",
"1.10",
"1.12"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
ayueaa/Some-Spiders | [
"4cf085e55eab822c08d06b62099d1c235d1840ae"
] | [
"lianjia_chengjiao(可做模板)/pandans合并数据库表重写入.py"
] | [
"import pymongo\r\nimport pandas as pd\r\n\r\n#连接到数据库\r\n#连接到数据库\r\nclient = pymongo.MongoClient(\"localhost\",27017)\r\nlianjia = client[\"ershoufang\"]\r\ninfo = lianjia[\"lianjia_solded\"]\r\nlocation = lianjia['locations']\r\nnew_info = lianjia['cd_lianjia_solded_total_2']\r\n\r\n#将数据表1(包含原始10w+房源信息)转化为DataFrame\r\ndata1 = pd.DataFrame(list(info.find()))\r\nprint(data1.head())\r\n#将数据表2(包含7k+小区经纬度信息)转化为DataFrame\r\ndata2 = pd.DataFrame(list(location.find()))\r\nprint(data2.head())\r\n#多表查询,以house_name为共同键,向表一合并,与mysql的查询功能类似,得到合并后的DataFrame\r\nresult =pd.merge(data1,data2,left_on=\"village_name\", right_on='house_name', how=\"left\").drop(['_id_x','_id_y'],axis=\"columns\")\r\n#衔接上面代码,用于插入数据库,遍历插入的,不知道有没有简单的办法啊~\r\nfor i in range(len(result)):\r\n s = result.loc[i]\r\n#这里加了str()函数是无奈之举,DataFrame中的专有float64等数字格式使MongoDB无法识别,写入会报错,暂时先全部转换为字符串格式写入吧\r\n dic = {index:str(s[index]) for index in s.index}\r\n new_info.insert_one(dic)\r\n print(dic)"
] | [
[
"pandas.merge"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
barentsen/photutils | [
"57cbe18c8c1b8b08c93daa3d5c8dd74c10c3daae"
] | [
"photutils/utils/tests/test_cutouts.py"
] | [
"# Licensed under a 3-clause BSD style license - see LICENSE.rst\nfrom __future__ import (absolute_import, division, print_function,\n unicode_literals)\n\nimport numpy as np\nfrom numpy.testing import assert_allclose\nfrom astropy.tests.helper import pytest\n\nfrom ..cutouts import cutout_footprint\n\n\nXCS = [25.7]\nYCS = [26.2]\nXSTDDEVS = [3.2, 4.0]\nYSTDDEVS = [5.7, 4.1]\nTHETAS = np.array([30., 45.]) * np.pi / 180.\nDATA = np.zeros((3, 3))\nDATA[0:2, 1] = 1.\nDATA[1, 0:2] = 1.\nDATA[1, 1] = 2.\n\n\nclass TestCutoutFootprint(object):\n def test_dataonly(self):\n data = np.ones((5, 5))\n position = (2, 2)\n result1 = cutout_footprint(data, position, 3)\n result2 = cutout_footprint(data, position, footprint=np.ones((3, 3)))\n assert_allclose(result1[:-2], result2[:-2])\n assert result1[-2] is None\n assert result2[-2] is None\n assert result1[-1] == result2[-1]\n\n def test_mask_error(self):\n data = error = np.ones((5, 5))\n mask = np.zeros_like(data, dtype=bool)\n position = (2, 2)\n box_size1 = 3\n box_size2 = (3, 3)\n footprint = np.ones((3, 3))\n result1 = cutout_footprint(data, position, box_size1, mask=mask,\n error=error)\n result2 = cutout_footprint(data, position, box_size2, mask=mask,\n error=error)\n result3 = cutout_footprint(data, position, box_size1,\n footprint=footprint, mask=mask,\n error=error)\n assert_allclose(result1[:-1], result2[:-1])\n assert_allclose(result1[:-1], result3[:-1])\n assert result1[-1] == result2[-1]\n\n def test_position_len(self):\n with pytest.raises(ValueError):\n cutout_footprint(np.ones((3, 3)), [1])\n\n def test_nofootprint(self):\n with pytest.raises(ValueError):\n cutout_footprint(np.ones((3, 3)), (1, 1), box_size=None,\n footprint=None)\n\n def test_wrongboxsize(self):\n with pytest.raises(ValueError):\n cutout_footprint(np.ones((3, 3)), (1, 1), box_size=(1, 2, 3))\n"
] | [
[
"numpy.ones",
"numpy.zeros_like",
"numpy.testing.assert_allclose",
"numpy.array",
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
jenniferxsj/CS5001_Project | [
"64e1ecec65a431e66aa83751453bba71fcc33b7c"
] | [
"Monitor_temp_hum.py"
] | [
"#Final Project for CS 5001\n#Brian Meyer\n#Shujun Xiao\n#Xiaoliang Xu\n\nimport Adafruit_DHT as DHT\nimport json\nimport time\nimport psutil\nimport twilio\nfrom twilio.rest import Client\nimport matplotlib.pyplot as plt\nimport csv\nfrom matplotlib import rcParams\nimport http.client\nimport urllib\n\n# Turn on the interactive mode\nplt.ion()\n# Creact 3 lists to hold all the inputted data.\nx = []\ny_tem = []\ny_hum = []\n\n# API Thingspeak - Brian\nkey = '66EU45C8K4SJUNCH'\nchannelID = '1353959'\n\n\n# Define sensor type and pin number. - Shujun\nsensor = DHT.DHT22\npin = 27\n\n# Writing the data to the csv file. - Shujun\ndef write_temp(temperature, humidity):\n with open(\"temp_humidity.csv\", \"a\") as log:\n log.write(\"{0},{1},{2}\\n\".format(time.strftime(\"%H:%M:%S\"),str(temperature),str(humidity)))\n\n# Read the csv file and draw a graph using matplotlib. - Shujun\ndef graph():\n with open(\"temp_humidity.csv\",\"r\") as csvfile:\n plots = csv.reader(csvfile, delimiter=\",\")\n for row in plots:\n if row[0] not in x:\n x.append(row[0])\n y_tem.append(int(float(row[1])))\n y_hum.append(int(float(row[2])))\n plt.clf() # wipe out the graph \n rcParams['figure.figsize'] = 20,6 # set the size of the canvas \n plt.plot(x, y_tem, label = \"Temperature\")\n plt.plot(x, y_hum, label = \"Humidity\")\n plt.xlabel(\"Time\")\n plt.ylabel(\"Reading\")\n plt.title(\"Temperature and Humidity Readings\")\n plt.legend(loc=1) # put the legends on the upper right of the graph\n plt.grid(True,linestyle=\":\") # Adding grid to the graph\n plt.draw() # draw out the graph\n\n#conditionals sending variables to API statements - Xiaolang\ndef checkAvgTempForAcSwitch(tempValues, threshold):\n '''\n checkAvgTempForAC takes a list temp values, compute the average temperature, \n compare it with the threshold. 
\n params:\n tempValues: a list of temp values\n threshold: the threshold of the average temperature \n return:\n a tuple of (average temperature, statement), where the statement is a string.\n if the average temperature > threshold, statement = \"Switching on AC\";\n otherwise \"Switching off AC\"\n '''\n avg = sum(tempValues) / len(tempValues)\n if avg > threshold:\n text=\"Switching on AC\"\n sendtoSMS(text) \n\n# Connect with twilio and sending out messages - Brian\ndef sendtoSMS(statement):\n account_sid = 'AC96c973f5b3e4b88eca097ef809acc0f6'\n auth_token = 'af6e9952608904435b84c4707d086efd'\n client = Client(account_sid, auth_token)\n\n message = client.messages.create(body= statement, from_='+18507714790', to='+15857332025')\n\n print(message.sid)\n\n# Connect with Thinkspeak, print out the readings and connection status.- Brian\ndef thingspeak(temperature, humidity):\n while True:\n params = urllib.parse.urlencode({'field1': temperature, 'field2': humidity, 'key':key }) \n headers = {\"Content-typZZe\": \"application/x-www-form-urlencoded\",\"Accept\": \"text/plain\"}\n conn = http.client.HTTPConnection(\"api.thingspeak.com:80\")\n try:\n conn.request(\"POST\", \"/update\", params, headers)\n response = conn.getresponse()\n print(response.status, response.reason)\n data = response.read()\n conn.close()\n except:\n print(\"connection failed\")\n break\n\nsendtoSMS(\"The program is starting to run!\")\nwhile True: \n temperature, humidity = DHT.read_retry(sensor, pin) # get readings from sensor\n print(\"Temperature is:\",temperature, \"\\nHumidity is:\",humidity)\n write_temp(temperature, humidity)\n graph()\n thingspeak(temperature, humidity)\n tempValues = y_tem\n threshold=32\n checkAvgTempForAcSwitch(tempValues, threshold)\n plt.pause(5) \nsendtoSMS(\"The program is stopped!\")"
] | [
[
"matplotlib.pyplot.legend",
"matplotlib.pyplot.title",
"matplotlib.pyplot.draw",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.clf",
"matplotlib.pyplot.grid",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.ion",
"matplotlib.pyplot.pause",
"matplotlib.pyplot.ylabel"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
kimandsharp/bmb510 | [
"5446cd168709dd7f5d6cee66f596e57d3632af3d",
"5446cd168709dd7f5d6cee66f596e57d3632af3d"
] | [
"SciInf_utilities.py",
"MultiRareCounts.py"
] | [
"\"\"\"\nsome useful defs for bayes programs\n\"\"\"\nimport numpy as np\n#-------\n# globals\nCREDIBLE_MIN = 2.5 # lower percentile for credible interval\nCREDIBLE_MAX = 97.5 # upper percentile for credible interval # covers 95%\n#CREDIBLE_MIN = 5. # lower percentile for credible interval\n#CREDIBLE_MAX = 95. # upper percentile for credible interval # covers 90%\nNPOINT = 2501\nprint('number of integration points: ',NPOINT)\nMAKEPLOT = True\n#-------\ndef read_n(n,filename):\n # read a list of integers from a file\n data_file = open(filename,\"r\")\n contents = data_file.readlines()\n for line in contents:\n if(line[0] == '#'):\n print('%s' % line[:-1])\n continue\n if(len(line) <= 1):\n continue\n field = line.split()\n n.append(int(field[0]))\n data_file.close()\n ndata = len(n)\n print ('# data points %d ' % (ndata))\n return ndata\n\ndef read_x(x,filename):\n # read a list of reals (floats) from a file\n data_file = open(filename,\"r\")\n contents = data_file.readlines()\n for line in contents:\n if(line[0] == '#'):\n print('%s' % line[:-1])\n continue\n if(len(line) <= 1):\n continue\n field = line.split()\n #print(field)\n x.append(float(field[0]))\n data_file.close()\n ndata = len(x)\n print ('# data points %d ' % (ndata))\n return ndata\n #print(x)\n\ndef read_xy(x,y,filename):\n # read pairs of reals (floats), one pair per line separated by whitespace \n data_file = open(filename,\"r\")\n contents = data_file.readlines()\n for line in contents:\n if(line[0] == '#'):\n print('%s' % line[:-1])\n continue\n if(len(line) <= 1):\n continue\n field = line.split()\n #vprint(field)\n x.append(float(field[0]))\n y.append(float(field[1]))\n data_file.close()\n ndata = len(x)\n print ('# data points %d ' % (ndata))\n return ndata\n #print(x)\n #print(y)\n\ndef average_x(x):\n # return average of list of floats\n avx = 0.\n for i in range(len(x)):\n avx += x[i]\n if(len(x)>0): avx = avx/len(x)\n return avx\n\ndef average_xy(x,y):\n # return average of product of two lists of floats\n avx = 0.\n length = min(len(x),len(y))\n if(len(x)!=len(y)):\n print ('warning different length lists- downsizing') \n for i in range(length):\n avx += x[i]*y[i]\n if(length>0): avx = avx/length\n return avx\n\n\ndef pdf_to_cdf(x_axis,pdf,norm=True,discrete=False):\n \"\"\"\n integrate probability distribution function to get cumulative distribution function\n using trapezoidal rule\n \"\"\"\n n = len(pdf)\n cdf = np.zeros(n)\n if(discrete):\n cdf[0] = pdf[0]\n for i in range(1,n):\n cdf[i] = cdf[i-1] + pdf[i]\n else:\n for i in range(1,n):\n cdf[i] = cdf[i-1] + 0.5*(pdf[i]+pdf[i-1])*(x_axis[i] - x_axis[i-1])\n if(norm):\n cmax = cdf[n-1]\n cdf = cdf/cmax\n return cdf\n\ndef quantile(x_axis,cdf,percent,reverse=False):\n \"\"\"\n get quantile by scanning thru cdf\n \"\"\"\n n = len(cdf)\n if(not reverse):\n cut = percent/100.\n else:\n cut = 1. 
- percent/100.\n i = 0\n while((cdf[i]<=cut)and(i<n)):\n i += 1\n if(i>0):\n return x_axis[i-1]\n else:\n return x_axis[i]\n\ndef pdf_to_mean(x_axis,pdf,discrete=False):\n \"\"\"\n return mean as <x> = int(x.p(x)) using trapezoidal rule\n do not assume that pdf is normalized\n \"\"\"\n n = len(pdf)\n x_mean = 0.\n pdf_sum = 0.\n if(discrete):\n pdf_max = -1.e6\n for i in range(n):\n pdf_sum += pdf[i]\n x_mean += x_axis[i]*pdf[i]\n if(pdf[i] > pdf_max):\n pdf_max = pdf[i]\n x_mode = x_axis[i]\n x_mean /= pdf_sum\n else:\n pdf_max = pdf[0]\n x_mode = x_axis[0]\n for i in range(1,n):\n pdf_sum += 0.5*(pdf[i]+pdf[i-1])*(x_axis[i] - x_axis[i-1])\n x_mean += 0.5*(pdf[i]+pdf[i-1])*(x_axis[i] - x_axis[i-1])*0.5*(x_axis[i] + x_axis[i-1])\n if(pdf[i] > pdf_max):\n pdf_max = pdf[i]\n x_mode = x_axis[i]\n x_mean /= pdf_sum\n # print(\" mean: {:12.5f} \".format(x_mean))\n # print(\" mode: \",x_mode)\n return x_mean,x_mode\n\ndef sort_1_by_2(x,y,rev=False):\n \"\"\"\n sort one list by elements in another list\n \"\"\"\n #print('reverse',rev)\n if(len(x) == len(y)):\n y_x = zip(y,x)\n y_x_sorted = sorted(y_x,reverse=rev)\n y = [z[0] for z in y_x_sorted]\n x = [z[1] for z in y_x_sorted]\n return x,y\n else:\n print('lists of different length- not sorting')\n# for i in range(len(x)):\n# print(x[i],y[i])\n#\ndef summarize(x_axis,pdf,cdf,discrete=False,title='parameter'):\n median = quantile(x_axis,cdf,50.)\n limit_min = quantile(x_axis,cdf,CREDIBLE_MIN)\n limit_max = quantile(x_axis,cdf,CREDIBLE_MAX)\n mean,mode = pdf_to_mean(x_axis,pdf,discrete)\n print('\\n===========================================================')\n print('SUMMARY of posterior distribution for {:s} '.format(title))\n print('===========================================================')\n print('mean: {: 12.5f} mode: {:12.5f} '.format(mean, mode))\n print('median: {:12.5f}'.format(median))\n print('{:6.1f}% to {:6.1f}% limits: ({:12.5f} to {:12.5f})'.format(CREDIBLE_MIN,CREDIBLE_MAX,limit_min,limit_max))\n print('===========================================================\\n')\n return limit_min,limit_max\n\ndef write_pdf_cdf(x_axis,pdf,cdf,title='x pdf cdf',filename='pdf_cdf.dat'):\n head1 = '#' + title + '\\n'\n head2 = '# x p(x) cdf(x) ' + '\\n'\n fileout = open(filename,'w')\n fileout.write(head1)\n fileout.write(head2)\n for i in range(len(x_axis)):\n strbuf = '{:15.5g} {:15.5g} {:15.5g} \\n'.format(x_axis[i],pdf[i],cdf[i])\n fileout.write(strbuf)\n fileout.close()\n",
"\"\"\"\nBayesian analysis of multiple observations of rare events\neach observation characterized by n_i counts in time t_i\nusing eq. 2.14b of gelman et al, DBA3 chapter 2.6\n\"\"\"\nimport random as rn\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport kimpy_utilities as ku\nfrom math import *\nimport sys\npercentsign = '%'\n#-------------------------------------\nprint('\\nBayesian analysis of multiple observations of rare events')\nprint('each observation characterized by n_i counts in time t_i')\nprint('posterior is equivalent to that from a single observation of')\nprint('n_total counts in t_total time\\n')\nif(len(sys.argv) <2):\n data_file = input('Data file containing one pair of: #counts observation_time \\nper line>> ')\nelse:\n data_file = sys.argv[1]\ncount_data = []\ntobs_data = []\nprint('reading n t data from file ',data_file)\nku.read_xy(count_data,tobs_data,data_file)\nnset = len(count_data)\ntobs_tot = 0\ncount_tot = 0\nfor i in range(nset):\n tobs_tot += tobs_data[i]\n count_tot += count_data[i]\nr_mean = float(count_tot/tobs_tot)\nprint(' total count %8d observation time %12.5f mean rate %12.5f ' %(count_tot,tobs_tot,r_mean))\n#\n# generate pdf, cdf\nr_range = 3.\nd_rate = r_range*r_mean/(ku.NPOINT - 1)\nr_axis = np.zeros(ku.NPOINT)\nlog_r_pdf = np.zeros(ku.NPOINT)\nfor i in range(ku.NPOINT):\n r_axis[i] = (i+1)*d_rate\n #exponent of Poisson is counts minus 1 since using prior of 1/rate\n log_r_pdf[i] = (count_tot - 1.)*log(r_axis[i]) - tobs_tot*r_axis[i]\npdf_max = max(log_r_pdf)\nlog_r_pdf = log_r_pdf - pdf_max\nr_pdf = np.exp(log_r_pdf)\nr_cdf = ku.pdf_to_cdf(r_axis,r_pdf)\nku.write_pdf_cdf(r_axis,r_pdf,r_cdf,title='x pdf cdf',filename='mrate_pdf_cdf.dat')\n\nku.summarize(r_axis,r_pdf,r_cdf,title='rate')\n#\n# plot posterior pdf of rate\n#\nif(ku.MAKEPLOT):\n plt.figure(1)\n plt.plot(r_axis,r_pdf,'g-')\n plt.plot(r_axis,r_cdf,'r-')\n plt.xlabel('rate .')\n plt.ylabel(' prob(rate)')\n plt.title(' posterior pdf of rate')\n plt.ylim((0.,1.2))\n plt.grid(True)\n plt.show()\nsys.exit()\n\"\"\"\n#\nprint('\\ninput data mean stderr: ')\nfor j in range(nset):\n print('%12.5f %12.5f ' % (means_data[j],sterr_data[j]))\n#\n#==============================================================\n# compute mean of means, and stdev of means to get overall location, and\n# scale range for hyperparameter tau\n#==============================================================\nmu_mu = ku.average_x(means_data)\nmu2 = ku.average_xy(means_data,means_data)\nmu_var = mu2 - mu_mu*mu_mu\nmu_stdev = sqrt(mu_var)\nprint('\\nglobal mean, stdev of means: %12.5f %12.5f '% (mu_mu,mu_stdev))\ntau_range = 4.*mu_stdev\n#\n#==============================================================\n# compute posterior marginal distbn. 
for hyperparameter mu, tau, the center, spread of means\n# assuming a uniform prior for tau and mu\n#==============================================================\n#NPOINT = 2501 # debug\nNPOINT = ku.NPOINT\ndtau = tau_range/(NPOINT - 1.)\ntau_axis = np.zeros(NPOINT)\ntau_prob = np.zeros(NPOINT)\ntau_val = 0.\nfor i in range(NPOINT):\n tau_axis[i] = tau_val\n tau_prob[i] = tau_post(tau_val,means_data,sterr_data)\n tau_val += dtau\ntau_prob /= np.max(tau_prob)\ntau_cdf = ku.pdf_to_cdf(tau_axis,tau_prob)\nndim = 99\nquantile_axis = np.zeros(ndim) # values for inverse cdf of p(tau) for sampling\ntau_quantile = np.zeros(ndim) # value of tau for each percentile\nfor i in range(ndim):\n quantile_axis[i] = 1.*(i+1)\n tau_quantile[i] = ku.quantile(tau_axis,tau_cdf,quantile_axis[i])\ntau_up = tau_quantile[95] \nprint('tau 95{:1s} limits: ({:12.5f} to {:12.5f})'.format(percentsign,0.,tau_up))\n#print(tau_axis)\n#print(tau_quantile)\n\nplt.figure(1)\nplt.title('posterior marginal for hyperparameter tau (spread of means)')\nplt.plot(tau_axis,tau_prob,'g-')\nplt.plot(tau_axis,tau_cdf,'r-')\nplt.xlim(0.,tau_axis[-1])\nplt.ylim(0.,1.1)\nplt.xlabel('tau ')\nplt.ylabel('p(tau|data) ')\nplt.show()\n#\nplt.figure(2)\nplt.title(' inverse cdf for p(tau|data')\nplt.plot(quantile_axis,tau_quantile,'g-')\nplt.xlim(0.,100.)\nplt.xlabel('%')\nplt.ylabel('tau ')\nplt.show()\n#\n# sample global spread paramter tau from p(tau|data)\n# then sample global location mu from p(mu|tau,data) = Normal(mu_mu,mu_stdev)\n# (DBA3 eq. between 5.19 and 5.20)\n# then for each data set, sample its location parameter theta_j \n# from p(theta_j | mu, tau, data) = Normal(theta_av_j, stdev_j)\n# (DBA3 eq. 5.17)\nrn.seed(123)\ntau_sample = []\nmu_check = 0.\nnsample = 5000\ntheta_sample = np.zeros((nset,nsample))\n#ts = np.zeros((nsample,nset))\nfor i in range(nsample):\n i1 = rn.randint(0,ndim-1)\n tau_val = tau_quantile[i1]\n tau_sample.append(tau_val)\n mu_av, mu_prec = mu_params(tau_val,means_data,sterr_data)\n mu_stdev = sqrt(1./mu_prec)\n mu_val = rn.normalvariate(mu_av,mu_stdev)\n mu_check += mu_val\n #print(i1,tau_val,mu_val)\n for j in range(nset):\n theta_prec = 1./sterr_data[j]**2 + 1./tau_val**2\n theta_stdev = sqrt(1./theta_prec)\n theta_av = (means_data[j]/sterr_data[j]**2 + mu_val/tau_val**2)/theta_prec\n theta_val = rn.normalvariate(theta_av,theta_stdev)\n theta_sample[j][i] = theta_val\n #ts[i][j] = theta_val\nmu_check /= nsample\nprint('mean of means from %d samples: %12.5f ' % (nsample,mu_check))\n#\n# extract median, 95% credible interval ranges\nfor j in range(nset):\n theta_sample[j].sort()\n im = nsample//2\n il = int(nsample*0.025)\n iu = int(nsample*0.975)\n print('set %3d median %12.5f 95%s CI (%12.5f , %12.5f) ' \\\n %(j+1,theta_sample[j][im],percentsign,theta_sample[j][il],theta_sample[j][iu]))\n#print(theta_sample)\nnbins = 20\nplt.figure()\n#n, bins, patches = plt.hist(theta_sample[0], nbins)\naz.plot_forest(theta_sample,quartiles=True) # better than box plot for large data sets\n#plt.boxplot(ts)\nplt.show()\n#\n\"\"\"\n"
] | [
[
"numpy.zeros"
],
[
"matplotlib.pyplot.title",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.grid",
"matplotlib.pyplot.xlabel",
"numpy.exp",
"numpy.zeros",
"matplotlib.pyplot.show",
"matplotlib.pyplot.figure"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
pavlin-policar/graphml-tutorials | [
"72fb9244e8d392b0222b3cfc94b26eb8463ead75"
] | [
"03-graph-classification/preprocess.py"
] | [
"from rdkit import Chem\nimport numpy as np\nfrom pysmiles import read_smiles\nimport networkx as nx\nfrom molecule import Molecule\nimport pickle\nimport pandas as pd\n\n\n\nclass RegressionData():\n \"\"\"\n \t:param mols: list of nx.Graph molecules describing respective SMILES string\n :param labels: list of labels where each label is a list of three topological indices\n [wiener_idx, hyper_wiener_idx, zagreb_idx]\n \"\"\"\n def __init__(self, mols, labels):\n self.mols = mols\n self.labels = labels\n \n self.periodic_table = Chem.GetPeriodicTable()\n self.ams = [nx.to_numpy_matrix(mol, weight='order') for mol in self.mols]\n self.graphs = [nx.from_numpy_matrix(am) for am in self.ams]\n self.element_lists = [mol.nodes(data = 'element') for mol in self.mols]\n \n def create_molecule(self, element_list, label, am):\n \"\"\"\n :param element_list: list of integers of atomic number of the molecule \n :param label: list of three topological indices [wiener_idx, hyper_wiener_idx, zagreb_idx]\n :param am: adjacency matrix of the molecule \n :return: Molecule object with its attributes specified by above parameters\n \"\"\"\n nodes = np.array([Chem.rdchem.PeriodicTable.GetAtomicNumber(self.periodic_table, atom[1]) for atom in element_list])\n return Molecule(nodes, label, am)\n\nclass ClassificationData():\n \"\"\"\n \t:param file_name: string of file name to be used as property prediction task data\n \"\"\"\n def __init__(self, file_name):\n self.data = pd.read_csv(file_name)\n \n self.smiles = self.data['smiles']\n self.labels = self.data['activity']\n self.mols = [read_smiles(smile) for smile in self.smiles]\n \n self.periodic_table = Chem.GetPeriodicTable()\n self.ams = [nx.to_numpy_matrix(mol, weight='order') for mol in self.mols]\n self.graphs = [nx.from_numpy_matrix(am) for am in self.ams]\n self.element_lists = [mol.nodes(data = 'element') for mol in self.mols]\n \n \n def create_molecule(self, element_list, label, am):\n \"\"\"\n :param element_list: list of integers of atomic number of the molecule \n :param label: if active 1, else 0\n :return: Molecule object with its attributes specified by above parameters\n \"\"\"\n nodes = np.array([Chem.rdchem.PeriodicTable.GetAtomicNumber(self.periodic_table, atom[1]) for atom in element_list])\n return Molecule(nodes, label, am)\n \n def get_labels(self):\n \"\"\"\n :return: list of labels of {0,1}\n \"\"\"\n return self.labels\n\n \ndef get_smiles(file_name):\n file = open(file_name, 'r')\n smiles = []\n for i in range(5000):\n line = next(file).strip()\n _,_,smile = line.partition('\\t')\n smiles.append(smile)\n return smiles\n\ndef save_mols(file_name):\n smiles = get_smiles(file_name)\n mols = [read_smiles(smile) for smile in smiles]\n pickle_out = open(\"5000_mols.pickle\", \"wb\")\n pickle.dump(mols, pickle_out)\n pickle_out.close()\n \ndef get_data(data):\n molecules = []\n for i in range (len(data.element_lists)):\n e = data.element_lists[i]\n label = data.labels[i]\n am = data.ams[i]\n\n mol = data.create_molecule(e, label, am)\n molecules.append(mol)\n \n return molecules\n\ndef get_labels(wiener_idx, hyper_wiener_idx, zagreb_idx):\n \"\"\"\n :param wiener_idx: np.array of shape [-1, 1] containing wiener index of each molecule \n :param hyper_wiener_idx: np.array of shape [-1, 1] containing hyper wiener index of each molecule \n :param zagreb_idx: np.array of shape [-1, 1] containing hyper zagreb index of each molecule \n :return: np.array of shape [-1, 3] where [wiener_idx, hyper_wiener_idx, zagreb_idx] of each \n molecule is 
concatenated\n \"\"\"\n wiener_idx = np.reshape(wiener_idx, (len(wiener_idx), 1))\n hyper_wiener_idx = np.reshape(hyper_wiener_idx, (len(hyper_wiener_idx), 1))\n zagreb_idx = np.reshape(zagreb_idx, (len(zagreb_idx), 1))\n labels = np.hstack((wiener_idx, hyper_wiener_idx, zagreb_idx))\n labels = np.log10(labels)\n return labels\n\n"
] | [
[
"numpy.hstack",
"numpy.log10",
"pandas.read_csv"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
mohdsherif/mne-python | [
"affc6854168e32e73a075a0104e7af8bddd7eefe",
"affc6854168e32e73a075a0104e7af8bddd7eefe",
"affc6854168e32e73a075a0104e7af8bddd7eefe"
] | [
"mne/forward/forward.py",
"mne/utils/numerics.py",
"mne/coreg.py"
] | [
"# Authors: Alexandre Gramfort <[email protected]>\n# Matti Hamalainen <[email protected]>\n# Martin Luessi <[email protected]>\n#\n# License: BSD (3-clause)\n\nfrom time import time\nfrom copy import deepcopy\nimport re\n\nimport numpy as np\nfrom scipy import linalg, sparse\n\nimport shutil\nimport os\nfrom os import path as op\nimport tempfile\n\nfrom ..io import RawArray, Info\nfrom ..io.constants import FIFF\nfrom ..io.open import fiff_open\nfrom ..io.tree import dir_tree_find\nfrom ..io.tag import find_tag, read_tag\nfrom ..io.matrix import (_read_named_matrix, _transpose_named_matrix,\n write_named_matrix)\nfrom ..io.meas_info import read_bad_channels, write_info\nfrom ..io.pick import (pick_channels_forward, pick_info, pick_channels,\n pick_types)\nfrom ..io.write import (write_int, start_block, end_block,\n write_coord_trans, write_ch_info, write_name_list,\n write_string, start_file, end_file, write_id)\nfrom ..io.base import BaseRaw\nfrom ..evoked import Evoked, EvokedArray\nfrom ..epochs import BaseEpochs\nfrom ..source_space import (_read_source_spaces_from_tree,\n find_source_space_hemi, _set_source_space_vertices,\n _write_source_spaces_to_fid)\nfrom ..source_estimate import _BaseSourceEstimate\nfrom ..transforms import (transform_surface_to, invert_transform,\n write_trans)\nfrom ..utils import (_check_fname, get_subjects_dir, has_mne_c, warn,\n run_subprocess, check_fname, logger, verbose, fill_doc,\n _validate_type, _check_compensation_grade, _check_option)\nfrom ..label import Label\nfrom ..fixes import einsum\n\n\nclass Forward(dict):\n \"\"\"Forward class to represent info from forward solution.\"\"\"\n\n def copy(self):\n \"\"\"Copy the Forward instance.\"\"\"\n return Forward(deepcopy(self))\n\n def __repr__(self):\n \"\"\"Summarize forward info instead of printing all.\"\"\"\n entr = '<Forward'\n\n nchan = len(pick_types(self['info'], meg=True, eeg=False, exclude=[]))\n entr += ' | ' + 'MEG channels: %d' % nchan\n nchan = len(pick_types(self['info'], meg=False, eeg=True, exclude=[]))\n entr += ' | ' + 'EEG channels: %d' % nchan\n\n src_types = np.array([src['type'] for src in self['src']])\n if (src_types == 'surf').all():\n entr += (' | Source space: Surface with %d vertices'\n % self['nsource'])\n elif (src_types == 'vol').all():\n entr += (' | Source space: Volume with %d grid points'\n % self['nsource'])\n elif (src_types == 'discrete').all():\n entr += (' | Source space: Discrete with %d dipoles'\n % self['nsource'])\n else:\n count_string = ''\n if (src_types == 'surf').any():\n count_string += '%d surface, ' % (src_types == 'surf').sum()\n if (src_types == 'vol').any():\n count_string += '%d volume, ' % (src_types == 'vol').sum()\n if (src_types == 'discrete').any():\n count_string += '%d discrete, ' \\\n % (src_types == 'discrete').sum()\n count_string = count_string.rstrip(', ')\n entr += (' | Source space: Mixed (%s) with %d vertices'\n % (count_string, self['nsource']))\n\n if self['source_ori'] == FIFF.FIFFV_MNE_UNKNOWN_ORI:\n entr += (' | Source orientation: Unknown')\n elif self['source_ori'] == FIFF.FIFFV_MNE_FIXED_ORI:\n entr += (' | Source orientation: Fixed')\n elif self['source_ori'] == FIFF.FIFFV_MNE_FREE_ORI:\n entr += (' | Source orientation: Free')\n\n entr += '>'\n\n return entr\n\n\ndef _block_diag(A, n):\n \"\"\"Construct a block diagonal from a packed structure.\n\n You have to try it on a matrix to see what it's doing.\n\n If A is not sparse, then returns a sparse block diagonal \"bd\",\n diagonalized from the\n elements in \"A\".\n \"A\" 
is ma x na, comprising bdn=(na/\"n\") blocks of submatrices.\n Each submatrix is ma x \"n\", and these submatrices are\n placed down the diagonal of the matrix.\n\n If A is already sparse, then the operation is reversed, yielding\n a block\n row matrix, where each set of n columns corresponds to a block element\n from the block diagonal.\n\n Parameters\n ----------\n A : array\n The matrix\n n : int\n The block size\n Returns\n -------\n bd : sparse matrix\n The block diagonal matrix\n \"\"\"\n if sparse.issparse(A): # then make block sparse\n raise NotImplementedError('sparse reversal not implemented yet')\n ma, na = A.shape\n bdn = na // int(n) # number of submatrices\n\n if na % n > 0:\n raise ValueError('Width of matrix must be a multiple of n')\n\n tmp = np.arange(ma * bdn, dtype=np.int).reshape(bdn, ma)\n tmp = np.tile(tmp, (1, n))\n ii = tmp.ravel()\n\n jj = np.arange(na, dtype=np.int)[None, :]\n jj = jj * np.ones(ma, dtype=np.int)[:, None]\n jj = jj.T.ravel() # column indices foreach sparse bd\n\n bd = sparse.coo_matrix((A.T.ravel(), np.c_[ii, jj].T)).tocsc()\n\n return bd\n\n\ndef _inv_block_diag(A, n):\n \"\"\"Construct an inverse block diagonal from a packed structure.\n\n You have to try it on a matrix to see what it's doing.\n\n \"A\" is ma x na, comprising bdn=(na/\"n\") blocks of submatrices.\n Each submatrix is ma x \"n\", and the inverses of these submatrices\n are placed down the diagonal of the matrix.\n\n Parameters\n ----------\n A : array\n The matrix.\n n : int\n The block size.\n\n Returns\n -------\n bd : sparse matrix\n The block diagonal matrix.\n \"\"\"\n ma, na = A.shape\n bdn = na // int(n) # number of submatrices\n\n if na % n > 0:\n raise ValueError('Width of matrix must be a multiple of n')\n\n # modify A in-place to invert each sub-block\n A = A.copy()\n for start in range(0, na, 3):\n # this is a view\n A[:, start:start + 3] = linalg.inv(A[:, start:start + 3])\n\n tmp = np.arange(ma * bdn, dtype=np.int).reshape(bdn, ma)\n tmp = np.tile(tmp, (1, n))\n ii = tmp.ravel()\n\n jj = np.arange(na, dtype=np.int)[None, :]\n jj = jj * np.ones(ma, dtype=np.int)[:, None]\n jj = jj.T.ravel() # column indices foreach sparse bd\n\n bd = sparse.coo_matrix((A.T.ravel(), np.c_[ii, jj].T)).tocsc()\n\n return bd\n\n\ndef _get_tag_int(fid, node, name, id_):\n \"\"\"Check we have an appropriate tag.\"\"\"\n tag = find_tag(fid, node, id_)\n if tag is None:\n fid.close()\n raise ValueError(name + ' tag not found')\n return int(tag.data)\n\n\ndef _read_one(fid, node):\n \"\"\"Read all interesting stuff for one forward solution.\"\"\"\n # This function assumes the fid is open as a context manager\n if node is None:\n return None\n\n one = Forward()\n one['source_ori'] = _get_tag_int(fid, node, 'Source orientation',\n FIFF.FIFF_MNE_SOURCE_ORIENTATION)\n one['coord_frame'] = _get_tag_int(fid, node, 'Coordinate frame',\n FIFF.FIFF_MNE_COORD_FRAME)\n one['nsource'] = _get_tag_int(fid, node, 'Number of sources',\n FIFF.FIFF_MNE_SOURCE_SPACE_NPOINTS)\n one['nchan'] = _get_tag_int(fid, node, 'Number of channels',\n FIFF.FIFF_NCHAN)\n try:\n one['sol'] = _read_named_matrix(fid, node,\n FIFF.FIFF_MNE_FORWARD_SOLUTION,\n transpose=True)\n one['_orig_sol'] = one['sol']['data'].copy()\n except Exception:\n logger.error('Forward solution data not found')\n raise\n\n try:\n fwd_type = FIFF.FIFF_MNE_FORWARD_SOLUTION_GRAD\n one['sol_grad'] = _read_named_matrix(fid, node, fwd_type,\n transpose=True)\n one['_orig_sol_grad'] = one['sol_grad']['data'].copy()\n except Exception:\n one['sol_grad'] = 
None\n\n if one['sol']['data'].shape[0] != one['nchan'] or \\\n (one['sol']['data'].shape[1] != one['nsource'] and\n one['sol']['data'].shape[1] != 3 * one['nsource']):\n raise ValueError('Forward solution matrix has wrong dimensions')\n\n if one['sol_grad'] is not None:\n if one['sol_grad']['data'].shape[0] != one['nchan'] or \\\n (one['sol_grad']['data'].shape[1] != 3 * one['nsource'] and\n one['sol_grad']['data'].shape[1] != 3 * 3 * one['nsource']):\n raise ValueError('Forward solution gradient matrix has '\n 'wrong dimensions')\n\n return one\n\n\ndef _read_forward_meas_info(tree, fid):\n \"\"\"Read light measurement info from forward operator.\n\n Parameters\n ----------\n tree : tree\n FIF tree structure.\n fid : file id\n The file id.\n\n Returns\n -------\n info : instance of Info\n The measurement info.\n \"\"\"\n # This function assumes fid is being used as a context manager\n info = Info()\n\n # Information from the MRI file\n parent_mri = dir_tree_find(tree, FIFF.FIFFB_MNE_PARENT_MRI_FILE)\n if len(parent_mri) == 0:\n raise ValueError('No parent MEG information found in operator')\n parent_mri = parent_mri[0]\n\n tag = find_tag(fid, parent_mri, FIFF.FIFF_MNE_FILE_NAME)\n info['mri_file'] = tag.data if tag is not None else None\n tag = find_tag(fid, parent_mri, FIFF.FIFF_PARENT_FILE_ID)\n info['mri_id'] = tag.data if tag is not None else None\n\n # Information from the MEG file\n parent_meg = dir_tree_find(tree, FIFF.FIFFB_MNE_PARENT_MEAS_FILE)\n if len(parent_meg) == 0:\n raise ValueError('No parent MEG information found in operator')\n parent_meg = parent_meg[0]\n\n tag = find_tag(fid, parent_meg, FIFF.FIFF_MNE_FILE_NAME)\n info['meas_file'] = tag.data if tag is not None else None\n tag = find_tag(fid, parent_meg, FIFF.FIFF_PARENT_FILE_ID)\n info['meas_id'] = tag.data if tag is not None else None\n\n # Add channel information\n chs = list()\n for k in range(parent_meg['nent']):\n kind = parent_meg['directory'][k].kind\n pos = parent_meg['directory'][k].pos\n if kind == FIFF.FIFF_CH_INFO:\n tag = read_tag(fid, pos)\n chs.append(tag.data)\n info['chs'] = chs\n info._update_redundant()\n\n # Get the MRI <-> head coordinate transformation\n tag = find_tag(fid, parent_mri, FIFF.FIFF_COORD_TRANS)\n coord_head = FIFF.FIFFV_COORD_HEAD\n coord_mri = FIFF.FIFFV_COORD_MRI\n coord_device = FIFF.FIFFV_COORD_DEVICE\n coord_ctf_head = FIFF.FIFFV_MNE_COORD_CTF_HEAD\n if tag is None:\n raise ValueError('MRI/head coordinate transformation not found')\n cand = tag.data\n if cand['from'] == coord_mri and cand['to'] == coord_head:\n info['mri_head_t'] = cand\n else:\n raise ValueError('MRI/head coordinate transformation not found')\n\n # Get the MEG device <-> head coordinate transformation\n tag = find_tag(fid, parent_meg, FIFF.FIFF_COORD_TRANS)\n if tag is None:\n raise ValueError('MEG/head coordinate transformation not found')\n cand = tag.data\n if cand['from'] == coord_device and cand['to'] == coord_head:\n info['dev_head_t'] = cand\n elif cand['from'] == coord_ctf_head and cand['to'] == coord_head:\n info['ctf_head_t'] = cand\n else:\n raise ValueError('MEG/head coordinate transformation not found')\n\n info['bads'] = read_bad_channels(fid, parent_meg)\n # clean up our bad list, old versions could have non-existent bads\n info['bads'] = [bad for bad in info['bads'] if bad in info['ch_names']]\n\n # Check if a custom reference has been applied\n tag = find_tag(fid, parent_mri, FIFF.FIFF_MNE_CUSTOM_REF)\n if tag is None:\n tag = find_tag(fid, parent_mri, 236) # Constant 236 used before 
v0.11\n\n info['custom_ref_applied'] = bool(tag.data) if tag is not None else False\n info._check_consistency()\n return info\n\n\ndef _subject_from_forward(forward):\n \"\"\"Get subject id from inverse operator.\"\"\"\n return forward['src']._subject\n\n\n@verbose\ndef _merge_meg_eeg_fwds(megfwd, eegfwd, verbose=None):\n \"\"\"Merge loaded MEG and EEG forward dicts into one dict.\"\"\"\n if megfwd is not None and eegfwd is not None:\n if (megfwd['sol']['data'].shape[1] != eegfwd['sol']['data'].shape[1] or\n megfwd['source_ori'] != eegfwd['source_ori'] or\n megfwd['nsource'] != eegfwd['nsource'] or\n megfwd['coord_frame'] != eegfwd['coord_frame']):\n raise ValueError('The MEG and EEG forward solutions do not match')\n\n fwd = megfwd\n fwd['sol']['data'] = np.r_[fwd['sol']['data'], eegfwd['sol']['data']]\n fwd['_orig_sol'] = np.r_[fwd['_orig_sol'], eegfwd['_orig_sol']]\n fwd['sol']['nrow'] = fwd['sol']['nrow'] + eegfwd['sol']['nrow']\n\n fwd['sol']['row_names'] = (fwd['sol']['row_names'] +\n eegfwd['sol']['row_names'])\n if fwd['sol_grad'] is not None:\n fwd['sol_grad']['data'] = np.r_[fwd['sol_grad']['data'],\n eegfwd['sol_grad']['data']]\n fwd['_orig_sol_grad'] = np.r_[fwd['_orig_sol_grad'],\n eegfwd['_orig_sol_grad']]\n fwd['sol_grad']['nrow'] = (fwd['sol_grad']['nrow'] +\n eegfwd['sol_grad']['nrow'])\n fwd['sol_grad']['row_names'] = (fwd['sol_grad']['row_names'] +\n eegfwd['sol_grad']['row_names'])\n\n fwd['nchan'] = fwd['nchan'] + eegfwd['nchan']\n logger.info(' MEG and EEG forward solutions combined')\n elif megfwd is not None:\n fwd = megfwd\n else:\n fwd = eegfwd\n return fwd\n\n\n@verbose\ndef read_forward_solution(fname, include=(), exclude=(), verbose=None):\n \"\"\"Read a forward solution a.k.a. lead field.\n\n Parameters\n ----------\n fname : string\n The file name, which should end with -fwd.fif or -fwd.fif.gz.\n include : list, optional\n List of names of channels to include. If empty all channels\n are included.\n exclude : list, optional\n List of names of channels to exclude. If empty include all\n channels.\n %(verbose)s\n\n Returns\n -------\n fwd : instance of Forward\n The forward solution.\n\n See Also\n --------\n write_forward_solution, make_forward_solution\n\n Notes\n -----\n Forward solutions, which are derived from an original forward solution with\n free orientation, are always stored on disk as forward solution with free\n orientation in X/Y/Z RAS coordinates. To apply any transformation to the\n forward operator (surface orientation, fixed orientation) please apply\n :func:`convert_forward_solution` after reading the forward solution with\n :func:`read_forward_solution`.\n\n Forward solutions, which are derived from an original forward solution with\n fixed orientation, are stored on disk as forward solution with fixed\n surface-based orientations. Please note that the transformation to\n surface-based, fixed orientation cannot be reverted after loading the\n forward solution with :func:`read_forward_solution`.\n \"\"\"\n check_fname(fname, 'forward', ('-fwd.fif', '-fwd.fif.gz',\n '_fwd.fif', '_fwd.fif.gz'))\n\n # Open the file, create directory\n logger.info('Reading forward solution from %s...' 
% fname)\n f, tree, _ = fiff_open(fname)\n with f as fid:\n # Find all forward solutions\n fwds = dir_tree_find(tree, FIFF.FIFFB_MNE_FORWARD_SOLUTION)\n if len(fwds) == 0:\n raise ValueError('No forward solutions in %s' % fname)\n\n # Parent MRI data\n parent_mri = dir_tree_find(tree, FIFF.FIFFB_MNE_PARENT_MRI_FILE)\n if len(parent_mri) == 0:\n raise ValueError('No parent MRI information in %s' % fname)\n parent_mri = parent_mri[0]\n\n src = _read_source_spaces_from_tree(fid, tree, patch_stats=False)\n for s in src:\n s['id'] = find_source_space_hemi(s)\n\n fwd = None\n\n # Locate and read the forward solutions\n megnode = None\n eegnode = None\n for k in range(len(fwds)):\n tag = find_tag(fid, fwds[k], FIFF.FIFF_MNE_INCLUDED_METHODS)\n if tag is None:\n raise ValueError('Methods not listed for one of the forward '\n 'solutions')\n\n if tag.data == FIFF.FIFFV_MNE_MEG:\n megnode = fwds[k]\n elif tag.data == FIFF.FIFFV_MNE_EEG:\n eegnode = fwds[k]\n\n megfwd = _read_one(fid, megnode)\n if megfwd is not None:\n if is_fixed_orient(megfwd):\n ori = 'fixed'\n else:\n ori = 'free'\n logger.info(' Read MEG forward solution (%d sources, '\n '%d channels, %s orientations)'\n % (megfwd['nsource'], megfwd['nchan'], ori))\n\n eegfwd = _read_one(fid, eegnode)\n if eegfwd is not None:\n if is_fixed_orient(eegfwd):\n ori = 'fixed'\n else:\n ori = 'free'\n logger.info(' Read EEG forward solution (%d sources, '\n '%d channels, %s orientations)'\n % (eegfwd['nsource'], eegfwd['nchan'], ori))\n\n fwd = _merge_meg_eeg_fwds(megfwd, eegfwd)\n\n # Get the MRI <-> head coordinate transformation\n tag = find_tag(fid, parent_mri, FIFF.FIFF_COORD_TRANS)\n if tag is None:\n raise ValueError('MRI/head coordinate transformation not found')\n mri_head_t = tag.data\n if (mri_head_t['from'] != FIFF.FIFFV_COORD_MRI or\n mri_head_t['to'] != FIFF.FIFFV_COORD_HEAD):\n mri_head_t = invert_transform(mri_head_t)\n if (mri_head_t['from'] != FIFF.FIFFV_COORD_MRI or\n mri_head_t['to'] != FIFF.FIFFV_COORD_HEAD):\n fid.close()\n raise ValueError('MRI/head coordinate transformation not '\n 'found')\n fwd['mri_head_t'] = mri_head_t\n\n #\n # get parent MEG info\n #\n fwd['info'] = _read_forward_meas_info(tree, fid)\n\n # MNE environment\n parent_env = dir_tree_find(tree, FIFF.FIFFB_MNE_ENV)\n if len(parent_env) > 0:\n parent_env = parent_env[0]\n tag = find_tag(fid, parent_env, FIFF.FIFF_MNE_ENV_WORKING_DIR)\n if tag is not None:\n fwd['info']['working_dir'] = tag.data\n tag = find_tag(fid, parent_env, FIFF.FIFF_MNE_ENV_COMMAND_LINE)\n if tag is not None:\n fwd['info']['command_line'] = tag.data\n\n # Transform the source spaces to the correct coordinate frame\n # if necessary\n\n # Make sure forward solution is in either the MRI or HEAD coordinate frame\n if fwd['coord_frame'] not in (FIFF.FIFFV_COORD_MRI, FIFF.FIFFV_COORD_HEAD):\n raise ValueError('Only forward solutions computed in MRI or head '\n 'coordinates are acceptable')\n\n # Transform each source space to the HEAD or MRI coordinate frame,\n # depending on the coordinate frame of the forward solution\n # NOTE: the function transform_surface_to will also work on discrete and\n # volume sources\n nuse = 0\n for s in src:\n try:\n s = transform_surface_to(s, fwd['coord_frame'], mri_head_t)\n except Exception as inst:\n raise ValueError('Could not transform source space (%s)' % inst)\n\n nuse += s['nuse']\n\n # Make sure the number of sources match after transformation\n if nuse != fwd['nsource']:\n raise ValueError('Source spaces do not match the forward solution.')\n\n 
logger.info(' Source spaces transformed to the forward solution '\n 'coordinate frame')\n fwd['src'] = src\n\n # Handle the source locations and orientations\n fwd['source_rr'] = np.concatenate([ss['rr'][ss['vertno'], :]\n for ss in src], axis=0)\n\n # Store original source orientations\n fwd['_orig_source_ori'] = fwd['source_ori']\n\n # Deal with include and exclude\n pick_channels_forward(fwd, include=include, exclude=exclude, copy=False)\n\n if is_fixed_orient(fwd, orig=True):\n fwd['source_nn'] = np.concatenate([_src['nn'][_src['vertno'], :]\n for _src in fwd['src']], axis=0)\n fwd['source_ori'] = FIFF.FIFFV_MNE_FIXED_ORI\n fwd['surf_ori'] = True\n else:\n fwd['source_nn'] = np.kron(np.ones((fwd['nsource'], 1)), np.eye(3))\n fwd['source_ori'] = FIFF.FIFFV_MNE_FREE_ORI\n fwd['surf_ori'] = False\n return Forward(fwd)\n\n\n@verbose\ndef convert_forward_solution(fwd, surf_ori=False, force_fixed=False,\n copy=True, use_cps=True, verbose=None):\n \"\"\"Convert forward solution between different source orientations.\n\n Parameters\n ----------\n fwd : Forward\n The forward solution to modify.\n surf_ori : bool, optional (default False)\n Use surface-based source coordinate system? Note that force_fixed=True\n implies surf_ori=True.\n force_fixed : bool, optional (default False)\n Force fixed source orientation mode?\n copy : bool\n Whether to return a new instance or modify in place.\n use_cps : bool (default True)\n Whether to use cortical patch statistics to define normal\n orientations. Only used when surf_ori and/or force_fixed are True.\n %(verbose)s\n\n Returns\n -------\n fwd : Forward\n The modified forward solution.\n \"\"\"\n fwd = fwd.copy() if copy else fwd\n\n if force_fixed is True:\n surf_ori = True\n\n if any([src['type'] == 'vol' for src in fwd['src']]) and force_fixed:\n raise ValueError(\n 'Forward operator was generated with sources from a '\n 'volume source space. Conversion to fixed orientation is not '\n 'possible. Consider using a discrete source space if you have '\n 'meaningful normal orientations.')\n\n if surf_ori:\n if use_cps:\n if any(s.get('patch_inds') is not None for s in fwd['src']):\n use_ave_nn = True\n logger.info(' Average patch normals will be employed in '\n 'the rotation to the local surface coordinates..'\n '..')\n else:\n use_ave_nn = False\n logger.info(' No patch info available. The standard source '\n 'space normals will be employed in the rotation '\n 'to the local surface coordinates....')\n else:\n use_ave_nn = False\n\n # We need to change these entries (only):\n # 1. source_nn\n # 2. sol['data']\n # 3. sol['ncol']\n # 4. sol_grad['data']\n # 5. sol_grad['ncol']\n # 6. 
source_ori\n\n if is_fixed_orient(fwd, orig=True) or (force_fixed and not use_ave_nn):\n # Fixed\n fwd['source_nn'] = np.concatenate([s['nn'][s['vertno'], :]\n for s in fwd['src']], axis=0)\n if not is_fixed_orient(fwd, orig=True):\n logger.info(' Changing to fixed-orientation forward '\n 'solution with surface-based source orientations...')\n fix_rot = _block_diag(fwd['source_nn'].T, 1)\n # newer versions of numpy require explicit casting here, so *= no\n # longer works\n fwd['sol']['data'] = (fwd['_orig_sol'] *\n fix_rot).astype('float32')\n fwd['sol']['ncol'] = fwd['nsource']\n if fwd['sol_grad'] is not None:\n x = sparse.block_diag([fix_rot] * 3)\n fwd['sol_grad']['data'] = fwd['_orig_sol_grad'] * x # dot prod\n fwd['sol_grad']['ncol'] = 3 * fwd['nsource']\n fwd['source_ori'] = FIFF.FIFFV_MNE_FIXED_ORI\n fwd['surf_ori'] = True\n\n elif surf_ori: # Free, surf-oriented\n # Rotate the local source coordinate systems\n fwd['source_nn'] = np.kron(np.ones((fwd['nsource'], 1)), np.eye(3))\n logger.info(' Converting to surface-based source orientations...')\n # Actually determine the source orientations\n pp = 0\n for s in fwd['src']:\n if s['type'] in ['surf', 'discrete']:\n for p in range(s['nuse']):\n # Project out the surface normal and compute SVD\n if use_ave_nn and s.get('patch_inds') is not None:\n nn = s['nn'][s['pinfo'][s['patch_inds'][p]], :]\n nn = np.sum(nn, axis=0)[:, np.newaxis]\n nn /= linalg.norm(nn)\n else:\n nn = s['nn'][s['vertno'][p], :][:, np.newaxis]\n U, S, _ = linalg.svd(np.eye(3, 3) - nn * nn.T)\n # Make sure that ez is in the direction of nn\n if np.sum(nn.ravel() * U[:, 2].ravel()) < 0:\n U *= -1.0\n fwd['source_nn'][pp:pp + 3, :] = U.T\n pp += 3\n else:\n pp += 3 * s['nuse']\n\n # Rotate the solution components as well\n if force_fixed:\n fwd['source_nn'] = fwd['source_nn'][2::3, :]\n fix_rot = _block_diag(fwd['source_nn'].T, 1)\n # newer versions of numpy require explicit casting here, so *= no\n # longer works\n fwd['sol']['data'] = (fwd['_orig_sol'] *\n fix_rot).astype('float32')\n fwd['sol']['ncol'] = fwd['nsource']\n if fwd['sol_grad'] is not None:\n x = sparse.block_diag([fix_rot] * 3)\n fwd['sol_grad']['data'] = fwd['_orig_sol_grad'] * x # dot prod\n fwd['sol_grad']['ncol'] = 3 * fwd['nsource']\n fwd['source_ori'] = FIFF.FIFFV_MNE_FIXED_ORI\n fwd['surf_ori'] = True\n else:\n surf_rot = _block_diag(fwd['source_nn'].T, 3)\n fwd['sol']['data'] = fwd['_orig_sol'] * surf_rot\n fwd['sol']['ncol'] = 3 * fwd['nsource']\n if fwd['sol_grad'] is not None:\n x = sparse.block_diag([surf_rot] * 3)\n fwd['sol_grad']['data'] = fwd['_orig_sol_grad'] * x # dot prod\n fwd['sol_grad']['ncol'] = 9 * fwd['nsource']\n fwd['source_ori'] = FIFF.FIFFV_MNE_FREE_ORI\n fwd['surf_ori'] = True\n\n else: # Free, cartesian\n logger.info(' Cartesian source orientations...')\n fwd['source_nn'] = np.kron(np.ones((fwd['nsource'], 1)), np.eye(3))\n fwd['sol']['data'] = fwd['_orig_sol'].copy()\n fwd['sol']['ncol'] = 3 * fwd['nsource']\n if fwd['sol_grad'] is not None:\n fwd['sol_grad']['data'] = fwd['_orig_sol_grad'].copy()\n fwd['sol_grad']['ncol'] = 9 * fwd['nsource']\n fwd['source_ori'] = FIFF.FIFFV_MNE_FREE_ORI\n fwd['surf_ori'] = False\n\n logger.info(' [done]')\n\n return fwd\n\n\n@verbose\ndef write_forward_solution(fname, fwd, overwrite=False, verbose=None):\n \"\"\"Write forward solution to a file.\n\n Parameters\n ----------\n fname : str\n File name to save the forward solution to. 
It should end with -fwd.fif\n or -fwd.fif.gz.\n fwd : Forward\n Forward solution.\n overwrite : bool\n If True, overwrite destination file (if it exists).\n %(verbose)s\n\n See Also\n --------\n read_forward_solution\n\n Notes\n -----\n Forward solutions, which are derived from an original forward solution with\n free orientation, are always stored on disk as forward solution with free\n orientation in X/Y/Z RAS coordinates. Transformations (surface orientation,\n fixed orientation) will be reverted. To reapply any transformation to the\n forward operator please apply :func:`convert_forward_solution` after\n reading the forward solution with :func:`read_forward_solution`.\n\n Forward solutions, which are derived from an original forward solution with\n fixed orientation, are stored on disk as forward solution with fixed\n surface-based orientations. Please note that the transformation to\n surface-based, fixed orientation cannot be reverted after loading the\n forward solution with :func:`read_forward_solution`.\n \"\"\"\n check_fname(fname, 'forward', ('-fwd.fif', '-fwd.fif.gz',\n '_fwd.fif', '_fwd.fif.gz'))\n\n # check for file existence\n _check_fname(fname, overwrite)\n fid = start_file(fname)\n start_block(fid, FIFF.FIFFB_MNE)\n\n #\n # MNE env\n #\n start_block(fid, FIFF.FIFFB_MNE_ENV)\n write_id(fid, FIFF.FIFF_BLOCK_ID)\n data = fwd['info'].get('working_dir', None)\n if data is not None:\n write_string(fid, FIFF.FIFF_MNE_ENV_WORKING_DIR, data)\n data = fwd['info'].get('command_line', None)\n if data is not None:\n write_string(fid, FIFF.FIFF_MNE_ENV_COMMAND_LINE, data)\n end_block(fid, FIFF.FIFFB_MNE_ENV)\n\n #\n # Information from the MRI file\n #\n start_block(fid, FIFF.FIFFB_MNE_PARENT_MRI_FILE)\n write_string(fid, FIFF.FIFF_MNE_FILE_NAME, fwd['info']['mri_file'])\n if fwd['info']['mri_id'] is not None:\n write_id(fid, FIFF.FIFF_PARENT_FILE_ID, fwd['info']['mri_id'])\n # store the MRI to HEAD transform in MRI file\n write_coord_trans(fid, fwd['info']['mri_head_t'])\n end_block(fid, FIFF.FIFFB_MNE_PARENT_MRI_FILE)\n\n # write measurement info\n write_forward_meas_info(fid, fwd['info'])\n\n # invert our original source space transform\n src = list()\n for s in fwd['src']:\n s = deepcopy(s)\n try:\n # returns source space to original coordinate frame\n # usually MRI\n s = transform_surface_to(s, fwd['mri_head_t']['from'],\n fwd['mri_head_t'])\n except Exception as inst:\n raise ValueError('Could not transform source space (%s)' % inst)\n src.append(s)\n\n #\n # Write the source spaces (again)\n #\n _write_source_spaces_to_fid(fid, src)\n n_vert = sum([ss['nuse'] for ss in src])\n if fwd['_orig_source_ori'] == FIFF.FIFFV_MNE_FIXED_ORI:\n n_col = n_vert\n else:\n n_col = 3 * n_vert\n\n # Undo transformations\n sol = fwd['_orig_sol'].copy()\n if fwd['sol_grad'] is not None:\n sol_grad = fwd['_orig_sol_grad'].copy()\n else:\n sol_grad = None\n\n if fwd['surf_ori'] is True:\n if fwd['_orig_source_ori'] == FIFF.FIFFV_MNE_FIXED_ORI:\n warn('The forward solution, which is stored on disk now, is based '\n 'on a forward solution with fixed orientation. Please note '\n 'that the transformation to surface-based, fixed orientation '\n 'cannot be reverted after loading the forward solution with '\n 'read_forward_solution.', RuntimeWarning)\n else:\n warn('This forward solution is based on a forward solution with '\n 'free orientation. The original forward solution is stored '\n 'on disk in X/Y/Z RAS coordinates. 
Any transformation '\n '(surface orientation or fixed orientation) will be '\n 'reverted. To reapply any transformation to the forward '\n 'operator please apply convert_forward_solution after '\n 'reading the forward solution with read_forward_solution.',\n RuntimeWarning)\n\n #\n # MEG forward solution\n #\n picks_meg = pick_types(fwd['info'], meg=True, eeg=False, ref_meg=False,\n exclude=[])\n picks_eeg = pick_types(fwd['info'], meg=False, eeg=True, ref_meg=False,\n exclude=[])\n n_meg = len(picks_meg)\n n_eeg = len(picks_eeg)\n row_names_meg = [fwd['sol']['row_names'][p] for p in picks_meg]\n row_names_eeg = [fwd['sol']['row_names'][p] for p in picks_eeg]\n\n if n_meg > 0:\n meg_solution = dict(data=sol[picks_meg], nrow=n_meg, ncol=n_col,\n row_names=row_names_meg, col_names=[])\n _transpose_named_matrix(meg_solution)\n start_block(fid, FIFF.FIFFB_MNE_FORWARD_SOLUTION)\n write_int(fid, FIFF.FIFF_MNE_INCLUDED_METHODS, FIFF.FIFFV_MNE_MEG)\n write_int(fid, FIFF.FIFF_MNE_COORD_FRAME, fwd['coord_frame'])\n write_int(fid, FIFF.FIFF_MNE_SOURCE_ORIENTATION,\n fwd['_orig_source_ori'])\n write_int(fid, FIFF.FIFF_MNE_SOURCE_SPACE_NPOINTS, n_vert)\n write_int(fid, FIFF.FIFF_NCHAN, n_meg)\n write_named_matrix(fid, FIFF.FIFF_MNE_FORWARD_SOLUTION, meg_solution)\n if sol_grad is not None:\n meg_solution_grad = dict(data=sol_grad[picks_meg],\n nrow=n_meg, ncol=n_col * 3,\n row_names=row_names_meg, col_names=[])\n _transpose_named_matrix(meg_solution_grad)\n write_named_matrix(fid, FIFF.FIFF_MNE_FORWARD_SOLUTION_GRAD,\n meg_solution_grad)\n end_block(fid, FIFF.FIFFB_MNE_FORWARD_SOLUTION)\n\n #\n # EEG forward solution\n #\n if n_eeg > 0:\n eeg_solution = dict(data=sol[picks_eeg], nrow=n_eeg, ncol=n_col,\n row_names=row_names_eeg, col_names=[])\n _transpose_named_matrix(eeg_solution)\n start_block(fid, FIFF.FIFFB_MNE_FORWARD_SOLUTION)\n write_int(fid, FIFF.FIFF_MNE_INCLUDED_METHODS, FIFF.FIFFV_MNE_EEG)\n write_int(fid, FIFF.FIFF_MNE_COORD_FRAME, fwd['coord_frame'])\n write_int(fid, FIFF.FIFF_MNE_SOURCE_ORIENTATION,\n fwd['_orig_source_ori'])\n write_int(fid, FIFF.FIFF_NCHAN, n_eeg)\n write_int(fid, FIFF.FIFF_MNE_SOURCE_SPACE_NPOINTS, n_vert)\n write_named_matrix(fid, FIFF.FIFF_MNE_FORWARD_SOLUTION, eeg_solution)\n if sol_grad is not None:\n eeg_solution_grad = dict(data=sol_grad[picks_eeg],\n nrow=n_eeg, ncol=n_col * 3,\n row_names=row_names_eeg, col_names=[])\n _transpose_named_matrix(eeg_solution_grad)\n write_named_matrix(fid, FIFF.FIFF_MNE_FORWARD_SOLUTION_GRAD,\n eeg_solution_grad)\n end_block(fid, FIFF.FIFFB_MNE_FORWARD_SOLUTION)\n\n end_block(fid, FIFF.FIFFB_MNE)\n end_file(fid)\n\n\ndef is_fixed_orient(forward, orig=False):\n \"\"\"Check if the forward operator is fixed orientation.\n\n Parameters\n ----------\n forward : instance of Forward\n The forward.\n orig : bool\n If True, consider the original source orientation.\n If False (default), consider the current source orientation.\n\n Returns\n -------\n fixed_ori : bool\n Whether or not it is fixed orientation.\n \"\"\"\n if orig: # if we want to know about the original version\n fixed_ori = (forward['_orig_source_ori'] == FIFF.FIFFV_MNE_FIXED_ORI)\n else: # most of the time we want to know about the current version\n fixed_ori = (forward['source_ori'] == FIFF.FIFFV_MNE_FIXED_ORI)\n return fixed_ori\n\n\ndef write_forward_meas_info(fid, info):\n \"\"\"Write measurement info stored in forward solution.\n\n Parameters\n ----------\n fid : file id\n The file id\n info : instance of Info\n The measurement info.\n \"\"\"\n 
info._check_consistency()\n #\n # Information from the MEG file\n #\n start_block(fid, FIFF.FIFFB_MNE_PARENT_MEAS_FILE)\n write_string(fid, FIFF.FIFF_MNE_FILE_NAME, info['meas_file'])\n if info['meas_id'] is not None:\n write_id(fid, FIFF.FIFF_PARENT_BLOCK_ID, info['meas_id'])\n # get transformation from CTF and DEVICE to HEAD coordinate frame\n meg_head_t = info.get('dev_head_t', info.get('ctf_head_t'))\n if meg_head_t is None:\n fid.close()\n raise ValueError('Head<-->sensor transform not found')\n write_coord_trans(fid, meg_head_t)\n\n if 'chs' in info:\n # Channel information\n write_int(fid, FIFF.FIFF_NCHAN, len(info['chs']))\n for k, c in enumerate(info['chs']):\n # Scan numbers may have been messed up\n c = deepcopy(c)\n c['scanno'] = k + 1\n write_ch_info(fid, c)\n if 'bads' in info and len(info['bads']) > 0:\n # Bad channels\n start_block(fid, FIFF.FIFFB_MNE_BAD_CHANNELS)\n write_name_list(fid, FIFF.FIFF_MNE_CH_NAME_LIST, info['bads'])\n end_block(fid, FIFF.FIFFB_MNE_BAD_CHANNELS)\n\n end_block(fid, FIFF.FIFFB_MNE_PARENT_MEAS_FILE)\n\n\ndef _select_orient_forward(forward, info, noise_cov=None, copy=True):\n \"\"\"Prepare forward solution for inverse solvers.\"\"\"\n # fwd['sol']['row_names'] may be different order from fwd['info']['chs']\n fwd_sol_ch_names = forward['sol']['row_names']\n all_ch_names = set(fwd_sol_ch_names)\n all_bads = set(info['bads'])\n if noise_cov is not None:\n all_ch_names &= set(noise_cov['names'])\n all_bads |= set(noise_cov['bads'])\n else:\n noise_cov = dict(bads=info['bads'])\n ch_names = [c['ch_name'] for c in info['chs']\n if c['ch_name'] not in all_bads and\n c['ch_name'] in all_ch_names]\n\n if not len(info['bads']) == len(noise_cov['bads']) or \\\n not all(b in noise_cov['bads'] for b in info['bads']):\n logger.info('info[\"bads\"] and noise_cov[\"bads\"] do not match, '\n 'excluding bad channels from both')\n\n # check the compensation grade\n _check_compensation_grade(forward['info'], info, 'forward')\n\n n_chan = len(ch_names)\n logger.info(\"Computing inverse operator with %d channels.\" % n_chan)\n forward = pick_channels_forward(forward, ch_names, ordered=True,\n copy=copy)\n info_idx = [info['ch_names'].index(name) for name in ch_names]\n info_picked = pick_info(info, info_idx)\n forward['info']._check_consistency()\n info_picked._check_consistency()\n return forward, info_picked\n\n\n@verbose\ndef compute_orient_prior(forward, loose=0.2, verbose=None):\n \"\"\"Compute orientation prior.\n\n Parameters\n ----------\n forward : instance of Forward\n Forward operator.\n loose : float\n The loose orientation parameter (between 0 and 1).\n %(verbose)s\n\n Returns\n -------\n orient_prior : ndarray, shape (n_vertices,)\n Orientation priors.\n\n See Also\n --------\n compute_depth_prior\n \"\"\"\n is_fixed_ori = is_fixed_orient(forward)\n n_sources = forward['sol']['data'].shape[1]\n loose = float(loose)\n if not (0 <= loose <= 1):\n raise ValueError('loose value should be between 0 and 1, '\n 'got %s.' % (loose,))\n orient_prior = np.ones(n_sources, dtype=np.float)\n if loose > 0.:\n if is_fixed_ori:\n raise ValueError('loose must be 0. with forward operator '\n 'with fixed orientation, got %s' % (loose,))\n if loose < 1:\n if not forward['surf_ori']:\n raise ValueError('Forward operator is not oriented in surface '\n 'coordinates. loose parameter should be 1 '\n 'not %s.' % (loose,))\n logger.info('Applying loose dipole orientations. Loose value '\n 'of %s.' 
% loose)\n orient_prior[0::3] *= loose\n orient_prior[1::3] *= loose\n\n return orient_prior\n\n\ndef _restrict_gain_matrix(G, info):\n \"\"\"Restrict gain matrix entries for optimal depth weighting.\"\"\"\n # Figure out which ones have been used\n if len(info['chs']) != G.shape[0]:\n raise ValueError('G.shape[0] (%d) and length of info[\"chs\"] (%d) '\n 'do not match' % (G.shape[0], len(info['chs'])))\n for meg, eeg, kind in (\n ('grad', False, 'planar'),\n ('mag', False, 'magnetometer or axial gradiometer'),\n (False, True, 'EEG')):\n sel = pick_types(info, meg=meg, eeg=eeg, ref_meg=False, exclude=[])\n if len(sel) > 0:\n logger.info(' %d %s channels' % (len(sel), kind))\n break\n else:\n warn('Could not find MEG or EEG channels to limit depth channels')\n sel = slice(None)\n return G[sel]\n\n\n@verbose\ndef compute_depth_prior(forward, info, exp=0.8, limit=10.0,\n limit_depth_chs=False, combine_xyz='spectral',\n noise_cov=None, rank=None, verbose=None):\n \"\"\"Compute depth prior for depth weighting.\n\n Parameters\n ----------\n forward : instance of Forward\n The forward solution.\n info : instance of Info\n The measurement info.\n exp : float\n Exponent for the depth weighting, must be between 0 and 1.\n limit : float | None\n The upper bound on depth weighting.\n Can be None to be bounded by the largest finite prior.\n limit_depth_chs : bool | 'whiten'\n How to deal with multiple channel types in depth weighting.\n The default is True, which whitens based on the source sensitivity\n of the highest-SNR channel type. See Notes for details.\n\n .. versionchanged:: 0.18\n Added the \"whiten\" option.\n combine_xyz : 'spectral' | 'fro'\n When a loose (or free) orientation is used, how the depth weighting\n for each triplet should be calculated.\n If 'spectral', use the squared spectral norm of Gk.\n If 'fro', use the squared Frobenius norm of Gk.\n\n .. versionadded:: 0.18\n noise_cov : instance of Covariance | None\n The noise covariance to use to whiten the gain matrix when\n ``limit_depth_chs='whiten'``.\n\n .. versionadded:: 0.18\n %(rank_None)s\n\n .. versionadded:: 0.18\n %(verbose)s\n\n Returns\n -------\n depth_prior : ndarray, shape (n_vertices,)\n The depth prior.\n\n See Also\n --------\n compute_orient_prior\n\n Notes\n -----\n The defaults used by the minimum norm code and sparse solvers differ.\n In particular, the values for MNE are::\n\n compute_depth_prior(..., limit=10., limit_depth_chs=True,\n combine_xyz='spectral')\n\n In sparse solvers and LCMV, the values are::\n\n compute_depth_prior(..., limit=None, limit_depth_chs='whiten',\n combine_xyz='fro')\n\n The ``limit_depth_chs`` argument can take the following values:\n\n * :data:`python:True` (default)\n Use only grad channels in depth weighting (equivalent to MNE C\n minimum-norm code). If grad channels aren't present, only mag\n channels will be used (if no mag, then eeg). This makes the depth\n prior dependent only on the sensor geometry (and relationship\n to the sources).\n * ``'whiten'``\n Compute a whitener and apply it to the gain matirx before computing\n the depth prior. In this case ``noise_cov`` must not be None.\n Whitening the gain matrix makes the depth prior\n depend on both sensor geometry and the data of interest captured\n by the noise covariance (e.g., projections, SNR).\n\n .. versionadded:: 0.18\n * :data:`python:False`\n Use all channels. 
Not recommended since the depth weighting will be\n biased toward whichever channel type has the largest values in\n SI units (such as EEG being orders of magnitude larger than MEG).\n\n \"\"\"\n from ..cov import Covariance, compute_whitener\n _validate_type(forward, Forward, 'forward')\n patch_areas = forward.get('patch_areas', None)\n is_fixed_ori = is_fixed_orient(forward)\n G = forward['sol']['data']\n logger.info('Creating the depth weighting matrix...')\n _validate_type(noise_cov, (Covariance, None), 'noise_cov',\n 'Covariance or None')\n _validate_type(limit_depth_chs, (str, bool), 'limit_depth_chs')\n if isinstance(limit_depth_chs, str):\n if limit_depth_chs != 'whiten':\n raise ValueError('limit_depth_chs, if str, must be \"whiten\", got '\n '%s' % (limit_depth_chs,))\n if not isinstance(noise_cov, Covariance):\n raise ValueError('With limit_depth_chs=\"whiten\", noise_cov must be'\n ' a Covariance, got %s' % (type(noise_cov),))\n if combine_xyz is not False: # private / expert option\n _check_option('combine_xyz', combine_xyz, ('fro', 'spectral'))\n\n # If possible, pick best depth-weighting channels\n if limit_depth_chs is True:\n G = _restrict_gain_matrix(G, info)\n elif limit_depth_chs == 'whiten':\n whitener, _ = compute_whitener(noise_cov, info, pca=True, rank=rank,\n verbose=False)\n G = np.dot(whitener, G)\n\n # Compute the gain matrix\n if is_fixed_ori or combine_xyz in ('fro', False):\n d = np.sum(G ** 2, axis=0)\n if not (is_fixed_ori or combine_xyz is False):\n d = d.reshape(-1, 3).sum(axis=1)\n # Spherical leadfield can be zero at the center\n d[d == 0.] = np.min(d[d != 0.])\n else: # 'spectral'\n # n_pos = G.shape[1] // 3\n # The following is equivalent to this, but 4-10x faster\n # d = np.zeros(n_pos)\n # for k in range(n_pos):\n # Gk = G[:, 3 * k:3 * (k + 1)]\n # x = np.dot(Gk.T, Gk)\n # d[k] = linalg.svdvals(x)[0]\n G.shape = (G.shape[0], -1, 3)\n d = np.linalg.norm(einsum('svj,svk->vjk', G, G), # vector dot products\n ord=2, axis=(1, 2)) # ord=2 spectral (largest s.v.)\n G.shape = (G.shape[0], -1)\n\n # XXX Currently the fwd solns never have \"patch_areas\" defined\n if patch_areas is not None:\n if not is_fixed_ori and combine_xyz is False:\n patch_areas = np.repeat(patch_areas, 3)\n d /= patch_areas ** 2\n logger.info(' Patch areas taken into account in the depth '\n 'weighting')\n\n w = 1.0 / d\n if limit is not None:\n ws = np.sort(w)\n weight_limit = limit ** 2\n if limit_depth_chs is False:\n # match old mne-python behavor\n # we used to do ind = np.argmin(ws), but this is 0 by sort above\n n_limit = 0\n limit = ws[0] * weight_limit\n else:\n # match C code behavior\n limit = ws[-1]\n n_limit = len(d)\n if ws[-1] > weight_limit * ws[0]:\n ind = np.where(ws > weight_limit * ws[0])[0][0]\n limit = ws[ind]\n n_limit = ind\n\n logger.info(' limit = %d/%d = %f'\n % (n_limit + 1, len(d),\n np.sqrt(limit / ws[0])))\n scale = 1.0 / limit\n logger.info(' scale = %g exp = %g' % (scale, exp))\n w = np.minimum(w / limit, 1)\n depth_prior = w ** exp\n\n if not (is_fixed_ori or combine_xyz is False):\n depth_prior = np.repeat(depth_prior, 3)\n\n return depth_prior\n\n\ndef _stc_src_sel(src, stc, on_missing='raise',\n extra=', likely due to forward calculations'):\n \"\"\"Select the vertex indices of a source space using a source estimate.\"\"\"\n if isinstance(stc, list):\n vertices = stc\n else:\n assert isinstance(stc, _BaseSourceEstimate)\n vertices = stc._vertices_list\n del stc\n if not len(src) == len(vertices):\n raise RuntimeError('Mismatch between number of 
source spaces (%s) and '\n 'STC vertices (%s)' % (len(src), len(vertices)))\n src_sels, stc_sels, out_vertices = [], [], []\n src_offset = stc_offset = 0\n for s, v in zip(src, vertices):\n joint_sel = np.intersect1d(s['vertno'], v)\n src_sels.append(np.searchsorted(s['vertno'], joint_sel) + src_offset)\n src_offset += len(s['vertno'])\n idx = np.searchsorted(v, joint_sel)\n stc_sels.append(idx + stc_offset)\n stc_offset += len(v)\n out_vertices.append(np.array(v)[idx])\n src_sel = np.concatenate(src_sels)\n stc_sel = np.concatenate(stc_sels)\n assert len(src_sel) == len(stc_sel) == sum(len(v) for v in out_vertices)\n\n n_stc = sum(len(v) for v in vertices)\n n_joint = len(src_sel)\n if n_joint != n_stc:\n msg = ('Only %i of %i SourceEstimate %s found in '\n 'source space%s'\n % (n_joint, n_stc, 'vertex' if n_stc == 1 else 'vertices',\n extra))\n if on_missing == 'raise':\n raise RuntimeError(msg)\n elif on_missing == 'warn':\n warn(msg)\n else:\n assert on_missing == 'ignore'\n return src_sel, stc_sel, out_vertices\n\n\ndef _fill_measurement_info(info, fwd, sfreq):\n \"\"\"Fill the measurement info of a Raw or Evoked object.\"\"\"\n sel = pick_channels(info['ch_names'], fwd['sol']['row_names'])\n info = pick_info(info, sel)\n info['bads'] = []\n\n # this is probably correct based on what's done in meas_info.py...\n info['meas_id'] = fwd['info']['meas_id']\n info['file_id'] = info['meas_id']\n\n now = time()\n sec = np.floor(now)\n usec = 1e6 * (now - sec)\n\n info['meas_date'] = (int(sec), int(usec))\n info['highpass'] = 0.0\n info['lowpass'] = sfreq / 2.0\n info['sfreq'] = sfreq\n info['projs'] = []\n\n return info\n\n\n@verbose\ndef _apply_forward(fwd, stc, start=None, stop=None, on_missing='raise',\n verbose=None):\n \"\"\"Apply forward model and return data, times, ch_names.\"\"\"\n if not is_fixed_orient(fwd):\n raise ValueError('Only fixed-orientation forward operators are '\n 'supported.')\n\n if np.all(stc.data > 0):\n warn('Source estimate only contains currents with positive values. '\n 'Use pick_ori=\"normal\" when computing the inverse to compute '\n 'currents not current magnitudes.')\n\n max_cur = np.max(np.abs(stc.data))\n if max_cur > 1e-7: # 100 nAm threshold for warning\n warn('The maximum current magnitude is %0.1f nAm, which is very large.'\n ' Are you trying to apply the forward model to noise-normalized '\n '(dSPM, sLORETA, or eLORETA) values? The result will only be '\n 'correct if currents (in units of Am) are used.'\n % (1e9 * max_cur))\n\n src_sel, stc_sel, _ = _stc_src_sel(fwd['src'], stc, on_missing=on_missing)\n gain = fwd['sol']['data'][:, src_sel]\n # save some memory if possible\n stc_sel = slice(None) if len(stc_sel) == len(stc.data) else stc_sel\n\n logger.info('Projecting source estimate to sensor space...')\n data = np.dot(gain, stc.data[stc_sel, start:stop])\n logger.info('[done]')\n\n times = deepcopy(stc.times[start:stop])\n\n return data, times\n\n\n@verbose\ndef apply_forward(fwd, stc, info, start=None, stop=None, use_cps=True,\n on_missing='raise', verbose=None):\n \"\"\"Project source space currents to sensor space using a forward operator.\n\n The sensor space data is computed for all channels present in fwd. Use\n pick_channels_forward or pick_types_forward to restrict the solution to a\n subset of channels.\n\n The function returns an Evoked object, which is constructed from\n evoked_template. The evoked_template should be from the same MEG system on\n which the original data was acquired. 
An exception will be raised if the\n forward operator contains channels that are not present in the template.\n\n\n Parameters\n ----------\n fwd : Forward\n Forward operator to use.\n stc : SourceEstimate\n The source estimate from which the sensor space data is computed.\n info : instance of Info\n Measurement info to generate the evoked.\n start : int, optional\n Index of first time sample (index not time is seconds).\n stop : int, optional\n Index of first time sample not to include (index not time is seconds).\n use_cps : bool (default True)\n Whether to use cortical patch statistics to define normal\n orientations when converting to fixed orientation (if necessary).\n\n .. versionadded:: 0.15\n %(on_missing)s Default is \"raise\".\n\n .. versionadded:: 0.18\n %(verbose)s\n\n Returns\n -------\n evoked : Evoked\n Evoked object with computed sensor space data.\n\n See Also\n --------\n apply_forward_raw: Compute sensor space data and return a Raw object.\n \"\"\"\n # make sure evoked_template contains all channels in fwd\n for ch_name in fwd['sol']['row_names']:\n if ch_name not in info['ch_names']:\n raise ValueError('Channel %s of forward operator not present in '\n 'evoked_template.' % ch_name)\n\n # project the source estimate to the sensor space\n if not is_fixed_orient(fwd):\n fwd = convert_forward_solution(fwd, force_fixed=True, use_cps=use_cps)\n data, times = _apply_forward(fwd, stc, start, stop, on_missing=on_missing)\n\n # fill the measurement info\n sfreq = float(1.0 / stc.tstep)\n info_out = _fill_measurement_info(info, fwd, sfreq)\n\n evoked = EvokedArray(data, info_out, times[0], nave=1)\n\n evoked.times = times\n evoked.first = int(np.round(evoked.times[0] * sfreq))\n evoked.last = evoked.first + evoked.data.shape[1] - 1\n\n return evoked\n\n\n@verbose\ndef apply_forward_raw(fwd, stc, info, start=None, stop=None,\n on_missing='raise', verbose=None):\n \"\"\"Project source space currents to sensor space using a forward operator.\n\n The sensor space data is computed for all channels present in fwd. Use\n pick_channels_forward or pick_types_forward to restrict the solution to a\n subset of channels.\n\n The function returns a Raw object, which is constructed using provided\n info. The info object should be from the same MEG system on which the\n original data was acquired. An exception will be raised if the forward\n operator contains channels that are not present in the info.\n\n Parameters\n ----------\n fwd : Forward\n Forward operator to use. Has to be fixed-orientation.\n stc : SourceEstimate\n The source estimate from which the sensor space data is computed.\n info : instance of Info\n The measurement info.\n start : int, optional\n Index of first time sample (index not time is seconds).\n stop : int, optional\n Index of first time sample not to include (index not time is seconds).\n %(on_missing)s Default is \"raise\".\n\n .. versionadded:: 0.18\n %(verbose)s\n\n Returns\n -------\n raw : Raw object\n Raw object with computed sensor space data.\n\n See Also\n --------\n apply_forward: Compute sensor space data and return an Evoked object.\n \"\"\"\n # make sure info contains all channels in fwd\n for ch_name in fwd['sol']['row_names']:\n if ch_name not in info['ch_names']:\n raise ValueError('Channel %s of forward operator not present in '\n 'info.' 
% ch_name)\n\n # project the source estimate to the sensor space\n data, times = _apply_forward(fwd, stc, start, stop, on_missing=on_missing)\n\n sfreq = 1.0 / stc.tstep\n info = _fill_measurement_info(info, fwd, sfreq)\n info['projs'] = []\n # store sensor data in Raw object using the info\n raw = RawArray(data, info)\n raw.preload = True\n\n raw._first_samps = np.array([int(np.round(times[0] * sfreq))])\n raw._last_samps = np.array([raw.first_samp + raw._data.shape[1] - 1])\n raw._projector = None\n raw._update_times()\n return raw\n\n\n@fill_doc\ndef restrict_forward_to_stc(fwd, stc, on_missing='ignore'):\n \"\"\"Restrict forward operator to active sources in a source estimate.\n\n Parameters\n ----------\n fwd : instance of Forward\n Forward operator.\n stc : instance of SourceEstimate\n Source estimate.\n %(on_missing)s Default is \"ignore\".\n\n .. versionadded:: 0.18\n\n Returns\n -------\n fwd_out : instance of Forward\n Restricted forward operator.\n\n See Also\n --------\n restrict_forward_to_label\n \"\"\"\n _validate_type(on_missing, str, 'on_missing')\n _check_option('on_missing', on_missing, ('ignore', 'warn', 'raise'))\n src_sel, _, vertices = _stc_src_sel(fwd['src'], stc, on_missing=on_missing)\n del stc\n return _restrict_forward_to_src_sel(fwd, src_sel)\n\n\ndef _restrict_forward_to_src_sel(fwd, src_sel):\n fwd_out = deepcopy(fwd)\n # figure out the vertno we are keeping\n idx_sel = np.concatenate([[[si] * len(s['vertno']), s['vertno']]\n for si, s in enumerate(fwd['src'])], axis=-1)\n assert idx_sel.ndim == 2 and idx_sel.shape[0] == 2\n assert idx_sel.shape[1] == fwd['nsource']\n idx_sel = idx_sel[:, src_sel]\n\n fwd_out['source_rr'] = fwd['source_rr'][src_sel]\n fwd_out['nsource'] = len(src_sel)\n\n if is_fixed_orient(fwd):\n idx = src_sel\n if fwd['sol_grad'] is not None:\n idx_grad = (3 * src_sel[:, None] + np.arange(3)).ravel()\n else:\n idx = (3 * src_sel[:, None] + np.arange(3)).ravel()\n if fwd['sol_grad'] is not None:\n idx_grad = (9 * src_sel[:, None] + np.arange(9)).ravel()\n\n fwd_out['source_nn'] = fwd['source_nn'][idx]\n fwd_out['sol']['data'] = fwd['sol']['data'][:, idx]\n if fwd['sol_grad'] is not None:\n fwd_out['sol_grad']['data'] = fwd['sol_grad']['data'][:, idx_grad]\n fwd_out['sol']['ncol'] = len(idx)\n\n if is_fixed_orient(fwd, orig=True):\n idx = src_sel\n if fwd['sol_grad'] is not None:\n idx_grad = (3 * src_sel[:, None] + np.arange(3)).ravel()\n else:\n idx = (3 * src_sel[:, None] + np.arange(3)).ravel()\n if fwd['sol_grad'] is not None:\n idx_grad = (9 * src_sel[:, None] + np.arange(9)).ravel()\n\n fwd_out['_orig_sol'] = fwd['_orig_sol'][:, idx]\n if fwd['sol_grad'] is not None:\n fwd_out['_orig_sol_grad'] = fwd['_orig_sol_grad'][:, idx_grad]\n\n vertices = [idx_sel[1][idx_sel[0] == si]\n for si in range(len(fwd_out['src']))]\n _set_source_space_vertices(fwd_out['src'], vertices)\n\n return fwd_out\n\n\ndef restrict_forward_to_label(fwd, labels):\n \"\"\"Restrict forward operator to labels.\n\n Parameters\n ----------\n fwd : Forward\n Forward operator.\n labels : instance of Label | list\n Label object or list of label objects.\n\n Returns\n -------\n fwd_out : dict\n Restricted forward operator.\n\n See Also\n --------\n restrict_forward_to_stc\n \"\"\"\n vertices = [np.array([], int), np.array([], int)]\n\n if not isinstance(labels, list):\n labels = [labels]\n\n # Get vertices separately of each hemisphere from all label\n for label in labels:\n _validate_type(label, Label, \"label\", \"Label or list\")\n i = 0 if label.hemi == 'lh' else 1\n 
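# --- Illustrative aside (hypothetical values, not part of the original) -----
# The column bookkeeping used in _restrict_forward_to_src_sel above: with a
# free-orientation operator each selected source owns three consecutive
# columns of the gain matrix (nine for the gradient term), while a
# fixed-orientation operator indexes columns by the source indices directly.
#
#   import numpy as np
#   src_sel = np.array([0, 2, 5])                              # selected sources
#   idx = (3 * src_sel[:, None] + np.arange(3)).ravel()
#   # -> array([ 0,  1,  2,  6,  7,  8, 15, 16, 17])
#   idx_grad = (9 * src_sel[:, None] + np.arange(9)).ravel()   # 27 grad columns
# -----------------------------------------------------------------------------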
vertices[i] = np.append(vertices[i], label.vertices)\n # Remove duplicates and sort\n vertices = [np.unique(vert_hemi) for vert_hemi in vertices]\n\n fwd_out = deepcopy(fwd)\n fwd_out['source_rr'] = np.zeros((0, 3))\n fwd_out['nsource'] = 0\n fwd_out['source_nn'] = np.zeros((0, 3))\n fwd_out['sol']['data'] = np.zeros((fwd['sol']['data'].shape[0], 0))\n fwd_out['_orig_sol'] = np.zeros((fwd['_orig_sol'].shape[0], 0))\n if fwd['sol_grad'] is not None:\n fwd_out['sol_grad']['data'] = np.zeros(\n (fwd['sol_grad']['data'].shape[0], 0))\n fwd_out['_orig_sol_grad'] = np.zeros(\n (fwd['_orig_sol_grad'].shape[0], 0))\n fwd_out['sol']['ncol'] = 0\n nuse_lh = fwd['src'][0]['nuse']\n\n for i in range(2):\n fwd_out['src'][i]['vertno'] = np.array([], int)\n fwd_out['src'][i]['nuse'] = 0\n fwd_out['src'][i]['inuse'] = fwd['src'][i]['inuse'].copy()\n fwd_out['src'][i]['inuse'].fill(0)\n fwd_out['src'][i]['use_tris'] = np.array([[]], int)\n fwd_out['src'][i]['nuse_tri'] = np.array([0])\n\n # src_sel is idx to cols in fwd that are in any label per hemi\n src_sel = np.intersect1d(fwd['src'][i]['vertno'], vertices[i])\n src_sel = np.searchsorted(fwd['src'][i]['vertno'], src_sel)\n\n # Reconstruct each src\n vertno = fwd['src'][i]['vertno'][src_sel]\n fwd_out['src'][i]['inuse'][vertno] = 1\n fwd_out['src'][i]['nuse'] += len(vertno)\n fwd_out['src'][i]['vertno'] = np.where(fwd_out['src'][i]['inuse'])[0]\n\n # Reconstruct part of fwd that is not sol data\n src_sel += i * nuse_lh # Add column shift to right hemi\n fwd_out['source_rr'] = np.vstack([fwd_out['source_rr'],\n fwd['source_rr'][src_sel]])\n fwd_out['nsource'] += len(src_sel)\n\n if is_fixed_orient(fwd):\n idx = src_sel\n if fwd['sol_grad'] is not None:\n idx_grad = (3 * src_sel[:, None] + np.arange(3)).ravel()\n else:\n idx = (3 * src_sel[:, None] + np.arange(3)).ravel()\n if fwd['sol_grad'] is not None:\n idx_grad = (9 * src_sel[:, None] + np.arange(9)).ravel()\n\n fwd_out['source_nn'] = np.vstack(\n [fwd_out['source_nn'], fwd['source_nn'][idx]])\n fwd_out['sol']['data'] = np.hstack(\n [fwd_out['sol']['data'], fwd['sol']['data'][:, idx]])\n if fwd['sol_grad'] is not None:\n fwd_out['sol_grad']['data'] = np.hstack(\n [fwd_out['sol_grad']['data'],\n fwd['sol_rad']['data'][:, idx_grad]])\n fwd_out['sol']['ncol'] += len(idx)\n\n if is_fixed_orient(fwd, orig=True):\n idx = src_sel\n if fwd['sol_grad'] is not None:\n idx_grad = (3 * src_sel[:, None] + np.arange(3)).ravel()\n else:\n idx = (3 * src_sel[:, None] + np.arange(3)).ravel()\n if fwd['sol_grad'] is not None:\n idx_grad = (9 * src_sel[:, None] + np.arange(9)).ravel()\n\n fwd_out['_orig_sol'] = np.hstack(\n [fwd_out['_orig_sol'], fwd['_orig_sol'][:, idx]])\n if fwd['sol_grad'] is not None:\n fwd_out['_orig_sol_grad'] = np.hstack(\n [fwd_out['_orig_sol_grad'],\n fwd['_orig_sol_grad'][:, idx_grad]])\n\n return fwd_out\n\n\ndef _do_forward_solution(subject, meas, fname=None, src=None, spacing=None,\n mindist=None, bem=None, mri=None, trans=None,\n eeg=True, meg=True, fixed=False, grad=False,\n mricoord=False, overwrite=False, subjects_dir=None,\n verbose=None):\n \"\"\"Calculate a forward solution for a subject using MNE-C routines.\n\n This is kept around for testing purposes.\n\n This function wraps to mne_do_forward_solution, so the mne\n command-line tools must be installed and accessible from Python.\n\n Parameters\n ----------\n subject : str\n Name of the subject.\n meas : Raw | Epochs | Evoked | str\n If Raw or Epochs, a temporary evoked file will be created and\n saved to a temporary directory. 
If str, then it should be a\n filename to a file with measurement information the mne\n command-line tools can understand (i.e., raw or evoked).\n fname : str | None\n Destination forward solution filename. If None, the solution\n will be created in a temporary directory, loaded, and deleted.\n src : str | None\n Source space name. If None, the MNE default is used.\n spacing : str\n The spacing to use. Can be ``'#'`` for spacing in mm, ``'ico#'`` for a\n recursively subdivided icosahedron, or ``'oct#'`` for a recursively\n subdivided octahedron (e.g., ``spacing='ico4'``). Default is 7 mm.\n mindist : float | str | None\n Minimum distance of sources from inner skull surface (in mm).\n If None, the MNE default value is used. If string, 'all'\n indicates to include all points.\n bem : str | None\n Name of the BEM to use (e.g., \"sample-5120-5120-5120\"). If None\n (Default), the MNE default will be used.\n mri : str | None\n The name of the trans file in FIF format.\n If None, trans must not be None.\n trans : dict | str | None\n File name of the trans file in text format.\n If None, mri must not be None.\n eeg : bool\n If True (Default), include EEG computations.\n meg : bool\n If True (Default), include MEG computations.\n fixed : bool\n If True, make a fixed-orientation forward solution (Default:\n False). Note that fixed-orientation inverses can still be\n created from free-orientation forward solutions.\n grad : bool\n If True, compute the gradient of the field with respect to the\n dipole coordinates as well (Default: False).\n mricoord : bool\n If True, calculate in MRI coordinates (Default: False).\n overwrite : bool\n If True, the destination file (if it exists) will be overwritten.\n If False (default), an error will be raised if the file exists.\n subjects_dir : None | str\n Override the SUBJECTS_DIR environment variable.\n %(verbose)s\n\n See Also\n --------\n make_forward_solution\n\n Returns\n -------\n fwd : Forward\n The generated forward solution.\n \"\"\"\n if not has_mne_c():\n raise RuntimeError('mne command line tools could not be found')\n\n # check for file existence\n temp_dir = tempfile.mkdtemp()\n if fname is None:\n fname = op.join(temp_dir, 'temp-fwd.fif')\n _check_fname(fname, overwrite)\n _validate_type(subject, \"str\", \"subject\")\n\n # check for meas to exist as string, or try to make evoked\n if isinstance(meas, str):\n if not op.isfile(meas):\n raise IOError('measurement file \"%s\" could not be found' % meas)\n elif isinstance(meas, (BaseRaw, BaseEpochs, Evoked)):\n meas_file = op.join(temp_dir, 'info.fif')\n write_info(meas_file, meas.info)\n meas = meas_file\n else:\n raise ValueError('meas must be string, Raw, Epochs, or Evoked')\n\n # deal with trans/mri\n if mri is not None and trans is not None:\n raise ValueError('trans and mri cannot both be specified')\n if mri is None and trans is None:\n # MNE allows this to default to a trans/mri in the subject's dir,\n # but let's be safe here and force the user to pass us a trans/mri\n raise ValueError('Either trans or mri must be specified')\n\n if trans is not None:\n _validate_type(trans, \"str\", \"trans\")\n if not op.isfile(trans):\n raise IOError('trans file \"%s\" not found' % trans)\n if mri is not None:\n # deal with trans\n if not isinstance(mri, str):\n if isinstance(mri, dict):\n mri_data = deepcopy(mri)\n mri = op.join(temp_dir, 'mri-trans.fif')\n try:\n write_trans(mri, mri_data)\n except Exception:\n raise IOError('mri was a dict, but could not be '\n 'written to disk as a transform file')\n 
else:\n raise ValueError('trans must be a string or dict (trans)')\n if not op.isfile(mri):\n raise IOError('trans file \"%s\" could not be found' % trans)\n\n # deal with meg/eeg\n if not meg and not eeg:\n raise ValueError('meg or eeg (or both) must be True')\n\n path, fname = op.split(fname)\n if not op.splitext(fname)[1] == '.fif':\n raise ValueError('Forward name does not end with .fif')\n path = op.abspath(path)\n\n # deal with mindist\n if mindist is not None:\n if isinstance(mindist, str):\n if not mindist.lower() == 'all':\n raise ValueError('mindist, if string, must be \"all\"')\n mindist = ['--all']\n else:\n mindist = ['--mindist', '%g' % mindist]\n\n # src, spacing, bem\n for element, name, kind in zip((src, spacing, bem),\n (\"src\", \"spacing\", \"bem\"),\n ('path-like', 'str', 'path-like')):\n if element is not None:\n _validate_type(element, kind, name, \"%s or None\" % kind)\n\n # put together the actual call\n cmd = ['mne_do_forward_solution',\n '--subject', subject,\n '--meas', meas,\n '--fwd', fname,\n '--destdir', path]\n if src is not None:\n cmd += ['--src', src]\n if spacing is not None:\n if spacing.isdigit():\n pass # spacing in mm\n else:\n # allow both \"ico4\" and \"ico-4\" style values\n match = re.match(r\"(oct|ico)-?(\\d+)$\", spacing)\n if match is None:\n raise ValueError(\"Invalid spacing parameter: %r\" % spacing)\n spacing = '-'.join(match.groups())\n cmd += ['--spacing', spacing]\n if mindist is not None:\n cmd += mindist\n if bem is not None:\n cmd += ['--bem', bem]\n if mri is not None:\n cmd += ['--mri', '%s' % mri]\n if trans is not None:\n cmd += ['--trans', '%s' % trans]\n if not meg:\n cmd.append('--eegonly')\n if not eeg:\n cmd.append('--megonly')\n if fixed:\n cmd.append('--fixed')\n if grad:\n cmd.append('--grad')\n if mricoord:\n cmd.append('--mricoord')\n if overwrite:\n cmd.append('--overwrite')\n\n env = os.environ.copy()\n subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)\n env['SUBJECTS_DIR'] = subjects_dir\n\n try:\n logger.info('Running forward solution generation command with '\n 'subjects_dir %s' % subjects_dir)\n run_subprocess(cmd, env=env)\n except Exception:\n raise\n else:\n fwd = read_forward_solution(op.join(path, fname), verbose=False)\n finally:\n shutil.rmtree(temp_dir, ignore_errors=True)\n return fwd\n\n\n@verbose\ndef average_forward_solutions(fwds, weights=None):\n \"\"\"Average forward solutions.\n\n Parameters\n ----------\n fwds : list of Forward\n Forward solutions to average. Each entry (dict) should be a\n forward solution.\n weights : array | None\n Weights to apply to each forward solution in averaging. If None,\n forward solutions will be equally weighted. 
Weights must be\n non-negative, and will be adjusted to sum to one.\n\n Returns\n -------\n fwd : Forward\n The averaged forward solution.\n \"\"\"\n # check for fwds being a list\n _validate_type(fwds, list, \"fwds\")\n if not len(fwds) > 0:\n raise ValueError('fwds must not be empty')\n\n # check weights\n if weights is None:\n weights = np.ones(len(fwds))\n weights = np.asanyarray(weights) # in case it's a list, convert it\n if not np.all(weights >= 0):\n raise ValueError('weights must be non-negative')\n if not len(weights) == len(fwds):\n raise ValueError('weights must be None or the same length as fwds')\n w_sum = np.sum(weights)\n if not w_sum > 0:\n raise ValueError('weights cannot all be zero')\n weights /= w_sum\n\n # check our forward solutions\n for fwd in fwds:\n # check to make sure it's a forward solution\n _validate_type(fwd, dict, \"each entry in fwds\", \"dict\")\n # check to make sure the dict is actually a fwd\n check_keys = ['info', 'sol_grad', 'nchan', 'src', 'source_nn', 'sol',\n 'source_rr', 'source_ori', 'surf_ori', 'coord_frame',\n 'mri_head_t', 'nsource']\n if not all(key in fwd for key in check_keys):\n raise KeyError('forward solution dict does not have all standard '\n 'entries, cannot compute average.')\n\n # check forward solution compatibility\n if any(fwd['sol'][k] != fwds[0]['sol'][k]\n for fwd in fwds[1:] for k in ['nrow', 'ncol']):\n raise ValueError('Forward solutions have incompatible dimensions')\n if any(fwd[k] != fwds[0][k] for fwd in fwds[1:]\n for k in ['source_ori', 'surf_ori', 'coord_frame']):\n raise ValueError('Forward solutions have incompatible orientations')\n\n # actually average them (solutions and gradients)\n fwd_ave = deepcopy(fwds[0])\n fwd_ave['sol']['data'] *= weights[0]\n fwd_ave['_orig_sol'] *= weights[0]\n for fwd, w in zip(fwds[1:], weights[1:]):\n fwd_ave['sol']['data'] += w * fwd['sol']['data']\n fwd_ave['_orig_sol'] += w * fwd['_orig_sol']\n if fwd_ave['sol_grad'] is not None:\n fwd_ave['sol_grad']['data'] *= weights[0]\n fwd_ave['_orig_sol_grad'] *= weights[0]\n for fwd, w in zip(fwds[1:], weights[1:]):\n fwd_ave['sol_grad']['data'] += w * fwd['sol_grad']['data']\n fwd_ave['_orig_sol_grad'] += w * fwd['_orig_sol_grad']\n return fwd_ave\n",
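The forward utilities collected above compose into a short public workflow: read a solution from disk, fix its source orientation, and project a source estimate to sensor space. The sketch below is a hedged usage example, not part of the original file; ``fname_fwd``, ``stc``, and ``info`` are hypothetical placeholders for an existing ``*-fwd.fif`` file, a matching SourceEstimate, and its measurement Info.

    import mne

    def project_to_sensors(fname_fwd, stc, info):
        """Hedged sketch: read a forward solution and project ``stc`` to sensors."""
        fwd = mne.read_forward_solution(fname_fwd)
        # apply_forward converts to fixed orientation internally when needed,
        # but doing it explicitly keeps the use_cps choice (cortical patch
        # statistics) visible, mirroring convert_forward_solution above.
        fwd = mne.convert_forward_solution(fwd, surf_ori=True,
                                           force_fixed=True, use_cps=True)
        return mne.apply_forward(fwd, stc, info)

average_forward_solutions follows the same pattern: pass a list of compatible Forward objects and, optionally, non-negative weights, which are normalized to sum to one before the solutions (and gradients, if present) are combined.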
"# -*- coding: utf-8 -*-\n\"\"\"Some utility functions.\"\"\"\n# Authors: Alexandre Gramfort <[email protected]>\n#\n# License: BSD (3-clause)\n\nfrom contextlib import contextmanager\nimport hashlib\nfrom io import BytesIO, StringIO\nfrom math import sqrt\nimport numbers\nimport operator\nimport os\nimport os.path as op\nfrom math import ceil\nimport shutil\nimport sys\n\nimport numpy as np\nfrom scipy import sparse\n\nfrom ._logging import logger, warn, verbose\nfrom .check import check_random_state, _ensure_int, _validate_type\nfrom .linalg import _svd_lwork, _repeated_svd, dgemm, zgemm\nfrom ..fixes import _infer_dimension_, svd_flip, stable_cumsum, _safe_svd\nfrom .docs import fill_doc\n\n\ndef split_list(l, n, idx=False):\n \"\"\"Split list in n (approx) equal pieces, possibly giving indices.\"\"\"\n n = int(n)\n tot = len(l)\n sz = tot // n\n start = stop = 0\n for i in range(n - 1):\n stop += sz\n yield (np.arange(start, stop), l[start:stop]) if idx else l[start:stop]\n start += sz\n yield (np.arange(start, tot), l[start:]) if idx else l[start]\n\n\ndef array_split_idx(ary, indices_or_sections, axis=0, n_per_split=1):\n \"\"\"Do what numpy.array_split does, but add indices.\"\"\"\n # this only works for indices_or_sections as int\n indices_or_sections = _ensure_int(indices_or_sections)\n ary_split = np.array_split(ary, indices_or_sections, axis=axis)\n idx_split = np.array_split(np.arange(ary.shape[axis]), indices_or_sections)\n idx_split = (np.arange(sp[0] * n_per_split, (sp[-1] + 1) * n_per_split)\n for sp in idx_split)\n return zip(idx_split, ary_split)\n\n\ndef create_chunks(sequence, size):\n \"\"\"Generate chunks from a sequence.\n\n Parameters\n ----------\n sequence : iterable\n Any iterable object\n size : int\n The chunksize to be returned\n \"\"\"\n return (sequence[p:p + size] for p in range(0, len(sequence), size))\n\n\ndef sum_squared(X):\n \"\"\"Compute norm of an array.\n\n Parameters\n ----------\n X : array\n Data whose norm must be found\n\n Returns\n -------\n value : float\n Sum of squares of the input array X\n \"\"\"\n X_flat = X.ravel(order='F' if np.isfortran(X) else 'C')\n return np.dot(X_flat, X_flat)\n\n\ndef _compute_row_norms(data):\n \"\"\"Compute scaling based on estimated norm.\"\"\"\n norms = np.sqrt(np.sum(data ** 2, axis=1))\n norms[norms == 0] = 1.0\n return norms\n\n\ndef _reg_pinv(x, reg=0, rank='full', rcond=1e-15, svd_lwork=None):\n \"\"\"Compute a regularized pseudoinverse of a square matrix.\n\n Regularization is performed by adding a constant value to each diagonal\n element of the matrix before inversion. This is known as \"diagonal\n loading\". The loading factor is computed as ``reg * np.trace(x) / len(x)``.\n\n The pseudo-inverse is computed through SVD decomposition and inverting the\n singular values. When the matrix is rank deficient, some singular values\n will be close to zero and will not be used during the inversion. The number\n of singular values to use can either be manually specified or automatically\n estimated.\n\n Parameters\n ----------\n x : ndarray, shape (n, n)\n Square matrix to invert.\n reg : float\n Regularization parameter. Defaults to 0.\n rank : int | None | 'full'\n This controls the effective rank of the covariance matrix when\n computing the inverse. The rank can be set explicitly by specifying an\n integer value. If ``None``, the rank will be automatically estimated.\n Since applying regularization will always make the covariance matrix\n full rank, the rank is estimated before regularization in this case. 
If\n 'full', the rank will be estimated after regularization and hence\n will mean using the full rank, unless ``reg=0`` is used.\n Defaults to 'full'.\n rcond : float | 'auto'\n Cutoff for detecting small singular values when attempting to estimate\n the rank of the matrix (``rank='auto'``). Singular values smaller than\n the cutoff are set to zero. When set to 'auto', a cutoff based on\n floating point precision will be used. Defaults to 1e-15.\n\n Returns\n -------\n x_inv : ndarray, shape (n, n)\n The inverted matrix.\n loading_factor : float\n Value added to the diagonal of the matrix during regularization.\n rank : int\n If ``rank`` was set to an integer value, this value is returned,\n else the estimated rank of the matrix, before regularization, is\n returned.\n \"\"\"\n from ..rank import _estimate_rank_from_s\n if rank is not None and rank != 'full':\n rank = int(operator.index(rank))\n if x.ndim != 2 or x.shape[0] != x.shape[1]:\n raise ValueError('Input matrix must be square.')\n if not np.allclose(x, x.conj().T):\n raise ValueError('Input matrix must be Hermitian (symmetric)')\n\n # Decompose the matrix\n if svd_lwork is None:\n svd_lwork = _svd_lwork(x.shape, x.dtype)\n U, s, V = _repeated_svd(x, lwork=svd_lwork)\n\n # Estimate the rank before regularization\n tol = 'auto' if rcond == 'auto' else rcond * s.max()\n rank_before = _estimate_rank_from_s(s, tol)\n\n # Decompose the matrix again after regularization\n loading_factor = reg * np.mean(s)\n U, s, V = _repeated_svd(x + loading_factor * np.eye(len(x)),\n lwork=svd_lwork)\n\n # Estimate the rank after regularization\n tol = 'auto' if rcond == 'auto' else rcond * s.max()\n rank_after = _estimate_rank_from_s(s, tol)\n\n # Warn the user if both all parameters were kept at their defaults and the\n # matrix is rank deficient.\n if rank_after < len(x) and reg == 0 and rank == 'full' and rcond == 1e-15:\n warn('Covariance matrix is rank-deficient and no regularization is '\n 'done.')\n elif isinstance(rank, int) and rank > len(x):\n raise ValueError('Invalid value for the rank parameter (%d) given '\n 'the shape of the input matrix (%d x %d).' %\n (rank, x.shape[0], x.shape[1]))\n\n # Pick the requested number of singular values\n if rank is None:\n sel_s = s[:rank_before]\n elif rank == 'full':\n sel_s = s[:rank_after]\n else:\n sel_s = s[:rank]\n\n # Invert only non-zero singular values\n s_inv = np.zeros(s.shape)\n nonzero_inds = np.flatnonzero(sel_s != 0)\n if len(nonzero_inds) > 0:\n s_inv[nonzero_inds] = 1. 
/ sel_s[nonzero_inds]\n\n # Compute the pseudo inverse\n U *= s_inv\n if U.dtype == np.float64:\n gemm = dgemm\n else:\n assert U.dtype == np.complex128\n gemm = zgemm\n x_inv = gemm(1., U, V).T\n\n if rank is None or rank == 'full':\n return x_inv, loading_factor, rank_before\n else:\n return x_inv, loading_factor, rank\n\n\ndef _gen_events(n_epochs):\n \"\"\"Generate event structure from number of epochs.\"\"\"\n events = np.c_[np.arange(n_epochs), np.zeros(n_epochs, int),\n np.ones(n_epochs, int)]\n return events\n\n\ndef _reject_data_segments(data, reject, flat, decim, info, tstep):\n \"\"\"Reject data segments using peak-to-peak amplitude.\"\"\"\n from ..epochs import _is_good\n from ..io.pick import channel_indices_by_type\n\n data_clean = np.empty_like(data)\n idx_by_type = channel_indices_by_type(info)\n step = int(ceil(tstep * info['sfreq']))\n if decim is not None:\n step = int(ceil(step / float(decim)))\n this_start = 0\n this_stop = 0\n drop_inds = []\n for first in range(0, data.shape[1], step):\n last = first + step\n data_buffer = data[:, first:last]\n if data_buffer.shape[1] < (last - first):\n break # end of the time segment\n if _is_good(data_buffer, info['ch_names'], idx_by_type, reject,\n flat, ignore_chs=info['bads']):\n this_stop = this_start + data_buffer.shape[1]\n data_clean[:, this_start:this_stop] = data_buffer\n this_start += data_buffer.shape[1]\n else:\n logger.info(\"Artifact detected in [%d, %d]\" % (first, last))\n drop_inds.append((first, last))\n data = data_clean[:, :this_stop]\n if not data.any():\n raise RuntimeError('No clean segment found. Please '\n 'consider updating your rejection '\n 'thresholds.')\n return data, drop_inds\n\n\ndef _get_inst_data(inst):\n \"\"\"Get data view from MNE object instance like Raw, Epochs or Evoked.\"\"\"\n from ..io.base import BaseRaw\n from ..epochs import BaseEpochs\n from .. import Evoked\n from ..time_frequency.tfr import _BaseTFR\n\n _validate_type(inst, (BaseRaw, BaseEpochs, Evoked, _BaseTFR), \"Instance\")\n if not inst.preload:\n inst.load_data()\n return inst._data\n\n\ndef compute_corr(x, y):\n \"\"\"Compute pearson correlations between a vector and a matrix.\"\"\"\n if len(x) == 0 or len(y) == 0:\n raise ValueError('x or y has zero length')\n X = np.array(x, float)\n Y = np.array(y, float)\n X -= X.mean(0)\n Y -= Y.mean(0)\n x_sd = X.std(0, ddof=1)\n # if covariance matrix is fully expanded, Y needs a\n # transpose / broadcasting else Y is correct\n y_sd = Y.std(0, ddof=1)[:, None if X.shape == Y.shape else Ellipsis]\n return (np.dot(X.T, Y) / float(len(X) - 1)) / (x_sd * y_sd)\n\n\n@fill_doc\ndef random_permutation(n_samples, random_state=None):\n \"\"\"Emulate the randperm matlab function.\n\n It returns a vector containing a random permutation of the\n integers between 0 and n_samples-1. 
It returns the same random numbers\n than randperm matlab function whenever the random_state is the same\n as the matlab's random seed.\n\n This function is useful for comparing against matlab scripts\n which use the randperm function.\n\n Note: the randperm(n_samples) matlab function generates a random\n sequence between 1 and n_samples, whereas\n random_permutation(n_samples, random_state) function generates\n a random sequence between 0 and n_samples-1, that is:\n randperm(n_samples) = random_permutation(n_samples, random_state) - 1\n\n Parameters\n ----------\n n_samples : int\n End point of the sequence to be permuted (excluded, i.e., the end point\n is equal to n_samples-1)\n %(random_state)s\n\n Returns\n -------\n randperm : ndarray, int\n Randomly permuted sequence between 0 and n-1.\n \"\"\"\n rng = check_random_state(random_state)\n # This can't just be rng.permutation(n_samples) because it's not identical\n # to what MATLAB produces\n idx = rng.uniform(size=n_samples)\n randperm = np.argsort(idx)\n return randperm\n\n\n@verbose\ndef _apply_scaling_array(data, picks_list, scalings, verbose=None):\n \"\"\"Scale data type-dependently for estimation.\"\"\"\n scalings = _check_scaling_inputs(data, picks_list, scalings)\n if isinstance(scalings, dict):\n logger.debug(' Scaling using mapping %s.' % (scalings,))\n picks_dict = dict(picks_list)\n scalings = [(picks_dict[k], v) for k, v in scalings.items()\n if k in picks_dict]\n for idx, scaling in scalings:\n data[idx, :] *= scaling # F - order\n else:\n logger.debug(' Scaling using computed norms.')\n data *= scalings[:, np.newaxis] # F - order\n\n\ndef _invert_scalings(scalings):\n if isinstance(scalings, dict):\n scalings = {k: 1. / v for k, v in scalings.items()}\n elif isinstance(scalings, np.ndarray):\n scalings = 1. / scalings\n return scalings\n\n\ndef _undo_scaling_array(data, picks_list, scalings):\n scalings = _invert_scalings(_check_scaling_inputs(data, picks_list,\n scalings))\n return _apply_scaling_array(data, picks_list, scalings, verbose=False)\n\n\n@contextmanager\ndef _scaled_array(data, picks_list, scalings):\n \"\"\"Scale, use, unscale array.\"\"\"\n _apply_scaling_array(data, picks_list=picks_list, scalings=scalings)\n try:\n yield\n finally:\n _undo_scaling_array(data, picks_list=picks_list, scalings=scalings)\n\n\ndef _apply_scaling_cov(data, picks_list, scalings):\n \"\"\"Scale resulting data after estimation.\"\"\"\n scalings = _check_scaling_inputs(data, picks_list, scalings)\n scales = None\n if isinstance(scalings, dict):\n n_channels = len(data)\n covinds = list(zip(*picks_list))[1]\n assert len(data) == sum(len(k) for k in covinds)\n assert list(sorted(np.concatenate(covinds))) == list(range(len(data)))\n scales = np.zeros(n_channels)\n for ch_t, idx in picks_list:\n scales[idx] = scalings[ch_t]\n elif isinstance(scalings, np.ndarray):\n if len(scalings) != len(data):\n raise ValueError('Scaling factors and data are of incompatible '\n 'shape')\n scales = scalings\n elif scalings is None:\n pass\n else:\n raise RuntimeError('Arff...')\n if scales is not None:\n assert np.sum(scales == 0.) 
== 0\n data *= (scales[None, :] * scales[:, None])\n\n\ndef _undo_scaling_cov(data, picks_list, scalings):\n scalings = _invert_scalings(_check_scaling_inputs(data, picks_list,\n scalings))\n return _apply_scaling_cov(data, picks_list, scalings)\n\n\ndef _check_scaling_inputs(data, picks_list, scalings):\n \"\"\"Aux function.\"\"\"\n rescale_dict_ = dict(mag=1e15, grad=1e13, eeg=1e6)\n\n scalings_ = None\n if isinstance(scalings, str) and scalings == 'norm':\n scalings_ = 1. / _compute_row_norms(data)\n elif isinstance(scalings, dict):\n rescale_dict_.update(scalings)\n scalings_ = rescale_dict_\n elif isinstance(scalings, np.ndarray):\n scalings_ = scalings\n elif scalings is None:\n pass\n else:\n raise NotImplementedError(\"No way! That's not a rescaling \"\n 'option: %s' % scalings)\n return scalings_\n\n\ndef hashfunc(fname, block_size=1048576, hash_type=\"md5\"): # 2 ** 20\n \"\"\"Calculate the hash for a file.\n\n Parameters\n ----------\n fname : str\n Filename.\n block_size : int\n Block size to use when reading.\n\n Returns\n -------\n hash_ : str\n The hexadecimal digest of the hash.\n \"\"\"\n if hash_type == \"md5\":\n hasher = hashlib.md5()\n elif hash_type == \"sha1\":\n hasher = hashlib.sha1()\n with open(fname, 'rb') as fid:\n while True:\n data = fid.read(block_size)\n if not data:\n break\n hasher.update(data)\n return hasher.hexdigest()\n\n\ndef _replace_md5(fname):\n \"\"\"Replace a file based on MD5sum.\"\"\"\n # adapted from sphinx-gallery\n assert fname.endswith('.new')\n fname_old = fname[:-4]\n if op.isfile(fname_old) and hashfunc(fname) == hashfunc(fname_old):\n os.remove(fname)\n else:\n shutil.move(fname, fname_old)\n\n\ndef create_slices(start, stop, step=None, length=1):\n \"\"\"Generate slices of time indexes.\n\n Parameters\n ----------\n start : int\n Index where first slice should start.\n stop : int\n Index where last slice should maximally end.\n length : int\n Number of time sample included in a given slice.\n step: int | None\n Number of time samples separating two slices.\n If step = None, step = length.\n\n Returns\n -------\n slices : list\n List of slice objects.\n \"\"\"\n # default parameters\n if step is None:\n step = length\n\n # slicing\n slices = [slice(t, t + length, 1) for t in\n range(start, stop - length + 1, step)]\n return slices\n\n\ndef _time_mask(times, tmin=None, tmax=None, sfreq=None, raise_error=True,\n include_tmax=True):\n \"\"\"Safely find sample boundaries.\"\"\"\n orig_tmin = tmin\n orig_tmax = tmax\n tmin = -np.inf if tmin is None else tmin\n tmax = np.inf if tmax is None else tmax\n if not np.isfinite(tmin):\n tmin = times[0]\n if not np.isfinite(tmax):\n tmax = times[-1]\n include_tmax = True # ignore this param when tmax is infinite\n if sfreq is not None:\n # Push to a bit past the nearest sample boundary first\n sfreq = float(sfreq)\n tmin = int(round(tmin * sfreq)) / sfreq - 0.5 / sfreq\n tmax = int(round(tmax * sfreq)) / sfreq\n tmax += (0.5 if include_tmax else -0.5) / sfreq\n else:\n assert include_tmax # can only be used when sfreq is known\n if raise_error and tmin > tmax:\n raise ValueError('tmin (%s) must be less than or equal to tmax (%s)'\n % (orig_tmin, orig_tmax))\n mask = (times >= tmin)\n mask &= (times <= tmax)\n if raise_error and not mask.any():\n extra = '' if include_tmax else 'when include_tmax=False '\n raise ValueError('No samples remain when using tmin=%s and tmax=%s %s'\n '(original time bounds are [%s, %s])'\n % (orig_tmin, orig_tmax, extra, times[0], times[-1]))\n return mask\n\n\ndef 
_freq_mask(freqs, sfreq, fmin=None, fmax=None, raise_error=True):\n \"\"\"Safely find frequency boundaries.\"\"\"\n orig_fmin = fmin\n orig_fmax = fmax\n fmin = -np.inf if fmin is None else fmin\n fmax = np.inf if fmax is None else fmax\n if not np.isfinite(fmin):\n fmin = freqs[0]\n if not np.isfinite(fmax):\n fmax = freqs[-1]\n if sfreq is None:\n raise ValueError('sfreq can not be None')\n # Push 0.5/sfreq past the nearest frequency boundary first\n sfreq = float(sfreq)\n fmin = int(round(fmin * sfreq)) / sfreq - 0.5 / sfreq\n fmax = int(round(fmax * sfreq)) / sfreq + 0.5 / sfreq\n if raise_error and fmin > fmax:\n raise ValueError('fmin (%s) must be less than or equal to fmax (%s)'\n % (orig_fmin, orig_fmax))\n mask = (freqs >= fmin)\n mask &= (freqs <= fmax)\n if raise_error and not mask.any():\n raise ValueError('No frequencies remain when using fmin=%s and '\n 'fmax=%s (original frequency bounds are [%s, %s])'\n % (orig_fmin, orig_fmax, freqs[0], freqs[-1]))\n return mask\n\n\ndef grand_average(all_inst, interpolate_bads=True, drop_bads=True):\n \"\"\"Make grand average of a list evoked or AverageTFR data.\n\n For evoked data, the function interpolates bad channels based on the\n ``interpolate_bads`` parameter. If ``interpolate_bads`` is True, the grand\n average file will contain good channels and the bad channels interpolated\n from the good MEG/EEG channels.\n For AverageTFR data, the function takes the subset of channels not marked\n as bad in any of the instances.\n\n The grand_average.nave attribute will be equal to the number\n of evoked datasets used to calculate the grand average.\n\n Note: Grand average evoked should not be used for source localization.\n\n Parameters\n ----------\n all_inst : list of Evoked or AverageTFR\n The evoked datasets.\n interpolate_bads : bool\n If True, bad MEG and EEG channels are interpolated. Ignored for\n AverageTFR.\n drop_bads : bool\n If True, drop all bad channels marked as bad in any data set.\n If neither interpolate_bads nor drop_bads is True, in the output file,\n every channel marked as bad in at least one of the input files will be\n marked as bad, but no interpolation or dropping will be performed.\n\n Returns\n -------\n grand_average : Evoked | AverageTFR\n The grand average data. Same type as input.\n\n Notes\n -----\n .. versionadded:: 0.11.0\n \"\"\"\n # check if all elements in the given list are evoked data\n from ..evoked import Evoked\n from ..time_frequency import AverageTFR\n from ..channels.channels import equalize_channels\n assert len(all_inst) > 1\n inst_type = type(all_inst[0])\n _validate_type(all_inst[0], (Evoked, AverageTFR), 'All elements')\n for inst in all_inst:\n _validate_type(inst, inst_type, 'All elements', 'of the same type')\n\n # Copy channels to leave the original evoked datasets intact.\n all_inst = [inst.copy() for inst in all_inst]\n\n # Interpolates if necessary\n if isinstance(all_inst[0], Evoked):\n if interpolate_bads:\n all_inst = [inst.interpolate_bads() if len(inst.info['bads']) > 0\n else inst for inst in all_inst]\n equalize_channels(all_inst) # apply equalize_channels\n from ..evoked import combine_evoked as combine\n weights = [1. 
/ len(all_inst)] * len(all_inst)\n else: # isinstance(all_inst[0], AverageTFR):\n from ..time_frequency.tfr import combine_tfr as combine\n weights = 'equal'\n\n if drop_bads:\n bads = list({b for inst in all_inst for b in inst.info['bads']})\n if bads:\n for inst in all_inst:\n inst.drop_channels(bads)\n\n # make grand_average object using combine_[evoked/tfr]\n grand_average = combine(all_inst, weights=weights)\n # change the grand_average.nave to the number of Evokeds\n grand_average.nave = len(all_inst)\n # change comment field\n grand_average.comment = \"Grand average (n = %d)\" % grand_average.nave\n return grand_average\n\n\ndef object_hash(x, h=None):\n \"\"\"Hash a reasonable python object.\n\n Parameters\n ----------\n x : object\n Object to hash. Can be anything comprised of nested versions of:\n {dict, list, tuple, ndarray, str, bytes, float, int, None}.\n h : hashlib HASH object | None\n Optional, object to add the hash to. None creates an MD5 hash.\n\n Returns\n -------\n digest : int\n The digest resulting from the hash.\n \"\"\"\n if h is None:\n h = hashlib.md5()\n if hasattr(x, 'keys'):\n # dict-like types\n keys = _sort_keys(x)\n for key in keys:\n object_hash(key, h)\n object_hash(x[key], h)\n elif isinstance(x, bytes):\n # must come before \"str\" below\n h.update(x)\n elif isinstance(x, (str, float, int, type(None))):\n h.update(str(type(x)).encode('utf-8'))\n h.update(str(x).encode('utf-8'))\n elif isinstance(x, (np.ndarray, np.number, np.bool_)):\n x = np.asarray(x)\n h.update(str(x.shape).encode('utf-8'))\n h.update(str(x.dtype).encode('utf-8'))\n h.update(x.tostring())\n elif hasattr(x, '__len__'):\n # all other list-like types\n h.update(str(type(x)).encode('utf-8'))\n for xx in x:\n object_hash(xx, h)\n else:\n raise RuntimeError('unsupported type: %s (%s)' % (type(x), x))\n return int(h.hexdigest(), 16)\n\n\ndef object_size(x):\n \"\"\"Estimate the size of a reasonable python object.\n\n Parameters\n ----------\n x : object\n Object to approximate the size of.\n Can be anything comprised of nested versions of:\n {dict, list, tuple, ndarray, str, bytes, float, int, None}.\n\n Returns\n -------\n size : int\n The estimated size in bytes of the object.\n \"\"\"\n # Note: this will not process object arrays properly (since those only)\n # hold references\n if isinstance(x, (bytes, str, int, float, type(None))):\n size = sys.getsizeof(x)\n elif isinstance(x, np.ndarray):\n # On newer versions of NumPy, just doing sys.getsizeof(x) works,\n # but on older ones you always get something small :(\n size = sys.getsizeof(np.array([])) + x.nbytes\n elif isinstance(x, np.generic):\n size = x.nbytes\n elif isinstance(x, dict):\n size = sys.getsizeof(x)\n for key, value in x.items():\n size += object_size(key)\n size += object_size(value)\n elif isinstance(x, (list, tuple)):\n size = sys.getsizeof(x) + sum(object_size(xx) for xx in x)\n elif sparse.isspmatrix_csc(x) or sparse.isspmatrix_csr(x):\n size = sum(sys.getsizeof(xx)\n for xx in [x, x.data, x.indices, x.indptr])\n else:\n raise RuntimeError('unsupported type: %s (%s)' % (type(x), x))\n return size\n\n\ndef _sort_keys(x):\n \"\"\"Sort and return keys of dict.\"\"\"\n keys = list(x.keys()) # note: not thread-safe\n idx = np.argsort([str(k) for k in keys])\n keys = [keys[ii] for ii in idx]\n return keys\n\n\ndef _array_equal_nan(a, b):\n try:\n np.testing.assert_array_equal(a, b)\n except AssertionError:\n return False\n return True\n\n\ndef object_diff(a, b, pre=''):\n \"\"\"Compute all differences between two python 
variables.\n\n Parameters\n ----------\n a : object\n Currently supported: dict, list, tuple, ndarray, int, str, bytes,\n float, StringIO, BytesIO.\n b : object\n Must be same type as ``a``.\n pre : str\n String to prepend to each line.\n\n Returns\n -------\n diffs : str\n A string representation of the differences.\n \"\"\"\n out = ''\n if type(a) != type(b):\n # Deal with NamedInt and NamedFloat\n for sub in (int, float):\n if isinstance(a, sub) and isinstance(b, sub):\n break\n else:\n return pre + ' type mismatch (%s, %s)\\n' % (type(a), type(b))\n if isinstance(a, dict):\n k1s = _sort_keys(a)\n k2s = _sort_keys(b)\n m1 = set(k2s) - set(k1s)\n if len(m1):\n out += pre + ' left missing keys %s\\n' % (m1)\n for key in k1s:\n if key not in k2s:\n out += pre + ' right missing key %s\\n' % key\n else:\n out += object_diff(a[key], b[key],\n pre=(pre + '[%s]' % repr(key)))\n elif isinstance(a, (list, tuple)):\n if len(a) != len(b):\n out += pre + ' length mismatch (%s, %s)\\n' % (len(a), len(b))\n else:\n for ii, (xx1, xx2) in enumerate(zip(a, b)):\n out += object_diff(xx1, xx2, pre + '[%s]' % ii)\n elif isinstance(a, (str, int, float, bytes, np.generic)):\n if a != b:\n out += pre + ' value mismatch (%s, %s)\\n' % (a, b)\n elif a is None:\n if b is not None:\n out += pre + ' left is None, right is not (%s)\\n' % (b)\n elif isinstance(a, np.ndarray):\n if not _array_equal_nan(a, b):\n out += pre + ' array mismatch\\n'\n elif isinstance(a, (StringIO, BytesIO)):\n if a.getvalue() != b.getvalue():\n out += pre + ' StringIO mismatch\\n'\n elif sparse.isspmatrix(a):\n # sparsity and sparse type of b vs a already checked above by type()\n if b.shape != a.shape:\n out += pre + (' sparse matrix a and b shape mismatch'\n '(%s vs %s)' % (a.shape, b.shape))\n else:\n c = a - b\n c.eliminate_zeros()\n if c.nnz > 0:\n out += pre + (' sparse matrix a and b differ on %s '\n 'elements' % c.nnz)\n elif hasattr(a, '__getstate__'):\n out += object_diff(a.__getstate__(), b.__getstate__(), pre)\n else:\n raise RuntimeError(pre + ': unsupported type %s (%s)' % (type(a), a))\n return out\n\n\nclass _PCA(object):\n \"\"\"Principal component analysis (PCA).\"\"\"\n\n # Adapted from sklearn and stripped down to just use linalg.svd\n # and make it easier to later provide a \"center\" option if we want\n\n def __init__(self, n_components=None, whiten=False):\n self.n_components = n_components\n self.whiten = whiten\n\n def fit_transform(self, X, y=None):\n X = X.copy()\n U, S, _ = self._fit(X)\n U = U[:, :self.n_components_]\n\n if self.whiten:\n # X_new = X * V / S * sqrt(n_samples) = U * sqrt(n_samples)\n U *= sqrt(X.shape[0] - 1)\n else:\n # X_new = X * V = U * S * V^T * V = U * S\n U *= S[:self.n_components_]\n\n return U\n\n def _fit(self, X):\n if self.n_components is None:\n n_components = min(X.shape)\n else:\n n_components = self.n_components\n n_samples, n_features = X.shape\n\n if n_components == 'mle':\n if n_samples < n_features:\n raise ValueError(\"n_components='mle' is only supported \"\n \"if n_samples >= n_features\")\n elif not 0 <= n_components <= min(n_samples, n_features):\n raise ValueError(\"n_components=%r must be between 0 and \"\n \"min(n_samples, n_features)=%r with \"\n \"svd_solver='full'\"\n % (n_components, min(n_samples, n_features)))\n elif n_components >= 1:\n if not isinstance(n_components, (numbers.Integral, np.integer)):\n raise ValueError(\"n_components=%r must be of type int \"\n \"when greater than or equal to 1, \"\n \"was of type=%r\"\n % (n_components, 
type(n_components)))\n\n self.mean_ = np.mean(X, axis=0)\n X -= self.mean_\n\n U, S, V = _safe_svd(X, full_matrices=False)\n # flip eigenvectors' sign to enforce deterministic output\n U, V = svd_flip(U, V)\n\n components_ = V\n\n # Get variance explained by singular values\n explained_variance_ = (S ** 2) / (n_samples - 1)\n total_var = explained_variance_.sum()\n explained_variance_ratio_ = explained_variance_ / total_var\n singular_values_ = S.copy() # Store the singular values.\n\n # Postprocess the number of components required\n if n_components == 'mle':\n n_components = \\\n _infer_dimension_(explained_variance_, n_samples, n_features)\n elif 0 < n_components < 1.0:\n # number of components for which the cumulated explained\n # variance percentage is superior to the desired threshold\n ratio_cumsum = stable_cumsum(explained_variance_ratio_)\n n_components = np.searchsorted(ratio_cumsum, n_components) + 1\n\n # Compute noise covariance using Probabilistic PCA model\n # The sigma2 maximum likelihood (cf. eq. 12.46)\n if n_components < min(n_features, n_samples):\n self.noise_variance_ = explained_variance_[n_components:].mean()\n else:\n self.noise_variance_ = 0.\n\n self.n_samples_, self.n_features_ = n_samples, n_features\n self.components_ = components_[:n_components]\n self.n_components_ = n_components\n self.explained_variance_ = explained_variance_[:n_components]\n self.explained_variance_ratio_ = \\\n explained_variance_ratio_[:n_components]\n self.singular_values_ = singular_values_[:n_components]\n\n return U, S, V\n\n\ndef _mask_to_onsets_offsets(mask):\n \"\"\"Group boolean mask into contiguous onset:offset pairs.\"\"\"\n assert mask.dtype == bool and mask.ndim == 1\n mask = mask.astype(int)\n diff = np.diff(mask)\n onsets = np.where(diff > 0)[0] + 1\n if mask[0]:\n onsets = np.concatenate([[0], onsets])\n offsets = np.where(diff < 0)[0] + 1\n if mask[-1]:\n offsets = np.concatenate([offsets, [len(mask)]])\n assert len(onsets) == len(offsets)\n return onsets, offsets\n",
"# -*- coding: utf-8 -*-\n\"\"\"Coregistration between different coordinate frames.\"\"\"\n\n# Authors: Christian Brodbeck <[email protected]>\n#\n# License: BSD (3-clause)\n\nimport configparser\nimport fnmatch\nfrom glob import glob, iglob\nimport os\nimport os.path as op\nimport stat\nimport sys\nimport re\nimport shutil\nfrom functools import reduce\n\nimport numpy as np\n\nfrom .io import read_fiducials, write_fiducials, read_info\nfrom .io.constants import FIFF\nfrom .label import read_label, Label\nfrom .source_space import (add_source_space_distances, read_source_spaces,\n write_source_spaces, _get_mri_header, _read_talxfm)\nfrom .surface import read_surface, write_surface, _normalize_vectors\nfrom .bem import read_bem_surfaces, write_bem_surfaces\nfrom .transforms import (rotation, rotation3d, scaling, translation, Transform,\n _read_fs_xfm, _write_fs_xfm, invert_transform,\n combine_transforms, apply_trans)\nfrom .utils import (get_config, get_subjects_dir, logger, pformat, verbose,\n warn, has_nibabel)\nfrom .viz._3d import _fiducial_coords\n\n# some path templates\ntrans_fname = os.path.join('{raw_dir}', '{subject}-trans.fif')\nsubject_dirname = os.path.join('{subjects_dir}', '{subject}')\nbem_dirname = os.path.join(subject_dirname, 'bem')\nmri_dirname = os.path.join(subject_dirname, 'mri')\nmri_transforms_dirname = os.path.join(subject_dirname, 'mri', 'transforms')\nsurf_dirname = os.path.join(subject_dirname, 'surf')\nbem_fname = os.path.join(bem_dirname, \"{subject}-{name}.fif\")\nhead_bem_fname = pformat(bem_fname, name='head')\nfid_fname = pformat(bem_fname, name='fiducials')\nfid_fname_general = os.path.join(bem_dirname, \"{head}-fiducials.fif\")\nsrc_fname = os.path.join(bem_dirname, '{subject}-{spacing}-src.fif')\n_head_fnames = (os.path.join(bem_dirname, 'outer_skin.surf'),\n head_bem_fname)\n_high_res_head_fnames = (os.path.join(bem_dirname, '{subject}-head-dense.fif'),\n os.path.join(surf_dirname, 'lh.seghead'),\n os.path.join(surf_dirname, 'lh.smseghead'))\n\n\ndef _make_writable(fname):\n \"\"\"Make a file writable.\"\"\"\n os.chmod(fname, stat.S_IMODE(os.lstat(fname)[stat.ST_MODE]) | 128) # write\n\n\ndef _make_writable_recursive(path):\n \"\"\"Recursively set writable.\"\"\"\n if sys.platform.startswith('win'):\n return # can't safely set perms\n for root, dirs, files in os.walk(path, topdown=False):\n for f in dirs + files:\n _make_writable(os.path.join(root, f))\n\n\ndef _find_head_bem(subject, subjects_dir, high_res=False):\n \"\"\"Find a high resolution head.\"\"\"\n # XXX this should be refactored with mne.surface.get_head_surf ...\n fnames = _high_res_head_fnames if high_res else _head_fnames\n for fname in fnames:\n path = fname.format(subjects_dir=subjects_dir, subject=subject)\n if os.path.exists(path):\n return path\n\n\ndef coregister_fiducials(info, fiducials, tol=0.01):\n \"\"\"Create a head-MRI transform by aligning 3 fiducial points.\n\n Parameters\n ----------\n info : Info\n Measurement info object with fiducials in head coordinate space.\n fiducials : str | list of dict\n Fiducials in MRI coordinate space (either path to a ``*-fiducials.fif``\n file or list of fiducials as returned by :func:`read_fiducials`.\n\n Returns\n -------\n trans : Transform\n The device-MRI transform.\n \"\"\"\n if isinstance(info, str):\n info = read_info(info)\n if isinstance(fiducials, str):\n fiducials, coord_frame_to = read_fiducials(fiducials)\n else:\n coord_frame_to = FIFF.FIFFV_COORD_MRI\n frames_from = {d['coord_frame'] for d in info['dig']}\n if 
len(frames_from) > 1:\n raise ValueError(\"info contains fiducials from different coordinate \"\n \"frames\")\n else:\n coord_frame_from = frames_from.pop()\n coords_from = _fiducial_coords(info['dig'])\n coords_to = _fiducial_coords(fiducials, coord_frame_to)\n trans = fit_matched_points(coords_from, coords_to, tol=tol)\n return Transform(coord_frame_from, coord_frame_to, trans)\n\n\n@verbose\ndef create_default_subject(fs_home=None, update=False, subjects_dir=None,\n verbose=None):\n \"\"\"Create an average brain subject for subjects without structural MRI.\n\n Create a copy of fsaverage from the Freesurfer directory in subjects_dir\n and add auxiliary files from the mne package.\n\n Parameters\n ----------\n fs_home : None | str\n The freesurfer home directory (only needed if FREESURFER_HOME is not\n specified as environment variable).\n update : bool\n In cases where a copy of the fsaverage brain already exists in the\n subjects_dir, this option allows to only copy files that don't already\n exist in the fsaverage directory.\n subjects_dir : None | str\n Override the SUBJECTS_DIR environment variable\n (os.environ['SUBJECTS_DIR']) as destination for the new subject.\n %(verbose)s\n\n Notes\n -----\n When no structural MRI is available for a subject, an average brain can be\n substituted. Freesurfer comes with such an average brain model, and MNE\n comes with some auxiliary files which make coregistration easier.\n :py:func:`create_default_subject` copies the relevant\n files from Freesurfer into the current subjects_dir, and also adds the\n auxiliary files provided by MNE.\n \"\"\"\n subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)\n if fs_home is None:\n fs_home = get_config('FREESURFER_HOME', fs_home)\n if fs_home is None:\n raise ValueError(\n \"FREESURFER_HOME environment variable not found. Please \"\n \"specify the fs_home parameter in your call to \"\n \"create_default_subject().\")\n\n # make sure freesurfer files exist\n fs_src = os.path.join(fs_home, 'subjects', 'fsaverage')\n if not os.path.exists(fs_src):\n raise IOError('fsaverage not found at %r. Is fs_home specified '\n 'correctly?' % fs_src)\n for name in ('label', 'mri', 'surf'):\n dirname = os.path.join(fs_src, name)\n if not os.path.isdir(dirname):\n raise IOError(\"Freesurfer fsaverage seems to be incomplete: No \"\n \"directory named %s found in %s\" % (name, fs_src))\n\n # make sure destination does not already exist\n dest = os.path.join(subjects_dir, 'fsaverage')\n if dest == fs_src:\n raise IOError(\n \"Your subjects_dir points to the freesurfer subjects_dir (%r). \"\n \"The default subject can not be created in the freesurfer \"\n \"installation directory; please specify a different \"\n \"subjects_dir.\" % subjects_dir)\n elif (not update) and os.path.exists(dest):\n raise IOError(\n \"Can not create fsaverage because %r already exists in \"\n \"subjects_dir %r. 
Delete or rename the existing fsaverage \"\n \"subject folder.\" % ('fsaverage', subjects_dir))\n\n # copy fsaverage from freesurfer\n logger.info(\"Copying fsaverage subject from freesurfer directory...\")\n if (not update) or not os.path.exists(dest):\n shutil.copytree(fs_src, dest)\n _make_writable_recursive(dest)\n\n # copy files from mne\n source_fname = os.path.join(os.path.dirname(__file__), 'data', 'fsaverage',\n 'fsaverage-%s.fif')\n dest_bem = os.path.join(dest, 'bem')\n if not os.path.exists(dest_bem):\n os.mkdir(dest_bem)\n logger.info(\"Copying auxiliary fsaverage files from mne...\")\n dest_fname = os.path.join(dest_bem, 'fsaverage-%s.fif')\n _make_writable_recursive(dest_bem)\n for name in ('fiducials', 'head', 'inner_skull-bem', 'trans'):\n if not os.path.exists(dest_fname % name):\n shutil.copy(source_fname % name, dest_bem)\n\n\ndef _decimate_points(pts, res=10):\n \"\"\"Decimate the number of points using a voxel grid.\n\n Create a voxel grid with a specified resolution and retain at most one\n point per voxel. For each voxel, the point closest to its center is\n retained.\n\n Parameters\n ----------\n pts : array, shape (n_points, 3)\n The points making up the head shape.\n res : scalar\n The resolution of the voxel space (side length of each voxel).\n\n Returns\n -------\n pts : array, shape = (n_points, 3)\n The decimated points.\n \"\"\"\n from scipy.spatial.distance import cdist\n pts = np.asarray(pts)\n\n # find the bin edges for the voxel space\n xmin, ymin, zmin = pts.min(0) - res / 2.\n xmax, ymax, zmax = pts.max(0) + res\n xax = np.arange(xmin, xmax, res)\n yax = np.arange(ymin, ymax, res)\n zax = np.arange(zmin, zmax, res)\n\n # find voxels containing one or more point\n H, _ = np.histogramdd(pts, bins=(xax, yax, zax), normed=False)\n\n # for each voxel, select one point\n X, Y, Z = pts.T\n out = np.empty((np.sum(H > 0), 3))\n for i, (xbin, ybin, zbin) in enumerate(zip(*np.nonzero(H))):\n x = xax[xbin]\n y = yax[ybin]\n z = zax[zbin]\n xi = np.logical_and(X >= x, X < x + res)\n yi = np.logical_and(Y >= y, Y < y + res)\n zi = np.logical_and(Z >= z, Z < z + res)\n idx = np.logical_and(zi, np.logical_and(yi, xi))\n ipts = pts[idx]\n\n mid = np.array([x, y, z]) + res / 2.\n dist = cdist(ipts, [mid])\n i_min = np.argmin(dist)\n ipt = ipts[i_min]\n out[i] = ipt\n\n return out\n\n\ndef _trans_from_params(param_info, params):\n \"\"\"Convert transformation parameters into a transformation matrix.\n\n Parameters\n ----------\n param_info : tuple, len = 3\n Tuple describing the parameters in x (do_translate, do_rotate,\n do_scale).\n params : tuple\n The transformation parameters.\n\n Returns\n -------\n trans : array, shape = (4, 4)\n Transformation matrix.\n \"\"\"\n do_rotate, do_translate, do_scale = param_info\n i = 0\n trans = []\n\n if do_rotate:\n x, y, z = params[:3]\n trans.append(rotation(x, y, z))\n i += 3\n\n if do_translate:\n x, y, z = params[i:i + 3]\n trans.insert(0, translation(x, y, z))\n i += 3\n\n if do_scale == 1:\n s = params[i]\n trans.append(scaling(s, s, s))\n elif do_scale == 3:\n x, y, z = params[i:i + 3]\n trans.append(scaling(x, y, z))\n\n trans = reduce(np.dot, trans)\n return trans\n\n\n# XXX this function should be moved out of coreg as used elsewhere\ndef fit_matched_points(src_pts, tgt_pts, rotate=True, translate=True,\n scale=False, tol=None, x0=None, out='trans',\n weights=None):\n \"\"\"Find a transform between matched sets of points.\n\n This minimizes the squared distance between two matching sets of points.\n\n Uses 
:func:`scipy.optimize.leastsq` to find a transformation involving\n a combination of rotation, translation, and scaling (in that order).\n\n Parameters\n ----------\n src_pts : array, shape = (n, 3)\n Points to which the transform should be applied.\n tgt_pts : array, shape = (n, 3)\n Points to which src_pts should be fitted. Each point in tgt_pts should\n correspond to the point in src_pts with the same index.\n rotate : bool\n Allow rotation of the ``src_pts``.\n translate : bool\n Allow translation of the ``src_pts``.\n scale : bool\n Number of scaling parameters. With False, points are not scaled. With\n True, points are scaled by the same factor along all axes.\n tol : scalar | None\n The error tolerance. If the distance between any of the matched points\n exceeds this value in the solution, a RuntimeError is raised. With\n None, no error check is performed.\n x0 : None | tuple\n Initial values for the fit parameters.\n out : 'params' | 'trans'\n In what format to return the estimate: 'params' returns a tuple with\n the fit parameters; 'trans' returns a transformation matrix of shape\n (4, 4).\n\n Returns\n -------\n trans : array, shape (4, 4)\n Transformation that, if applied to src_pts, minimizes the squared\n distance to tgt_pts. Only returned if out=='trans'.\n params : array, shape (n_params, )\n A single tuple containing the rotation, translation, and scaling\n parameters in that order (as applicable).\n \"\"\"\n # XXX eventually this should be refactored with the cHPI fitting code,\n # which use fmin_cobyla with constraints\n from scipy.optimize import leastsq\n src_pts = np.atleast_2d(src_pts)\n tgt_pts = np.atleast_2d(tgt_pts)\n if src_pts.shape != tgt_pts.shape:\n raise ValueError(\"src_pts and tgt_pts must have same shape (got \"\n \"{}, {})\".format(src_pts.shape, tgt_pts.shape))\n if weights is not None:\n weights = np.array(weights, float)\n if weights.ndim != 1 or weights.size not in (src_pts.shape[0], 1):\n raise ValueError(\"weights (shape=%s) must be None or have shape \"\n \"(%s,)\" % (weights.shape, src_pts.shape[0],))\n weights = weights[:, np.newaxis]\n\n rotate = bool(rotate)\n translate = bool(translate)\n scale = int(scale)\n if translate:\n src_pts = np.hstack((src_pts, np.ones((len(src_pts), 1))))\n\n param_info = (rotate, translate, scale)\n if param_info == (True, False, 0):\n def error(x):\n rx, ry, rz = x\n trans = rotation3d(rx, ry, rz)\n est = np.dot(src_pts, trans.T)\n d = tgt_pts - est\n if weights is not None:\n d *= weights\n return d.ravel()\n if x0 is None:\n x0 = (0, 0, 0)\n elif param_info == (True, True, 0):\n def error(x):\n rx, ry, rz, tx, ty, tz = x\n trans = np.dot(translation(tx, ty, tz), rotation(rx, ry, rz))\n est = np.dot(src_pts, trans.T)[:, :3]\n d = tgt_pts - est\n if weights is not None:\n d *= weights\n return d.ravel()\n if x0 is None:\n x0 = (0, 0, 0, 0, 0, 0)\n elif param_info == (True, True, 1):\n def error(x):\n rx, ry, rz, tx, ty, tz, s = x\n trans = reduce(np.dot, (translation(tx, ty, tz),\n rotation(rx, ry, rz),\n scaling(s, s, s)))\n est = np.dot(src_pts, trans.T)[:, :3]\n d = tgt_pts - est\n if weights is not None:\n d *= weights\n return d.ravel()\n if x0 is None:\n x0 = (0, 0, 0, 0, 0, 0, 1)\n elif param_info == (True, True, 3):\n def error(x):\n rx, ry, rz, tx, ty, tz, sx, sy, sz = x\n trans = reduce(np.dot, (translation(tx, ty, tz),\n rotation(rx, ry, rz),\n scaling(sx, sy, sz)))\n est = np.dot(src_pts, trans.T)[:, :3]\n d = tgt_pts - est\n if weights is not None:\n d *= weights\n return d.ravel()\n if x0 is None:\n 
x0 = (0, 0, 0, 0, 0, 0, 1, 1, 1)\n else:\n raise NotImplementedError(\n \"The specified parameter combination is not implemented: \"\n \"rotate=%r, translate=%r, scale=%r\" % param_info)\n\n x, _, _, _, _ = leastsq(error, x0, full_output=True)\n\n # re-create the final transformation matrix\n if (tol is not None) or (out == 'trans'):\n trans = _trans_from_params(param_info, x)\n\n # assess the error of the solution\n if tol is not None:\n if not translate:\n src_pts = np.hstack((src_pts, np.ones((len(src_pts), 1))))\n est_pts = np.dot(src_pts, trans.T)[:, :3]\n err = np.sqrt(np.sum((est_pts - tgt_pts) ** 2, axis=1))\n if np.any(err > tol):\n raise RuntimeError(\"Error exceeds tolerance. Error = %r\" % err)\n\n if out == 'params':\n return x\n elif out == 'trans':\n return trans\n else:\n raise ValueError(\"Invalid out parameter: %r. Needs to be 'params' or \"\n \"'trans'.\" % out)\n\n\ndef _find_label_paths(subject='fsaverage', pattern=None, subjects_dir=None):\n \"\"\"Find paths to label files in a subject's label directory.\n\n Parameters\n ----------\n subject : str\n Name of the mri subject.\n pattern : str | None\n Pattern for finding the labels relative to the label directory in the\n MRI subject directory (e.g., \"aparc/*.label\" will find all labels\n in the \"subject/label/aparc\" directory). With None, find all labels.\n subjects_dir : None | str\n Override the SUBJECTS_DIR environment variable\n (sys.environ['SUBJECTS_DIR'])\n\n Returns\n -------\n paths : list\n List of paths relative to the subject's label directory\n \"\"\"\n subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)\n subject_dir = os.path.join(subjects_dir, subject)\n lbl_dir = os.path.join(subject_dir, 'label')\n\n if pattern is None:\n paths = []\n for dirpath, _, filenames in os.walk(lbl_dir):\n rel_dir = os.path.relpath(dirpath, lbl_dir)\n for filename in fnmatch.filter(filenames, '*.label'):\n path = os.path.join(rel_dir, filename)\n paths.append(path)\n else:\n paths = [os.path.relpath(path, lbl_dir) for path in iglob(pattern)]\n\n return paths\n\n\ndef _find_mri_paths(subject, skip_fiducials, subjects_dir):\n \"\"\"Find all files of an mri relevant for source transformation.\n\n Parameters\n ----------\n subject : str\n Name of the mri subject.\n skip_fiducials : bool\n Do not scale the MRI fiducials. 
If False, an IOError will be raised\n if no fiducials file can be found.\n subjects_dir : None | str\n Override the SUBJECTS_DIR environment variable\n (sys.environ['SUBJECTS_DIR'])\n\n Returns\n -------\n paths : dict\n Dictionary whose keys are relevant file type names (str), and whose\n values are lists of paths.\n \"\"\"\n subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)\n paths = {}\n\n # directories to create\n paths['dirs'] = [bem_dirname, surf_dirname]\n\n # surf/ files\n paths['surf'] = []\n surf_fname = os.path.join(surf_dirname, '{name}')\n surf_names = ('inflated', 'white', 'orig', 'orig_avg', 'inflated_avg',\n 'inflated_pre', 'pial', 'pial_avg', 'smoothwm', 'white_avg',\n 'seghead', 'smseghead')\n if os.getenv('_MNE_FEW_SURFACES', '') == 'true': # for testing\n surf_names = surf_names[:4]\n for surf_name in surf_names:\n for hemi in ('lh.', 'rh.'):\n name = hemi + surf_name\n path = surf_fname.format(subjects_dir=subjects_dir,\n subject=subject, name=name)\n if os.path.exists(path):\n paths['surf'].append(pformat(surf_fname, name=name))\n surf_fname = os.path.join(bem_dirname, '{name}')\n surf_names = ('inner_skull.surf', 'outer_skull.surf', 'outer_skin.surf')\n for surf_name in surf_names:\n path = surf_fname.format(subjects_dir=subjects_dir,\n subject=subject, name=surf_name)\n if os.path.exists(path):\n paths['surf'].append(pformat(surf_fname, name=surf_name))\n del surf_names, surf_name, path, hemi\n\n # BEM files\n paths['bem'] = bem = []\n path = head_bem_fname.format(subjects_dir=subjects_dir, subject=subject)\n if os.path.exists(path):\n bem.append('head')\n bem_pattern = pformat(bem_fname, subjects_dir=subjects_dir,\n subject=subject, name='*-bem')\n re_pattern = pformat(bem_fname, subjects_dir=subjects_dir, subject=subject,\n name='(.+)').replace('\\\\', '\\\\\\\\')\n for path in iglob(bem_pattern):\n match = re.match(re_pattern, path)\n name = match.group(1)\n bem.append(name)\n del bem, path, bem_pattern, re_pattern\n\n # fiducials\n if skip_fiducials:\n paths['fid'] = []\n else:\n paths['fid'] = _find_fiducials_files(subject, subjects_dir)\n # check that we found at least one\n if len(paths['fid']) == 0:\n raise IOError(\"No fiducials file found for %s. The fiducials \"\n \"file should be named \"\n \"{subject}/bem/{subject}-fiducials.fif. 
In \"\n \"order to scale an MRI without fiducials set \"\n \"skip_fiducials=True.\" % subject)\n\n # duplicate files (curvature and some surfaces)\n paths['duplicate'] = []\n path = os.path.join(surf_dirname, '{name}')\n surf_fname = os.path.join(surf_dirname, '{name}')\n surf_dup_names = ('curv', 'sphere', 'sphere.reg', 'sphere.reg.avg')\n for surf_dup_name in surf_dup_names:\n for hemi in ('lh.', 'rh.'):\n name = hemi + surf_dup_name\n path = surf_fname.format(subjects_dir=subjects_dir,\n subject=subject, name=name)\n if os.path.exists(path):\n paths['duplicate'].append(pformat(surf_fname, name=name))\n del surf_dup_name, name, path, hemi\n\n # transform files (talairach)\n paths['transforms'] = []\n transform_fname = os.path.join(mri_transforms_dirname, 'talairach.xfm')\n path = transform_fname.format(subjects_dir=subjects_dir, subject=subject)\n if os.path.exists(path):\n paths['transforms'].append(transform_fname)\n del transform_fname, path\n\n # find source space files\n paths['src'] = src = []\n bem_dir = bem_dirname.format(subjects_dir=subjects_dir, subject=subject)\n fnames = fnmatch.filter(os.listdir(bem_dir), '*-src.fif')\n prefix = subject + '-'\n for fname in fnames:\n if fname.startswith(prefix):\n fname = \"{subject}-%s\" % fname[len(prefix):]\n path = os.path.join(bem_dirname, fname)\n src.append(path)\n\n # find MRIs\n mri_dir = mri_dirname.format(subjects_dir=subjects_dir, subject=subject)\n fnames = fnmatch.filter(os.listdir(mri_dir), '*.mgz')\n paths['mri'] = [os.path.join(mri_dir, f) for f in fnames]\n\n return paths\n\n\ndef _find_fiducials_files(subject, subjects_dir):\n \"\"\"Find fiducial files.\"\"\"\n fid = []\n # standard fiducials\n if os.path.exists(fid_fname.format(subjects_dir=subjects_dir,\n subject=subject)):\n fid.append(fid_fname)\n # fiducials with subject name\n pattern = pformat(fid_fname_general, subjects_dir=subjects_dir,\n subject=subject, head='*')\n regex = pformat(fid_fname_general, subjects_dir=subjects_dir,\n subject=subject, head='(.+)').replace('\\\\', '\\\\\\\\')\n for path in iglob(pattern):\n match = re.match(regex, path)\n head = match.group(1).replace(subject, '{subject}')\n fid.append(pformat(fid_fname_general, head=head))\n return fid\n\n\ndef _is_mri_subject(subject, subjects_dir=None):\n \"\"\"Check whether a directory in subjects_dir is an mri subject directory.\n\n Parameters\n ----------\n subject : str\n Name of the potential subject/directory.\n subjects_dir : None | str\n Override the SUBJECTS_DIR environment variable.\n\n Returns\n -------\n is_mri_subject : bool\n Whether ``subject`` is an mri subject.\n \"\"\"\n subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)\n return bool(_find_head_bem(subject, subjects_dir) or\n _find_head_bem(subject, subjects_dir, high_res=True))\n\n\ndef _is_scaled_mri_subject(subject, subjects_dir=None):\n \"\"\"Check whether a directory in subjects_dir is a scaled mri subject.\n\n Parameters\n ----------\n subject : str\n Name of the potential subject/directory.\n subjects_dir : None | str\n Override the SUBJECTS_DIR environment variable.\n\n Returns\n -------\n is_scaled_mri_subject : bool\n Whether ``subject`` is a scaled mri subject.\n \"\"\"\n subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)\n if not _is_mri_subject(subject, subjects_dir):\n return False\n fname = os.path.join(subjects_dir, subject, 'MRI scaling parameters.cfg')\n return os.path.exists(fname)\n\n\ndef _mri_subject_has_bem(subject, subjects_dir=None):\n \"\"\"Check whether an mri subject has a file 
matching the bem pattern.\n\n Parameters\n ----------\n subject : str\n Name of the subject.\n subjects_dir : None | str\n Override the SUBJECTS_DIR environment variable.\n\n Returns\n -------\n has_bem_file : bool\n Whether ``subject`` has a bem file.\n \"\"\"\n subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)\n pattern = bem_fname.format(subjects_dir=subjects_dir, subject=subject,\n name='*-bem')\n fnames = glob(pattern)\n return bool(len(fnames))\n\n\ndef read_mri_cfg(subject, subjects_dir=None):\n \"\"\"Read information from the cfg file of a scaled MRI brain.\n\n Parameters\n ----------\n subject : str\n Name of the scaled MRI subject.\n subjects_dir : None | str\n Override the SUBJECTS_DIR environment variable.\n\n Returns\n -------\n cfg : dict\n Dictionary with entries from the MRI's cfg file.\n \"\"\"\n subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)\n fname = os.path.join(subjects_dir, subject, 'MRI scaling parameters.cfg')\n\n if not os.path.exists(fname):\n raise IOError(\"%r does not seem to be a scaled mri subject: %r does \"\n \"not exist.\" % (subject, fname))\n\n logger.info(\"Reading MRI cfg file %s\" % fname)\n config = configparser.RawConfigParser()\n config.read(fname)\n n_params = config.getint(\"MRI Scaling\", 'n_params')\n if n_params == 1:\n scale = config.getfloat(\"MRI Scaling\", 'scale')\n elif n_params == 3:\n scale_str = config.get(\"MRI Scaling\", 'scale')\n scale = np.array([float(s) for s in scale_str.split()])\n else:\n raise ValueError(\"Invalid n_params value in MRI cfg: %i\" % n_params)\n\n out = {'subject_from': config.get(\"MRI Scaling\", 'subject_from'),\n 'n_params': n_params, 'scale': scale}\n return out\n\n\ndef _write_mri_config(fname, subject_from, subject_to, scale):\n \"\"\"Write the cfg file describing a scaled MRI subject.\n\n Parameters\n ----------\n fname : str\n Target file.\n subject_from : str\n Name of the source MRI subject.\n subject_to : str\n Name of the scaled MRI subject.\n scale : float | array_like, shape = (3,)\n The scaling parameter.\n \"\"\"\n scale = np.asarray(scale)\n if np.isscalar(scale) or scale.shape == ():\n n_params = 1\n else:\n n_params = 3\n\n config = configparser.RawConfigParser()\n config.add_section(\"MRI Scaling\")\n config.set(\"MRI Scaling\", 'subject_from', subject_from)\n config.set(\"MRI Scaling\", 'subject_to', subject_to)\n config.set(\"MRI Scaling\", 'n_params', str(n_params))\n if n_params == 1:\n config.set(\"MRI Scaling\", 'scale', str(scale))\n else:\n config.set(\"MRI Scaling\", 'scale', ' '.join([str(s) for s in scale]))\n config.set(\"MRI Scaling\", 'version', '1')\n with open(fname, 'w') as fid:\n config.write(fid)\n\n\ndef _scale_params(subject_to, subject_from, scale, subjects_dir):\n \"\"\"Assemble parameters for scaling.\n\n Returns\n -------\n subjects_dir : str\n Subjects directory.\n subject_from : str\n Name of the source subject.\n scale : array\n Scaling factor, either shape=() for uniform scaling or shape=(3,) for\n non-uniform scaling.\n uniform : bool\n Whether scaling is uniform.\n \"\"\"\n subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)\n if (subject_from is None) != (scale is None):\n raise TypeError(\"Need to provide either both subject_from and scale \"\n \"parameters, or neither.\")\n\n if subject_from is None:\n cfg = read_mri_cfg(subject_to, subjects_dir)\n subject_from = cfg['subject_from']\n n_params = cfg['n_params']\n assert n_params in (1, 3)\n scale = cfg['scale']\n scale = np.atleast_1d(scale)\n if scale.ndim != 1 or 
scale.shape[0] not in (1, 3):\n raise ValueError(\"Invalid shape for scale parameer. Need scalar \"\n \"or array of length 3. Got shape %s.\"\n % (scale.shape,))\n n_params = len(scale)\n return subjects_dir, subject_from, scale, n_params == 1\n\n\n@verbose\ndef scale_bem(subject_to, bem_name, subject_from=None, scale=None,\n subjects_dir=None, verbose=None):\n \"\"\"Scale a bem file.\n\n Parameters\n ----------\n subject_to : str\n Name of the scaled MRI subject (the destination mri subject).\n bem_name : str\n Name of the bem file. For example, to scale\n ``fsaverage-inner_skull-bem.fif``, the bem_name would be\n \"inner_skull-bem\".\n subject_from : None | str\n The subject from which to read the source space. If None, subject_from\n is read from subject_to's config file.\n scale : None | float | array, shape = (3,)\n Scaling factor. Has to be specified if subjects_from is specified,\n otherwise it is read from subject_to's config file.\n subjects_dir : None | str\n Override the SUBJECTS_DIR environment variable.\n %(verbose)s\n \"\"\"\n subjects_dir, subject_from, scale, uniform = \\\n _scale_params(subject_to, subject_from, scale, subjects_dir)\n\n src = bem_fname.format(subjects_dir=subjects_dir, subject=subject_from,\n name=bem_name)\n dst = bem_fname.format(subjects_dir=subjects_dir, subject=subject_to,\n name=bem_name)\n\n if os.path.exists(dst):\n raise IOError(\"File already exists: %s\" % dst)\n\n surfs = read_bem_surfaces(src)\n for surf in surfs:\n surf['rr'] *= scale\n if not uniform:\n assert len(surf['nn']) > 0\n surf['nn'] /= scale\n _normalize_vectors(surf['nn'])\n write_bem_surfaces(dst, surfs)\n\n\ndef scale_labels(subject_to, pattern=None, overwrite=False, subject_from=None,\n scale=None, subjects_dir=None):\n r\"\"\"Scale labels to match a brain that was previously created by scaling.\n\n Parameters\n ----------\n subject_to : str\n Name of the scaled MRI subject (the destination brain).\n pattern : str | None\n Pattern for finding the labels relative to the label directory in the\n MRI subject directory (e.g., \"lh.BA3a.label\" will scale\n \"fsaverage/label/lh.BA3a.label\"; \"aparc/\\*.label\" will find all labels\n in the \"fsaverage/label/aparc\" directory). With None, scale all labels.\n overwrite : bool\n Overwrite any label file that already exists for subject_to (otherwise\n existing labels are skipped).\n subject_from : None | str\n Name of the original MRI subject (the brain that was scaled to create\n subject_to). If None, the value is read from subject_to's cfg file.\n scale : None | float | array_like, shape = (3,)\n Scaling parameter. 
If None, the value is read from subject_to's cfg\n file.\n subjects_dir : None | str\n Override the SUBJECTS_DIR environment variable.\n \"\"\"\n subjects_dir, subject_from, scale, _ = _scale_params(\n subject_to, subject_from, scale, subjects_dir)\n\n # find labels\n paths = _find_label_paths(subject_from, pattern, subjects_dir)\n if not paths:\n return\n\n subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)\n src_root = os.path.join(subjects_dir, subject_from, 'label')\n dst_root = os.path.join(subjects_dir, subject_to, 'label')\n\n # scale labels\n for fname in paths:\n dst = os.path.join(dst_root, fname)\n if not overwrite and os.path.exists(dst):\n continue\n\n dirname = os.path.dirname(dst)\n if not os.path.exists(dirname):\n os.makedirs(dirname)\n\n src = os.path.join(src_root, fname)\n l_old = read_label(src)\n pos = l_old.pos * scale\n l_new = Label(l_old.vertices, pos, l_old.values, l_old.hemi,\n l_old.comment, subject=subject_to)\n l_new.save(dst)\n\n\n@verbose\ndef scale_mri(subject_from, subject_to, scale, overwrite=False,\n subjects_dir=None, skip_fiducials=False, labels=True,\n annot=False, verbose=None):\n \"\"\"Create a scaled copy of an MRI subject.\n\n Parameters\n ----------\n subject_from : str\n Name of the subject providing the MRI.\n subject_to : str\n New subject name for which to save the scaled MRI.\n scale : float | array_like, shape = (3,)\n The scaling factor (one or 3 parameters).\n overwrite : bool\n If an MRI already exists for subject_to, overwrite it.\n subjects_dir : None | str\n Override the SUBJECTS_DIR environment variable.\n skip_fiducials : bool\n Do not scale the MRI fiducials. If False (default), an IOError will be\n raised if no fiducials file can be found.\n labels : bool\n Also scale all labels (default True).\n annot : bool\n Copy ``*.annot`` files to the new location (default False).\n %(verbose)s\n\n See Also\n --------\n scale_labels : add labels to a scaled MRI\n scale_source_space : add a source space to a scaled MRI\n \"\"\"\n subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)\n paths = _find_mri_paths(subject_from, skip_fiducials, subjects_dir)\n scale = np.atleast_1d(scale)\n if scale.shape == (3,):\n if np.isclose(scale[1], scale[0]) and np.isclose(scale[2], scale[0]):\n scale = scale[0] # speed up scaling conditionals using a singleton\n elif scale.shape != (1,):\n raise ValueError('scale must have shape (3,) or (1,), got %s'\n % (scale.shape,))\n\n # make sure we have an empty target directory\n dest = subject_dirname.format(subject=subject_to,\n subjects_dir=subjects_dir)\n if os.path.exists(dest):\n if not overwrite:\n raise IOError(\"Subject directory for %s already exists: %r\"\n % (subject_to, dest))\n shutil.rmtree(dest)\n\n logger.debug('create empty directory structure')\n for dirname in paths['dirs']:\n dir_ = dirname.format(subject=subject_to, subjects_dir=subjects_dir)\n os.makedirs(dir_)\n\n logger.debug('save MRI scaling parameters')\n fname = os.path.join(dest, 'MRI scaling parameters.cfg')\n _write_mri_config(fname, subject_from, subject_to, scale)\n\n logger.debug('surf files [in mm]')\n for fname in paths['surf']:\n src = fname.format(subject=subject_from, subjects_dir=subjects_dir)\n src = os.path.realpath(src)\n dest = fname.format(subject=subject_to, subjects_dir=subjects_dir)\n pts, tri = read_surface(src)\n write_surface(dest, pts * scale, tri)\n\n logger.debug('BEM files [in m]')\n for bem_name in paths['bem']:\n scale_bem(subject_to, bem_name, subject_from, scale, subjects_dir,\n 
verbose=False)\n\n logger.debug('fiducials [in m]')\n for fname in paths['fid']:\n src = fname.format(subject=subject_from, subjects_dir=subjects_dir)\n src = os.path.realpath(src)\n pts, cframe = read_fiducials(src, verbose=False)\n for pt in pts:\n pt['r'] = pt['r'] * scale\n dest = fname.format(subject=subject_to, subjects_dir=subjects_dir)\n write_fiducials(dest, pts, cframe, verbose=False)\n\n logger.debug('MRIs [nibabel]')\n os.mkdir(mri_dirname.format(subjects_dir=subjects_dir,\n subject=subject_to))\n for fname in paths['mri']:\n mri_name = os.path.basename(fname)\n _scale_mri(subject_to, mri_name, subject_from, scale, subjects_dir)\n\n logger.debug('Transforms')\n for mri_name in paths['mri']:\n if mri_name.endswith('T1.mgz'):\n os.mkdir(mri_transforms_dirname.format(subjects_dir=subjects_dir,\n subject=subject_to))\n for fname in paths['transforms']:\n xfm_name = os.path.basename(fname)\n _scale_xfm(subject_to, xfm_name, mri_name,\n subject_from, scale, subjects_dir)\n break\n\n logger.debug('duplicate files')\n for fname in paths['duplicate']:\n src = fname.format(subject=subject_from, subjects_dir=subjects_dir)\n dest = fname.format(subject=subject_to, subjects_dir=subjects_dir)\n shutil.copyfile(src, dest)\n\n logger.debug('source spaces')\n for fname in paths['src']:\n src_name = os.path.basename(fname)\n scale_source_space(subject_to, src_name, subject_from, scale,\n subjects_dir, verbose=False)\n\n logger.debug('labels [in m]')\n os.mkdir(os.path.join(subjects_dir, subject_to, 'label'))\n if labels:\n scale_labels(subject_to, subject_from=subject_from, scale=scale,\n subjects_dir=subjects_dir)\n\n logger.debug('copy *.annot files')\n # they don't contain scale-dependent information\n if annot:\n src_pattern = os.path.join(subjects_dir, subject_from, 'label',\n '*.annot')\n dst_dir = os.path.join(subjects_dir, subject_to, 'label')\n for src_file in iglob(src_pattern):\n shutil.copy(src_file, dst_dir)\n\n\n@verbose\ndef scale_source_space(subject_to, src_name, subject_from=None, scale=None,\n subjects_dir=None, n_jobs=1, verbose=None):\n \"\"\"Scale a source space for an mri created with scale_mri().\n\n Parameters\n ----------\n subject_to : str\n Name of the scaled MRI subject (the destination mri subject).\n src_name : str\n Source space name. Can be a spacing parameter (e.g., ``'7'``,\n ``'ico4'``, ``'oct6'``) or a file name of a source space file relative\n to the bem directory; if the file name contains the subject name, it\n should be indicated as \"{subject}\" in ``src_name`` (e.g.,\n ``\"{subject}-my_source_space-src.fif\"``).\n subject_from : None | str\n The subject from which to read the source space. If None, subject_from\n is read from subject_to's config file.\n scale : None | float | array, shape = (3,)\n Scaling factor. Has to be specified if subjects_from is specified,\n otherwise it is read from subject_to's config file.\n subjects_dir : None | str\n Override the SUBJECTS_DIR environment variable.\n n_jobs : int\n Number of jobs to run in parallel if recomputing distances (only\n applies if scale is an array of length 3, and will not use more cores\n than there are source spaces).\n %(verbose)s\n\n Notes\n -----\n When scaling volume source spaces, the source (vertex) locations are\n scaled, but the reference to the MRI volume is left unchanged. 
Transforms\n are updated so that source estimates can be plotted on the original MRI\n volume.\n \"\"\"\n subjects_dir, subject_from, scale, uniform = \\\n _scale_params(subject_to, subject_from, scale, subjects_dir)\n # if n_params==1 scale is a scalar; if n_params==3 scale is a (3,) array\n\n # find the source space file names\n if src_name.isdigit():\n spacing = src_name # spacing in mm\n src_pattern = src_fname\n else:\n match = re.match(r\"(oct|ico|vol)-?(\\d+)$\", src_name)\n if match:\n spacing = '-'.join(match.groups())\n src_pattern = src_fname\n else:\n spacing = None\n src_pattern = os.path.join(bem_dirname, src_name)\n\n src = src_pattern.format(subjects_dir=subjects_dir, subject=subject_from,\n spacing=spacing)\n dst = src_pattern.format(subjects_dir=subjects_dir, subject=subject_to,\n spacing=spacing)\n\n # read and scale the source space [in m]\n sss = read_source_spaces(src)\n logger.info(\"scaling source space %s: %s -> %s\", spacing, subject_from,\n subject_to)\n logger.info(\"Scale factor: %s\", scale)\n add_dist = False\n for ss in sss:\n ss['subject_his_id'] = subject_to\n ss['rr'] *= scale\n # additional tags for volume source spaces\n if 'vox_mri_t' in ss:\n # maintain transform to original MRI volume ss['mri_volume_name']\n ss['vox_mri_t']['trans'][:3, :3] /= scale\n ss['src_mri_t']['trans'][:3, :3] /= scale\n # distances and patch info\n if uniform:\n if ss['dist'] is not None:\n ss['dist'] *= scale[0]\n # Sometimes this is read-only due to how it's read\n ss['nearest_dist'] = ss['nearest_dist'] * scale\n ss['dist_limit'] = ss['dist_limit'] * scale\n else: # non-uniform scaling\n ss['nn'] /= scale\n _normalize_vectors(ss['nn'])\n if ss['dist'] is not None:\n add_dist = True\n\n if add_dist:\n logger.info(\"Recomputing distances, this might take a while\")\n dist_limit = float(np.abs(sss[0]['dist_limit']))\n add_source_space_distances(sss, dist_limit, n_jobs)\n\n write_source_spaces(dst, sss)\n\n\ndef _scale_mri(subject_to, mri_fname, subject_from, scale, subjects_dir):\n \"\"\"Scale an MRI by setting its affine.\"\"\"\n subjects_dir, subject_from, scale, _ = _scale_params(\n subject_to, subject_from, scale, subjects_dir)\n\n if not has_nibabel():\n warn('Skipping MRI scaling for %s, please install nibabel')\n return\n\n import nibabel\n fname_from = op.join(mri_dirname.format(\n subjects_dir=subjects_dir, subject=subject_from), mri_fname)\n fname_to = op.join(mri_dirname.format(\n subjects_dir=subjects_dir, subject=subject_to), mri_fname)\n img = nibabel.load(fname_from)\n zooms = np.array(img.header.get_zooms())\n zooms[[0, 2, 1]] *= scale\n img.header.set_zooms(zooms)\n # Hack to fix nibabel problems, see\n # https://github.com/nipy/nibabel/issues/619\n img._affine = img.header.get_affine() # or could use None\n nibabel.save(img, fname_to)\n\n\ndef _scale_xfm(subject_to, xfm_fname, mri_name, subject_from, scale,\n subjects_dir):\n \"\"\"Scale a transform.\"\"\"\n subjects_dir, subject_from, scale, _ = _scale_params(\n subject_to, subject_from, scale, subjects_dir)\n\n # The nibabel warning should already be there in MRI step, if applicable,\n # as we only get here if T1.mgz is present (and thus a scaling was\n # attempted) so we can silently return here.\n if not has_nibabel():\n return\n\n fname_from = os.path.join(\n mri_transforms_dirname.format(\n subjects_dir=subjects_dir, subject=subject_from), xfm_fname)\n fname_to = op.join(\n mri_transforms_dirname.format(\n subjects_dir=subjects_dir, subject=subject_to), xfm_fname)\n assert op.isfile(fname_from), 
fname_from\n assert op.isdir(op.dirname(fname_to)), op.dirname(fname_to)\n # The \"talairach.xfm\" file stores the ras_mni transform.\n #\n # For \"from\" subj F, \"to\" subj T, F->T scaling S, some equivalent vertex\n # positions F_x and T_x in MRI (Freesurfer RAS) coords, knowing that\n # we have T_x = S @ F_x, we want to have the same MNI coords computed\n # for these vertices:\n #\n # T_mri_mni @ T_x = F_mri_mni @ F_x\n #\n # We need to find the correct T_ras_mni (talaraich.xfm file) that yields\n # this. So we derive (where † indicates inversion):\n #\n # T_mri_mni @ S @ F_x = F_mri_mni @ F_x\n # T_mri_mni @ S = F_mri_mni\n # T_ras_mni @ T_mri_ras @ S = F_ras_mni @ F_mri_ras\n # T_ras_mni @ T_mri_ras = F_ras_mni @ F_mri_ras @ S⁻¹\n # T_ras_mni = F_ras_mni @ F_mri_ras @ S⁻¹ @ T_ras_mri\n #\n\n # prepare the scale (S) transform\n scale = np.atleast_1d(scale)\n scale = np.tile(scale, 3) if len(scale) == 1 else scale\n S = Transform('mri', 'mri', scaling(*scale)) # F_mri->T_mri\n\n #\n # Get the necessary transforms of the \"from\" subject\n #\n xfm, kind = _read_fs_xfm(fname_from)\n assert kind == 'MNI Transform File', kind\n F_ras_mni = Transform('ras', 'mni_tal', xfm)\n hdr = _get_mri_header(mri_name)\n F_vox_ras = Transform('mri_voxel', 'ras', hdr.get_vox2ras())\n F_vox_mri = Transform('mri_voxel', 'mri', hdr.get_vox2ras_tkr())\n F_mri_ras = combine_transforms(\n invert_transform(F_vox_mri), F_vox_ras, 'mri', 'ras')\n del F_vox_ras, F_vox_mri, hdr, xfm\n\n #\n # Get the necessary transforms of the \"to\" subject\n #\n mri_name = op.join(mri_dirname.format(\n subjects_dir=subjects_dir, subject=subject_to), op.basename(mri_name))\n hdr = _get_mri_header(mri_name)\n T_vox_ras = Transform('mri_voxel', 'ras', hdr.get_vox2ras())\n T_vox_mri = Transform('mri_voxel', 'mri', hdr.get_vox2ras_tkr())\n T_ras_mri = combine_transforms(\n invert_transform(T_vox_ras), T_vox_mri, 'ras', 'mri')\n del mri_name, hdr, T_vox_ras, T_vox_mri\n\n # Finally we construct as above:\n #\n # T_ras_mni = F_ras_mni @ F_mri_ras @ S⁻¹ @ T_ras_mri\n #\n # By moving right to left through the equation.\n T_ras_mni = \\\n combine_transforms(\n combine_transforms(\n combine_transforms(\n T_ras_mri, invert_transform(S), 'ras', 'mri'),\n F_mri_ras, 'ras', 'ras'),\n F_ras_mni, 'ras', 'mni_tal')\n _write_fs_xfm(fname_to, T_ras_mni['trans'], kind)\n\n\n@verbose\ndef get_mni_fiducials(subject, subjects_dir=None, verbose=None):\n \"\"\"Estimate fiducials for a subject.\n\n Parameters\n ----------\n subject : str\n Name of the mri subject\n subjects_dir : None | str\n Override the SUBJECTS_DIR environment variable\n (sys.environ['SUBJECTS_DIR'])\n %(verbose)s\n\n Returns\n -------\n fids_mri : list\n List of estimated fiducials (each point in a dict)\n\n Notes\n -----\n This takes the ``fsaverage-fiducials.fif`` file included with MNE—which\n contain the LPA, nasion, and RPA for the ``fsaverage`` subject—and\n transforms them to the given FreeSurfer subject's MRI space.\n The MRI of ``fsaverage`` is already in MNI Talairach space, so applying\n the inverse of the given subject's MNI Talairach affine transformation\n (``$SUBJECTS_DIR/$SUBJECT/mri/transforms/talairach.xfm``) is used\n to estimate the subject's fiducial locations.\n\n For more details about the coordinate systems and transformations involved,\n see https://surfer.nmr.mgh.harvard.edu/fswiki/CoordinateSystems and\n :ref:`plot_source_alignment`.\n \"\"\"\n # Eventually we might want to allow using the MNI Talairach with-skull\n # transformation rather than the standard 
brain-based MNI Talaranch\n # transformation, and/or project the points onto the head surface\n # (if available).\n fname_fids_fs = os.path.join(os.path.dirname(__file__), 'data',\n 'fsaverage', 'fsaverage-fiducials.fif')\n\n # Read fsaverage fiducials file and subject Talairach.\n fids, coord_frame = read_fiducials(fname_fids_fs)\n assert coord_frame == FIFF.FIFFV_COORD_MRI\n if subject == 'fsaverage':\n return fids # special short-circuit for fsaverage\n mni_mri_t = invert_transform(_read_talxfm(subject, subjects_dir))\n\n # Convert to mm since this is Freesurfer's unit.\n lnr = np.array([f['r'] for f in fids]) * 1000.\n assert lnr.shape == (3, 3)\n\n # Apply transformation, to fsaverage (MNI) fiducials, convert back to m\n lnr = apply_trans(mni_mri_t, lnr) / 1000.\n for ii in range(3):\n fids[ii]['r'] = lnr[ii]\n return fids\n"
] | [
[
"numpy.dot",
"numpy.minimum",
"numpy.sqrt",
"scipy.sparse.block_diag",
"numpy.concatenate",
"numpy.all",
"numpy.round",
"numpy.searchsorted",
"numpy.where",
"numpy.hstack",
"scipy.sparse.issparse",
"numpy.unique",
"numpy.arange",
"numpy.eye",
"numpy.intersect1d",
"numpy.asanyarray",
"scipy.linalg.norm",
"scipy.linalg.inv",
"numpy.repeat",
"numpy.zeros",
"numpy.min",
"numpy.append",
"numpy.floor",
"numpy.array",
"numpy.sum",
"numpy.abs",
"numpy.tile",
"numpy.sort",
"numpy.ones",
"numpy.vstack"
],
[
"numpy.dot",
"numpy.asarray",
"numpy.concatenate",
"numpy.mean",
"numpy.searchsorted",
"numpy.where",
"scipy.sparse.isspmatrix_csc",
"numpy.empty_like",
"numpy.arange",
"numpy.flatnonzero",
"numpy.diff",
"numpy.zeros",
"numpy.argsort",
"scipy.sparse.isspmatrix_csr",
"numpy.array",
"numpy.sum",
"scipy.sparse.isspmatrix",
"numpy.isfinite",
"numpy.isfortran",
"numpy.ones",
"numpy.testing.assert_array_equal",
"numpy.array_split"
],
[
"numpy.dot",
"numpy.abs",
"numpy.nonzero",
"numpy.asarray",
"numpy.arange",
"scipy.spatial.distance.cdist",
"numpy.histogramdd",
"numpy.tile",
"numpy.atleast_1d",
"numpy.atleast_2d",
"scipy.optimize.leastsq",
"numpy.argmin",
"numpy.any",
"numpy.isscalar",
"numpy.array",
"numpy.logical_and",
"numpy.sum",
"numpy.isclose"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"0.12",
"0.14",
"0.15"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.7",
"1.0",
"0.10",
"1.2",
"0.14",
"0.19",
"1.5",
"0.12",
"0.17",
"0.13",
"1.6",
"1.4",
"1.9",
"1.3",
"1.10",
"0.15",
"0.18",
"0.16",
"1.8"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"1.6",
"0.14",
"1.10",
"0.15",
"1.4",
"1.3",
"1.9",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"0.12",
"1.0",
"0.17",
"0.16",
"1.8"
],
"tensorflow": []
}
] |
zhangbo2008/facenet | [
"4dfabcb5cf14f99622dbe5f9f12f0539821c169c"
] | [
"etc/tf_tutorial/Tensorflow-101-master/logistic_regression_customdata.py"
] | [
"#!/usr/bin/env python\n# coding: utf-8\n\n# # LOGISTIC REGRESSION WITH CUSTOM DATA\n\n# In[1]:\n\n\nimport os\nimport numpy as np\nimport tensorflow as tf\nimport matplotlib.pyplot as plt\nprint (\"Packages loaded\")\n\n\n# # Load data\n\n# In[2]:\n\n\n# Load them!\ncwd = os.getcwd()\nloadpath = cwd + \"/data/custom_data.npz\"\nl = np.load(loadpath)\n\n# See what's in here\nprint (l.files)\n\n# Parse data\ntrainimg = l['trainimg']\ntrainlabel = l['trainlabel']\ntestimg = l['testimg']\ntestlabel = l['testlabel']\nuse_gray = l['use_gray']\nntrain = trainimg.shape[0]\nnclass = trainlabel.shape[1]\ndim = trainimg.shape[1]\nntest = testimg.shape[0]\nprint (\"%d train images loaded\" % (ntrain))\nprint (\"%d test images loaded\" % (ntest))\nprint (\"%d dimensional input\" % (dim))\nprint (\"%d classes\" % (nclass))\n\n\n# # Define network\n\n# In[3]:\n\n\ntf.set_random_seed(0)\n# Parameters of Logistic Regression\nlearning_rate = 0.001\ntraining_epochs = 1000\nbatch_size = 10\ndisplay_step = 100\n\n# Create Graph for Logistic Regression\nx = tf.placeholder(\"float\", [None, dim]) \ny = tf.placeholder(\"float\", [None, nclass]) \nW = tf.Variable(tf.zeros([dim, nclass]), name = 'weights')\nb = tf.Variable(tf.zeros([nclass]))\n\n\n# # Define functions\n\n# In[4]:\n\n\nWEIGHT_DECAY_FACTOR = 1 # 0.000001\nl2_loss = tf.add_n([tf.nn.l2_loss(v) \n for v in tf.trainable_variables()])\n_pred = tf.nn.softmax(tf.matmul(x, W) + b) \ncost = tf.reduce_mean(-tf.reduce_sum(y*tf.log(_pred)\n , reduction_indices=1)) \ncost = cost + WEIGHT_DECAY_FACTOR*l2_loss\noptm = tf.train.GradientDescentOptimizer(\n learning_rate).minimize(cost) \n_corr = tf.equal(tf.argmax(_pred, 1), tf.argmax(y, 1)) \naccr = tf.reduce_mean(tf.cast(_corr, tf.float32))\ninit = tf.initialize_all_variables()\nprint (\"Functions ready\")\n\n\n# # Optimize\n\n# In[5]:\n\n\n# Launch the graph\nsess = tf.Session()\nsess.run(init)\n# Training cycle\nfor epoch in range(training_epochs):\n avg_cost = 0.\n num_batch = int(ntrain/batch_size)\n # Loop over all batches\n for i in range(num_batch): \n randidx = np.random.randint(ntrain, size=batch_size)\n batch_xs = trainimg[randidx, :]\n batch_ys = trainlabel[randidx, :] \n # Fit training using batch data\n sess.run(optm, feed_dict={x: batch_xs, y: batch_ys})\n # Compute average loss\n avg_cost += sess.run(cost\n , feed_dict={x: batch_xs, y: batch_ys})/num_batch\n\n # Display logs per epoch step\n if epoch % display_step == 0:\n print (\"Epoch: %03d/%03d cost: %.9f\" % \n (epoch, training_epochs, avg_cost))\n train_acc = sess.run(accr, feed_dict={x: batch_xs, y: batch_ys})\n print (\" Training accuracy: %.3f\" % (train_acc))\n test_acc = sess.run(accr, feed_dict={x: testimg, y: testlabel})\n print (\" Test accuracy: %.3f\" % (test_acc))\nprint (\"Optimization Finished!\")\n\n\n# # CLOSE SESSION\n\n# In[6]:\n\n\nsess.close()\nprint (\"Session closed.\")\n\n"
] | [
[
"tensorflow.matmul",
"tensorflow.zeros",
"tensorflow.cast",
"tensorflow.placeholder",
"tensorflow.trainable_variables",
"tensorflow.initialize_all_variables",
"tensorflow.nn.l2_loss",
"tensorflow.train.GradientDescentOptimizer",
"tensorflow.log",
"tensorflow.Session",
"tensorflow.set_random_seed",
"numpy.load",
"tensorflow.argmax",
"numpy.random.randint"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
}
] |