Columns:
repo_name: string (lengths 8-130)
hexsha: sequence
file_path: sequence
code: sequence
apis: sequence
alirkaya/card-fraud-detection
[ "90e4639c55a8502e03c35660a7b86a847c069cfb" ]
[ "Documentation/my_module.py" ]
[ "\"\"\"Prepared by [Ali Rifat Kaya](https://www.linkedin.com/in/alirifatkaya/)\n\"\"\"\n\n\ndef pr_auc_score(y_test, predicted_probabilities):\n \"\"\"Return AUCPR (Area Under Curve Precision-Recall) score\n\n Parameters\n ----------\n\n y_test : Test set target values\n Example:\n # df is a pandas dataframe with features and target variable\n # where 'Class' is the target variable\n >>> import pandas as pd\n >>> from sklearn.model_selection import train_test_split\n\n >>> X = df.drop('Class', axis=1).values # input matrix\n >>> y = df['Class'].values # target array\n >>> X_train, X_test, y_train, y_test = train_test_split(X,\n y,\n test_size=0.3,\n random_state=1)\n >>> # y_test is the target values for test set\n\n\n predicted_probabilities : Predicted probabilities for positive class\n Example:\n >>> from sklearn.linear_model import LogisticRegression\n >>> lr = LogisticRegression()\n >>> lr.fit(X_train, y_train)\n >>> predicted_probabilities = lr.predict_proba(X_test)[:, 1]\n\n\n Returns\n -------\n\n auc_score : The AUCPR score for the given target values and probabilities\n \"\"\"\n from sklearn.metrics import precision_recall_curve\n from sklearn.metrics import auc\n\n precision, recall, threshold = precision_recall_curve(y_test,\n predicted_probabilities)\n auc_score = auc(recall, precision)\n\n return auc_score\n\n\ndef scoring_functions():\n \"\"\"Returns a list of scoring functions as a list\n * Accuracy Score\n * Precision Score\n * Recall Score\n * Specificity Score\n * F1 Score\n * F2 Score\n * Matthews Correlation Coefficient\n * Geometric Mean Score\n * AUCPR Score\n * AUCROC Score\n\n Returns\n -------\n\n List of scoring fucntions\n Example:\n >>> list_of_scoring_functions = scores()\n >>> for scoring_function in list_of_scoring_functions:\n ... 
print(scoring_function)\n ### prints\n # accuracy_score\n # precision_score\n # recall_score\n # specificity_score\n # f1_score\n # fbeta_score\n # geometric_mean_score\n # matthews_corrcoef\n # roc_auc_score\n # pr_auc_score\n \"\"\"\n from sklearn.metrics import accuracy_score\n from sklearn.metrics import precision_score\n from sklearn.metrics import recall_score\n from imblearn.metrics import specificity_score\n from sklearn.metrics import f1_score\n from sklearn.metrics import fbeta_score\n from imblearn.metrics import geometric_mean_score\n from sklearn.metrics import matthews_corrcoef\n from sklearn.metrics import roc_auc_score\n\n list_of_scoring_functions = [\n accuracy_score,\n precision_score,\n recall_score,\n specificity_score,\n f1_score,\n fbeta_score,\n geometric_mean_score,\n matthews_corrcoef,\n roc_auc_score,\n pr_auc_score\n ]\n\n return list_of_scoring_functions\n\n\ndef do_cross_validation(X, y, estimators, cv=None, resample=None, scalers=[False], verbose=True, sleep_time=None):\n \"\"\" Return Cross-Validation score for each fold by fitting the model from\n scratch.\n\n Parameters\n ----------\n\n X: The input matrix\n Example:\n # df is a pandas dataframe with features and target variable\n # where 'Class' is the target variable\n >>> X = df.drop('Class', axis=1).values\n\n\n y: The target array\n Example:\n # df is a pandas dataframe with features and target variable\n # where the 'Class' is the target variable\n >>> y = df['Class'].values\n\n\n estimators: A list of tuple(s) where the tuple is ('estimator_name', estimator)\n Example:\n >>> from sklearn.linear_model import LogisticRegresion\n >>> lr = LogisticRegresion()\n >>> estimators = [('Logistic Regression', lr)]\n\n >>> from sklearn.linear_model import LogisticRegresion\n >>> from sklearn.ensemble import RandomForestClassifier\n >>> lr = LogisticRegresion()\n >>> rf = RandomForestClassifier()\n >>> estimators = [('Logistic Regression', lr),\n ... ('Random Forest Classifier', rf)]\n\n\n cv: Cross-Validation object. If no cross-validation object is passed to `cv`,\n then cv is `StratifiedKFold(n_splits=5, shuffle=True, random_state=1)`\n by default.\n Example:\n >>> from sklearn.model_selection import StratifiedKFold\n >>> cv = StratifiedKFold(n_splits=5, shuffle=True, random_state=1)\n\n\n resample: if True, resample the training data and fits the models using the\n resampled training data. 
Do NOT touch to validation data.\n Default value is `None`.\n Example:\n >>> from imblearn.over_sampling import SMOTE\n >>> from sklearn.linear_model import LogisticRegresion\n >>> smote = SMOTE()\n >>> resample = [('SMOTE', smote)]\n >>> lr = LogisticRegresion()\n >>> estimators = [('Logistic Regression', lr)]\n >>> do_cross_validation(X, y, estimators=estimators, cv, resample=resample, scaler=[True], verbose=False)\n\n\n scalers: An array of boolean values, each value is for the corresponding\n estimator.\n Default value is `[False]`\n Example:\n >>> from sklearn.linear_model import LogisticRegression\n >>> from sklearn.ensemble import RandomForestClassifier\n >>> lr = LogisticRegression()\n >>> rf = RandomForestClassifier()\n >>> models = [lr, rf]\n >>> scalers = [True, False]\n >>> cv_results = do_cross_validation(X, y, estimators=models,\n cv, scalers=scalers, verbose=False)\n\n\n print: if True, prints out information about each fold such as size of the\n training data and test data, AUCPR and AUCROC scores for each fold,\n and predicted labels.\n Default value is `True`.\n\n\n sleep_time: Sleeping time in seconds between each iteration\n Example:\n >>> sleep_time=1800 # 30 mins\n >>> cv_results = do_cross_validation(X, y, estimators=models,\n cv, scalers=scalers, verbose=False,\n sleep_time=sleep_time)\n\n\n\n Returns\n -------\n\n Nested dictionary of results with\n * precisions and recalls to plot precision-recall curve\n * fpr and tpr to plot roc curve\n Example:\n >>> {\n 'Logistic Regression' : {\n 'accuracy_score' : [], # cross validation accuracy scores as a list\n ...\n 'tprs' : [] # cross validation tpr for each fold\n }\n }\n\n\n Verbose\n ------\n\n For each fold of cross-validation, prints the followings:\n * The estimator\n * Training set and validation set sizes\n * AUCPR score for training and validation sets\n * AUCROC score for training and validation sets\n * Number of True Positives in the validation set\n * Number of False Positives in the validation set\n \"\"\"\n from sklearn.preprocessing import scale\n from sklearn.metrics import roc_auc_score\n from sklearn.metrics import fbeta_score\n from sklearn.metrics import confusion_matrix\n from sklearn.metrics import precision_recall_curve\n from sklearn.metrics import roc_curve\n from sklearn.base import clone\n from time import sleep\n\n scores = {}\n list_of_scoring_functions = scoring_functions()\n metrics = ['accuracy_score', 'precision_score', 'recall_score',\n 'specificity_score', 'f1_score', 'f2_score',\n 'geometric_mean_score', 'matthews_corrcoef', 'roc_auc_score',\n 'pr_auc_score']\n\n if not cv:\n cv = StratifiedKFold(n_splits=5, shuffle=True, random_state=1)\n\n i = 0 # tracks the folds\n for train_idx, validation_idx in cv.split(X, y):\n X_train, X_validation = X[train_idx], X[validation_idx]\n y_train, y_validation = y[train_idx], y[validation_idx]\n\n X_train_copy = X_train.copy()\n y_train_copy = y_train.copy()\n X_validation_copy = X_validation.copy()\n\n if resample:\n if verbose:\n print('Fold {}:'.format(i + 1))\n\n for name, method in resample:\n X_train_copy_resample, y_train_copy_resample = method.fit_resample(\n X_train_copy, y_train_copy)\n if verbose:\n print('\\n'+name)\n print('-' * 81)\n print('Number of transactions in the original training dataset:', X_train_copy.shape[0])\n print('Number of transactions in the resampled training dataset:',\n X_train_copy_resample.shape[0])\n print('-' * 81)\n print('Number of Fraudulent Transactions in the original training dataset:',\n 
y_train_copy.sum())\n print('Number of Fraudulent Transactions in the resampled training dataset',\n y_train_copy_resample.sum())\n print('=' * 81)\n\n for estimator, scaler in zip(estimators, scalers):\n ml_name, ml = estimator\n estimator_ = clone(ml)\n if scaler:\n X_train_copy_resample_scaled = scale(\n X_train_copy_resample)\n X_validation_scaled = scale(X_validation_copy)\n estimator_.fit(\n X_train_copy_resample_scaled, y_train_copy_resample)\n preds = estimator_.predict(X_validation_scaled)\n probas_training = estimator_.predict_proba(X_train_copy_resample_scaled)[\n :, 1]\n probas = estimator_.predict_proba(\n X_validation_scaled)[:, 1]\n else:\n estimator_.fit(X_train_copy_resample,\n y_train_copy_resample)\n preds = estimator_.predict(X_validation_copy)\n probas_training = estimator_.predict_proba(\n X_train_copy_resample)[:, 1]\n probas = estimator_.predict_proba(\n X_validation_copy)[:, 1]\n\n precision, recall, threshold = precision_recall_curve(\n y_validation, probas)\n fpr, tpr, threshold = roc_curve(y_validation, probas)\n tn, fp, fn, tp = confusion_matrix(y_validation, preds).ravel()\n\n if verbose:\n print('\\n' + ml_name + ' with ' + name)\n print('-' * 81)\n print('Training data AUCPR score: {}'.format(\n pr_auc_score(y_train_copy_resample, probas_training)))\n print('Validation data AUCPR score: {}'.format(\n pr_auc_score(y_validation, probas)))\n\n print('\\nTraining data AUCROC score: {}'.format(\n roc_auc_score(y_train_copy_resample, probas_training)))\n print('Validation data AUCROC score: {}'.format(\n roc_auc_score(y_validation, probas)))\n print('-' * 81)\n print('There are {} fraudulent transactions in the validation '\n 'set'.format(y_validation.sum()))\n print('{} out of {} predicted fraudulent transactions '\n 'are true fraudulent transactions'.format(\n tp, fp + tp))\n print()\n\n key_ = ml_name + '_' + name\n if key_ not in scores.keys():\n scores[key_] = {}\n\n plots = ['precisions', 'recalls', 'fprs', 'tprs']\n for key in plots:\n if key not in scores[key_]:\n scores[key_][key] = []\n\n scores[key_]['precisions'].append(precision)\n scores[key_]['recalls'].append(recall)\n scores[key_]['fprs'].append(fpr)\n scores[key_]['tprs'].append(tpr)\n\n for metric_name, metric in zip(metrics, list_of_scoring_functions):\n if metric_name not in scores[key_].keys():\n scores[key_][metric_name] = []\n if metric in [roc_auc_score, pr_auc_score]:\n scores[key_][metric_name].append(metric(y_validation,\n probas))\n elif metric == fbeta_score:\n scores[key_][metric_name].append(metric(y_validation,\n preds,\n beta=2))\n else:\n scores[key_][metric_name].append(metric(y_validation,\n preds))\n if sleep_time:\n print('sleeping... 
{} seconds'.format(sleep_time))\n sleep(sleep_time)\n\n if verbose:\n print()\n else:\n if verbose:\n print('Fold {}:'.format(i + 1))\n print('\\nNumber of Observations in the Training Data: {}'\n .format(X_train.shape[0]))\n print('Number of Observations in the Validation Data: {}:'\n .format(y_validation.shape[0]))\n print('=' * 81)\n for estimator, scaler in zip(estimators, scalers):\n ml_name, ml = estimator\n estimator_ = clone(ml)\n if scaler:\n X_train_scaled = scale(X_train_copy)\n X_validation_scaled = scale(X_validation_copy)\n estimator_.fit(X_train_scaled, y_train)\n preds = estimator_.predict(X_validation_scaled)\n probas_training = estimator_.predict_proba(X_train_scaled)[\n :, 1]\n probas = estimator_.predict_proba(X_validation_scaled)[:, 1]\n else:\n estimator_.fit(X_train, y_train)\n preds = estimator_.predict(X_validation)\n probas_training = estimator_.predict_proba(X_train)[:, 1]\n probas = estimator_.predict_proba(X_validation)[:, 1]\n\n precision, recall, threshold = precision_recall_curve(\n y_validation, probas)\n fpr, tpr, threshold = roc_curve(y_validation, probas)\n tn, fp, fn, tp = confusion_matrix(y_validation, preds).ravel()\n\n if verbose:\n print('\\n' + ml_name)\n print(('-' * 81))\n print(('Training data AUCPR score: {}'.format(\n pr_auc_score(y_train, probas_training))))\n print(('Validation data AUCPR score: {}'.format(\n pr_auc_score(y_validation, probas))))\n\n print(('\\nTraining data AUCROC score: {}'.format(\n roc_auc_score(y_train, probas_training))))\n print(('Validation data AUCROC score: {}'.format(\n roc_auc_score(y_validation, probas))))\n print(('-' * 81))\n print('There are {} fraudulent transactions in the validation '\n 'set'.format(y_validation.sum()))\n print('{} out of {} predicted fraudulent transactions '\n 'are true fraudulent transactions'.format(\n tp, fp + tp))\n print()\n\n if ml_name not in scores.keys():\n scores[ml_name] = {}\n\n plots = ['precisions', 'recalls', 'fprs', 'tprs']\n for key in plots:\n if key not in scores[ml_name]:\n scores[ml_name][key] = []\n\n scores[ml_name]['precisions'].append(precision)\n scores[ml_name]['recalls'].append(recall)\n scores[ml_name]['fprs'].append(fpr)\n scores[ml_name]['tprs'].append(tpr)\n\n for metric_name, metric in zip(metrics, list_of_scoring_functions):\n if metric_name not in scores[ml_name].keys():\n scores[ml_name][metric_name] = []\n if metric in [roc_auc_score, pr_auc_score]:\n scores[ml_name][metric_name].append(metric(y_validation,\n probas))\n elif metric == fbeta_score:\n scores[ml_name][metric_name].append(metric(y_validation,\n preds,\n beta=2))\n else:\n scores[ml_name][metric_name].append(metric(y_validation,\n preds))\n if sleep_time:\n print('sleeping... 
{} seconds'.format(sleep_time))\n sleep(sleep_time)\n if verbose:\n print()\n i += 1\n if verbose:\n print('=' * 81)\n print('=' * 81)\n\n return scores\n\n\ndef plot_confusion_matrix(y, predictions, title=None, ax=None, cmap='Purples', cbar=False):\n \"\"\"Plots Confusion Matrix\n\n Parameters\n ----------\n\n y: The target array\n Example:\n # df is a pandas dataframe with features and target variable\n # where the 'Class' is the target variable\n >>> y = df['Class'].values\n\n predictions: The predicted labels\n Example:\n # df is a pandas dataframe with features and target variable\n # where the 'Class' is the target variable\n >>> from sklearn.linear_model import LogisticRegresion\n >>> X = df.drop('Class', axis=1).values\n >>> y = df['Class'].values\n >>> lr = LogisticRegresion()\n >>> lr.fit(X_train, y_train)\n >>> predictions = lr.predict(X_test) # predicted labels\n\n title: Title of the plot\n Example:\n >>> title = 'Logistic Regression'\n >>> plot_confusion_matrix(y, predictions, title=title, ax, cmap, cbar)\n\n ax: An axis object\n Example:\n >>> fig, ax = plt.subplots()\n >>> plot_confusion_matrix(y, predictions, title, ax=ax)\n\n cmap: The color map for the confusion matrix\n Example:\n >>> import matplotlib.pyplot as plt\n >>> plt.colormaps() # prints all available color maps\n\n cbar: If True shows the color bar next to the confusion matrix\n Example\n >>> plot_confusion_matrix(y, predictions, title, ax=ax, cbar=True)\n\n Returns\n -------\n\n ax: Axes object\n \"\"\"\n\n from pandas import DataFrame\n import matplotlib.pyplot as plt\n from sklearn.metrics import confusion_matrix\n from seaborn import heatmap\n\n if ax is None:\n ax = plt.gca()\n\n cm_df = DataFrame(confusion_matrix(y, predictions))\n\n # Use a seaborn heatmap to plot confusion matrices\n # The dataframe is transposed to make Actual values on x-axis and\n # predicted values on y-axis\n # annot = True includes the numbers in each box\n # vmin and vmax just adjusts the color value\n heatmap(cm_df.T,\n annot=True,\n annot_kws={\"size\": 15},\n cmap=cmap,\n vmin=0,\n vmax=800,\n fmt='.0f',\n linewidths=1,\n linecolor=\"white\",\n cbar=cbar,\n xticklabels=[\"Genuine\", \"Fraud\"],\n yticklabels=[\"Genuine\", \"Fraud\"],\n ax=ax)\n\n # adjusts the heights of the top and bottom squares of the heatmap\n # matplotlib 3.1.1 has a bug that shows only the half of the top\n # and bottom rows of the heatmap\n # bottom, top = ax.get_ylim()\n # _ = ax.set_ylim(bottom + 0.5, top - 0.5)\n\n # ax.set_ylabel(\"Predicted\", fontweight='bold', fontsize=15)\n # ax.set_xlabel(\"Actual\", fontweight='bold', fontsize=15)\n ax.set_xticklabels([\"Genuine\", \"Fraud\"], fontsize=13)\n ax.set_yticklabels([\"Genuine\", \"Fraud\"], fontsize=13)\n ax.set_title(title, fontweight='bold', pad=5)\n\n return ax\n\n\ndef plot_precision_recall_curve(y_test, precisions, recalls, title, ax=None):\n \"\"\"Plots Precision-Recall Curve\n\n Parameters\n ----------\n\n y: The target array of the test set\n Example:\n # df is a pandas dataframe with features and target variable\n # where the 'Class' is the target variable\n >>> X = df.drop('Class', axis=1).values\n >>> y = df.Class.values\n >>> X_train, X_test, y_train, y_test = train_test_split(X,\n y,\n test_size=0.3,\n random_state=1)\n\n\n precisions: Precision score for each threshold\n Example:\n # df is a pandas dataframe with features and target variable\n # where 'Class' is the target variable\n >>> from sklearn.linear_model import LogisticRegresion\n >>> from sklearn.metrics import 
precision_recall_curve\n >>> X = df.drop('Class', axis=1).values\n >>> y = df.Class.values\n >>> X_train, X_test, y_train, y_test = train_test_split(X,\n y,\n test_size=0.3,\n random_state=1)\n >>> lr = LogisticRegresion()\n >>> lr.fit(X_train, y_train)\n >>> predicted_probabilities = lr.predict_proba(X_test)[:, 1]\n >>> precision, _, _ = precision_recall_curve(y, predicted_probabilities)\n\n\n recalls: Recall score for each threshold\n Example:\n # df is a pandas dataframe with features and target variable\n # where 'Class' is the target variable\n >>> from sklearn.linear_model import LogisticRegresion\n >>> from sklearn.metrics import precision_recall_curve\n >>> X = df.drop('Class', axis=1).values\n >>> y = df.Class.values\n >>> X_train, X_test, y_train, y_test = train_test_split(X,\n y,\n test_size=0.3,\n random_state=1)\n >>> lr = LogisticRegresion()\n >>> lr.fit(X_train, y_train)\n >>> predicted_probabilities = lr.predict_proba(X_test)[:, 1]\n >>> _, recall, _ = precision_recall_curve(y, predicted_probabilities)\n\n\n title: Title of the plot\n Example:\n >>> title = 'Logistic Regression'\n >>> plot_precision_recall_curve(precisions, recalls, title=title, ax)\n\n\n ax: An axis object\n Example:\n >>> fig, ax = plt.subplots()\n >>> plot_confusion_matrix(y, predictions, title, ax=ax)\n\n\n Returns\n -------\n\n ax: Axes object\n \"\"\"\n\n from numpy import linspace\n from numpy import interp\n from numpy import mean\n from numpy import std\n from numpy import minimum\n from numpy import maximum\n from sklearn.metrics import auc\n import matplotlib.pyplot as plt\n\n if ax is None:\n ax = plt.gca()\n\n # Metrics\n prs = []\n aucs = []\n mean_recall = linspace(0, 1, 100)\n\n # plots PR curve for each fold\n i = 0\n for precision, recall in zip(precisions, recalls):\n prs.append(interp(mean_recall, precision, recall))\n pr_auc = auc(recall, precision)\n aucs.append(pr_auc)\n ax.plot(recall,\n precision,\n lw=3,\n alpha=0.5,\n label='Fold %d (AUCPR = %0.2f)' % (i + 1, pr_auc))\n i += 1\n\n # plots the mean AUCPR curve\n ax.axhline(y_test.sum() / y_test.shape[0],\n linestyle='--',\n alpha=0.8,\n label='No Skill')\n mean_precision = mean(prs, axis=0)\n mean_auc = auc(mean_recall, mean_precision)\n std_auc = std(aucs)\n ax.plot(mean_precision,\n mean_recall,\n color='navy',\n label=r'Mean (AUCPR = %0.3f $\\pm$ %0.2f)' % (mean_auc, std_auc),\n lw=4)\n\n ax.set_title(title)\n ax.set_xlim([-0.05, 1.05])\n ax.set_ylim([-0.05, 1.05])\n ax.legend(fontsize='xx-small')\n\n return ax\n\n\ndef plot_roc_curve(fprs, tprs, title, ax=None):\n \"\"\"Plots ROC (Receiver Operating Curve)\n\n Parameters\n ----------\n\n fprs: False Positive Rate for each threshold\n Example:\n # df is a pandas dataframe with features and target variable\n # where 'Class' is the target variable\n >>> from sklearn.linear_model import LogisticRegresion\n >>> from sklearn.metrics import precision_recall_curve\n >>> X = df.drop('Class', axis=1).values\n >>> y = df.Class.values\n >>> X_train, X_test, y_train, y_test = train_test_split(X,\n y,\n test_size=0.3,\n random_state=1)\n >>> lr = LogisticRegresion()\n >>> lr.fit(X_train, y_train)\n >>> predicted_probabilities = lr.predict_proba(X_test)[:, 1]\n >>> fpr, _, _ = roc_curve(y, predicted_probabilities)\n\n\n tprs: True Positive Rate for each threshold\n Example:\n # df is a pandas dataframe with features and target variable\n # where 'Class' is the target variable\n >>> from sklearn.linear_model import LogisticRegresion\n >>> from sklearn.metrics import precision_recall_curve\n 
>>> X = df.drop('Class', axis=1).values\n >>> y = df.Class.values\n >>> X_train, X_test, y_train, y_test = train_test_split(X,\n y,\n test_size=0.3,\n random_state=1)\n >>> lr = LogisticRegresion()\n >>> lr.fit(X_train, y_train)\n >>> predicted_probabilities = lr.predict_proba(X_test)[:, 1]\n >>> _, tpr, _ = roc_curve(y, predicted_probabilities)\n\n\n title: Title of the plot\n Example:\n >>> title = 'Logistic Regression'\n >>> plot_precision_recall_curve(precisions, recalls, title=title, ax)\n\n\n ax: An axis object\n Example:\n >>> fig, ax = plt.subplots()\n >>> plot_confusion_matrix(y, predictions, title, ax=ax)\n\n\n Returns\n -------\n\n ax: Axes object\n \"\"\"\n\n from numpy import linspace\n from numpy import interp\n from numpy import mean\n from numpy import std\n from numpy import minimum\n from numpy import maximum\n from sklearn.metrics import auc\n import matplotlib.pyplot as plt\n\n if ax is None:\n ax = plt.gca()\n\n # Metrics\n tprs_ = []\n aucs = []\n mean_fpr = linspace(0, 1, 100)\n\n # plots ROC curves for each fold\n i = 0\n for fpr, tpr in zip(fprs, tprs):\n interp_tpr = interp(mean_fpr, fpr, tpr)\n interp_tpr[0] = 0.0\n tprs_.append(interp_tpr)\n roc_auc = auc(fpr, tpr)\n aucs.append(roc_auc)\n ax.plot(fpr,\n tpr,\n lw=3,\n alpha=0.5,\n label='ROC Fold %d (AUC = %0.2f)' % (i + 1, roc_auc))\n\n i += 1\n\n # Plot mean ROC Curve\n ax.plot([0, 1], [0, 1],\n linestyle='--',\n lw=3,\n color='k',\n label='No Skill',\n alpha=.8)\n mean_tpr = mean(tprs_, axis=0)\n mean_tpr[-1] = 1.0\n mean_auc = auc(mean_fpr, mean_tpr)\n std_auc = std(aucs)\n ax.plot(mean_fpr,\n mean_tpr,\n color='navy',\n label=r'Mean ROC (AUC = %0.3f $\\pm$ %0.2f)' % (mean_auc, std_auc),\n lw=4)\n\n # calculates the standard deviation and fills the +-1 standard deviation\n # of the mean ROC curve\n std_tpr = std(tprs_, axis=0)\n tprs_upper = minimum(mean_tpr + std_tpr, 1)\n tprs_lower = maximum(mean_tpr - std_tpr, 0)\n ax.fill_between(mean_fpr,\n tprs_lower,\n tprs_upper,\n color='grey',\n alpha=.2,\n label=r'$\\pm$ 1 Standard Deviation')\n\n ax.set_xlim([-0.05, 1.05])\n ax.set_ylim([-0.05, 1.05])\n ax.set_title(title)\n ax.legend(loc='lower right', fontsize='xx-small')\n\n return ax\n\n\ndef calculate_statistics(cv_scores):\n \"\"\"Returns mean and standard deviation of CV scores\n \"\"\"\n from numpy import array\n\n not_scores = ['precisions', 'recalls', 'fprs', 'tprs', 'predictions']\n mean_scores = {}\n std_dev = {}\n for k, v in cv_scores.items():\n mean_scores[k] = []\n std_dev[k] = []\n for key, value in v.items():\n if key not in not_scores:\n mean_scores[k].append(array(value).mean())\n std_dev[k].append(array(value).std())\n return mean_scores, std_dev\n\n\ndef make_df_statistics(cv_results):\n \"\"\"Return results from `calculate_statistics` into a DataFrame\"\"\"\n from pandas import DataFrame\n\n metrics = [\n 'accuracy_score', 'precision_score', 'recall_score', 'specificity_score',\n 'f1_score', 'f2_score', 'geometric_mean_score', 'matthews_corrcoef',\n 'roc_auc_score', 'pr_auc_score']\n new_metrics = metrics[-3:]\n df = DataFrame(cv_results)\n df['metrics'] = metrics\n df.set_index('metrics', inplace=True)\n df.index.name = None\n df = df.loc[new_metrics, :]\n df = df.T\n return df\n\n\ndef train_model(estimators, X, y, scalers=[False]):\n from sklearn.preprocessing import scale\n from sklearn.metrics import roc_auc_score\n from sklearn.metrics import fbeta_score\n from sklearn.metrics import precision_recall_curve\n from sklearn.metrics import roc_curve\n\n scores = {}\n 
list_of_scoring_functions = scoring_functions()\n metrics = ['accuracy_score', 'precision_score', 'recall_score',\n 'specificity_score', 'f1_score', 'f2_score',\n 'geometric_mean_score', 'matthews_corrcoef', 'roc_auc_score',\n 'pr_auc_score']\n\n for estimator, scaler in zip(estimators, scalers):\n ml_name, ml = estimator\n X_copy = X.copy()\n\n if scaler:\n X_scaled = scale(X_copy)\n ml.fit(X_scaled, y)\n preds = ml.predict(X_scaled)\n probas = ml.predict_proba(X_scaled)[:, 1]\n else:\n ml.fit(X, y)\n preds = ml.predict(X)\n probas = ml.predict_proba(X)[:, 1]\n\n if ml_name not in scores.keys():\n scores[ml_name] = {}\n\n for metric_name, metric in zip(metrics, list_of_scoring_functions):\n if metric_name not in scores[ml_name].keys():\n scores[ml_name][metric_name] = []\n if metric in [roc_auc_score, pr_auc_score]:\n scores[ml_name][metric_name].append(metric(y,\n probas))\n elif metric == fbeta_score:\n scores[ml_name][metric_name].append(metric(y,\n preds,\n beta=2))\n else:\n scores[ml_name][metric_name].append(metric(y,\n preds))\n return scores\n\n\ndef test_model(estimators, X, y, scalers=[False]):\n from sklearn.preprocessing import scale\n from sklearn.metrics import roc_auc_score\n from sklearn.metrics import fbeta_score\n from sklearn.metrics import precision_recall_curve\n from sklearn.metrics import roc_curve\n\n scores = {}\n list_of_scoring_functions = scoring_functions()\n metrics = ['accuracy_score', 'precision_score', 'recall_score',\n 'specificity_score', 'f1_score', 'f2_score',\n 'geometric_mean_score', 'matthews_corrcoef', 'roc_auc_score',\n 'pr_auc_score']\n\n for estimator, scaler in zip(estimators, scalers):\n ml_name, ml = estimator\n\n X_copy = X.copy()\n if scaler:\n X_scaled = scale(X_copy)\n preds = ml.predict(X_scaled)\n probas = ml.predict_proba(X_scaled)[:, 1]\n else:\n preds = ml.predict(X)\n probas = ml.predict_proba(X)[:, 1]\n\n precision, recall, threshold = precision_recall_curve(\n y, probas)\n fpr, tpr, threshold = roc_curve(y, probas)\n\n if ml_name not in scores.keys():\n scores[ml_name] = {}\n\n keys = ['precisions', 'recalls', 'fprs', 'tprs', 'predictions']\n values = [precision, recall, fpr, tpr, preds]\n for key, value in zip(keys, values):\n if key not in scores[ml_name].keys():\n scores[ml_name][key] = []\n scores[ml_name][key].append(value)\n\n for metric_name, metric in zip(metrics, list_of_scoring_functions):\n if metric_name not in scores[ml_name].keys():\n scores[ml_name][metric_name] = []\n if metric in [roc_auc_score, pr_auc_score]:\n scores[ml_name][metric_name].append(metric(y,\n probas))\n elif metric == fbeta_score:\n scores[ml_name][metric_name].append(metric(y,\n preds,\n beta=2))\n else:\n scores[ml_name][metric_name].append(metric(y,\n preds))\n return scores\n" ]
[ [ "sklearn.base.clone", "numpy.array", "numpy.interp", "sklearn.metrics.roc_curve", "sklearn.metrics.auc", "pandas.DataFrame", "numpy.std", "matplotlib.pyplot.gca", "sklearn.preprocessing.scale", "sklearn.metrics.confusion_matrix", "sklearn.metrics.roc_auc_score", "numpy.maximum", "sklearn.metrics.precision_recall_curve", "numpy.linspace", "numpy.mean", "numpy.minimum" ] ]
onnela-lab/mech-mle
[ "8a36c1a75dbbc665feb642f527aeb09e7462e90b" ]
[ "drosa.py" ]
[ "# JP Onnela\r\n# April 20, 2021\r\n\r\n# Edited May 17, 2021 by Jonathan Larson\r\n\r\nimport networkx as nx\r\nimport random\r\nimport scipy.stats as ss\r\nimport time\r\n\r\ndef generate_DMC(q_mod, q_con, n):\r\n \"\"\"Generate DMC model realization given parameters.\"\"\"\r\n G = nx.Graph()\r\n G.add_edge(0,1)\r\n new_nodes = list(range(2,n))\r\n anchor_nodes = []\r\n for v in new_nodes:\r\n u = random.choice(list(G.nodes()))\r\n anchor_nodes.append(u)\r\n G.add_node(v)\r\n \r\n # duplication\r\n G.add_edges_from([(v,w) for w in G.neighbors(u)])\r\n \r\n # mutation\r\n for w in list(G.neighbors(u)):\r\n if ss.bernoulli.rvs(q_mod):\r\n edge = random.choice([(v,w), (u,w)])\r\n G.remove_edge(*edge)\r\n \r\n # complementation\r\n if ss.bernoulli.rvs(q_con):\r\n G.add_edge(u,v)\r\n return (G, new_nodes, anchor_nodes)\r\n\r\n\r\ndef deconstruct_DMC(G, alpha, beta):\r\n \"\"\"Deconstruct a DMC graph over a single step.\"\"\"\r\n # reverse complementation\r\n if G.has_edge(alpha, beta):\r\n G.remove_edge(alpha, beta)\r\n w = 1\r\n else:\r\n w = 0\r\n\r\n # reverse mutation\r\n alpha_neighbors = set(G.neighbors(alpha))\r\n beta_neighbors = set(G.neighbors(beta))\r\n x = len(alpha_neighbors & beta_neighbors)\r\n y = len(alpha_neighbors | beta_neighbors)\r\n for neighbor in alpha_neighbors:\r\n G.add_edge(beta, neighbor)\r\n\r\n # reverse duplication\r\n G.remove_node(alpha)\r\n return (w, x, y)\r\n\r\n\r\ndef find_min_uni_pair(G):\r\n \"\"\"Find pair of nodes that have minimal cardinality of the union of their neighbors.\"\"\"\r\n alpha = None\r\n beta = None\r\n union_size = G.number_of_nodes()\r\n nodes = list(G.nodes())\r\n random.shuffle(nodes)\r\n for u in nodes:\r\n for v in nodes:\r\n if u > v:\r\n u_neighbors = set(G.neighbors(u))\r\n v_neighbors = set(G.neighbors(v))\r\n y = len(u_neighbors | v_neighbors)\r\n if G.has_edge(u,v):\r\n y = y - 2\r\n if y < union_size:\r\n union_size = y\r\n alpha = u\r\n beta = v\r\n return (alpha, beta, union_size)\r\n\r\n\r\ndef deconstruct(G):\r\n \"\"\"Deconstruct the graph until.\"\"\"\r\n alphas = []\r\n betas = []\r\n W = 0\r\n X = 0\r\n Y = 0\r\n (alpha, beta, union_size) = find_min_uni_pair(G)\r\n while (not alpha is None and not beta is None):\r\n print(\"Number of nodes remaining:\", G.number_of_nodes())\r\n alphas.append(alpha)\r\n betas.append(beta)\r\n (w, x, y) = deconstruct_DMC(G, alpha, beta)\r\n W += w\r\n X += x\r\n Y += y\r\n (alpha, beta, union_size) = find_min_uni_pair(G)\r\n return (alphas, betas, W, X, Y)\r\n\r\n\r\ndef estimate_parms(W, X, Y, n):\r\n \"\"\"Compute estimates of q_mod and q_con parameters.\"\"\"\r\n q_mod_hat = 1 - X / Y\r\n q_con_hat = W / (n - 1)\r\n return (q_mod_hat, q_con_hat)\r\n\r\n\r\ndef read_edgelist(input_file):\r\n \"\"\"Read edgelist from input file\"\"\"\r\n G = nx.Graph()\r\n counter = 0\r\n for line in open(input_file):\r\n counter += 1\r\n line = line.rstrip().split(\"\\t\")\r\n node_i = line[0]\r\n node_j = line[1]\r\n G.add_edge(node_i, node_j)\r\n return (G, counter)\r\n\r\n\r\ndef print_stats(G, new_nodes, anchor_nodes):\r\n \"\"\"Print out some statistics.\"\"\"\r\n print(\"Nodes:\", G.nodes())\r\n print(\"Edges:\", G.edges())\r\n print(\"New nodes (alpha):\", new_nodes)\r\n print(\"Anchor nodes (beta):\", anchor_nodes)\r\n\r\ndef save_results(output_file):\r\n\tF = open(output_file, \"w\")\r\n\t# alphas\r\n\tfor alpha in alphas:\r\n\t\tF.write(str(alpha) + \" \")\r\n\tF.write(\"\\n\")\t\r\n\t# betas\r\n\tfor beta in betas:\r\n\t\tF.write(str(beta) + \" \")\r\n\tF.write(\"\\n\")\t\r\n\t# 
others\r\n\tF.write(str(W) + \" \" + str(X) + \" \" + str(Y) + \" \" + str(q_mod_hat) + \" \" + str(q_con_hat))\r\n\tF.close()\r\n\r\n\r\n# ----------------------------------------------------------------\r\n\r\n\r\n# input and output files\r\ninput_file = \"drosa.tsv\"\r\n\r\n# read data\r\n(G, counter) = read_edgelist(input_file)\r\nG.remove_edges_from(nx.selfloop_edges(G))\r\nprint(G.number_of_edges())\r\n\r\n# degenerate graph\r\nn = G.number_of_nodes()\r\nstart = time.time()\r\n(alphas, betas, W, X, Y) = deconstruct(G)\r\nend = time.time()\r\nprint(\"Time elapsed:\", end - start)\r\n(q_mod_hat, q_con_hat) = estimate_parms(W, X, Y, n)\r\nprint(\"Parameter estimates:\", q_mod_hat, q_con_hat)\r\n\r\n\r\n\r\n\r\n" ]
[ [ "scipy.stats.bernoulli.rvs" ] ]
jasonfan1997/threeML
[ "21b1c76ad3423f745b9f56413d93ee01d1d5855f" ]
[ "threeML/test/test_fits_file.py" ]
[ "from threeML.io.fits_file import FITSExtension, FITSFile\nimport numpy as np\nimport astropy.io.fits as fits\n\nimport pytest\n\n\nclass DUMMYEXT(FITSExtension):\n def __init__(self, test_value):\n\n data_list = [(\"TEST_VALUE\", test_value)]\n\n super(DUMMYEXT, self).__init__(\n tuple(data_list), ((\"EXTNAME\", \"TEST\", \"Extension name\"),)\n )\n\n\nclass DUMMYFITS(FITSFile):\n def __init__(self, test_value):\n\n dummy_extension = DUMMYEXT(test_value)\n\n super(DUMMYFITS, self).__init__(fits_extensions=[dummy_extension])\n\n\ndef test_fits_file():\n\n dtypes = [\n np.int16,\n np.int32,\n np.int64,\n np.uint16,\n np.uint32,\n np.float32,\n np.float64,\n ]\n dtype_keys = [\"I\", \"J\", \"K\", \"I\", \"J\", \"E\", \"D\"]\n\n for i, dt in enumerate(dtypes):\n\n test_values = np.ones(10, dtype=dt)\n\n dummy_fits = DUMMYFITS(test_value=test_values)\n\n assert len(dummy_fits._hdu_list) == 2\n\n assert dummy_fits.index_of(\"TEST\") == 1\n\n assert dummy_fits[\"TEST\"].header[\"TFORM1\"] == dtype_keys[i]\n\n assert np.alltrue(dummy_fits[\"TEST\"].data[\"TEST_VALUE\"] == test_values)\n\n file_name = \"test_fits%d.fits\" % i\n\n dummy_fits.writeto(file_name, overwrite=True)\n\n with pytest.raises(IOError):\n\n dummy_fits.writeto(file_name, overwrite=False)\n\n read_dummy_fits = fits.open(file_name)\n\n assert len(read_dummy_fits) == 2\n\n assert read_dummy_fits.index_of(\"TEST\") == 1\n\n assert read_dummy_fits[\"TEST\"].header[\"TFORM1\"] == dtype_keys[i]\n\n assert np.alltrue(read_dummy_fits[\"TEST\"].data[\"TEST_VALUE\"] == test_values)\n" ]
[ [ "numpy.alltrue", "numpy.ones" ] ]
benneely/lungmap-pipeline
[ "a38a6d1331468834280ce1ac41f30c76ee553ed4" ]
[ "examples/run_pipeline_60x.py" ]
[ "import os\nimport numpy as np\nfrom micap import pipeline\nfrom glob import glob\nfrom PIL import Image\nimport cv2_extras as cv2x\n\n# weird import style to un-confuse PyCharm\ntry:\n from cv2 import cv2\nexcept ImportError:\n import cv2\n\ncell_radius = 17 * 3\ncell_size = np.pi * (cell_radius ** 2)\n\nseg_config = [\n {\n 'type': 'color',\n 'args': {\n 'blur_kernel': (51, 51),\n 'min_size': 3 * cell_size,\n 'max_size': None,\n 'colors': ['green', 'cyan', 'red', 'violet', 'yellow']\n }\n },\n {\n 'type': 'saturation',\n 'args': {'blur_kernel': (71, 71), 'min_size': 12 * cell_size, 'max_size': None}\n },\n {\n 'type': 'saturation',\n 'args': {'blur_kernel': (53, 53), 'min_size': 3 * cell_size, 'max_size': 45 * cell_size}\n },\n {\n 'type': 'saturation',\n 'args': {'blur_kernel': (35, 35), 'min_size': 3 * cell_size, 'max_size': 45 * cell_size}\n },\n {\n 'type': 'saturation',\n 'args': {'blur_kernel': (17, 17), 'min_size': 3 * cell_size, 'max_size': 45 * cell_size}\n }\n]\n\nimage_set_dir = 'mm_e16.5_60x_sox9_sftpc_acta2'\n\n# make our 'tmp' directory for caching trained & tested pipeline instances\nif not os.path.isdir('tmp'):\n os.mkdir('tmp')\n\n\noutput_path = os.path.join(\n 'tmp',\n '_'.join([image_set_dir, 'pipeline'])\n)\nimage_set_path = os.path.join('data', image_set_dir)\n\nimage_paths = glob(os.path.join(image_set_path, '*.tif'))\n\ntmp_image = Image.open(image_paths[2])\ntmp_image = np.asarray(tmp_image)\ntmp_image = cv2.cvtColor(tmp_image, cv2.COLOR_RGB2HSV)\n\n# and pipeline test steps\ncandidate_contours = pipeline.generate_structure_candidates(\n tmp_image,\n seg_config,\n filter_min_size=3 * cell_size,\n plot=True\n)\ncv2x.plot_contours(tmp_image, candidate_contours)\n# test_data_processed = pipeline.process_test_data(test_img_hsv, candidate_contours)\n\n# plot functions\n# pipeline.plot_test_results(test_img_hsv, candidate_contours, pred_results, output_path)\n\n# optional cell segmentation\n# utils.process_structures_into_cells(\n# test_img_hsv,\n# os.path.join(output_path, 'regions'),\n# candidate_contours,\n# plot=False\n# )\n" ]
[ [ "numpy.asarray" ] ]
salvacarrion/autonlp
[ "5cc462901e451b9259219f44225034fc8eedf6d3" ]
[ "examples/3_plot_results.py" ]
[ "import pandas as pd\nfrom tokenizers import normalizers\nfrom tokenizers.normalizers import NFKC, Strip, Lowercase\n\nfrom autonmt.bundle import utils\nfrom autonmt.bundle.report import generate_multivariable_report\nfrom autonmt.preprocessing import DatasetBuilder\n\n\ndef main():\n\n # Create preprocessing for training\n builder = DatasetBuilder(\n base_path=\"/home/scarrion/datasets/nn/translation\",\n datasets=[\n {\"name\": \"europarl\", \"languages\": [\"de-en\"], \"sizes\": [(\"100k\", 100000)]},\n ],\n encoding=[\n {\"subword_models\": [\"unigram+bytes\"], \"vocab_sizes\": [x+256 for x in [100, 200, 400, 1000, 2000, 4000, 8000, 16000]]},\n ],\n normalizer=lambda x: normalizers.Sequence([NFKC(), Strip(), Lowercase()]).normalize_str(x),\n merge_vocabs=False,\n eval_mode=\"compatible\",\n ).build(make_plots=False, force_overwrite=False)\n\n # Create preprocessing for training and testing\n tr_datasets = builder.get_train_ds()\n ts_datasets = builder.get_test_ds()\n\n # Train & Score a model for each dataset\n stats = []\n for ds in tr_datasets:\n # Get ds stats\n ds_stats = utils.load_json(ds.get_stats_path(\"stats.json\"))\n\n # Add stats\n ds_stats[\"scores\"] = {}\n row = {\n \"subword_model\": ds.subword_model,\n \"vocab_size\": ds.vocab_size,\n \"unknown_avg_tokens\": ds_stats[\"val.en\"][\"unknown_avg_tokens\"],\n }\n stats.append(row)\n\n # Create dataframes\n # assert len(ts_datasets) == 1\n df_report = pd.DataFrame(stats)\n df_report[\"dataset\"] = [f\"{ds.dataset_name}-{ds.dataset_size_name}\".replace(\"_lc\", \"\").title() for ds in tr_datasets]\n df_report[\"vocab_size\"] = df_report[\"vocab_size\"].astype(int)\n\n # Make report and print it\n output_path = f\".outputs/myplots\"\n prefix = \"unknowns_\"\n generate_multivariable_report(data=df_report,\n x=\"vocab_size\",\n y_left=(\"unknown_avg_tokens\", \"subword_model\"), y_right=None,\n output_path=output_path, prefix=prefix,\n save_figures=True, show_figures=False, save_csv=True)\n print(\"Summary:\")\n print(df_report.to_string(index=False))\n\n\nif __name__ == \"__main__\":\n main()\n" ]
[ [ "pandas.DataFrame" ] ]
hbaspecto-com/tmip-emat
[ "e1c936e88f36f9b3e4379d814ecb7a3c255e16b1" ]
[ "emat/model/core_model.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\" core_model.py - define coure model API\"\"\"\nimport os\nimport abc\nimport yaml\nimport pandas as pd\nimport numpy as np\nimport logging\nimport subprocess\nimport warnings\nfrom contextlib import contextmanager\nfrom typing import Union, Mapping\nfrom ..workbench.em_framework.model import AbstractModel as AbstractWorkbenchModel\nfrom ..workbench.em_framework.evaluators import BaseEvaluator\n\nfrom typing import Collection\nfrom typing import Iterable\n\nfrom ..database.database import Database\nfrom ..scope.scope import Scope\nfrom ..optimization.optimization_result import OptimizationResult\nfrom ..optimization import EpsilonProgress, ConvergenceMetrics, SolutionCount\nfrom ..util.evaluators import prepare_evaluator\nfrom ..exceptions import MissingArchivePathError, ReadOnlyDatabaseError, MissingIdWarning\n\nfrom .._pkg_constants import *\n\nfrom ..util.loggers import get_module_logger\n_logger = get_module_logger(__name__)\n\nclass AbstractCoreModel(abc.ABC, AbstractWorkbenchModel):\n \"\"\"\n An interface for using a model with EMAT.\n\n Individual models should be instantiated using derived\n subclasses of this abstract base class, and not using\n this class directly.\n\n Args:\n configuration (str or Mapping or None):\n The configuration for this core model. This can be given\n explicitly as a `dict`, or as a `str` which gives the\n filename of a YAML file that will be loaded. If there is\n no configuration, giving `None` is also acceptable.\n scope (Scope or str):\n The exploration scope, as a `Scope` object or as\n a `str` which gives the filename of a YAML file that will be\n loaded.\n safe (bool):\n Load the configuration YAML file in 'safe' mode.\n This can be disabled if the configuration requires\n custom Python types or is otherwise not compatible with\n safe mode. 
Loading configuration files with safe mode\n off is not secure and should not be done with files from\n untrusted sources.\n db (Database, optional):\n An optional Database to store experiments and results.\n name (str, default \"EMAT\"):\n A name for this model, given as an alphanumeric string.\n The name is required by workbench operations.\n metamodel_id (int, optional):\n An identifier for this model, if it is a meta-model.\n Defaults to 0 (i.e., not a meta-model).\n \"\"\"\n\n def __init__(self,\n configuration:Union[str,Mapping,None],\n scope,\n safe=True,\n db=None,\n name='EMAT',\n metamodel_id=0,\n ):\n if isinstance(configuration, str):\n with open(configuration, 'r') as stream:\n if safe:\n configuration = yaml.safe_load(stream)\n else:\n configuration = yaml.load(stream, Loader=yaml.FullLoader)\n if configuration is None:\n configuration = {}\n\n self.config = configuration if configuration is not None else {}\n self.db = db\n if isinstance(scope, Scope):\n self.scope = scope\n else:\n self.scope = Scope(scope)\n\n AbstractWorkbenchModel.__init__(self, name=name.replace('_','').replace(' ',''))\n self.uncertainties = self.scope._x_list\n self.levers = self.scope._l_list\n self.constants = self.scope._c_list\n self.outcomes = self.scope._m_list\n\n self.metamodel_id = metamodel_id\n\n def __getstate__(self):\n # don't pickle the db connection\n return dict((k, v) for (k, v) in self.__dict__.items() if (k != 'db'))\n\n @abc.abstractmethod\n def setup(self, params):\n \"\"\"\n Configure the core model with the experiment variable values.\n\n This method is the place where the core model set up takes place,\n including creating or modifying files as necessary to prepare\n for a core model run. When running experiments, this method\n is called once for each core model experiment, where each experiment\n is defined by a set of particular values for both the exogenous\n uncertainties and the policy levers. These values are passed to\n the experiment only here, and not in the `run` method itself.\n This facilitates debugging, as the `setup` method can potentially\n be used without the `run` method, allowing the user to manually\n inspect the prepared files and ensure they are correct before\n actually running a potentially expensive model.\n\n Each input exogenous uncertainty or policy lever can potentially\n be used to manipulate multiple different aspects of the underlying\n core model. For example, a policy lever that includes a number of\n discrete future network \"build\" options might trigger the replacement\n of multiple related network definition files. Or, a single uncertainty\n relating to the cost of fuel might scale both a parameter linked to\n the modeled per-mile cost of operating an automobile, as well as the\n modeled total cost of fuel used by transit services.\n\n At the end of the `setup` method, a core model experiment should be\n ready to run using the `run` method.\n\n Args:\n params (dict):\n experiment variables including both exogenous\n uncertainty and policy levers\n \n Raises:\n KeyError:\n if a defined experiment variable is not supported\n by the core model \n \"\"\" \n \n @abc.abstractmethod\n def get_experiment_archive_path(\n self,\n experiment_id=None,\n makedirs=False,\n parameters=None,\n run_id=None,\n ):\n \"\"\"\n Returns a file system location to store model run outputs.\n\n For core models with long model run times, it is recommended\n to store the complete model run results in an archive. 
This\n will facilitate adding additional performance measures to the\n scope at a later time.\n\n Both the scope name and experiment id can be used to create the \n folder path. \n \n Args:\n experiment_id (int):\n The experiment id, which is also the row id of the\n experiment in the database. If this is omitted, an\n experiment id is read or created using the parameters.\n makedirs (bool, default False):\n If this archive directory does not yet exist, create it.\n parameters (dict, optional):\n The parameters for this experiment, used to create or\n lookup an experiment id. The parameters are ignored\n if `experiment_id` is given.\n run_id (UUID, optional):\n The run_id of this model run. If not given but a\n run_id attribute is stored in this FilesCoreModel\n instance, that value is used.\n\n Returns:\n str: Experiment archive path (no trailing backslashes).\n \"\"\" \n \n @abc.abstractmethod\n def run(self):\n \"\"\"\n Run the core model.\n\n This method is the place where the core model run takes place.\n Note that this method takes no arguments; all the input\n exogenous uncertainties and policy levers are delivered to the\n core model in the `setup` method, which will be executed prior\n to calling this method. This facilitates debugging, as the `setup`\n method can potentially be used without the `run` method, allowing\n the user to manually inspect the prepared files and ensure they\n are correct before actually running a potentially expensive model.\n When running experiments, this method is called once for each core\n model experiment, after the `setup` method completes.\n\n If the core model requires some post-processing by `post_process`\n method defined in this API, then when this function terminates\n the model directory should be in a state that is ready to run the\n `post_process` command next.\n\n Raises:\n UserWarning: If model is not properly setup\n \"\"\" \n \n def post_process(self, params, measure_names, output_path=None):\n \"\"\"\n Runs post processors associated with particular performance measures.\n\n This method is the place to conduct automatic post-processing\n of core model run results, in particular any post-processing that\n is expensive or that will write new output files into the core model's\n output directory. The core model run should already have\n been completed using `setup` and `run`. If the relevant performance\n measures do not require any post-processing to create (i.e. they\n can all be read directly from output files created during the core\n model run itself) then this method does not need to be overloaded\n for a particular core model implementation.\n\n Args:\n params (dict):\n Dictionary of experiment variables, with keys as variable names\n and values as the experiment settings. Most post-processing\n scripts will not need to know the particular values of the\n inputs (exogenous uncertainties and policy levers), but this\n method receives the experiment input parameters as an argument\n in case one or more of these parameter values needs to be known\n in order to complete the post-processing.\n measure_names (List[str]):\n List of measures to be processed. Normally for the first pass\n of core model run experiments, post-processing will be completed\n for all performance measures. However, it is possible to use\n this argument to give only a subset of performance measures to\n post-process, which may be desirable if the post-processing\n of some performance measures is expensive. 
Additionally, this\n method may also be called on archived model results, allowing\n it to run to generate only a subset of (probably new) performance\n measures based on these archived runs.\n output_path (str, optional):\n Path to model outputs. If this is not given (typical for the\n initial run of core model experiments) then the local/default\n model directory is used. This argument is provided primarily\n to facilitate post-processing archived model runs to make new\n performance measures (i.e. measures that were not in-scope when\n the core model was actually run).\n\n Raises:\n KeyError:\n If post process is not available for specified measure\n \"\"\"\n \n @abc.abstractmethod\n def load_measures(\n self,\n measure_names: Collection[str]=None,\n *,\n rel_output_path=None,\n abs_output_path=None,\n ) -> dict:\n \"\"\"\n Import selected measures from the core model.\n \n This method is the place to put code that can actually reach into\n files in the core model's run results and extract performance\n measures. It is expected that it should not do any post-processing\n of results (i.e. it should read from but not write to the model\n outputs directory).\n\n Imports measures from active scenario\n \n Args:\n measure_names (Collection[str]):\n Collection of measures to be loaded.\n rel_output_path, abs_output_path (str, optional):\n Path to model output locations, either relative\n to the `model_path` directory (when a subclass\n is a type that has a model path) or as an absolute\n directory. If neither is given, the default\n value is equivalent to setting `rel_output_path` to\n 'Outputs'.\n\n Returns:\n dict of measure name and values from active scenario\n \n Raises:\n KeyError: If load_measures is not available for specified\n measure\n \"\"\" \n \n\n @abc.abstractmethod\n def archive(self, params, model_results_path, experiment_id:int=0):\n \"\"\"\n Copies model outputs to archive location.\n \n Args:\n params (dict): Dictionary of experiment variables\n model_results_path (str): archive path\n experiment_id (int, optional): The id number for this experiment.\n \n \"\"\"\n\n @property\n def allow_short_circuit(self):\n \"\"\"\n Bool: Allow model runs to be skipped if measures already appear in the database.\n \"\"\"\n return self.config.get('allow_short_circuit', True)\n\n @allow_short_circuit.setter\n def allow_short_circuit(self, value):\n self.config['allow_short_circuit'] = bool(value)\n\n @property\n def ignore_crash(self):\n \"\"\"\n Bool: Allow model runs to `post_process` and `archive` even after an apparent crash in `run`.\n \"\"\"\n return self.config.get('ignore_crash', False)\n\n @ignore_crash.setter\n def ignore_crash(self, value):\n self.config['ignore_crash'] = bool(value)\n\n @property\n def success_indicator(self):\n \"\"\"\n str: The name of a file that indicates the model has run successfully.\n\n The flag is the mere existance of a file with this name, not any particular\n file content. This file is deleted automatically when the model `run` is\n initiated, so that it can be recreated to indicate a success.\n \"\"\"\n return self.config.get('success_indicator', None)\n\n @success_indicator.setter\n def success_indicator(self, value):\n self.config['success_indicator'] = value\n\n @property\n def killed_indicator(self):\n \"\"\"\n str: The name of a file that indicates the model was killed due to an unrecoverable error.\n\n The flag is the mere existance of a file with this name, not any particular\n file content. 
This file is deleted automatically when the model `run` is\n initiated, so that it can be recreated to indicate an unrecoverable error.\n \"\"\"\n return self.config.get('killed_indicator', None)\n\n @killed_indicator.setter\n def killed_indicator(self, value):\n self.config['killed_indicator'] = value\n\n @property\n def local_directory(self):\n \"\"\"Path: The current local working directory for this model.\"\"\"\n return self.config.get(\"local_directory\", os.getcwd())\n\n @local_directory.setter\n def local_directory(self, value):\n self.config[\"local_directory\"] = value\n\n @property\n def resolved_model_path(self):\n \"\"\"\n Path: The resolved model path.\n\n For core models that don't rely on the file system, this\n is set to the current working directory and is generally\n irrelevant. Overload this property for models that do\n rely on the file system.\n \"\"\"\n return self.local_directory\n\n @property\n def is_db_locked(self):\n if self.db:\n return self.db.is_locked\n return False\n\n @contextmanager\n def lock_db(self, x=True):\n if x and self.db:\n with self.db.lock:\n yield\n else:\n yield\n\n def enter_run_model(self):\n \"\"\"A hook for actions at the very beginning of the run_model step.\"\"\"\n\n def exit_run_model(self):\n \"\"\"A hook for actions at the very end of the run_model step.\"\"\"\n\n def run_model(self, scenario, policy):\n \"\"\"\n Runs an experiment through core model.\n\n This method overloads the `run_model` method given in\n the EMA Workbench, and provides the correct execution\n of a core model within the workbench framework. This\n function assembles and executes the steps laid out in\n other methods of this class, adding some useful logic\n to optimize the process (e.g. optionally short-\n circuiting runs that already have results stored\n in the database).\n\n For each experiment, the core model is called to:\n\n 1. `setup` experiment variables, copy files\n as needed, and otherwise prepare to run the\n core model for a particular experiment,\n 2. `run` the experiment,\n 3. `post_process` the result if needed to\n produce all relevant performance measures,\n 4. `archive` model outputs from this experiment\n (optional), and\n 5. `load_measures` from the experiment and\n store those measures in the associated database.\n\n Note that this method does *not* return any outcomes.\n Outcomes are instead written into self.outcomes_output,\n and can be retrieved from there, or from the database at\n a later time.\n\n In general, it should not be necessary to overload this\n method in derived classes built for particular core models.\n Instead, write overloaded methods for `setup`, `run`,\n `post_process` , `archive`, and `load_measures`. 
Moreover,\n in typical usage a modeler will generally not want to rely\n on this method directly, but instead use `run_experiments`\n to automatically run multiple experiments with one command.\n\n Args:\n scenario (Scenario): A dict-like object that\n has key-value pairs for each uncertainty.\n policy (Policy): A dict-like object that\n has key-value pairs for each lever.\n\n Raises:\n UserWarning: If there are no experiments associated with\n this type.\n\n \"\"\"\n self.enter_run_model()\n try:\n self.comment_on_run = None\n\n _logger.debug(\"run_core_model read_experiment_parameters\")\n\n experiment_id = policy.get(\"_experiment_id_\", None)\n if experiment_id is None:\n experiment_id = scenario.get(\"_experiment_id_\", None)\n\n if not hasattr(self, 'db') and hasattr(self, '_db'):\n self.db = self._db\n\n # If running a core files model using the DistributedEvaluator,\n # the workers won't have access to the DB directly, so we'll only\n # run the short-circuit test and the ad-hoc write-to-database\n # section of this code if the `db` attribute is available.\n if hasattr(self, 'db') and self.db is not None:\n\n assert isinstance(self.db, Database)\n\n if experiment_id is None:\n with warnings.catch_warnings():\n if self.is_db_locked:\n warnings.simplefilter(\"ignore\", category=MissingIdWarning)\n experiment_id = self.db.read_experiment_id(self.scope.name, scenario, policy)\n\n if experiment_id and self.allow_short_circuit:\n # opportunity to short-circuit run by loading pre-computed values.\n precomputed = self.db.read_experiment_measures(\n self.scope,\n design_name=None,\n experiment_id=experiment_id,\n )\n if not precomputed.empty:\n self.outcomes_output = dict(precomputed.iloc[0])\n self.log(f\"short circuit experiment_id {experiment_id} / {getattr(self, 'uid', 'no uid')}\")\n return\n\n if experiment_id is None and not self.is_db_locked:\n experiment_id = self.db.write_experiment_parameters_1(\n self.scope.name, 'ad hoc', scenario, policy\n )\n self.log(f\"YES DATABASE experiment_id {experiment_id}\", level=logging.DEBUG)\n\n else:\n _logger.debug(f\"NO DATABASE experiment_id {experiment_id}\")\n\n xl = {}\n xl.update(scenario)\n xl.update(policy)\n\n m_names = self.scope.get_measure_names()\n\n _logger.debug(f\"run_core_model setup {experiment_id}\")\n self.setup(xl)\n\n if self.success_indicator is not None:\n success_indicator = os.path.join(self.resolved_model_path, self.success_indicator)\n if os.path.exists(success_indicator):\n os.remove(success_indicator)\n else:\n success_indicator = None\n\n if self.killed_indicator is not None:\n killed_indicator = os.path.join(self.resolved_model_path, self.killed_indicator)\n if os.path.exists(killed_indicator):\n os.remove(killed_indicator)\n else:\n killed_indicator = None\n\n _logger.debug(f\"run_core_model run {experiment_id}\")\n try:\n self.run()\n except subprocess.CalledProcessError as err:\n _logger.error(f\"ERROR in run_core_model run {experiment_id}: {str(err)}\")\n try:\n ex_archive_path = self.get_experiment_archive_path(experiment_id, makedirs=True)\n except MissingArchivePathError:\n pass\n else:\n if isinstance(err, subprocess.CalledProcessError):\n if err.stdout:\n with open(os.path.join(ex_archive_path, 'error.stdout.log'), 'ab') as stdout:\n stdout.write(err.stdout)\n if err.stderr:\n with open(os.path.join(ex_archive_path, 'error.stderr.log'), 'ab') as stderr:\n stderr.write(err.stderr)\n with open(os.path.join(ex_archive_path, 'error.log'), 'a') as errlog:\n errlog.write(str(err))\n measures_dictionary = {name: 
np.nan for name in m_names}\n # Assign to outcomes_output, for ema_workbench compatibility\n self.outcomes_output = measures_dictionary\n\n if not self.ignore_crash:\n # If 'ignore_crash' is False (the default), then abort now and skip\n # any post-processing and other archiving steps, which will\n # probably fail anyway.\n self.log(f\"run_core_model ABORT {experiment_id}\", level=logging.ERROR)\n self.comment_on_run = f\"FAILED EXPERIMENT {experiment_id}: {str(err)}\"\n return\n else:\n _logger.error(f\"run_core_model CONTINUE AFTER ERROR {experiment_id}\")\n\n try:\n if success_indicator and not os.path.exists(success_indicator):\n # The absence of the `success_indicator` file means that the model\n # did not actually terminate correctly, so we do not want to\n # post-process or store these results in the database.\n self.comment_on_run = f\"NON-SUCCESSFUL EXPERIMENT {experiment_id}: success_indicator missing\"\n raise ValueError(f\"success_indicator missing: {success_indicator}\")\n\n if killed_indicator and os.path.exists(killed_indicator):\n self.comment_on_run = f\"KILLED EXPERIMENT {experiment_id}: killed_indicator present\"\n raise ValueError(f\"killed_indicator present: {killed_indicator}\")\n\n _logger.debug(f\"run_core_model post_process {experiment_id}\")\n self.post_process(xl, m_names)\n\n _logger.debug(f\"run_core_model wrap up {experiment_id}\")\n measures_dictionary = self.load_measures(m_names)\n m_df = pd.DataFrame(measures_dictionary, index=[experiment_id])\n\n except KeyboardInterrupt:\n _logger.exception(\n f\"KeyboardInterrupt in post_process, load_measures or outcome processing {experiment_id}\")\n raise\n except Exception as err:\n _logger.exception(f\"error in post_process, load_measures or outcome processing {experiment_id}\")\n _logger.error(f\"proceeding directly to archive attempt {experiment_id}\")\n if not self.comment_on_run:\n self.comment_on_run = f\"PROBLEM IN EXPERIMENT {experiment_id}: {str(err)}\"\n else:\n # only write to database if there was no error in post_process, load_measures or outcome processing\n if experiment_id and hasattr(self, 'db') and self.db is not None and not self.db.readonly:\n _logger.debug(f\"run_core_model write db {experiment_id}\")\n run_id = getattr(self, 'run_id', None)\n if run_id is None:\n run_id, _ = self.db.new_run_id(\n scope_name=self.scope.name,\n experiment_id=experiment_id,\n source=self.metamodel_id or 0,\n )\n try:\n self.db.write_experiment_measures(self.scope.name, self.metamodel_id, m_df, [run_id])\n except ReadOnlyDatabaseError:\n warnings.warn(\"database is read-only, not storing model outcomes\")\n except Exception as err:\n _logger.exception(f\"error in writing results to database: {str(err)}\")\n else:\n _logger.debug(f\"run_core_model OK write db {experiment_id} {self.metamodel_id} {run_id}\\n{m_df}\")\n else:\n _logger.debug(f\"run_core_model no db to write to {experiment_id}\")\n\n if experiment_id:\n try:\n ex_archive_path = self.get_experiment_archive_path(experiment_id)\n except MissingArchivePathError:\n pass\n else:\n _logger.debug(f\"run_core_model archive {experiment_id}\")\n self.archive(xl, ex_archive_path, experiment_id)\n else:\n _logger.debug(f\"run_core_model no archive because no experiment_id\")\n finally:\n self.exit_run_model()\n\n def read_experiments(\n self,\n design_name,\n db=None,\n only_pending=False,\n only_complete=False,\n only_with_measures=False,\n ):\n \"\"\"\n Reads results from a design of experiments from the database.\n\n Args:\n design_name (str): The name of the 
design to load.\n db (Database, optional): The Database from which to read experiments.\n If no db is given, the default `db` for this model is used.\n only_pending (bool, default False): If True, only pending\n experiments (which have no performance measure results\n stored in the database) are returned.\n only_complete (bool, default False): If True, only complete\n experiments (which have no performance measure\n results missing in the database) are returned.\n only_with_measures (bool, default False): If True, only\n experiments with at least one stored performance measure\n are returned.\n\n Returns:\n pandas.DataFrame:\n A DataFrame that contains all uncertainties, levers, and measures\n for the experiments.\n\n Raises:\n ValueError:\n If there is no Database connection `db` set.\n \"\"\"\n db = db if db is not None else self.db\n if db is None:\n raise ValueError('no database to read from')\n\n return self.ensure_dtypes(\n db.read_experiment_all(\n self.scope.name,\n design_name,\n only_pending=only_pending,\n only_complete=only_complete,\n only_with_measures=only_with_measures,\n )\n )\n\n def read_experiment_parameters(\n self,\n design_name=None,\n db=None,\n only_pending=False,\n *,\n experiment_ids=None,\n ):\n \"\"\"\n Reads uncertainties and levers from a design of experiments from the database.\n\n Args:\n design_name (str, optional): If given, only experiments\n associated with both the scope and the named design\n are returned, otherwise all experiments associated\n with the scope are returned.\n db (Database, optional): The Database from which to read experiments.\n If no db is given, the default `db` for this model is used.\n only_pending (bool, default False): If True, only pending\n experiments (which have no performance measure results\n stored in the database) are returned.\n experiment_ids (Collection, optional):\n A collection of experiment id's to load. 
If given,\n both `design_name` and `only_pending` are ignored.\n\n Returns:\n pandas.DataFrame:\n A DataFrame that contains all uncertainties, levers, and measures\n for the experiments.\n\n Raises:\n ValueError:\n If `db` is not given and there is no default\n Database connection set.\n \"\"\"\n db = db if db is not None else self.db\n\n if db is None:\n raise ValueError('no database to read from')\n\n return self.ensure_dtypes(\n db.read_experiment_parameters(\n self.scope.name,\n design_name,\n only_pending=only_pending,\n experiment_ids=experiment_ids,\n )\n )\n\n def read_experiment_measures(\n self,\n design_name,\n experiment_id=None,\n db=None,\n ):\n \"\"\"\n Reads performance measures from a design of experiments from the database.\n\n Args:\n design_name (str): The name of the design to load.\n experiment_id (int, optional): The id of the experiment to load.\n db (Database, optional): The Database from which to read experiment(s).\n If no db is given, the default `db` for this model is used.\n\n Returns:\n pandas.DataFrame:\n A DataFrame that contains all uncertainties, levers, and measures\n for the experiments.\n\n Raises:\n ValueError:\n If `db` is not given and there is no default\n Database connection set.\n \"\"\"\n db = db if db is not None else self.db\n\n if db is None:\n raise ValueError('no database to read from')\n\n measures = self.ensure_dtypes(\n db.read_experiment_measures(\n self.scope.name,\n design_name,\n experiment_id,\n source=self.metamodel_id,\n )\n )\n \n # only return measures within scope\n measures = measures[[i for i in self.scope.get_measure_names()\n if i in measures.columns]]\n \n return measures\n \n\n def ensure_dtypes(self, df:pd.DataFrame):\n \"\"\"\n Convert columns of dataframe to correct dtype as needed.\n\n Args:\n df (pandas.DataFrame): A dataframe with column names\n that are uncertainties, levers, or measures.\n\n Returns:\n pandas.DataFrame:\n The same data as input, but with dtypes as appropriate.\n \"\"\"\n return self.scope.ensure_dtypes(df)\n\n def design_experiments(self, *args, **kwargs):\n \"\"\"\n Create a design of experiments based on this model.\n\n Args:\n n_samples_per_factor (int, default 10): The number of samples in the\n design per random factor.\n n_samples (int or tuple, optional): The total number of samples in the\n design. If `jointly` is False, this is the number of samples in each\n of the uncertainties and the levers, the total number of samples will\n be the square of this value. Give a 2-tuple to set values for\n uncertainties and levers respectively, to set them independently.\n If this argument is given, it overrides `n_samples_per_factor`.\n random_seed (int or None, default 1234): A random seed for reproducibility.\n db (Database, optional): If provided, this design will be stored in the\n database indicated. If not provided, the `db` for this model will\n be used, if one is set.\n design_name (str, optional): A name for this design, to identify it in the\n database. If not given, a unique name will be generated based on the\n selected sampler.\n sampler (str or AbstractSampler, default 'lhs'): The sampler to use for this\n design. 
Available pre-defined samplers include:\n - 'lhs': Latin Hypercube sampling\n - 'ulhs': Uniform Latin Hypercube sampling, which ignores defined\n distribution shapes from the scope and samples everything\n as if it was from a uniform distribution\n - 'mc': Monte carlo sampling\n - 'uni': Univariate sensitivity testing, whereby experiments are\n generated setting each parameter individually to minimum and\n maximum values (for numeric dtypes) or all possible values\n (for boolean and categorical dtypes). Note that designs for\n univariate sensitivity testing are deterministic and the number\n of samples given is ignored.\n sample_from ('all', 'uncertainties', or 'levers'): Which scope components\n from which to sample. Components not sampled are set at their default\n values in the design.\n jointly (bool, default True): Whether to sample jointly all uncertainties\n and levers in a single design, or, if False, to generate separate samples\n for levers and uncertainties, and then combine the two in a full-factorial\n manner. This argument has no effect unless `sample_from` is 'all'.\n Note that setting `jointly` to False may produce a very large design,\n as the total number of experiments will be the product of the number of\n experiments for the levers and the number of experiments for the\n uncertainties, which are set separately (i.e. if `n_samples` is given,\n the total number of experiments is the square of that value).\n\n Returns:\n pandas.DataFrame: The resulting design.\n \"\"\"\n if 'scope' in kwargs:\n kwargs.pop('scope')\n\n if 'db' not in kwargs:\n kwargs['db'] = self.db\n\n from ..experiment import experimental_design\n return experimental_design.design_experiments(self.scope, *args, **kwargs)\n\n def async_experiments(\n self,\n design:pd.DataFrame=None,\n db=None,\n *,\n design_name=None,\n evaluator=None,\n max_n_workers=None,\n stagger_start=None,\n batch_size=None,\n ):\n \"\"\"\n Asynchronously runs a design of combined experiments using this model.\n\n A combined experiment includes a complete set of input values for\n all exogenous uncertainties (a Scenario) and all policy levers\n (a Policy). Unlike the perform_experiments function in the EMA Workbench,\n this method pairs each Scenario and Policy in sequence, instead\n of running all possible combinations of Scenario and Policy.\n This change ensures compatibility with the EMAT database modules, which\n preserve the complete set of input information (both uncertainties\n and levers) for each experiment. To conduct a full cross-factorial set\n of experiments similar to the default settings for EMA Workbench,\n use a factorial design, by setting the `jointly` argument for the\n `design_experiments` to False, or by designing experiments outside\n of EMAT with your own approach.\n\n Args:\n design (pandas.DataFrame, optional): experiment definitions\n given as a DataFrame, where each exogenous uncertainties and\n policy levers is given as a column, and each row is an experiment.\n db (Database, required): The database to use for loading and saving experiments.\n If none is given, the default database for this model is used.\n If there is no default db, and none is given here,\n these experiments will be aborted.\n design_name (str, optional): The name of a design of experiments to\n load from the database. This design is only used if\n `design` is None.\n evaluator (emat.workbench.Evaluator, optional): Optionally give an\n evaluator instance. If not given, a default DistributedEvaluator\n will be instantiated. 
Passing any other kind of evaluator will\n currently cause an error, although in the future other async\n compatible evaluators may be provided.\n max_n_workers (int, optional):\n The maximum number of workers that will be created for a default\n dask.distributed LocalCluster. If the number of cores available is\n smaller than this number, fewer workers will be spawned. This value\n is only used if a default LocalCluster has not yet been created.\n stagger_start (int, optional):\n If provided, wait this number of seconds between initial dispatch\n of experiments to the evaluator. For models that do a lot of\n file copying up front, this can prevent over-saturating the file\n storage system.\n batch_size (int, optional):\n For fast-running core models, the overhead from multi-processing\n can represent a big chunk of overall runtime. Grouping experiments\n into batches that are sent to workers as a group can mitigate this.\n Setting batch_size to 1 will process every experiment separately.\n If no batch size is given, a guess is made as to an efficient\n batch_size based on the number of experiments and the number of\n workers.\n\n Raises:\n ValueError:\n If there are no experiments defined. This includes\n the situation where `design` is given but no database is\n available.\n\n \"\"\"\n # catch user gives only a design, not experiment_parameters\n if isinstance(design, str) and design_name is None:\n design_name, design = design, None\n\n if design_name is None and design is None:\n raise ValueError(f\"must give design_name or design\")\n\n if db is None:\n db = self.db\n\n if design_name is not None and design is None:\n if not db:\n raise ValueError(f'cannot load design \"{design_name}\", there is no db')\n design = db.read_experiment_parameters(self.scope.name, design_name)\n\n if design.empty:\n raise ValueError(f\"no experiments available\")\n\n from .asynchronous import asynchronous_experiments\n\n if self.db is None:\n if db is not None:\n self.db = db\n else:\n raise ValueError(\"cannot run async_experiments without a `db` defined\")\n\n return asynchronous_experiments(\n self,\n design,\n evaluator=evaluator,\n max_n_workers=max_n_workers,\n stagger_start=stagger_start,\n batch_size=batch_size,\n )\n\n\n def run_experiments(\n self,\n design=None,\n evaluator=None,\n *,\n design_name=None,\n db=None,\n allow_short_circuit=None,\n ):\n \"\"\"\n Runs a design of combined experiments using this model.\n\n A combined experiment includes a complete set of input values for\n all exogenous uncertainties (a Scenario) and all policy levers\n (a Policy). Unlike the perform_experiments function in the EMA Workbench,\n this method pairs each Scenario and Policy in sequence, instead\n of running all possible combinations of Scenario and Policy.\n This change ensures compatibility with the EMAT database modules, which\n preserve the complete set of input information (both uncertainties\n and levers) for each experiment. To conduct a full cross-factorial set\n of experiments similar to the default settings for EMA Workbench,\n use a factorial design, by setting the `jointly` argument for the\n `design_experiments` to False, or by designing experiments outside\n of EMAT with your own approach.\n\n Args:\n design (pandas.DataFrame, optional): experiment definitions\n given as a DataFrame, where each exogenous uncertainty and\n policy levers is given as a column, and each row is an experiment.\n evaluator (emat.workbench.Evaluator, optional): Optionally give an\n evaluator instance. 
If not given, a default SequentialEvaluator\n will be instantiated.\n design_name (str, optional): The name of a design of experiments to\n load from the database. This design is only used if\n `design` is None.\n db (Database, optional): The database to use for loading and saving experiments.\n If none is given, the default database for this model is used.\n If there is no default db, and none is given here,\n the results are not stored in a database. Set to False to explicitly\n not use the default database, even if it exists.\n\n Returns:\n pandas.DataFrame:\n A DataFrame that contains all uncertainties, levers, and measures\n for the experiments.\n\n Raises:\n ValueError:\n If there are no experiments defined. This includes\n the situation where `design` is given but no database is\n available.\n\n \"\"\"\n\n from ..workbench import Scenario, Policy, perform_experiments\n\n # catch user gives only a design, not experiment_parameters\n if isinstance(design, str) and design_name is None:\n design_name, design = design, None\n\n if design_name is None and design is None:\n raise ValueError(f\"must give design_name or design\")\n\n if db is None:\n db = self.db\n\n if design_name is not None and design is None:\n if not db:\n raise ValueError(f'cannot load design \"{design_name}\", there is no db')\n design = db.read_experiment_parameters(self.scope.name, design_name)\n\n if design.empty:\n raise ValueError(f\"no experiments available\")\n\n # catch metamodels here and run them as a batch, which is much faster\n function = getattr(self, 'function', None)\n from .meta_model import MetaModel\n if isinstance(function, MetaModel):\n outcomes = function.predict(design)\n result = self.ensure_dtypes(pd.concat([\n design,\n outcomes\n ], axis=1, sort=False))\n from ..experiment.experimental_design import ExperimentalDesign\n result = ExperimentalDesign(result)\n result.scope = self.scope\n result.design_name = getattr(design, 'design_name', None)\n result.sampler_name = getattr(design, 'sampler_name', None)\n if db:\n metamodel_id = self.metamodel_id\n if metamodel_id is None:\n metamodel_id = db.get_new_metamodel_id(self.scope.name)\n db.write_experiment_measures(self.scope.name, metamodel_id, outcomes)\n return result\n\n scenarios = []\n scenario_cols = self.scope._get_uncertainty_and_constant_names()\n design_scenarios = design[scenario_cols]\n for rownum in range(len(design)):\n if design.index.name == 'experiment':\n s = Scenario(\n _experiment_id_=design.index[rownum],\n **design_scenarios.iloc[rownum],\n )\n else:\n s = Scenario(\n _experiment_id_=False,\n **design_scenarios.iloc[rownum],\n )\n scenarios.append(s)\n\n lever_names = self.scope.get_lever_names()\n policies = [\n Policy(f\"Incognito{n}\", **dict(zip(lever_names, i)))\n for n,i in enumerate(design[lever_names].itertuples(index=False, name='ExperimentL'))\n ]\n\n evaluator = prepare_evaluator(evaluator, self)\n\n if getattr(evaluator, 'asynchronous', False):\n # When the evaluator is in asynchronous mode, the core model runs will be\n # dispatched here but the function will not block waiting on the result, and\n # instead depend on the model execution process to write the results into\n # the database when complete.\n with evaluator:\n if allow_short_circuit is not None:\n _stored_allow_short_circuit = self.allow_short_circuit\n self.allow_short_circuit = allow_short_circuit\n else:\n _stored_allow_short_circuit = None\n try:\n perform_experiments(\n self,\n scenarios=scenarios,\n policies=policies,\n zip_over={'scenarios', 
'policies'},\n evaluator=evaluator,\n )\n finally:\n if _stored_allow_short_circuit is not None:\n self.allow_short_circuit = _stored_allow_short_circuit\n return\n\n else:\n with evaluator:\n if db is False:\n _stored_db = self.db\n self.db = None\n else:\n _stored_db = None\n if allow_short_circuit is not None:\n _stored_allow_short_circuit = self.allow_short_circuit\n self.allow_short_circuit = allow_short_circuit\n else:\n _stored_allow_short_circuit = None\n try:\n experiments, outcomes = perform_experiments(\n self,\n scenarios=scenarios,\n policies=policies,\n zip_over={'scenarios', 'policies'},\n evaluator=evaluator,\n )\n finally:\n if _stored_db:\n self.db = _stored_db\n if _stored_allow_short_circuit is not None:\n self.allow_short_circuit = _stored_allow_short_circuit\n experiments.index = design.index\n\n outcomes = pd.DataFrame.from_dict(outcomes)\n outcomes.index = design.index\n\n # if db:\n # metamodel_id = self.metamodel_id\n # if metamodel_id is None:\n # metamodel_id = 0\n # db.write_experiment_measures(self.scope.name, metamodel_id, outcomes)\n\n # Put constants back into experiments\n experiments_ = experiments.drop(\n columns=['scenario', 'policy', 'model', '_experiment_id_'],\n errors='ignore',\n )\n for i in self.scope.get_constants():\n experiments_[i.name] = i.value\n\n result = self.ensure_dtypes(pd.concat([\n experiments_,\n outcomes\n ], axis=1, sort=False))\n from ..experiment.experimental_design import ExperimentalDesign\n result = ExperimentalDesign(result)\n result.scope = self.scope\n result.design_name = getattr(design, 'design_name', None)\n result.sampler_name = getattr(design, 'sampler_name', None)\n return result\n\n def run_reference_experiment(\n self,\n evaluator=None,\n *,\n db=None,\n ):\n \"\"\"\n Runs a reference experiment using this model.\n\n This single experiment includes a complete set of input values for\n all exogenous uncertainties (a Scenario) and all policy levers\n (a Policy). Each is set to the default value indicated by the scope.\n\n Args:\n evaluator (emat.workbench.Evaluator, optional): Optionally give an\n evaluator instance. If not given, a default SequentialEvaluator\n will be instantiated.\n db (Database, optional): The database to use for loading and saving experiments.\n If none is given, the default database for this model is used.\n If there is no default db, and none is given here,\n the results are not stored in a database. 
Set to False to explicitly\n not use the default database, even if it exists.\n\n Returns:\n pandas.DataFrame:\n A DataFrame that contains all uncertainties, levers, and measures\n for the experiments.\n\n \"\"\"\n if db is None:\n db = self.db\n ref = self.design_experiments(sampler='ref', db=db)\n return self.run_experiments(ref, evaluator=evaluator, db=db)\n\n def create_metamodel_from_data(\n self,\n experiment_inputs:pd.DataFrame,\n experiment_outputs:pd.DataFrame,\n output_transforms: dict = None,\n metamodel_id:int=None,\n include_measures=None,\n exclude_measures=None,\n db = None,\n random_state=None,\n experiment_stratification=None,\n suppress_converge_warnings=False,\n regressor = None,\n find_best_metamodeltype=False,\n ):\n \"\"\"\n Create a MetaModel from a set of input and output observations.\n\n Args:\n experiment_inputs (pandas.DataFrame): This dataframe\n should contain all of the experimental inputs, including\n values for each uncertainty, level, and constant.\n experiment_outputs (pandas.DataFrame): This dataframe\n should contain all of the experimental outputs, including\n a column for each performance measure. The index\n for the outputs should match the index for the\n `experiment_inputs`, so that the I-O matches row-by-row.\n output_transforms (dict): Deprecated. Specify the\n output transforms directly in the scope instead.\n metamodel_id (int, optional): An identifier for this meta-model.\n If not given, a unique id number will be created randomly.\n include_measures (Collection[str], optional): If provided, only\n output performance measures with names in this set will be included.\n exclude_measures (Collection[str], optional): If provided, only\n output performance measures with names not in this set will be included.\n db (Database, optional): The database to use for loading and saving metamodels.\n If none is given, the default database for this model is used.\n If there is no default db, and none is given here,\n the metamodel is not stored in a database.\n random_state (int, optional): A random state to use in the metamodel\n regression fitting.\n experiment_stratification (pandas.Series, optional):\n A stratification of experiments, used in cross-validation.\n suppress_converge_warnings (bool, default False):\n Suppress convergence warnings during metamodel fitting.\n regressor (Estimator, optional): A scikit-learn estimator implementing a\n multi-target regression. If not given, a detrended simple Gaussian\n process regression is used.\n find_best_metamodeltype (int, default 0):\n Run a search to find the best metamodeltype for each\n performance measure, repeating each cross-validation\n step this many times. For more stable results, choose\n 3 or more, although larger numbers will be slow. If\n domain knowledge about the normal expected range and\n behavior of each performance measure is available,\n it is better to give the metamodeltype explicitly in\n the Scope.\n\n Returns:\n MetaModel:\n a callable object that, when called as if a\n function, accepts keyword arguments as inputs and\n returns a dictionary of (measure name: value) pairs.\n \"\"\"\n from .meta_model import create_metamodel\n\n # The outputs index typically has a 2-level multi-index,\n # giving both experiment_id and run_id. 
But for this\n # analysis, we will strip out the run_id.\n if experiment_outputs.index.nlevels == 2:\n experiment_outputs.index = experiment_outputs.index.get_level_values(0)\n\n return create_metamodel(\n scope=self.scope,\n experiments=pd.concat([experiment_inputs, experiment_outputs], axis=1),\n metamodel_id=metamodel_id,\n db=db,\n include_measures=include_measures,\n exclude_measures=exclude_measures,\n random_state=random_state,\n experiment_stratification=experiment_stratification,\n suppress_converge_warnings=suppress_converge_warnings,\n regressor=regressor,\n name=None,\n find_best_metamodeltype=find_best_metamodeltype,\n )\n\n def create_metamodel_from_design(\n self,\n design_name:str,\n metamodel_id:int = None,\n include_measures=None,\n exclude_measures=None,\n db=None,\n random_state=None,\n suppress_converge_warnings=False,\n regressor=None,\n find_best_metamodeltype=False,\n ):\n \"\"\"\n Create a MetaModel from a set of input and output observations.\n\n Args:\n design_name (str): The name of the design to use.\n metamodel_id (int, optional): An identifier for this meta-model.\n If not given, a unique id number will be created randomly.\n include_measures (Collection[str], optional): If provided, only\n output performance measures with names in this set will be included.\n exclude_measures (Collection[str], optional): If provided, only\n output performance measures with names not in this set will be included.\n random_state (int, optional): A random state to use in the metamodel\n regression fitting.\n suppress_converge_warnings (bool, default False):\n Suppress convergence warnings during metamodel fitting.\n regressor (Estimator, optional): A scikit-learn estimator implementing a\n multi-target regression. If not given, a detrended simple Gaussian\n process regression is used.\n find_best_metamodeltype (int, default 0):\n Run a search to find the best metamodeltype for each\n performance measure, repeating each cross-validation\n step this many times. For more stable results, choose\n 3 or more, although larger numbers will be slow. 
If\n domain knowledge about the normal expected range and\n behavior of each performance measure is available,\n it is better to give the metamodeltype explicitly in\n the Scope.\n\n Returns:\n MetaModel:\n a callable object that, when called as if a\n function, accepts keyword arguments as inputs and\n returns a dictionary of (measure name: value) pairs.\n\n Raises:\n ValueError: If the named design still has pending experiments.\n \"\"\"\n db = db if db is not None else self.db\n\n if db is None:\n raise ValueError(\"db is None\")\n\n check_df = db.read_experiment_parameters(self.scope.name, design_name, only_pending=True)\n if not check_df.empty:\n from ..exceptions import PendingExperimentsError\n raise PendingExperimentsError(f'design \"{design_name}\" has pending experiments')\n\n experiment_inputs = db.read_experiment_parameters(self.scope.name, design_name)\n experiment_outputs = db.read_experiment_measures(self.scope.name, design_name)\n\n transforms = {\n i.name: i.metamodeltype\n for i in self.scope.get_measures()\n }\n\n return self.create_metamodel_from_data(\n experiment_inputs,\n experiment_outputs,\n transforms,\n metamodel_id=metamodel_id,\n include_measures=include_measures,\n exclude_measures=exclude_measures,\n db=db,\n random_state=random_state,\n suppress_converge_warnings=suppress_converge_warnings,\n regressor=regressor,\n find_best_metamodeltype=find_best_metamodeltype,\n )\n\n def create_metamodel_from_designs(\n self,\n design_names:str,\n metamodel_id:int = None,\n include_measures=None,\n exclude_measures=None,\n db=None,\n random_state=None,\n suppress_converge_warnings=False,\n ):\n \"\"\"\n Create a MetaModel from multiple sets of input and output observations.\n\n Args:\n design_names (Collection[str]): The names of the designs to use.\n metamodel_id (int, optional): An identifier for this meta-model.\n If not given, a unique id number will be created randomly.\n include_measures (Collection[str], optional): If provided, only\n output performance measures with names in this set will be included.\n exclude_measures (Collection[str], optional): If provided, only\n output performance measures with names not in this set will be included.\n random_state (int, optional): A random state to use in the metamodel\n regression fitting.\n suppress_converge_warnings (bool, default False):\n Suppress convergence warnings during metamodel fitting.\n\n Returns:\n MetaModel:\n a callable object that, when called as if a\n function, accepts keyword arguments as inputs and\n returns a dictionary of (measure name: value) pairs.\n\n Raises:\n ValueError: If the named design still has pending experiments.\n \"\"\"\n db = db if db is not None else self.db\n\n if db is not None:\n for design_name in design_names:\n check_df = db.read_experiment_parameters(self.scope.name, design_name, only_pending=True)\n if not check_df.empty:\n from ..exceptions import PendingExperimentsError\n raise PendingExperimentsError(f'design \"{design_name}\" has pending experiments')\n\n experiment_inputs = []\n for design_name in design_names:\n f = db.read_experiment_parameters(self.scope.name, design_name)\n f['_design_'] = design_name\n experiment_inputs.append(f)\n experiment_inputs = pd.concat(experiment_inputs)\n\n experiment_outputs = []\n for design_name in design_names:\n f = db.read_experiment_measures(self.scope.name, design_name)\n # f['_design_'] = design_name\n experiment_outputs.append(f)\n experiment_outputs = pd.concat(experiment_outputs)\n\n transforms = {\n i.name: i.metamodeltype\n 
for i in self.scope.get_measures()\n }\n\n return self.create_metamodel_from_data(\n experiment_inputs.drop('_design_', axis=1),\n experiment_outputs,\n transforms,\n metamodel_id=metamodel_id,\n include_measures=include_measures,\n exclude_measures=exclude_measures,\n db=db,\n random_state=random_state,\n experiment_stratification=experiment_inputs['_design_'],\n suppress_converge_warnings=suppress_converge_warnings,\n )\n\n\n def feature_scores(\n self,\n design,\n return_type='styled',\n random_state=None,\n cmap='viridis',\n measures=None,\n shortnames=None,\n ):\n \"\"\"\n Calculate feature scores based on a design of experiments.\n\n This method is provided as a convenient pass-through to the\n `feature_scores` function in the `analysis` sub-package, using\n the scope and database attached to this model.\n\n Args:\n design (str or pandas.DataFrame): The name of the design\n of experiments to use for feature scoring, or a single\n pandas.DataFrame containing the experimental design and\n results.\n return_type ({'styled', 'figure', 'dataframe'}):\n The format to return, either a heatmap figure as an SVG\n render in and xmle.Elem, or a plain pandas.DataFrame,\n or a styled dataframe.\n random_state (int or numpy.RandomState, optional):\n Random state to use.\n cmap (string or colormap, default 'viridis'): matplotlib\n colormap to use for rendering.\n measures (Collection, optional): The performance measures\n on which feature scores are to be generated. By default,\n all measures are included.\n\n Returns:\n xmle.Elem or pandas.DataFrame:\n Returns a rendered SVG as xml, or a DataFrame,\n depending on the `return_type` argument.\n\n This function internally uses feature_scoring from the EMA Workbench, which in turn\n scores features using the \"extra trees\" regression approach.\n \"\"\"\n from ..analysis.feature_scoring import feature_scores\n if shortnames is True:\n shortnames = self.scope\n return feature_scores(\n self.scope,\n design=design,\n return_type=return_type,\n db=self.db,\n random_state=random_state,\n cmap=cmap,\n measures=measures,\n shortnames=shortnames,\n )\n\n def get_feature_scores(self, *args, **kwargs):\n \"\"\"\n Deprecated, use `Model.feature_scores`.\n \"\"\"\n # for compatability with prior versions of TMIP-EMAT\n return self.feature_scores(*args, **kwargs)\n\n def _common_optimization_setup(\n self,\n epsilons=0.1,\n convergence='default',\n display_convergence=True,\n evaluator=None,\n ):\n import numbers\n if isinstance(epsilons, numbers.Number):\n epsilons = [epsilons]*len(self.outcomes)\n\n if convergence == 'default':\n convergence = ConvergenceMetrics(\n EpsilonProgress(),\n SolutionCount(),\n )\n\n if display_convergence and isinstance(convergence, ConvergenceMetrics):\n from IPython.display import display\n display(convergence)\n\n evaluator = prepare_evaluator(evaluator, self)\n\n return epsilons, convergence, display_convergence, evaluator\n\n def optimize(\n self,\n searchover='levers',\n evaluator=None,\n nfe=10000,\n convergence='default',\n display_convergence=True,\n convergence_freq=100,\n constraints=None,\n reference=None,\n reverse_targets=False,\n algorithm=None,\n epsilons='auto',\n min_epsilon=0.1,\n cache_dir=None,\n cache_file=None,\n check_extremes=False,\n **kwargs,\n ):\n \"\"\"\n Perform multi-objective optimization over levers or uncertainties.\n\n The targets for the multi-objective optimization (i.e. 
whether each\n individual performance measures is to be maximized or minimized) are\n read from the model's scope.\n\n Args:\n searchover ({'levers', 'uncertainties'}):\n Which group of inputs to search over. The other group\n will be set at their default values, unless other values\n are provided in the `reference` argument.\n evaluator (Evaluator, optional): The evaluator to use to\n run the model. If not given, a SequentialEvaluator will\n be created.\n nfe (int, default 10_000): Number of function evaluations.\n This generally needs to be fairly large to achieve stable\n results in all but the most trivial applications.\n convergence ('default', None, or emat.optimization.ConvergenceMetrics):\n A convergence display during optimization. The default\n value is to report the epsilon-progress (the number of\n solutions that ever enter the candidate pool of non-dominated\n solutions) and the number of solutions remaining in that candidate\n pool. Pass `None` explicitly to disable convergence tracking.\n display_convergence (bool, default True): Whether to automatically\n display figures that dynamically track convergence. Set to\n `False` if you are not using this method within a Jupyter\n interactive environment.\n convergence_freq (int, default 100): How frequently to update the\n convergence measures. There is some computational overhead to\n these convergence updates, so setting a value too small may\n noticeably slow down the process.\n constraints (Collection[Constraint], optional):\n Solutions will be constrained to only include values that\n satisfy these constraints. The constraints can be based on\n the search parameters (levers or uncertainties, depending on the\n value given for `searchover`), or performance measures, or\n some combination thereof.\n reference (Mapping): A set of values for the non-active inputs,\n i.e. the uncertainties if `searchover` is 'levers', or the\n levers if `searchover` is 'uncertainties'. Any values not\n set here revert to the default values identified in the scope.\n reverse_targets (bool, default False): Whether to reverse the\n optimization targets given in the scope (i.e., changing\n minimize to maximize, or vice versa). This will result in\n the optimization searching for the worst outcomes, instead of\n the best outcomes.\n algorithm (platypus.Algorithm, optional): Select an\n algorithm for multi-objective optimization. The default\n algorithm is EpsNSGAII. See `platypus` documentation for details.\n epsilons (float or array-like): Used to limit the number of\n distinct solutions generated. Set to a larger value to get\n fewer distinct solutions.\n cache_dir (path-like, optional): A directory in which to\n cache results. Most of the arguments will be hashed\n to develop a unique filename for these results, making this\n generally safer than `cache_file`.\n cache_file (path-like, optional): A file into which to\n cache results. 
If this file exists, the contents of the\n file will be loaded and all other arguments are ignored.\n Use with great caution.\n kwargs: Any additional arguments will be passed on to the\n platypus algorithm.\n\n Returns:\n emat.OptimizationResult:\n The set of non-dominated solutions found.\n When `convergence` is given, the convergence measures are\n included, as a pandas.DataFrame in the `convergence` attribute.\n \"\"\"\n from ..util.disk_cache import load_cache_if_available, save_cache\n if isinstance(algorithm, str) or algorithm is None:\n alg = algorithm\n else:\n alg = algorithm.__name__\n\n if reference is not None:\n from ..workbench import Policy, Scenario\n if searchover == 'levers' and not isinstance(reference, Scenario):\n reference = Scenario(\"ReferenceScenario\", **reference)\n elif searchover == 'uncertainties' and not isinstance(reference, Policy):\n reference = Policy(\"ReferencePolicy\", **reference)\n else:\n if searchover == 'levers':\n reference = self.scope.default_scenario()\n elif searchover == 'uncertainties':\n reference = self.scope.default_policy()\n\n x, cache_file = load_cache_if_available(\n cache_file=cache_file,\n cache_dir=cache_dir,\n searchover=searchover,\n nfe=nfe,\n convergence=convergence,\n convergence_freq=convergence_freq,\n constraints=constraints,\n reference=reference,\n reverse_targets=reverse_targets,\n algorithm=alg,\n epsilons=epsilons,\n )\n\n if x is None:\n epsilons, convergence, display_convergence, evaluator = self._common_optimization_setup(\n epsilons, convergence, display_convergence, evaluator\n )\n\n if reverse_targets:\n for k in self.scope.get_measures():\n k.kind_original = k.kind\n k.kind = k.kind * -1\n\n _db_pause = self.db\n\n try:\n self.db = None\n with evaluator:\n\n if epsilons == 'auto':\n from ..workbench import perform_experiments\n if searchover == 'levers':\n _, trial_outcomes = perform_experiments(\n self,\n scenarios=reference,\n policies=30,\n evaluator=evaluator,\n )\n else:\n _, trial_outcomes = perform_experiments(\n self,\n scenarios=30,\n policies=reference,\n evaluator=evaluator,\n )\n epsilons = [max(min_epsilon, np.std(trial_outcomes[mn]) / 20) for mn in self.scope.get_measure_names()]\n\n results = evaluator.optimize(\n searchover=searchover,\n reference=reference,\n nfe=nfe,\n constraints=constraints,\n convergence=convergence,\n convergence_freq=convergence_freq,\n epsilons=epsilons,\n **kwargs,\n )\n\n if isinstance(results, tuple) and len(results) == 2:\n results, result_convergence = results\n else:\n result_convergence = None\n\n # Put constants back in to results\n for i in self.scope.get_constants():\n results[i.name] = i.value\n\n results = self.ensure_dtypes(results)\n x = OptimizationResult(results, result_convergence, scope=self.scope)\n\n if searchover == 'levers':\n x.scenarios = reference\n elif searchover == 'uncertainties':\n x.policies = reference\n\n if check_extremes:\n x.check_extremes(\n self,\n 1 if check_extremes is True else check_extremes,\n evaluator=evaluator,\n searchover=searchover,\n robust=False,\n )\n\n finally:\n if reverse_targets:\n for k in self.scope.get_measures():\n k.kind = k.kind_original\n del k.kind_original\n self.db = _db_pause\n\n elif display_convergence:\n _, convergence, display_convergence, _ = self._common_optimization_setup(\n None, convergence, display_convergence, False\n )\n for c in convergence:\n try:\n c.rebuild(x.convergence)\n except KeyboardInterrupt:\n raise\n except:\n pass\n\n x.cache_file = cache_file\n save_cache(x, cache_file)\n 
return x\n\n def robust_optimize(\n self,\n robustness_functions,\n scenarios,\n evaluator=None,\n nfe=10000,\n convergence='default',\n display_convergence=True,\n convergence_freq=100,\n constraints=None,\n epsilons=0.1,\n cache_dir=None,\n cache_file=None,\n algorithm=None,\n check_extremes=False,\n **kwargs,\n ):\n \"\"\"\n Perform robust optimization.\n\n The robust optimization generally a multi-objective optimization task.\n It is undertaken using statistical measures of outcomes evaluated across\n a number of scenarios, instead of using the individual outcomes themselves.\n For each candidate policy, the model is evaluated against all of the considered\n scenarios, and then the robustness measures are evaluated using the\n set of outcomes from the original runs. The robustness measures\n are aggregate measures that are computed from a set of outcomes.\n For example, this may be expected value, median, n-th percentile,\n minimum, or maximum value of any individual outcome. It is also\n possible to have joint measures, e.g. expected value of the larger\n of outcome 1 or outcome 2.\n\n Each robustness function is indicated as a maximization or minimization\n target, where higher or lower values are better, respectively.\n The optimization process then tries to identify one or more\n non-dominated solutions for the possible policy levers.\n\n Args:\n robustness_functions (Collection[Measure]): A collection of\n aggregate statistical performance measures.\n scenarios (int or Collection): A collection of scenarios to\n use in the evaluation(s), or give an integer to generate\n that number of random scenarios.\n evaluator (Evaluator, optional): The evaluator to use to\n run the model. If not given, a SequentialEvaluator will\n be created.\n nfe (int, default 10_000): Number of function evaluations.\n This generally needs to be fairly large to achieve stable\n results in all but the most trivial applications.\n convergence ('default', None, or emat.optimization.ConvergenceMetrics):\n A convergence display during optimization.\n display_convergence (bool, default True): Automatically display\n the convergence metric figures when optimizing.\n convergence_freq (int, default 100): The frequency at which\n convergence metric figures are updated.\n constraints (Collection[Constraint], optional)\n Solutions will be constrained to only include values that\n satisfy these constraints. The constraints can be based on\n the policy levers, or on the computed values of the robustness\n functions, or some combination thereof.\n epsilons (float or array-like): Used to limit the number of\n distinct solutions generated. Set to a larger value to get\n fewer distinct solutions.\n cache_dir (path-like, optional): A directory in which to\n cache results. Most of the arguments will be hashed\n to develop a unique filename for these results, making this\n generally safer than `cache_file`.\n cache_file (path-like, optional): A file into which to\n cache results. If this file exists, the contents of the\n file will be loaded and all other arguments are ignored.\n Use with great caution.\n algorithm (platypus.Algorithm or str, optional): Select an\n algorithm for multi-objective optimization. The algorithm can\n be given directly, or named in a string. 
See `platypus`\n documentation for details.\n check_extremes (bool or int, default False): Conduct additional\n evaluations, setting individual policy levers to their\n extreme values, for each candidate Pareto optimal solution.\n kwargs: any additional arguments will be passed on to the\n platypus algorithm.\n\n Returns:\n emat.OptimizationResult:\n The set of non-dominated solutions found.\n When `convergence` is given, the convergence measures are\n included, as a pandas.DataFrame in the `convergence` attribute.\n \"\"\"\n from ..optimization.optimize import robust_optimize\n\n from ..util.disk_cache import load_cache_if_available, save_cache\n if isinstance(algorithm, str) or algorithm is None:\n alg = algorithm\n else:\n alg = algorithm.__name__\n result, cache_file = load_cache_if_available(\n cache_file=cache_file,\n cache_dir=cache_dir,\n scenarios=scenarios,\n convergence=convergence,\n convergence_freq=convergence_freq,\n constraints=constraints,\n epsilons=epsilons,\n nfe=nfe,\n robustness_functions=robustness_functions,\n alg=alg,\n check_extremes=check_extremes,\n )\n\n if result is None:\n _db_pause = self.db\n try:\n self.db = None\n result = robust_optimize(\n self,\n robustness_functions,\n scenarios,\n evaluator=evaluator,\n nfe=nfe,\n convergence=convergence,\n display_convergence=display_convergence,\n convergence_freq=convergence_freq,\n constraints=constraints,\n epsilons=epsilons,\n check_extremes=check_extremes,\n **kwargs,\n )\n finally:\n self.db = _db_pause\n elif display_convergence:\n _, convergence, display_convergence, _ = self._common_optimization_setup(\n None, convergence, display_convergence, False\n )\n for c in convergence:\n try:\n c.rebuild(result.convergence)\n except KeyboardInterrupt:\n raise\n except:\n pass\n\n result.cache_file = cache_file\n save_cache(result, cache_file)\n return result\n\n def robust_evaluate(\n self,\n robustness_functions,\n scenarios,\n policies,\n evaluator=None,\n cache_dir=None,\n suspend_db=True,\n ):\n \"\"\"\n Perform robust evaluation(s).\n\n The robust evaluation is used to generate statistical measures\n of outcomes, instead of generating the individual outcomes themselves.\n For each policy, the model is evaluated against all of the considered\n scenarios, and then the robustness measures are evaluated using the\n set of outcomes from the original runs. The robustness measures\n are aggregate measures that are computed from a set of outcomes.\n For example, this may be expected value, median, n-th percentile,\n minimum, or maximum value of any individual outcome. It is also\n possible to have joint measures, e.g. expected value of the larger\n of outcome 1 or outcome 2.\n\n Args:\n robustness_functions (Collection[Measure]): A collection of\n aggregate statistical performance measures.\n scenarios (int or Collection): A collection of scenarios to\n use in the evaluation(s), or give an integer to generate\n that number of random scenarios.\n policies (int, or collection): A collection of policies to\n use in the evaluation(s), or give an integer to generate\n that number of random policies.\n evaluator (Evaluator, optional): The evaluator to use to\n run the model. If not given, a SequentialEvaluator will\n be created.\n cache_dir (path-like, optional): A directory in which to\n cache results.\n suspend_db (bool, default True):\n Suspend writing the results of individual model runs to\n the database. 
Robust evaluation potentially generates a\n large number of model executions, and storing all these\n individual results may not be useful.\n\n Returns:\n pandas.DataFrame: The computed value of each item\n in `robustness_functions`, for each policy in `policies`.\n \"\"\"\n robust_results = None\n cache_file = None\n if cache_dir is not None:\n try:\n from ..util.hasher import hash_it\n hh = hash_it(\n scenarios,\n policies,\n robustness_functions,\n )\n os.makedirs(os.path.join(cache_dir,hh[2:4],hh[4:6]), exist_ok=True)\n cache_file = os.path.join(cache_dir,hh[2:4],hh[4:6],hh[6:]+\".gz\")\n if os.path.exists(cache_file):\n _logger.debug(f\"loading from cache_file={cache_file}\")\n from ..util.filez import load\n robust_results = load(cache_file)\n cache_file = None\n except KeyboardInterrupt:\n raise\n except:\n import traceback\n warnings.warn('unable to manage cache')\n traceback.print_exc()\n\n if robust_results is None:\n with self.lock_db(suspend_db):\n if evaluator is None:\n from ..workbench.em_framework import SequentialEvaluator\n evaluator = SequentialEvaluator(self)\n\n if not isinstance(evaluator, BaseEvaluator):\n from dask.distributed import Client\n if isinstance(evaluator, Client):\n from ..workbench.em_framework.ema_distributed import DistributedEvaluator\n evaluator = DistributedEvaluator(self, client=evaluator)\n\n from ..workbench.em_framework.samplers import sample_uncertainties, sample_levers\n\n if isinstance(scenarios, int):\n n_scenarios = scenarios\n scenarios = sample_uncertainties(self, n_scenarios)\n\n with evaluator:\n robust_results = evaluator.robust_evaluate(\n robustness_functions,\n scenarios,\n policies,\n )\n\n robust_results = self.ensure_dtypes(robust_results)\n\n if cache_file is not None:\n from ..util.filez import save\n save(robust_results, cache_file, overwrite=True)\n with open(cache_file.replace('.gz','.info.txt'), 'wt') as notes:\n print(\"scenarios=\", scenarios, file=notes)\n print(\"robustness_functions=\", robustness_functions, file=notes)\n print(\"policies=\", policies, file=notes)\n\n return robust_results\n\n def io_experiment(self, params):\n \"\"\"\n Run an experiment, and return a dictionary of inputs and outputs together.\n\n Args:\n params: dict\n\n Returns:\n dict\n \"\"\"\n out = self.run_experiment(params).copy()\n out.update(params)\n return out\n\n def log(self, message, level=logging.INFO):\n \"\"\"\n Log a message.\n\n This facility will attempt to send log messages to\n the attached database, falling back to the regular\n module logger in case that fails.\n\n Args:\n message (str): Message to send to log.\n level (int, default logging.INFO): Log level.\n\n Returns:\n\n \"\"\"\n db = getattr(self, 'db', None)\n try:\n db.log(message, level=level)\n except:\n _logger.log(level, message)\n" ]
[ [ "numpy.std", "pandas.DataFrame", "pandas.concat", "pandas.DataFrame.from_dict" ] ]
YoungSkKim/CenterTrack-RideFlux
[ "36c0e4ddc608bddd203c12feb8a5f562c990eacb" ]
[ "src/lib/dataset/datasets/mot.py" ]
[ "from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport pycocotools.coco as coco\nfrom pycocotools.cocoeval import COCOeval\nimport numpy as np\nimport json\nimport os\nfrom collections import defaultdict\nfrom ..generic_dataset import GenericDataset\n\nclass MOT(GenericDataset):\n num_categories = 1\n default_resolution = [544, 960]\n class_name = ['']\n max_objs = 256\n cat_ids = {1: 1, -1: -1}\n def __init__(self, opt, split):\n self.dataset_version = opt.dataset_version\n self.year = int(self.dataset_version[:2])\n print('Using MOT {} {}'.format(self.year, self.dataset_version))\n data_dir = os.path.join(opt.data_dir, 'mot{}'.format(self.year))\n\n if opt.dataset_version in ['17trainval', '17test']:\n ann_file = '{}.json'.format('train' if split == 'train' else \\\n 'test')\n elif opt.dataset_version == '17halftrain':\n ann_file = '{}.json'.format('train_half')\n elif opt.dataset_version == '17halfval':\n ann_file = '{}.json'.format('val_half')\n img_dir = os.path.join(data_dir, '{}'.format(\n 'test' if 'test' in self.dataset_version else 'train'))\n\n print('ann_file', ann_file)\n ann_path = os.path.join(data_dir, 'annotations', ann_file)\n\n self.images = None\n # load image list and coco\n super(MOT, self).__init__(opt, split, ann_path, img_dir)\n\n self.num_samples = len(self.images)\n print('Loaded MOT {} {} {} samples'.format(\n self.dataset_version, split, self.num_samples))\n\n def _to_float(self, x):\n return float(\"{:.2f}\".format(x))\n\n def __len__(self):\n return self.num_samples\n\n def save_results(self, results, save_dir):\n results_dir = os.path.join(save_dir, 'results_mot{}'.format(self.dataset_version))\n if not os.path.exists(results_dir):\n os.mkdir(results_dir)\n for video in self.coco.dataset['videos']:\n video_id = video['id']\n file_name = video['file_name']\n out_path = os.path.join(results_dir, '{}.txt'.format(file_name))\n f = open(out_path, 'w')\n images = self.video_to_images[video_id]\n tracks = defaultdict(list)\n for image_info in images:\n if not (image_info['id'] in results):\n continue\n result = results[image_info['id']]\n frame_id = image_info['frame_id']\n for item in result:\n if not ('tracking_id' in item):\n item['tracking_id'] = np.random.randint(100000)\n if item['active'] == 0:\n continue\n tracking_id = item['tracking_id']\n bbox = item['bbox']\n bbox = [bbox[0], bbox[1], bbox[2], bbox[3]]\n tracks[tracking_id].append([frame_id] + bbox)\n rename_track_id = 0\n for track_id in sorted(tracks):\n rename_track_id += 1\n for t in tracks[track_id]:\n f.write('{},{},{:.2f},{:.2f},{:.2f},{:.2f},-1,-1,-1,-1\\n'.format(\n t[0], rename_track_id, t[1], t[2], t[3]-t[1], t[4]-t[2]))\n f.close()\n\n def run_eval(self, results, save_dir):\n self.save_results(results, save_dir)\n gt_type_str = '{}'.format(\n '_train_half' if '17halftrain' in self.opt.dataset_version \\\n else '_val_half' if '17halfval' in self.opt.dataset_version \\\n else '')\n gt_type_str = '_val_half' if self.year in [16, 19] else gt_type_str\n gt_type_str = '--gt_type {}'.format(gt_type_str) if gt_type_str != '' else \\\n ''\n os.system('python tools/eval_motchallenge.py ' + \\\n '../data/mot{}/{}/ '.format(self.year, 'train') + \\\n '{}/results_mot{}/ '.format(save_dir, self.dataset_version) + \\\n gt_type_str + ' --eval_official')\n" ]
[ [ "numpy.random.randint" ] ]
snji-khjuria/RelationClassificationFewShotModels
[ "a5047f44a57a81ab3281bf1290fa149a4c456486" ]
[ "models/metagan_queryattentive.py" ]
[ "#creating the metagan model\nimport torch\nimport torch.nn as nn\n\nimport sys\n\nsys.path.append('..')\nimport fewshot_re_kit\nimport torch\nfrom torch import autograd, optim, nn\nfrom torch.autograd import Variable\nfrom torch.nn import functional as F\n\n\n\n\n\n\n\nclass MetaGenerator(fewshot_re_kit.adversarial_framework.FewShotAdversarialREModel):\n def __init__(self, input_size, K, D=230):\n fewshot_re_kit.adversarial_framework.FewShotAdversarialREModel.__init__(self)\n self.generator_model = nn.Sequential(\n nn.Linear(input_size, 4096),\n nn.ReLU(),\n nn.Linear(4096, 2048),\n nn.ReLU(),\n nn.Linear(2048, 1024),\n nn.ReLU(),\n nn.Linear(1024, K*D),\n nn.ReLU()\n )\n\n def forward(self, x):\n x = self.generator_model(x)\n return x\n\n\n\nclass MetaDisc(fewshot_re_kit.adversarial_framework.FewShotAdversarialREModel):\n\n\n def __init__(self, hidden_size=230, relnet_features=230*2):\n fewshot_re_kit.adversarial_framework.FewShotAdversarialREModel.__init__(self)\n self.hidden_size = hidden_size\n self.drop=nn.Dropout()\n self.fc = nn.Sequential(\n nn.Linear(hidden_size, hidden_size, bias=True)\n )\n self.relation_network = nn.Sequential(\n #nn.Dropout(),\n nn.Linear(relnet_features , 64),\n nn.BatchNorm1d(64),\n nn.ReLU(inplace=True),\n nn.Dropout(),\n nn.Linear(64, 1),\n #TODO: Add the sigmoid layer if you want to\n )\n\n\n\n def __dist__(self, x, y, dim):\n return (torch.pow(x - y, 2)).sum(dim)\n\n def compute_distance(self, prototypes, query):\n return self.__dist__(prototypes, query.unsqueeze(2), 3)\n\n def euclidean_similarity(self, S, Q):\n distance = self.__dist__(S.unsqueeze(1), Q.unsqueeze(2), 3)\n return distance\n #return torch.div(1, 1+distance)\n def relation_score(self, support, query):\n return self.euclidean_similarity(support, query)\n #return self.__batch_dist__(support, query)\n #print(\"support is \", support.size())\n #print(\"q query is \", query.size())\n _, nq, _ = query.size()\n B, nc, D = support.size()\n s_s = support.unsqueeze(1).expand(-1, nq, -1, -1)\n q_q = query.unsqueeze(2).expand(-1, -1, nc, -1)\n #cos = nn.CosineSimilarity(dim=3, eps=1e-6)\n #return cos(s_s, q_q)\n\n nn_input = torch.cat([s_s, q_q], 3)\n nn_input = nn_input.view(B*nq*nc, -1)\n nn_out = self.relation_network(nn_input)\n nn_out = nn_out.view(B, nq, nc, 1).squeeze(3)\n return nn_out\n\n\n def forward(self, support, query, N, K, NQ, is_train=False):\n '''\n support: Inputs of the support set.\n query: Inputs of the query set.\n N: Num of classes\n K: Num of instances for each class in the support set\n Q: Num of instances for each class in the query set\n '''\n support = self.drop(support)\n query = self.drop(query)\n support = support.view(-1, N, K, self.hidden_size) # (B, N, K, D)\n query = query.view(-1, NQ, self.hidden_size) # (B, N * Q, D)\n B = support.size(0) # Batch size\n NQ = query.size(1) # Num of instances for each batch in the query set\n support = support.unsqueeze(1).expand(-1, NQ, -1, -1, -1)\n support_for_att = self.fc(support)#(B, NQ, N, D)\n query_for_att = self.fc(query.unsqueeze(2).unsqueeze(3).expand(-1, -1, N, K, -1))\n ins_att_score = F.softmax(torch.tanh(support_for_att * query_for_att).sum(-1), dim=-1)\n support_proto = (support * ins_att_score.unsqueeze(4).expand(-1, -1, -1, -1, self.hidden_size))\n support_proto = support_proto.sum(3)\n prototypes = support_proto\n #prototypes = self.generate_query_attentive_prototype(support, query)\n logits = -self.compute_distance(prototypes, query)\n\n _, pred = torch.max(logits.view(-1, N), 1)\n return logits, pred" ]
[ [ "torch.nn.Linear", "torch.nn.Dropout", "torch.nn.BatchNorm1d", "torch.tanh", "torch.nn.ReLU", "torch.cat", "torch.pow" ] ]
samysweb/dnnv
[ "58fb95b7300914d9da28eed86c39eca473b1aaef" ]
[ "dnnv/verifiers/marabou/__init__.py" ]
[ "import numpy as np\nimport tempfile\n\nfrom dnnv.verifiers.common.base import Parameter, Verifier\nfrom dnnv.verifiers.common.reductions import IOPolytopeReduction, HalfspacePolytope\nfrom dnnv.verifiers.common.results import SAT, UNSAT, UNKNOWN\nfrom functools import partial\n\nfrom .errors import MarabouError, MarabouTranslatorError\n\n\nclass Marabou(Verifier):\n reduction = partial(IOPolytopeReduction, HalfspacePolytope, HalfspacePolytope)\n translator_error = MarabouTranslatorError\n verifier_error = MarabouError\n parameters = {\n \"num_workers\": Parameter(int, help=\"Maximum number of workers to use.\"),\n }\n\n def build_inputs(self, prop):\n if prop.input_constraint.num_variables > 1:\n raise self.translator_error(\n \"Unsupported network: More than 1 input variable\"\n )\n\n with tempfile.NamedTemporaryFile(\n mode=\"w+\", suffix=\".onnx\", delete=False\n ) as onnx_model_file:\n prop.op_graph.simplify().export_onnx(onnx_model_file.name)\n\n lb, ub = prop.input_constraint.as_bounds()\n A_in, b_in = prop.input_constraint.as_matrix_inequality()\n A_out, b_out = prop.output_constraint.as_matrix_inequality(include_bounds=True)\n\n with tempfile.NamedTemporaryFile(\n mode=\"w+\", suffix=\".npy\", delete=False\n ) as constraint_file:\n np.save(constraint_file.name, ((lb, ub), (A_in, b_in), (A_out, b_out)))\n\n with tempfile.NamedTemporaryFile(\n mode=\"w+\", suffix=\".npy\", delete=False\n ) as output_file:\n self._tmp_output_file = output_file\n args = (\n \"marabou\",\n onnx_model_file.name,\n constraint_file.name,\n \"-o\",\n self._tmp_output_file.name,\n ) + tuple(f\"--{k}={v}\" for k, v in self.parameters.items() if v is not None)\n return args\n\n def parse_results(self, prop, results):\n result, cinput = np.load(self._tmp_output_file.name, allow_pickle=True)\n if result == False:\n return UNSAT, None\n elif result == True:\n input_shape, input_dtype = prop.op_graph.input_details[0]\n cex = cinput.reshape(input_shape).astype(input_dtype)\n return SAT, cex\n raise self.translator_error(f\"Unknown verification result: {result}\")\n" ]
[ [ "numpy.load", "numpy.save" ] ]
gstoica27/ENAS-pytorch
[ "f8b9acbd101ab15c158066d2e4e9012ad11061a7" ]
[ "main.py" ]
[ "\"\"\"Entry point.\"\"\"\nimport os\n\nimport torch\n\nimport data\nimport config\nimport utils\nimport trainer\nimport re_trainer\n\nfrom data.loader import DataLoader\nfrom tacred_utils import scorer, constant, helper\nfrom tacred_utils.vocab import Vocab\nimport numpy as np\n\nlogger = utils.get_logger()\n\n\ndef main(args): # pylint:disable=redefined-outer-name\n \"\"\"main: Entry point.\"\"\"\n utils.prepare_dirs(args)\n\n torch.manual_seed(args.random_seed)\n\n if args.num_gpu > 0:\n torch.cuda.manual_seed(args.random_seed)\n\n if args.network_type == 'rnn':\n if args.dataset != 'tacred':\n dataset = data.text.Corpus(args.data_path)\n # loading tacred data\n else:\n opt = vars(args)\n opt['num_classes'] = len(constant.LABEL_TO_ID)\n\n # load vocab\n #vocab_file = \"/Volumes/External HDD/dataset/tacred/data/vocab/vocab.pkl\"\n #emb_file = '/Volumes/External HDD/dataset/tacred/data/vocab/embedding.npy'\n #opt['data_dir'] = '/Volumes/External HDD/dataset/tacred/data/json'\n\n emb_file = '/home/scratch/gis/datasets/vocab/embedding.npy'\n vocab_file = '/home/scratch/gis/datasets/vocab/vocab.pkl'\n opt['data_dir'] = '/home/scratch/gis/datasets/tacred/data/json'\n\n vocab = Vocab(vocab_file, load=True)\n opt['vocab_size'] = vocab.size\n emb_matrix = np.load(emb_file)\n assert emb_matrix.shape[0] == vocab.size\n assert emb_matrix.shape[1] == args.emb_dim\n\n train_batch = DataLoader(opt['data_dir'] + '/train.json', opt['batch_size'], opt, vocab, evaluation=False)\n score_dev_batch = DataLoader(opt['data_dir'] + '/dev.json', opt['batch_size'], opt, vocab, evaluation=False)\n eval_dev_batch = DataLoader(opt['data_dir'] + '/dev.json', opt['batch_size'], opt, vocab, evaluation=True)\n test_batch = DataLoader(opt['data_dir'] + '/test.json', opt['batch_size'], opt, vocab, evaluation=True)\n\n dataset = {'train_batch': train_batch,\n 'score_dev_batch': score_dev_batch,\n 'eval_dev_batch': eval_dev_batch,\n 'test_batch': test_batch,\n 'emb_matrix': emb_matrix}\n args.num_classes = opt['num_classes']\n args.emb_matrix = emb_matrix\n args.vocab_size = opt['vocab_size']\n\n elif args.dataset == 'cifar':\n dataset = data.image.Image(args.data_path)\n else:\n raise NotImplementedError(f\"{args.dataset} is not supported\")\n if args.dataset != 'tacred':\n trnr = trainer.Trainer(args, dataset)\n else:\n trnr = re_trainer.Trainer(args, dataset)\n\n if args.mode == 'train':\n utils.save_args(args)\n trnr.train()\n elif args.mode == 'derive':\n assert args.load_path != \"\", (\"`--load_path` should be given in \"\n \"`derive` mode\")\n trnr.derive()\n elif args.mode == 'test':\n if not args.load_path:\n raise Exception(\"[!] You should specify `load_path` to load a \"\n \"pretrained model\")\n trnr.test()\n elif args.mode == 'single':\n if not args.dag_path:\n raise Exception(\"[!] You should specify `dag_path` to load a dag\")\n utils.save_args(args)\n trnr.train(single=True)\n else:\n raise Exception(f\"[!] Mode not found: {args.mode}\")\n\nif __name__ == \"__main__\":\n args, unparsed = config.get_args()\n print(args)\n main(args)\n" ]
[ [ "torch.cuda.manual_seed", "numpy.load", "torch.manual_seed" ] ]
bsavelev/medipy
[ "f0da3750a6979750d5f4c96aedc89ad5ae74545f" ]
[ "lib/medipy/gui/image/spectro_dialog.py" ]
[ "##########################################################################\n# MediPy - Copyright (C) Universite de Strasbourg\n# Distributed under the terms of the CeCILL-B license, as published by\n# the CEA-CNRS-INRIA. Refer to the LICENSE file or to\n# http://www.cecill.info/licences/Licence_CeCILL-B_V1-en.html\n# for details.\n##########################################################################\n\nimport os\nimport xml.dom.minidom as md\n\nimport numpy\nimport wx\nimport wx.xrc\n\nfrom medipy.base import find_resource, ImageAnnotation, ObservableList\nimport medipy.io\nimport medipy.io.rbnmr as rbnmr\nimport medipy.gui.xrc_wrapper\n\nclass SpectroDialog(medipy.gui.xrc_wrapper.Dialog):\n \"\"\" Dialog allowing the user to choose a spectroscopy image\n within a directory.\n Il also gives the choice either to open a 1D spectrum or compute\n the projection histogram.\n \"\"\"\n def __init__(self, parent=None, *args, **kwargs):\n \n resource = wx.xrc.EmptyXmlResource()\n resource.InsertHandler(medipy.gui.xrc_wrapper.DirectoryXMLHandler())\n resource.InsertHandler(medipy.gui.xrc_wrapper.FileXMLHandler())\n \n file = open(find_resource(\"resources/gui/spectro_dialog.xrc\"))\n resource.LoadFromString(file.read())\n \n dialog = resource.LoadDialog(parent, \"main_dialog\")\n medipy.gui.xrc_wrapper.Dialog.__init__(self, dialog, *args, **kwargs)\n \n controls = [\"dir_dialog\", \"dir_listbox\", \"image_listbox\",\n \"reference_listbox\", \"annotations_listbox\", \"annotations_checkbox\",\n \"file_dialog\", \"open_button\", \"cancel_button\"]\n \n for control in controls : \n setattr(self, \"_\"+control, wx.xrc.XRCCTRL(self, control))\n \n self.SetTitle(\"Load spectroscopy image\") \n self._open_button.Disable()\n \n # Attributes initialization \n self._patient_dirname = None\n self._image_dict = {}\n self._rect_dict = {}\n self._image_path = None\n self._reference_path = None\n self._annotations_path = None\n self._file_dialog._wildcard = \"Annotation file|*.xml\"\n self._file_dialog._button.Disable()\n \n # Events\n self.Bind(wx.EVT_CLOSE, self.OnClose)\n self._dir_listbox.Bind(wx.EVT_LISTBOX, self.OnDirChosen)\n self._image_listbox.Bind(wx.EVT_LISTBOX, self.OnImageChosen)\n self._reference_listbox.Bind(wx.EVT_LISTBOX, self.OnReferenceChosen)\n self._annotations_listbox.Bind(wx.EVT_LISTBOX, self.OnAnnotationChosen)\n self._annotations_checkbox.Bind(wx.EVT_CHECKBOX, self.OnCustomChecked)\n self._open_button.Bind(wx.EVT_BUTTON, self.OnOpenClicked)\n self._cancel_button.Bind(wx.EVT_BUTTON, self.OnCancelClicked)\n\n self._dir_dialog.add_observer(\"value\", self.OnPathChanged)\n self._file_dialog.add_observer(\"value\", self.OnCustomPathChanged)\n \n def update_information(self):\n \"\"\" Enable the open button if all the necessary pieces of information\n have been gathered. 
Disable it if not.\n \"\"\"\n if self._image_path is not None:\n self._open_button.Enable() \n else:\n self._open_button.Disable() \n \n self.Fit()\n self.GetSizer().SetSizeHints(self)\n \n ##########\n # Events #\n ########## \n \n def OnClose(self, event):\n \"\"\" Shut the window\n \"\"\"\n self.Destroy()\n \n def OnPathChanged(self, event):\n \"\"\" Set up the directories and reference spectra listboxes\n \"\"\"\n # Clean all the listboxes\n self._dir_listbox.Clear()\n self._image_listbox.Clear()\n self._reference_listbox.Clear()\n self._annotations_listbox.Clear()\n \n # Set up the directories listbox \n self._patient_dirname = self._dir_dialog._text.GetValue() \n dir_list = []\n for dirpath, dirnames, filenames in os.walk(self._patient_dirname):\n dir_list.append((dirpath, dirnames, filenames))\n dir_list[0][1].sort()\n self._dir_listbox.InsertItems(dir_list[0][1], 0)\n \n # Define the path splitter\n if '/' in self._patient_dirname:\n splitter = '/'\n else:\n splitter = '\\\\'\n \n # Set up the reference spectra and annotations listboxes\n self._ref_dict ={}\n self._annotations_dict ={}\n\n for i in dir_list:\n for filename in i[2]:\n # A reference spectrum has been found\n if filename in ['1r', '1i']:\n self._ref_dict[i[0].split(splitter)[-3] +'->'+ filename] = i[0]\n \n # An annotation file has been found\n if filename == \"peaklist.xml\":\n self._annotations_dict[i[0].split(splitter)[-3] +'->'+ filename] = os.path.join(i[0], \"peaklist.xml\")\n \n sorted_spectra = self._ref_dict.keys()\n sorted_spectra.sort()\n self._reference_listbox.InsertItems(sorted_spectra, 0)\n self._reference_listbox.Insert(\"None\", 0)\n \n if self._annotations_dict.keys() != []:\n sorted_annotations = self._annotations_dict.keys()\n sorted_annotations.sort()\n self._annotations_listbox.InsertItems(sorted_annotations, 0)\n self._annotations_listbox.Insert(\"None\", 0)\n \n self.update_information()\n \n def OnCustomPathChanged(self, event):\n \"\"\" Set up the path to the custom annotation file\n \"\"\"\n self._annotations_path = self._file_dialog.value\n \n def OnDirChosen(self, event):\n \"\"\" Display the available images within the selected directory\n \"\"\"\n self._image_listbox.Clear()\n image_list = []\n for dirpath, dirnames, filenames in os.walk(os.path.join(self._patient_dirname, self._dir_listbox.GetStringSelection())):\n image_list.append((dirpath, dirnames, filenames))\n \n # Dictionary associating a filename with its parent directory \n self._image_dict = {}\n for i in image_list:\n for filename in i[2]:\n if filename in ['2rr','2ri','2ir','2ii']:\n self._image_dict[filename]=i[0]\n \n sorted_images = self._image_dict.keys()\n sorted_images.sort()\n sorted_images.reverse()\n self._image_listbox.InsertItems(sorted_images, 0)\n \n self.update_information()\n \n def OnImageChosen(self, event):\n \"\"\" Set the full path to the selected image\n \"\"\"\n if self._image_listbox.GetStringSelection() != '':\n self._image_path = os.path.join(self._image_dict[self._image_listbox.GetStringSelection()],self._image_listbox.GetStringSelection())\n \n self.update_information()\n \n def OnReferenceChosen(self, event):\n \"\"\"Set the full path to the selected reference spectrum\n\n \"\"\"\n if (self._reference_listbox.GetStringSelection() != \"None\") and (self._reference_listbox.GetStringSelection() != ''):\n self._reference_path = os.path.join(self._ref_dict[self._reference_listbox.GetStringSelection()], self._reference_listbox.GetStringSelection()[-2:])\n else:\n self._reference_path = None\n \n 
self.update_information()\n \n def OnAnnotationChosen(self, event):\n \"\"\" Set the full path to the selected annotation file\n \"\"\"\n if (self._annotations_listbox.GetStringSelection() != \"None\") and (self._annotations_listbox.GetStringSelection() != ''):\n self._annotations_path = self._annotations_dict[self._annotations_listbox.GetStringSelection()]\n \n def OnCustomChecked(self, event): \n \"\"\" Allow the user to load an annotation file from another patient directory\n \"\"\"\n if self._annotations_checkbox.IsChecked():\n self._annotations_listbox.Disable()\n self._file_dialog._button.Enable()\n if self._file_dialog.validate():\n self._annotations_path = self._file_dialog.value\n else:\n self._annotations_path = None\n else:\n self._annotations_listbox.Enable()\n self._file_dialog._button.Disable()\n if self._annotations_listbox.IsEmpty():\n self._annotations_path = None\n elif self._annotations_listbox.GetStringSelection() != '':\n self._annotations_path = self._annotations_dict[self._annotations_listbox.GetStringSelection()]\n \n def OnOpenClicked(self, event):\n \"\"\" Load the spectrum with either a reference spectrum or a computed histogram\n \"\"\"\n \n # Create the image\n image = medipy.io.load(self._image_path) #, 0, loader_class= nmr2D.Nmr2D)\n \n # Insert a reference spectrum into the image if one has been specified\n if self._reference_path is not None:\n spectrum = numpy.fromfile(self._reference_path, numpy.int32)\n image.metadata[\"header\"][\"proton_spectrum\"] = spectrum\n \n # Load a list of annotations if an annotation file has been specified\n if self._annotations_path is not None:\n image.metadata[\"Data\"] = image.data\n dom = md.parse(self._annotations_path)\n peaks = dom.getElementsByTagName(\"Peak2D\")\n image.annotations = ObservableList()\n for peak in peaks:\n annotation = ImageAnnotation()\n ppm = (float(peak.getAttribute(\"F1\")),float(peak.getAttribute(\"F2\")))\n point = rbnmr.ppm_to_point(ppm, \n image.metadata[\"Procs\"],\n image.metadata[\"Proc2s\"])\n annotation.position = [0, point[-2], point[-1]]\n annotation.label = peak.getAttribute(\"annotation\")\n annotation.shape = ImageAnnotation.Shape.cross\n annotation.size = 10\n annotation.color = [0, 1., 0.]\n annotation.filled = False\n annotation.depth = 10\n image.annotations.append(annotation)\n \n self.GetParent().append_image([{\"image\":image}])\n \n # Close the window\n self.Destroy()\n \n def OnCancelClicked(self, event):\n \"\"\" Abort\n \"\"\"\n self.OnClose(event)\n \nif __name__ == \"__main__\" :\n app = wx.App()\n \n dlg = SpectroDialog()\n dlg.ShowModal()\n dlg.GetSizer().SetSizeHints(dlg)\n \n app.MainLoop()\n" ]
[ [ "numpy.fromfile" ] ]
Xingbaji/pytorch-template
[ "2246bf32a0605f2e8527f296274550a88b1e8fc9" ]
[ "model/attentionPDE_model.py" ]
[ "import torch.nn as nn\nimport torch.nn.functional as F\nfrom base import BaseModel\nimport pdb\nimport torch\nimport math\n\nclass AttentionPDEModel(nn.Module):\n def __init__(self,ndim,T,device_num = 0):\n super(AttentionPDEModel, self).__init__()\n # W_x = torch.randn(ndim, ndim, requires_grad=True)\n # torch.set_default_tensor_type('torch.cuda.DoubleTensor')\n #默认全部都使用cuda\n self.device_num = device_num\n self.ndim = ndim\n self.ndim = ndim\n self.T = T\n self.dt = 0.015\n self.dx = 0.1257\n self.dy = 0.1257\n self.build()\n # xy_path = \"/home1/shenxing/Attention_PDE/data/xy.pt\"\n # self.xy = torch.load(xy_path)\n # xy = xy.double()\n\n\n # Input = xy[:, :, :, 0]\n # Input = Input.repeat(20,1,1,1)\n # Input = Input.permute(1,2,3,0)\n # u_t = self.get_u_t(Input)\n\n #\n # Input = xy[:, :, :, 0]\n # Input = Input.repeat(20,1,1,1)\n # Input = Input.permute(1,2,3,0)\n #\n # u_x = self.get_u_x(Input)\n # u_xx = self.get_u_xx(Input)\n #\n # Input = xy[:, :, :, 1]\n # Input = Input.repeat(20, 1, 1, 1)\n # Input = Input.permute(1, 2, 3, 0)\n #\n # u_y = self.get_u_y(Input)\n # u_yy = self.get_u_yy(Input)\n\n def build(self):\n \"\"\"变量转到gpu device之后运行,构建W\"\"\"\n self.build_W_t()\n self.build_W_x()\n self.build_W_y()\n self.build_W_xx()\n self.build_W_yy()\n\n def build_W_t(self):\n #计算u_t这里没有可训练参数\n dt = 0.015\n #暂时用固定值,之后添加其他\n W_t_diag1 = torch.ones(self.T-1)*(1 / 2)\n W_t_diag1[0] = 1\n #上对角\n W_t_diag2 = torch.ones(self.T-1) * (- 1 / 2)\n W_t_diag2[-1] = -1\n # 下对角\n\n W_t = torch.diag(W_t_diag1,1) + torch.diag(W_t_diag2,-1)\n W_t[0,0] = -1\n W_t[-1,-1] = 1\n self.W_t = W_t*(1/dt)\n # self.W_t = torch.nn.Parameter(W_t, requires_grad=False)\n #shape [20,20]\n\n def build_W_x(self):\n # self.W_x_2D = torch.nn.Parameter(torch.DoubleTensor(self.ndim, self.ndim))\n # torch.nn.init.xavier_normal(self.W_x_2D)\n\n #用准确解来验证模型是否正确\n K = torch.ones(49,49)\n K = K*(2*self.dx)\n K[:,0] = K[:,0] *(1/2)\n K[:, -1] = K[:, -1] * (1 / 2)\n r_ux = self.get_real_coefficient()[0]\n r_W_x_2d = r_ux / K\n # self.W_x_2D = torch.nn.init.constant(self.W_x_2D,r_W_x_2d)\n self.W_x_2D = r_W_x_2d\n self.W_x_2D.requires_grad = True\n\n # self.W_x_2D = torch.arange(1, 49 * 49+1).view(49, 49)\n #shape 49*49\n W_x_tmp = self.W_x_2D[:,:-1]\n #0 to n-1\n W_x_tmp2 = self.W_x_2D[:,1:]\n #1 to end\n #shape 49*48\n W_x_diag1 = torch.diag_embed(W_x_tmp, offset = 1,dim1=0, dim2=1)\n W_x_diag2 = -1 *torch.diag_embed(W_x_tmp2, offset = -1,dim1=0, dim2=1)\n self.W_x_3D = W_x_diag1 + W_x_diag2\n self.W_x_3D[0,0,:] = - self.W_x_2D[:,0]\n self.W_x_3D[-1,-1,:] = self.W_x_2D[:,-1]\n # self.W_x_3D = torch.nn.Parameter(self.W_x_3D,requires_grad=True)\n #变为parameter后这里就不能往后求导了\n self.W_x_3D = self.W_x_3D.double()\n #shape [49,49,49]\n\n\n def build_W_xx(self):\n \"\"\"\n 因为方程u_xx,u_yy前都是固定的参数,所以只有一个需要更新的参数\n :return:\n \"\"\"\n # self.W_xx_k = torch.nn.Parameter(torch.randn(1))\n c = 0.2/0.1257\n self.W_xx_k = torch.tensor([c],requires_grad = False)\n # self.W_xx_k = torch.ones(1)\n W_xx_diag1 = torch.ones(self.ndim)*(-2)\n W_xx_diag1[0] = W_xx_diag1[-1] = 1\n #中心对角\n W_xx_diag2 = torch.ones(self.ndim-1)\n W_xx_diag2[0] = -2\n #上1对角\n W_xx_diag3 = torch.ones(self.ndim-1)\n W_xx_diag3[-1] = -2\n #下1对角\n W_xx = torch.diag(W_xx_diag1) + torch.diag(W_xx_diag2,1) + torch.diag(W_xx_diag3,-1)\n W_xx[0,2] = 1\n W_xx[-1,-3] = 1\n # W_xx = torch.nn.Parameter(W_xx, requires_grad=True)\n self.W_xx = (self.W_xx_k * W_xx).double()\n # self.W_xx = torch.nn.Parameter(self.W_xx)\n\n def build_W_y(self):\n # self.W_y_2D = 
torch.nn.Parameter(torch.DoubleTensor(self.ndim, self.ndim))\n # torch.nn.init.xavier_normal(self.W_y_2D)\n\n #用准确解来验证模型是否正确\n K = torch.ones(49,49)\n K = K * (2 * self.dy)\n K[:, 0] = K[:, 0] * (1 / 2)\n K[:, -1] = K[:, -1] * (1 / 2)\n r_uy = self.get_real_coefficient()[1]\n r_W_y_2d = r_uy / K\n self.W_y_2D = r_W_y_2d\n self.W_y_2D.requires_grad = True\n #shape 49*49\n W_y_tmp = self.W_y_2D[:,:-1]\n #0 to n-1\n W_y_tmp2 = self.W_y_2D[:,1:]\n #1 to end\n #shape 49*48\n W_y_diag1 = torch.diag_embed(W_y_tmp, offset = 1,dim1=0, dim2=1)\n W_y_diag2 = -1 *torch.diag_embed(W_y_tmp2, offset = -1,dim1=0, dim2=1)\n self.W_y_3D = W_y_diag1 + W_y_diag2\n self.W_y_3D[0,0,:] = - self.W_y_2D[:,0]\n self.W_y_3D[-1,-1,:] = self.W_y_2D[:,-1]\n # self.W_y_3D = torch.nn.Parameter(self.W_y_3D,requires_grad=True)\n self.W_y_3D = self.W_y_3D.double()\n\n def build_W_yy(self):\n # self.W_yy_k = torch.nn.Parameter(torch.randn(1))\n d = 0.3 / 0.1257\n self.W_yy_k = torch.tensor([d], requires_grad=False)\n W_yy_diag1 = torch.ones(self.ndim)*(-2)\n W_yy_diag1[0] = W_yy_diag1[-1] = 1\n #中心对角\n W_yy_diag2 = torch.ones(self.ndim-1)\n W_yy_diag2[0] = -2\n #上1对角\n W_yy_diag3 = torch.ones(self.ndim-1)\n W_yy_diag3[-1] = -2\n #下1对角\n W_yy = torch.diag(W_yy_diag1) + torch.diag(W_yy_diag2,1) + torch.diag(W_yy_diag3,-1)\n W_yy[0,2] = 1\n W_yy[-1,-3] = 1\n # W_yy = torch.nn.Parameter(W_yy,requires_grad=True)\n self.W_yy = (self.W_yy_k * W_yy).double()\n # self.W_yy = torch.nn.Parameter(self.W_yy)\n\n def get_u_t(self,Input):\n \"\"\"\n 计算偏导u_t,在所有空间上\n :param Input: shape[batch_size,49,49,20]\n :return: u_t: shape [batch_size,49,49,20]\n \"\"\"\n batch_size = Input.shape[0]\n T = Input.shape[-1]\n size = Input.shape[1]\n Input = Input.view(batch_size,size*size,T)\n #shape [28,2401,20]\n W_t = torch.transpose(self.W_t,0,1)\n #shape [20,20]\n u_t = torch.matmul(Input,W_t)\n #shape [28,2401,20]\n u_t = u_t.view(batch_size,size,size,T)\n return u_t\n\n def get_u_x(self,Input):\n \"\"\"\n 计算偏导u_x,在所有时间空间\n :param Input: shape[batch_size,49,49,20]\n :return: u_x: shape [batch_size,49,49,20]\n \"\"\"\n batch_size = Input.shape[0]\n T = Input.shape[-1]\n size = Input.shape[1]\n Input = Input.permute(0,3,1,2)\n #shape[batch_size,20, 49, 49]\n Input = Input.contiguous().view(-1,self.ndim,self.ndim)\n # shape[batch_size*20, 49, 49]\n Input = Input.permute(1,0,2)\n # shape[49,batch_size*20, 49]\n W_x_tmp = self.W_x_3D.permute(2, 0, 1)\n # shape[49, 49, 49]\n W_x_tmp = torch.transpose(W_x_tmp, 1, 2)\n u_x = torch.bmm(Input, W_x_tmp)\n #batch matmul 每个49*49的网格点都计算一次u_x,因为alpha(x,y)与t无关,所以用同一个W_X_3D\n #shape [49,batch_size*20,49]\n u_x = u_x.permute(1,0,2)\n u_x = u_x.view(batch_size,T,size,size)\n u_x = u_x.permute(0,2,3,1)\n return u_x\n\n def get_u_xx(self,Input):\n \"\"\"\n 计算偏导u_xx,在所有时间空间\n :param Input: shape[batch_size,49,49,20]\n :return: u_xx: shape [batch_size,49,49,20]\n \"\"\"\n batch_size = Input.shape[0]\n T = Input.shape[-1]\n size = Input.shape[1]\n Input = Input.permute(0,3,1,2)\n #shape[batch_size,20, 49, 49]\n Input = Input.contiguous().view(-1,self.ndim,self.ndim)\n # shape[batch_size*20, 49, 49]\n W_xx_tmp = torch.transpose(self.W_xx, 0, 1)\n # shape [49,49]\n u_xx = torch.matmul(Input, W_xx_tmp)\n #shape [batch_size*20,49,49]\n u_xx = u_xx.view(batch_size,T,size,size)\n u_xx = u_xx.permute(0,2,3,1)\n return u_xx\n\n\n def get_u_y(self, Input):\n \"\"\"\n 计算偏导u_y,在所有时间空间\n :param Input: shape[batch_size,49,49,20]\n :return: u_y: shape [batch_size,49,49,20]\n \"\"\"\n batch_size = Input.shape[0]\n T = Input.shape[-1]\n 
size = Input.shape[1]\n Input = Input.permute(0, 3, 1, 2)\n # shape[batch_size,20, 49, 49]\n Input = Input.contiguous().view(-1, self.ndim, self.ndim)\n # shape[batch_size*20, 49, 49]\n Input = Input.permute(2, 1, 0)\n # shape[49, 49, batch_size*20]\n W_y_tmp = self.W_y_3D.permute(2, 0, 1)\n # shape[49, 49, 49]\n u_y = torch.bmm(W_y_tmp,Input)\n # batch matmul 每个49*49的网格点都计算一次u_x,因为alpha(x,y)与t无关,所以用同一个W_X_3D\n # shape [49,49,batch_size*20]\n u_y = u_y.permute(2, 1, 0)\n u_y = u_y.view(batch_size, T, size, size)\n u_y = u_y.permute(0, 2, 3, 1)\n return u_y\n\n def get_u_yy(self,Input):\n \"\"\"\n 计算偏导u_yy,在所有时间空间\n :param Input: shape[batch_size,49,49,20]\n :return: u_yy: shape [batch_size,49,49,20]\n \"\"\"\n batch_size = Input.shape[0]\n T = Input.shape[-1]\n size = Input.shape[1]\n Input = Input.permute(0,3,1,2)\n #shape[batch_size,20, 49, 49]\n Input = Input.contiguous().view(-1,self.ndim,self.ndim)\n Input = Input.permute(1,2,0)\n # shape[49, 49,batch_size*20]\n u_yy = torch.matmul(self.W_yy,Input)\n #shape [49,49,batch_size*20]\n u_yy = u_yy.permute(2,0,1)\n u_yy = u_yy.view(batch_size,T,size,size)\n u_yy = u_yy.permute(0,2,3,1)\n return u_yy\n\n def cal_from_u0(self,Input):\n \"\"\"\n 从u0计算u1,u2,...uT\n :param Input: u0 shape [batch_size,size,size,1]\n :return: u_all shape [batch_size,size,size,21]\n \"\"\"\n batch_size = Input.shape[0]\n T = Input.shape[-1]\n size = Input.shape[1]\n u0 = Input[:,:,:,0]\n u0 = u0.view(batch_size,size,size,1)\n u_tmp = u0\n u_all = u_tmp\n for i in range(20):\n u_xi, u_yi, u_xxi, u_yyi = self.get_u_x(u_tmp), self.get_u_y(u_tmp), self.get_u_xx(u_tmp), self.get_u_yy(u_tmp)\n G = u_xi + u_yi + u_xxi + u_yyi\n u_tmp = self.dt * G + u0\n u_all = torch.cat((u_all,u_tmp),3)\n return u_all\n\n def coefficient_ux(self):\n \"\"\"\n 从self.W_x_2D算出方程u_x对应的参数\n :return:\n \"\"\"\n K = torch.ones_like(self.W_x_2D)\n K = K*(2*self.dx)\n K[:,0] = K[:,0] *(1/2)\n self.p_ux = K*self.W_x_2D\n return self.p_ux\n\n def coefficient_uy(self):\n \"\"\"\n 从self.W_y_2D算出方程u_y对应的参数\n :return:\n \"\"\"\n K = torch.ones_like(self.W_y_2D)\n K = K*(2*self.dy)\n K[:,0] = K[:,0] *(1/2)\n self.p_uy = K*self.W_y_2D\n return self.p_uy\n\n def coefficient_uxx(self):\n \"\"\"\n 从self.W_xx_k算出方程u_xx对应的参数\n :return:\n \"\"\"\n self.p_uxx = (self.dx)*self.W_xx_k\n return self.p_uxx\n\n def coefficient_uyy(self):\n \"\"\"\n 从self.W_yy_k算出方程u_yy对应的参数\n :return:\n \"\"\"\n self.p_uyy = (self.dy)*self.W_yy_k\n return self.p_uyy\n\n\n def get_coefficient(self):\n \"\"\"\n :return:算出的方程参数\n \"\"\"\n return [self.coefficient_ux(),self.coefficient_uy(),self.coefficient_uxx(),self.coefficient_uyy()]\n\n def get_real_coefficient(self,xy_batch = None):\n \"\"\"\n :return:方程参数的真实解\n \"\"\"\n if xy_batch == None:\n xy_path = \"/home1/shenxing/Attention_PDE/data/xy.pt\"\n xy_batch = torch.load(xy_path)\n x = xy_batch[0,:,:,0]\n y = xy_batch[0,:,:,1]\n r_ux = 0.5 * torch.cos(y) + 0.5 * x * (2 * math.pi - x) * torch.sin(x) + 0.6\n r_uy = 2 * (torch.cos(y) + torch.sin(x)) +0.8\n r_uxx = torch.tensor([0.2])\n r_uyy = torch.tensor([0.3])\n return [r_ux,r_uy,r_uxx,r_uyy]\n\n\n def forward(self, Input):\n \"\"\"\n 输入:当前u0生成u在t在0.015到0.3的所有的u(x,y)\n input:uT_batch shape:[batch_size,49,49,20]\"\"\"\n return self.get_u_t(Input),self.get_u_x(Input), self.get_u_y(Input), self.get_u_xx(Input), self.get_u_yy(Input)\n\n # def build_W_x(self,device = None):\n # # self.W_x_2D = torch.arange(1, 49 * 49+1).view(49, 49)\n # self.W_x_2D = torch.nn.Parameter(torch.randn(self.ndim, self.ndim))\n # if device != None:\n # 
self.W_x_2D = self.W_x_2D.to(device)\n # #shape 49*49\n # W_x_tmp = self.W_x_2D[:,:-1]\n # #0 to n-1\n # W_x_tmp2 = self.W_x_2D[:,1:]\n # #1 to end\n # #shape 49*48\n # W_x_diag1 = torch.diag_embed(W_x_tmp, offset = 1,dim1=0, dim2=1)\n # W_x_diag2 = -1 *torch.diag_embed(W_x_tmp2, offset = -1,dim1=0, dim2=1)\n # self.W_x_3D_1 = W_x_diag1 + W_x_diag2\n # self.W_x_3D_2 = self.W_x_3D_1\n # self.W_x_3D_2[0,0,:] = - self.W_x_2D[:,0]\n # self.W_x_3D_2[-1,-1,:] = self.W_x_2D[:,-1]\n # # self.W_x_3D_3 = torch.nn.Parameter(self.W_x_3D_2)\n # #?测试这里能否顺利求导\n # self.W_x_3D = self.W_x_3D_2.double()\n # shape [49,49,49]\n\n\n\n # # W_y = torch.randn(ndim, ndim, requires_grad=True)\n # self.W_y = torch.nn.Parameter(torch.randn(ndim, ndim))\n # ones_tmp = torch.ones(ndim)\n # mask = torch.diag(ones_tmp, 0) + torch.diag(ones_tmp[1:], 1) + torch.diag(ones_tmp[1:], -1)\n # mask = torch.nn.Parameter(mask,requires_grad=False)\n # W_x_mask = torch.mul(mask, self.W_x)\n # self.W_x_mask = torch.nn.Parameter(W_x_mask)\n # #shape 49,49\n # # 三对角阵.点乘\n # W_y_mask = torch.mul(mask,self.W_y)\n # self.W_y_mask = torch.nn.Parameter(W_y_mask)\n\n # def get_u_x(self,Input):\n # \"\"\"\n # 计算偏导u_x,在所有时间空间\n # :param Input: shape[batch_size,49,49,20]\n # :return: u_x: shape [batch_size,49,49,20]\n # \"\"\"\n # Input = Input.permute(0,3,1,2)\n # # shape [batch_size,20,49,49]\n # u_x = torch.matmul(Input,self.W_x_mask)\n # # u_x: shape[batch_size, 20, 49, 49]\n # u_x = u_x.permute(0,2,3,1)\n # #matmul可以broadcast\n # return u_x\n #\n # def get_u_y(self,Input):\n # \"\"\"\n # 计算偏导u_y,在所有时间空间\n # :param Input: shape[batch_size,49,49,20]\n # :return: u_y: shape [batch_size,49,49,20]\n # \"\"\"\n # Input = Input.permute(1, 2, 3, 0)\n # # shape [49,49,20,batch_size]\n # Input = Input.contiguous().view(self.ndim,self.ndim,-1)\n # # shape [49,49,20*batch_size]\n # u_y = torch.matmul(self.W_y_mask,Input)\n # # shape [49,49,20*batch_size]\n # u_y = u_y.view(self.ndim,self.ndim,20,-1)\n # # shape [49,49,20,batch_size]\n # u_y = u_y.permute(3,0,1,2)\n # return u_y\n # Input_x = Input.view(49, 1, 49)\n #\n # W_x_tmp = self.W_x_3D.permute(2,0,1)\n # W_x_tmp = torch.transpose(W_x_tmp,1,2)\n # u_x = torch.bmm(Input_x,W_x_tmp)\n # u_x = torch.squeeze(u_x)\n #\n # self.W_y_2D = torch.nn.Parameter(torch.randn(ndim, ndim))\n # # shape 49*49\n # W_y_tmp = self.W_y_2D[:, :-1]\n # # 0 to n-1\n # W_y_tmp2 = self.W_y_2D[:, 1:]\n # # 1 to end\n # # shape 49*48\n # W_x_diag1 = torch.diag_embed(W_y_tmp, offset=1)\n # W_y_diag2 = -1 * torch.diag_embed(W_y_tmp2, offset=-1)\n # self.W_y_3D = W_x_diag1 + W_x_diag2\n # self.W_y_3D[0, 0, :] = self.W_y_2D[0, 0]\n # self.W_y_3D[-1, -1, :] = self.W_y_2D[-1, -1]\n # self.W_y_3D = torch.nn.Parameter(self.W_y_3D)\n # print(self.W_y_3D)\n" ]
[ [ "torch.ones_like", "torch.ones", "torch.load", "torch.cos", "torch.tensor", "torch.diag", "torch.sin", "torch.diag_embed", "torch.bmm", "torch.cat", "torch.matmul", "torch.transpose" ] ]
tianhm/rqalpha
[ "a2df4cb85fc86a20429c66a5a6d4f1a48520f173" ]
[ "rqalpha/data/data_source.py" ]
[ "import pytz\nimport six\n\nimport pandas as pd\nfrom ..instruments import Instrument\n\n\nclass LocalDataSource:\n DAILY = 'daily.bcolz'\n INSTRUMENTS = 'instruments.pk'\n DIVIDEND = 'dividend.bcolz'\n TRADING_DATES = 'trading_dates.bcolz'\n YIELD_CURVE = 'yield_curve.bcolz'\n\n YIELD_CURVE_TENORS = {\n 0: 'S0',\n 30: 'M1',\n 60: 'M2',\n 90: 'M3',\n 180: 'M6',\n 270: 'M9',\n 365: 'Y1',\n 365 * 2: 'Y2',\n 365 * 3: 'Y3',\n 365 * 4: 'Y4',\n 365 * 5: 'Y5',\n 365 * 6: 'Y6',\n 365 * 7: 'Y7',\n 365 * 8: 'Y8',\n 365 * 9: 'Y9',\n 365 * 10: 'Y10',\n 365 * 15: 'Y15',\n 365 * 20: 'Y20',\n 365 * 30: 'Y30',\n 365 * 40: 'Y40',\n 365 * 50: 'Y50',\n }\n\n YIELD_CURVE_DURATION = sorted(YIELD_CURVE_TENORS.keys())\n\n PRICE_SCALE = 1000.\n\n def __init__(self, root_dir):\n self._root_dir = root_dir\n import bcolz\n import os\n import pickle\n self._daily_table = bcolz.open(os.path.join(root_dir, LocalDataSource.DAILY))\n self._instruments = {d['order_book_id']: Instrument(d)\n for d in pickle.load(open(os.path.join(root_dir, LocalDataSource.INSTRUMENTS), 'rb'))}\n self._dividend = bcolz.open(os.path.join(root_dir, LocalDataSource.DIVIDEND))\n self._yield_curve = bcolz.open(os.path.join(root_dir, LocalDataSource.YIELD_CURVE))\n self._trading_dates = pd.Index(pd.Timestamp(str(d)) for d in\n bcolz.open(os.path.join(root_dir, LocalDataSource.TRADING_DATES)))\n\n def instruments(self, order_book_ids):\n if isinstance(order_book_ids, six.string_types):\n try:\n return self._instruments[order_book_ids]\n except KeyError:\n print('ERROR: order_book_id {} not exists!'.format(order_book_ids))\n return None\n\n return [self._instruments[ob] for ob in order_book_ids\n if ob in self._instruments]\n\n def all_instruments(self, itype='CS'):\n if itype is None:\n return pd.DataFrame([[v.order_book_id, v.symbol, v.abbrev_symbol, v.type]\n for v in self._instruments.values()],\n columns=['order_book_id', 'symbol', 'abbrev_symbol', 'type'])\n\n if itype not in ['CS', 'ETF', 'LOF', 'FenjiA', 'FenjiB', 'FenjiMu', 'INDX', 'Future']:\n raise ValueError('Unknown type {}'.format(itype))\n\n return pd.DataFrame([v.__dict__ for v in self._instruments.values() if v.type == itype])\n\n def sector(self, code):\n return [v.order_book_id for v in self._instruments.values()\n if v.type == 'CS' and v.sector_code == code]\n\n def industry(self, code):\n return [v.order_book_id for v in self._instruments.values()\n if v.type == 'CS' and v.industry_code == code]\n\n def concept(self, *concepts):\n return [v.order_book_id for v in self._instruments.values()\n if v.type == 'CS' and any(c in v.concept_names.split('|') for c in concepts)]\n\n def get_trading_dates(self, start_date, end_date):\n left = self._trading_dates.searchsorted(start_date)\n right = self._trading_dates.searchsorted(end_date, side='right')\n return self._trading_dates[left:right]\n\n def get_yield_curve(self, start_date, end_date):\n duration = (end_date - start_date).days\n tenor = 0\n for t in LocalDataSource.YIELD_CURVE_DURATION:\n if duration >= t:\n tenor = t\n else:\n break\n\n d = start_date.year * 10000 + start_date.month * 100 + start_date.day\n return self._yield_curve.fetchwhere('date<={}'.format(d)).cols[self.YIELD_CURVE_TENORS[tenor]][-1] / 10000.0\n\n def get_dividends(self, order_book_id):\n try:\n sid = self._dividend.attrs['stock_id'][order_book_id]\n except KeyError:\n return pd.DataFrame()\n\n dividends = self._dividend.fetchwhere('id=={}'.format(sid))\n return pd.DataFrame({\n 'book_closure_date': pd.Index(pd.Timestamp(str(d)) for d in 
dividends.cols['closure_date']),\n 'ex_dividend_date': pd.Index(pd.Timestamp(str(d)) for d in dividends.cols['ex_date']),\n 'payable_date': pd.Index(pd.Timestamp(str(d)) for d in dividends.cols['payable_date']),\n 'dividend_cash_before_tax': dividends.cols['cash_before_tax'][:] / 10000.0,\n 'round_lot': dividends.cols['round_lot']\n }, index=pd.Index(pd.Timestamp(str(d)) for d in dividends.cols['announcement_date']))\n\n def get_all_bars(self, order_book_id):\n try:\n sid = self._daily_table.attrs['id_map'][order_book_id]\n except KeyError:\n raise RuntimeError('No data for {}'.format(order_book_id))\n\n bars = self._daily_table.fetchwhere('id=={}'.format(sid))\n return pd.DataFrame({\n 'open': (bars.cols['open'][:] / self.PRICE_SCALE).round(2),\n 'close': (bars.cols['close'][:] / self.PRICE_SCALE).round(2),\n 'high': (bars.cols['high'][:] / self.PRICE_SCALE).round(2),\n 'low': (bars.cols['low'][:] / self.PRICE_SCALE).round(2),\n 'volume': bars.cols['volume'],\n }, index=pd.Index(pd.Timestamp(str(d)) for d in bars.cols['date']))\n" ]
[ [ "pandas.DataFrame" ] ]
Giselle-Liu/PyTorch-Tutorial
[ "52b7a8c8fc8fa23b2fafb6d539b3b8aff13af45c" ]
[ "tutorial-contents/502_GPU.py" ]
[ "\"\"\"\nView more, visit my tutorial page: https://mofanpy.com/tutorials/\nMy Youtube Channel: https://www.youtube.com/user/MorvanZhou\n\nDependencies:\ntorch: 0.4\ntorchvision\n\"\"\"\nimport torch\nimport torch.nn as nn\nimport torch.utils.data as Data\nimport torchvision\n\n# torch.manual_seed(1)\n\nEPOCH = 1\nBATCH_SIZE = 50\nLR = 0.001\nDOWNLOAD_MNIST = False\n\ntrain_data = torchvision.datasets.MNIST(\n root='./mnist/',\n train=True,\n transform=torchvision.transforms.ToTensor(),\n download=DOWNLOAD_MNIST,\n)\ntrain_loader = Data.DataLoader(dataset=train_data,\n batch_size=BATCH_SIZE,\n shuffle=True)\n\ntest_data = torchvision.datasets.MNIST(root='./mnist/', train=False)\n\n# !!!!!!!! Change in here !!!!!!!!! #\ntest_x = torch.unsqueeze(test_data.test_data, dim=1).type(\n torch.FloatTensor)[:2000].cuda() / 255. # Tensor on GPU\ntest_y = test_data.test_labels[:2000].cuda()\n\n\nclass CNN(nn.Module):\n\n def __init__(self):\n super(CNN, self).__init__()\n self.conv1 = nn.Sequential(\n nn.Conv2d(\n in_channels=1,\n out_channels=16,\n kernel_size=5,\n stride=1,\n padding=2,\n ),\n nn.ReLU(),\n nn.MaxPool2d(kernel_size=2),\n )\n self.conv2 = nn.Sequential(\n nn.Conv2d(16, 32, 5, 1, 2),\n nn.ReLU(),\n nn.MaxPool2d(2),\n )\n self.out = nn.Linear(32 * 7 * 7, 10)\n\n def forward(self, x):\n x = self.conv1(x)\n x = self.conv2(x)\n x = x.view(x.size(0), -1)\n output = self.out(x)\n return output\n\n\ncnn = CNN()\n\n# !!!!!!!! Change in here !!!!!!!!! #\ncnn.cuda() # Moves all model parameters and buffers to the GPU.\n\noptimizer = torch.optim.Adam(cnn.parameters(), lr=LR)\nloss_func = nn.CrossEntropyLoss()\n\nfor epoch in range(EPOCH):\n for step, (x, y) in enumerate(train_loader):\n\n # !!!!!!!! Change in here !!!!!!!!! #\n b_x = x.cuda() # Tensor on GPU\n b_y = y.cuda() # Tensor on GPU\n\n output = cnn(b_x)\n loss = loss_func(output, b_y)\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n if step % 50 == 0:\n test_output = cnn(test_x)\n\n # !!!!!!!! Change in here !!!!!!!!! #\n pred_y = torch.max(test_output,\n 1)[1].cuda().data # move the computation in GPU\n\n accuracy = torch.sum(pred_y == test_y).type(\n torch.FloatTensor) / test_y.size(0)\n print('Epoch: ', epoch, '| train loss: %.4f' % loss.data.cpu().numpy(),\n '| test accuracy: %.2f' % accuracy)\n\ntest_output = cnn(test_x[:10])\n\n# !!!!!!!! Change in here !!!!!!!!! #\npred_y = torch.max(test_output, 1)[1].cuda().data # move the computation in GPU\n\nprint(pred_y, 'prediction number')\nprint(test_y[:10], 'real number')\n" ]
[ [ "torch.unsqueeze", "torch.utils.data.DataLoader", "torch.nn.MaxPool2d", "torch.sum", "torch.nn.Linear", "torch.nn.CrossEntropyLoss", "torch.nn.Conv2d", "torch.max", "torch.nn.ReLU" ] ]
Crazy-Jack/SpatialExpGeneCluster
[ "9e57c308d1c577a936a2358d0641c65b8130034f" ]
[ "src/trainDEC.py" ]
[ "import argparse\nimport os, sys\n\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nimport torch.backends.cudnn as cudnn\nimport torchvision\nfrom torchvision import transforms\nfrom PIL import Image\nimport numpy as np\nfrom tqdm import tqdm\nfrom sklearn.cluster import KMeans\nfrom scipy.stats import ortho_group\n\nfrom utlis import set_args, set_optimizer\nfrom utlis import save_model\nfrom utlis import AverageMeter\nfrom utlis import txt_logger\nfrom network.DECnetwork import DECNetwork\nfrom DEC_loss import DECLoss\nfrom data_utlis import SpatialDataset\n\n\ndef costomize_args(args):\n return args\n\n\ndef set_dataloader(args):\n \"\"\"use args.dataset decide which dataset to use and return dataloader\"\"\"\n if args.dataset == 'mnist':\n transform = transforms.Compose([\n transforms.Resize((32, 32)),\n transforms.ToTensor(),\n ])\n train_dataset = torchvision.datasets.MNIST(root=args.loading_path, train=True, download=True, \n transform=transform)\n test_dataset = torchvision.datasets.MNIST(root=args.loading_path, train=False, download=True, \n transform=transform)\n elif args.dataset == 'spatial':\n transform = transforms.Compose([\n transforms.Resize((32, 32)),\n transforms.ToTensor(),\n ])\n train_dataset = SpatialDataset(args.data_root, args.data_file_name)\n test_dataset = SpatialDataset(args.data_root, args.data_file_name)\n\n else:\n raise NotImplemented(\"dataset {} is not implemented.\".format(args.dataset))\n # train loader\n train_dataloader = torch.utils.data.DataLoader(\n train_dataset, batch_size=args.batch_size, shuffle=True,\n num_workers=args.num_workers, pin_memory=True)\n # test loader\n test_dataloader = torch.utils.data.DataLoader(\n test_dataset, batch_size=args.batch_size, shuffle=True,\n num_workers=args.num_workers, pin_memory=True)\n\n return train_dataloader, test_dataloader\n\n\ndef get_model(args, logger):\n model = DECNetwork(args.input_channel, args.feature_dim, args.latent_class_num,\n alpha=1.0, decode_constraint=False)\n latent_class_criterion = DECLoss()\n\n rec_criterion = torch.nn.MSELoss()\n\n if torch.cuda.is_available():\n if torch.cuda.device_count() > 1:\n print(\"Used devices: {}\".format(torch.cuda.device_count()))\n model.encoder = torch.nn.DataParallel(model.encoder)\n model = model.cuda()\n latent_class_criterion = latent_class_criterion.cuda()\n rec_criterion = rec_criterion.cuda()\n cudnn.benchmark = True\n\n if args.resume_model_path:\n # get pre ssl epoch\n ckpt = torch.load(args.resume_model_path, map_location='cpu')\n state_dict = ckpt['model']\n new_state_dict = {}\n for k, v in state_dict.items():\n if torch.cuda.device_count() > 1:\n print(k)\n #if k.split(\".\")[0] != 'head':\n # k = \".\".join([k.split(\".\")[0], \"module\"] + k.split(\".\")[1:])\n else:\n k = k.replace(\"module.\", \"\")\n new_state_dict[k] = v\n state_dict = new_state_dict\n model.load_state_dict(state_dict)\n\n logger.logger.info(\"Model loaded! 
Pretrained from epoch {}\".format(opt.pre_ssl_epoch))\n\n return model, latent_class_criterion, rec_criterion\n\n\n\ndef pre_train(train_loader, model, epoch, args, optimizer, scheduler, pretrain_criterion):\n model.train()\n model.setPretrain(True)\n\n losses = AverageMeter()\n\n for idx, (img, labels) in tqdm(enumerate(train_loader), total=len(train_loader)):\n img = img.cuda()\n bsz = img.shape[0]\n\n # compute probrability\n feature, rec_img = model(img)\n\n # compute loss\n loss = pretrain_criterion(rec_img, img)\n\n # update metric\n losses.update(loss.item(), bsz)\n\n # SGD\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n if args.use_scheduler_pretrain:\n scheduler.step(loss)\n\n return losses.avg\n\n\ndef train(train_loader, model, optimizer, epoch, args, scheduler, UnSup_criterion, rec_criterion=None):\n \"\"\"one epoch training\"\"\"\n # TODO: rewrite this and fill all empty lines!\n model.train()\n model.setPretrain(False)\n\n batch_time = AverageMeter()\n data_time = AverageMeter()\n losses = AverageMeter()\n\n Y_assignment = []\n T_assignment = []\n for idx, (img, labels) in tqdm(enumerate(train_loader), total=len(train_loader)):\n \"\"\"params:\n img: [bz, C, H, W]\n labels: [bz,]\n \"\"\"\n img = img.cuda()\n # labels = labels.cuda()\n bsz = img.shape[0]\n\n # compute probrability - p(y|a) dim: [bsz, |Y|]\n features, prob = model(img)\n prob = prob.float()\n # get Y and T assignment\n Y_assignment.extend(prob.argmax(dim=1).cpu().numpy())\n\n # compute loss\n # DEC loss\n loss = UnSup_criterion(prob)\n \n # update metric\n losses.update(loss.item(), bsz)\n\n # SGD\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n if args.lr_scheduling == 'reduce': # reduce on pleatau\n scheduler.step(loss)\n\n # # compute H(Y|T) and I(Y;T)\n # # TODO: LOW PRIORITY!! 
use pytorch to implement H and MI calucalation\n # Y_assignment = np.array(Y_assignment)\n # # print(\"Y_assign\", Y_assignment[:200])\n # T_assignment = np.array(T_assignment)\n # H_Y_T = conditional_entropy(Y_assignment, T_assignment)\n # MI = mutual_information(Y_assignment, T_assignment)\n\n return losses.avg, Y_assignment\n\n\n\ndef main():\n args = set_args()\n args = costomize_args(args)\n\n train_loader, test_loader = set_dataloader(args)\n\n scalar_logger = txt_logger(args.saving_path, args, 'python ' + ' '.join(sys.argv))\n model, UnSup_criterion, pretrain_criterion = get_model(args, scalar_logger)\n optimizer, scheduler = set_optimizer(args, model)\n\n # training routine\n # resume model path\n if args.resume_model_path:\n start = opt.pre_ssl_epoch\n else:\n start = 0\n\n if args.pretrain_mode == 'autoencoder':\n # pre_train\n pre_train_optimizer = optim.Adam(model.parameters(), weight_decay=5e-4)\n pre_train_scheduler = optim.lr_scheduler.ReduceLROnPlateau(pre_train_optimizer, mode='min', factor=0.5, patience=20, verbose=True)\n for epoch in range(start + 1, args.pre_train_epochs + 1):\n pre_train_loss = pre_train(train_loader, model, epoch, args, pre_train_optimizer, pre_train_scheduler, pretrain_criterion)\n\n scalar_logger.log_value(epoch, ('pre_train loss', pre_train_loss))\n\n # k-means clustering for initialization\n print(\"Initialization (K-means) ---------\")\n features = []\n model.eval()\n for idx, (img, _) in tqdm(enumerate(train_loader), total=len(train_loader)):\n with torch.no_grad():\n img = img.cuda()\n feature, rec_img = model(img)\n features.extend(feature.cpu().numpy())\n\n features = np.array(features)\n features = features.reshape(features.shape[0], features.shape[1])\n print(features.shape)\n k_means = KMeans(n_clusters=args.latent_class_num, n_init=20)\n k_means.fit(features)\n model.clusterCenterInitialization(k_means.cluster_centers_)\n\n elif args.pretrain_mode == 'None':\n # random othogonal init\n if args.latent_class_num < args.feature_dim:\n mu_init = ortho_group.rvs(dim=args.feature_dim)[:args.latent_class_num]\n else:\n mu_init = np.random.rand(args.latent_class_num, args.feature_dim)\n model.clusterCenterInitialization(mu_init)\n else:\n raise NotImplementedError(\"pretrain mode {} has not been implemented.\".format(args.pretrain_mode))\n\n # train\n print(\"Begin Training -------------------------\")\n for epoch in range(start + 1, args.epochs + 1):\n # train for one epoch\n loss, Y_assignment = train(train_loader, model, optimizer, epoch, args, scheduler, UnSup_criterion)\n # latent_class statistics\n unique_latent_class = set(Y_assignment)\n\n # file logger\n scalar_logger.log_value(epoch, ('loss', loss),\n ('learning_rate', optimizer.param_groups[0]['lr']),\n ('lc_len', len(unique_latent_class)),\n )\n\n\n if epoch % args.save_freq == 0:\n save_file = os.path.join(\n args.saving_path, 'ckpt_epoch_{epoch}.pth'.format(epoch=epoch))\n save_model(model, optimizer, args, epoch, save_file)\n\n # TODO: save latent class assignment\n save_file_lat_class_assign = os.path.join(args.saving_path, 'latent_class.npy')\n np.save(save_file_lat_class_assign, Y_assignment)\n # log latent class statistics\n latent_class_stats = {}\n for i in unique_latent_class:\n latent_class_stats[i] = np.where(Y_assignment == i)[0].shape[0]\n scalar_logger.log_value(epoch, ('final_lc_assign', latent_class_stats))\n\n # save the last model\n save_file = os.path.join(\n args.saving_path, 'last.pth')\n save_model(model, optimizer, args, args.epochs, save_file)\n\n return \n\n 
\nif __name__ == '__main__':\n main()\n\n\n\n\n\n " ]
[ [ "torch.utils.data.DataLoader", "numpy.save", "torch.nn.MSELoss", "torch.load", "torch.no_grad", "torch.cuda.device_count", "sklearn.cluster.KMeans", "numpy.where", "scipy.stats.ortho_group.rvs", "torch.cuda.is_available", "numpy.random.rand", "numpy.array", "torch.optim.lr_scheduler.ReduceLROnPlateau", "torch.nn.DataParallel" ] ]
BeyondTheProof/metrics
[ "8af688daff819a95f4cb3d757ffc919c86072ee9" ]
[ "tests/retrieval/inputs.py" ]
[ "# Copyright The PyTorch Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom collections import namedtuple\n\nimport torch\n\nfrom tests.helpers.testers import BATCH_SIZE, EXTRA_DIM, NUM_BATCHES\n\nInput = namedtuple('InputMultiple', [\"indexes\", \"preds\", \"target\"])\n\n# correct\n_input_retrieval_scores = Input(\n indexes=torch.randint(high=10, size=(NUM_BATCHES, BATCH_SIZE)),\n preds=torch.rand(NUM_BATCHES, BATCH_SIZE),\n target=torch.randint(high=2, size=(NUM_BATCHES, BATCH_SIZE)),\n)\n\n_input_retrieval_scores_extra = Input(\n indexes=torch.randint(high=10, size=(NUM_BATCHES, BATCH_SIZE, EXTRA_DIM)),\n preds=torch.rand(NUM_BATCHES, BATCH_SIZE, EXTRA_DIM),\n target=torch.randint(high=2, size=(NUM_BATCHES, BATCH_SIZE, EXTRA_DIM)),\n)\n\n_input_retrieval_scores_non_binary_target = Input(\n indexes=torch.randint(high=10, size=(NUM_BATCHES, BATCH_SIZE)),\n preds=torch.rand(NUM_BATCHES, BATCH_SIZE),\n target=torch.randint(high=4, size=(NUM_BATCHES, BATCH_SIZE)),\n)\n\n# with errors\n_input_retrieval_scores_no_target = Input(\n indexes=torch.randint(high=10, size=(NUM_BATCHES, BATCH_SIZE)),\n preds=torch.rand(NUM_BATCHES, BATCH_SIZE),\n target=torch.randint(high=1, size=(NUM_BATCHES, BATCH_SIZE)),\n)\n\n_input_retrieval_scores_all_target = Input(\n indexes=torch.randint(high=10, size=(NUM_BATCHES, BATCH_SIZE)),\n preds=torch.rand(NUM_BATCHES, BATCH_SIZE),\n target=torch.randint(low=1, high=2, size=(NUM_BATCHES, BATCH_SIZE)),\n)\n\n_input_retrieval_scores_empty = Input(\n indexes=torch.randint(high=10, size=[0]),\n preds=torch.rand(0),\n target=torch.randint(high=2, size=[0]),\n)\n\n_input_retrieval_scores_mismatching_sizes = Input(\n indexes=torch.randint(high=10, size=(NUM_BATCHES, BATCH_SIZE - 2)),\n preds=torch.rand(NUM_BATCHES, BATCH_SIZE),\n target=torch.randint(high=2, size=(NUM_BATCHES, BATCH_SIZE)),\n)\n\n_input_retrieval_scores_mismatching_sizes_func = Input(\n indexes=torch.randint(high=10, size=(NUM_BATCHES, BATCH_SIZE)),\n preds=torch.rand(NUM_BATCHES, BATCH_SIZE - 2),\n target=torch.randint(high=2, size=(NUM_BATCHES, BATCH_SIZE)),\n)\n\n_input_retrieval_scores_wrong_targets = Input(\n indexes=torch.randint(high=10, size=(NUM_BATCHES, BATCH_SIZE)),\n preds=torch.rand(NUM_BATCHES, BATCH_SIZE),\n target=torch.randint(low=-2**31, high=2**31, size=(NUM_BATCHES, BATCH_SIZE)),\n)\n" ]
[ [ "torch.rand", "torch.randint" ] ]
mkolod/Vitis-Tutorials
[ "33d6cf9686398ef1179778dc0da163291c68b465" ]
[ "Machine_Learning/Design_Tutorials/03-using_densenetx/files/datadownload.py" ]
[ "'''\n Copyright 2020 Xilinx Inc.\n\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n'''\n\nimport numpy as np\nimport os\n\n# Silence TensorFlow messages\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'\n\nimport tensorflow as tf\n\n\n\ndef datadownload():\n \n # CIFAR10 dataset has 60k images. Training set is 50k, test set is 10k.\n # Each image is 32x32x8bits\n (x_train, y_train), (x_test, y_test) = tf.keras.datasets.cifar10.load_data()\n \n # Scale image data from range 0:255 to range 0:1.0\n # Also converts train & test data to float from uint8\n x_train = (x_train/255.0).astype(np.float32)\n x_test = (x_test/255.0).astype(np.float32)\n\n # one-hot encode the labels\n y_train = tf.keras.utils.to_categorical(y_train, num_classes=10)\n y_test = tf.keras.utils.to_categorical(y_test, num_classes=10)\n \n \n return (x_train,y_train), (x_test,y_test)\n" ]
[ [ "tensorflow.keras.datasets.cifar10.load_data", "tensorflow.keras.utils.to_categorical" ] ]
srkc95/kaggle-landmark-2021-1st-place
[ "034a7d8665bb4696981698348c9370f2d4e61e35" ]
[ "configs/cfg_ch_hybrid_swin224_b5_s3x.py" ]
[ "from default_config import basic_cfg\nimport albumentations as A\nimport os\nimport pandas as pd\nimport cv2\n\ncfg = basic_cfg\ncfg.debug = True\n\n# paths\ncfg.name = os.path.basename(__file__).split(\".\")[0]\ncfg.data_dir = \"/raid/landmark-recognition-2019/\"\n\ncfg.train\ncfg.data_folder = cfg.data_dir + \"train/\"\ncfg.train_df = \"/mount/glr2021/data/2021/train_gldv2x.csv\"\n\ncfg.val_df = '/raid/landmark-recognition-2019/' + \"recognition_solution_v2.1.csv\"\ncfg.output_dir = f\"/mount/glr2021/models/{os.path.basename(__file__).split('.')[0]}\"\ncfg.val_data_folder = \"/raid/landmark-recognition-2019/\" + \"test/\"\n\ncfg.test = False\ncfg.test_data_folder = cfg.data_dir + \"test/\"\n# cfg.test_df = cfg.data_dir + \"sample_submission_v1.csv\"\n\ncfg.eval_retrieval = True\ncfg.query_data_folder = \"/raid/landmark-recognition-2019/\" + \"test/\"\ncfg.index_data_folder = \"/raid/landmark-recognition-2019/\" + \"index/\"\ncfg.query_df = '/mount/glr2021/data/2019/query_v2.csv'\ncfg.index_df = '/mount/glr2021/data/2019/index_v2.csv'\n\n#logging\ncfg.neptune_project = \"christofhenkel/glr2021\"\ncfg.neptune_connection_mode = \"async\"\ncfg.tags = \"debug\"\n\n\n\n\n\n# MODEL\ncfg.model = \"ch_mdl_hybrid_transformer_2x\"\ncfg.stride = (1,1)\ncfg.embedder = \"tf_efficientnet_b5_ns\"\ncfg.backbone = \"swin_base_patch4_window7_224\"\ncfg.freeze_backbone_head = False\ncfg.find_unused_parameters = True\ncfg.neck = \"option-D\"\ncfg.embedding_size = 512\ncfg.pool = \"gem\"\ncfg.gem_p_trainable = True\ncfg.pretrained_weights ='/mount/glr2021/models/cfg_ch_hybrid_swin224_2x_b5_cutout_s2x/fold0/checkpoint_last_seed248126.pth'\ncfg.pretrained_weights_strict = False\ncfg.pretrained=True\ncfg.pop_weights = ['patch_embed.proj.weight']\n# DATASET\ncfg.dataset = \"ch_ds_1\"\ncfg.normalization = 'imagenet'\ncfg.landmark_id2class_id = pd.read_csv('./assets/landmark_id2class.csv')\ncfg.num_workers = 8\n# cfg.data_sample = 100000\ncfg.loss = 'adaptive_arcface'\ncfg.arcface_s = 45\ncfg.arcface_m = 0.3\n\n\n# OPTIMIZATION & SCHEDULE\n\n# cfg.fold = 0\ncfg.lr = 0.00005\n# cfg.optimizer = \"adam\"\n# cfg.weight_decay = 1e-4\ncfg.warmup = 1\ncfg.epochs = 40\ncfg.stop_at = 16\ncfg.batch_size = 8\ncfg.mixed_precision = True\ncfg.pin_memory = False\ncfg.grad_accumulation = 1.\n\n#inference\ncfg.train = True\ncfg.val = True\ncfg.test = False\ncfg.save_val_data = True\ncfg.train_val = False\ncfg.save_only_last_ckpt = False\ncfg.eval_ddp =True\ncfg.save_headless = False\n# AUGS\n\ncfg.img_size = (448,448)\n# AUGS\n\nimage_size = cfg.img_size[0]\n\ncfg.train_aug = A.Compose([\n A.HorizontalFlip(p=0.5),\n A.ImageCompression(quality_lower=99, quality_upper=100),\n A.ShiftScaleRotate(shift_limit=0.2, scale_limit=0.2, rotate_limit=10, border_mode=0, p=0.7),\n A.Resize(image_size, image_size),\n A.Cutout(max_h_size=int(image_size * 0.4), max_w_size=int(image_size * 0.4), num_holes=1, p=0.5),\n ])\n\ncfg.val_aug = A.Compose([\n A.Resize(image_size, image_size),\n ])\n\n" ]
[ [ "pandas.read_csv" ] ]
Artur-UF/MetCompA
[ "1198f861f4e5190f7435314bf476c594471e79fa" ]
[ "Ark.MetCompA/Aula-py14/atv_ajuste.py" ]
[ "# Ajuste de funções\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n\ndef aj_lin(xi, yi):\n '''\n Realiza o ajuste linear de pontos em uma reta de ajuste no formato \"y = ax + b\"\n :param xi: coordenadas x dos pontos\n :param yi: coordenadas y dos pontos\n :return: coeficiente angular \"a\" e coeficiente linear \"b\" da reta de ajuste\n '''\n n = len(xi)\n mxy = sum(xi*yi)/n\n mx = sum(xi)/n\n my = sum(yi)/n\n mqx = sum(xi**2)/n\n a = (mxy - (mx*my))/(mqx - (mx**2))\n b = ((mqx*my) - (mx*mxy))/(mqx - (mx**2))\n return a, b\n\n\nx, y = np.loadtxt('dados.dat', unpack=True)\n\nxi = np.linspace(0, 9.50)\npars = aj_lin(x, y)\nyi = lambda p: pars[0]*p + pars[1]\n\nplt.scatter(x, y, s=30, c='k', marker='.', label='Pontos')\nplt.plot(xi, yi(xi), 'g', label='Reta de ajuste')\nplt.xlim(0, 9.5)\nplt.legend()\nplt.xlabel('x')\nplt.ylabel('y')\nplt.grid()\nplt.show()\n\n" ]
[ [ "matplotlib.pyplot.legend", "matplotlib.pyplot.grid", "matplotlib.pyplot.xlim", "matplotlib.pyplot.show", "matplotlib.pyplot.ylabel", "numpy.loadtxt", "numpy.linspace", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.scatter" ] ]
vophihungvn/h1st
[ "d421995bb0b8de6a5a76788261efef5b26bc7c12" ]
[ "examples/AutoCyber/streamlit_app.py" ]
[ "import streamlit as st\nimport time\nimport numpy as np\nimport pandas as pd\n\nfrom aegis_datagen import build_constant_val_msg_stream\n\[email protected]\ndef get_data():\n AWS_BUCKET_URL = \"s3://h1st-tutorial-autocyber/attack-samples\"\n df = pd.read_parquet(AWS_BUCKET_URL + \"/20181114_Driver2_Trip1-0.parquet\")\n return df\n\ndef do_injection(df, sensor_name, values, period):\n df = df.copy()\n \n dt = df[\"Timestamp\"].max() - df[\"Timestamp\"].min()\n\n start = df[\"Timestamp\"].min()\n end = df[\"Timestamp\"].max()\n \n value_start = df[df[\"Timestamp\"] < start][sensor_name].fillna(method=\"ffill\").fillna(method=\"bfill\").values\n value_start = value_start[-1]\n \n value = 0.0\n rows = build_constant_val_msg_stream(value, start, end, period=period, value_jitter=0./20.)\n dfinj = pd.DataFrame.from_records(rows, columns =['Timestamp', sensor_name])\n\n dfinj[\"Label\"] = \"Attack\"\n dfinj[\"AttackSensor\"] = sensor_name\n dfinj[\"AttackMethod\"] = method\n dfinj[\"AttackParams\"] = scale\n\n # # # double check time diff / msg freq of injected values\n # actual_period = (dfinj[\"Timestamp\"] - dfinj[\"Timestamp\"].shift(1)).mean() * 1000\n # assert np.abs(period - actual_period) / period < 0.05, \"unexpected injection msg freq, actual_period = %s\" % actual_period\n \n df2 = pd.concat([df, dfinj]).sort_values(\"Timestamp\")\n\n # these values always go together \n if sensor_name in (\"YawRate\", \"Gx\", \"Gy\"):\n df2_filled = df2.fillna(method=\"ffill\")\n df2.loc[df2.Label == \"Attack\", [\"YawRate\", \"Gx\", \"Gy\"]] = df2_filled.loc[df2_filled.Label == \"Attack\", [\"YawRate\", \"Gx\", \"Gy\"]]\n\n if DEBUG: print(\"injected %s rows, before = %s, after = %s\" % (len(dfinj), len(df), len(df2)))\n # print(df2)\n return df2, start, end\n\ntry:\n df = get_data()\n print(df.head())\nexcept Exception as e:\n st.error(\n \"\"\"\n **This demo requires internet access.**\n\n Connection error: %s\n \"\"\"\n % e\n )\n\nattk_events = df.AttackEventIndex.dropna().unique()\nprint(\"unique attk_events = %s\" % attk_events)\n\nimport random\neid = st.selectbox(\"Select an sample index\", attk_events)\ndf = df[df.AttackEventIndex == eid]\n\nSENSORS = [\"SteeringAngle\", \"CarSpeed\", \"YawRate\", \"Gx\", \"Gy\"]\nattack_sensor = st.selectbox(\"Select a sensor to attack\", SENSORS)\n\nimport matplotlib.pyplot as plt\nz = df.dropna(subset=[attack_sensor])\nnormal = z[z[\"Label\"] == \"Normal\"]\nfig = plt.figure(figsize=(9, 3))\nplt.plot(normal.Timestamp, normal[attack_sensor], label=\"normal %s\" % attack_sensor)\nplt.legend()\n# plt.savefig(\"out.png\")\n\nst.write(fig)\n\n\nimport streamlit as st\nfrom PIL import Image\nfrom streamlit_drawable_canvas import st_canvas\n\nattack_msg_freq = st.sidebar.slider(\"Attack msg period (ms)\", 12, 96, 24, step=12)\nattack_msg_timing = st.sidebar.slider(\"Attack msg time jitter (ns)\", 500, 5000, 1000, step=500)\n\ndrawing_mode = st.sidebar.selectbox(\n \"Drawing tool:\", (\"freedraw\", \"line\")\n)\n\ncanvas_result = st_canvas(\n # fill_color=\"rgba(255, 165, 0, 0.3)\", # Fixed fill color with some opacity\n stroke_width=2,\n #stroke_color=stroke_color,\n background_color=\"transparent\",\n #background_image=Image.open(\"out.png\"),\n update_streamlit=True,\n height=240,\n width=600,\n drawing_mode=drawing_mode,\n key=\"canvas\",\n)\n\nif canvas_result.image_data is not None:\n print(\"canvas_result.image_data\")\n print(type(canvas_result.image_data))\n print(canvas_result.image_data.shape) # shape (240, 600, 4)\n x = 
canvas_result.image_data[:,:,3]\n print(x.shape)\n print(x)\n values = np.argmax(x, axis=0)\n print(\"Raw values\")\n print(values)\n values = (255 - values)/255.0\n values = pd.Series(values)\n values = values.replace(1.0, float(\"NaN\"))\n print(\"pd.Series values\")\n print(values)\n zmax, zmin = z[attack_sensor].max(), z[attack_sensor].min()\n print((zmax, zmin))\n values = values * (zmax - zmin) + zmin\n st.write(\"Scaled values\")\n st.write(values)\n\n\nimport matplotlib.pyplot as plt\nz = df.dropna(subset=[attack_sensor])\nnormal = z[z[\"Label\"] == \"Normal\"]\nfig = plt.figure(figsize=(9, 3))\nplt.plot(normal.Timestamp, normal[attack_sensor], label=\"normal %s\" % attack_sensor)\nplt.legend()\n# plt.savefig(\"out.png\")\n\nst.write(fig)\n\n\n# Streamlit widgets automatically run the script from top to bottom. Since\n# this button is not connected to any other logic, it just causes a plain\n# rerun.\nst.button(\"Re-run\")" ]
[ [ "matplotlib.pyplot.legend", "pandas.Series", "pandas.read_parquet", "matplotlib.pyplot.figure", "numpy.argmax", "pandas.DataFrame.from_records", "pandas.concat", "matplotlib.pyplot.plot" ] ]
srijan-deepsource/dask
[ "0673d9084e02f985f3fdf5ba6ede80e8de5ac15c" ]
[ "dask/array/core.py" ]
[ "import math\nimport operator\nimport os\nimport pickle\nimport re\nimport sys\nimport traceback\nimport uuid\nimport warnings\nfrom bisect import bisect\nfrom collections.abc import Iterable, Iterator, Mapping\nfrom functools import partial, wraps, reduce\nfrom itertools import product, zip_longest\nfrom numbers import Number, Integral\nfrom operator import add, getitem, mul\nfrom threading import Lock\n\nfrom tlz import partition, concat, first, groupby, accumulate, frequencies\nfrom tlz.curried import pluck\nimport numpy as np\n\nfrom . import chunk\nfrom .. import config, compute\nfrom ..base import (\n DaskMethodsMixin,\n tokenize,\n dont_optimize,\n compute_as_if_collection,\n persist,\n is_dask_collection,\n)\nfrom ..blockwise import broadcast_dimensions\nfrom ..context import globalmethod\nfrom ..utils import (\n ndeepmap,\n ignoring,\n concrete,\n derived_from,\n is_integer,\n IndexCallable,\n funcname,\n SerializableLock,\n Dispatch,\n factors,\n parse_bytes,\n has_keyword,\n M,\n ndimlist,\n format_bytes,\n typename,\n)\nfrom ..core import quote\nfrom ..delayed import delayed, Delayed\nfrom .. import threaded, core\nfrom ..sizeof import sizeof\nfrom ..highlevelgraph import HighLevelGraph\nfrom .numpy_compat import _Recurser, _make_sliced_dtype\nfrom .slicing import slice_array, replace_ellipsis, cached_cumsum\nfrom .blockwise import blockwise\n\nconfig.update_defaults({\"array\": {\"chunk-size\": \"128MiB\", \"rechunk-threshold\": 4}})\n\n\nconcatenate_lookup = Dispatch(\"concatenate\")\ntensordot_lookup = Dispatch(\"tensordot\")\neinsum_lookup = Dispatch(\"einsum\")\nconcatenate_lookup.register((object, np.ndarray), np.concatenate)\ntensordot_lookup.register((object, np.ndarray), np.tensordot)\neinsum_lookup.register((object, np.ndarray), np.einsum)\n\nunknown_chunk_message = (\n \"\\n\\n\"\n \"A possible solution: \"\n \"https://docs.dask.org/en/latest/array-chunks.html#unknown-chunks\\n\"\n \"Summary: to compute chunks sizes, use\\n\\n\"\n \" x.compute_chunk_sizes() # for Dask Array `x`\\n\"\n \" ddf.to_dask_array(lengths=True) # for Dask DataFrame `ddf`\"\n)\n\n\nclass PerformanceWarning(Warning):\n \"\"\" A warning given when bad chunking may cause poor performance \"\"\"\n\n\ndef getter(a, b, asarray=True, lock=None):\n if isinstance(b, tuple) and any(x is None for x in b):\n b2 = tuple(x for x in b if x is not None)\n b3 = tuple(\n None if x is None else slice(None, None)\n for x in b\n if not isinstance(x, Integral)\n )\n return getter(a, b2, asarray=asarray, lock=lock)[b3]\n\n if lock:\n lock.acquire()\n try:\n c = a[b]\n if asarray:\n c = np.asarray(c)\n finally:\n if lock:\n lock.release()\n return c\n\n\ndef getter_nofancy(a, b, asarray=True, lock=None):\n \"\"\" A simple wrapper around ``getter``.\n\n Used to indicate to the optimization passes that the backend doesn't\n support fancy indexing.\n \"\"\"\n return getter(a, b, asarray=asarray, lock=lock)\n\n\ndef getter_inline(a, b, asarray=True, lock=None):\n \"\"\" A getter function that optimizations feel comfortable inlining\n\n Slicing operations with this function may be inlined into a graph, such as\n in the following rewrite\n\n **Before**\n\n >>> a = x[:10] # doctest: +SKIP\n >>> b = a + 1 # doctest: +SKIP\n >>> c = a * 2 # doctest: +SKIP\n\n **After**\n\n >>> b = x[:10] + 1 # doctest: +SKIP\n >>> c = x[:10] * 2 # doctest: +SKIP\n\n This inlining can be relevant to operations when running off of disk.\n \"\"\"\n return getter(a, b, asarray=asarray, lock=lock)\n\n\nfrom .optimization import optimize, 
fuse_slice\n\n\n# __array_function__ dict for mapping aliases and mismatching names\n_HANDLED_FUNCTIONS = {}\n\n\ndef implements(*numpy_functions):\n \"\"\"Register an __array_function__ implementation for dask.array.Array\n\n Register that a function implements the API of a NumPy function (or several\n NumPy functions in case of aliases) which is handled with\n ``__array_function__``.\n\n Parameters\n ----------\n \\\\*numpy_functions : callables\n One or more NumPy functions that are handled by ``__array_function__``\n and will be mapped by `implements` to a `dask.array` function.\n \"\"\"\n\n def decorator(dask_func):\n for numpy_function in numpy_functions:\n _HANDLED_FUNCTIONS[numpy_function] = dask_func\n\n return dask_func\n\n return decorator\n\n\ndef slices_from_chunks(chunks):\n \"\"\" Translate chunks tuple to a set of slices in product order\n\n >>> slices_from_chunks(((2, 2), (3, 3, 3))) # doctest: +NORMALIZE_WHITESPACE\n [(slice(0, 2, None), slice(0, 3, None)),\n (slice(0, 2, None), slice(3, 6, None)),\n (slice(0, 2, None), slice(6, 9, None)),\n (slice(2, 4, None), slice(0, 3, None)),\n (slice(2, 4, None), slice(3, 6, None)),\n (slice(2, 4, None), slice(6, 9, None))]\n \"\"\"\n cumdims = [cached_cumsum(bds, initial_zero=True) for bds in chunks]\n slices = [\n [slice(s, s + dim) for s, dim in zip(starts, shapes)]\n for starts, shapes in zip(cumdims, chunks)\n ]\n return list(product(*slices))\n\n\ndef getem(\n arr,\n chunks,\n getitem=getter,\n shape=None,\n out_name=None,\n lock=False,\n asarray=True,\n dtype=None,\n):\n \"\"\" Dask getting various chunks from an array-like\n\n >>> getem('X', chunks=(2, 3), shape=(4, 6)) # doctest: +SKIP\n {('X', 0, 0): (getter, 'X', (slice(0, 2), slice(0, 3))),\n ('X', 1, 0): (getter, 'X', (slice(2, 4), slice(0, 3))),\n ('X', 1, 1): (getter, 'X', (slice(2, 4), slice(3, 6))),\n ('X', 0, 1): (getter, 'X', (slice(0, 2), slice(3, 6)))}\n\n >>> getem('X', chunks=((2, 2), (3, 3))) # doctest: +SKIP\n {('X', 0, 0): (getter, 'X', (slice(0, 2), slice(0, 3))),\n ('X', 1, 0): (getter, 'X', (slice(2, 4), slice(0, 3))),\n ('X', 1, 1): (getter, 'X', (slice(2, 4), slice(3, 6))),\n ('X', 0, 1): (getter, 'X', (slice(0, 2), slice(3, 6)))}\n \"\"\"\n out_name = out_name or arr\n chunks = normalize_chunks(chunks, shape, dtype=dtype)\n keys = product([out_name], *(range(len(bds)) for bds in chunks))\n slices = slices_from_chunks(chunks)\n\n if (\n has_keyword(getitem, \"asarray\")\n and has_keyword(getitem, \"lock\")\n and (not asarray or lock)\n ):\n values = [(getitem, arr, x, asarray, lock) for x in slices]\n else:\n # Common case, drop extra parameters\n values = [(getitem, arr, x) for x in slices]\n\n return dict(zip(keys, values))\n\n\ndef dotmany(A, B, leftfunc=None, rightfunc=None, **kwargs):\n \"\"\" Dot product of many aligned chunks\n\n >>> x = np.array([[1, 2], [1, 2]])\n >>> y = np.array([[10, 20], [10, 20]])\n >>> dotmany([x, x, x], [y, y, y])\n array([[ 90, 180],\n [ 90, 180]])\n\n Optionally pass in functions to apply to the left and right chunks\n\n >>> dotmany([x, x, x], [y, y, y], rightfunc=np.transpose)\n array([[150, 150],\n [150, 150]])\n \"\"\"\n if leftfunc:\n A = map(leftfunc, A)\n if rightfunc:\n B = map(rightfunc, B)\n return sum(map(partial(np.dot, **kwargs), A, B))\n\n\ndef _concatenate2(arrays, axes=[]):\n \"\"\" Recursively Concatenate nested lists of arrays along axes\n\n Each entry in axes corresponds to each level of the nested list. 
The\n length of axes should correspond to the level of nesting of arrays.\n If axes is an empty list or tuple, return arrays, or arrays[0] if\n arrays is a list.\n\n >>> x = np.array([[1, 2], [3, 4]])\n >>> _concatenate2([x, x], axes=[0])\n array([[1, 2],\n [3, 4],\n [1, 2],\n [3, 4]])\n\n >>> _concatenate2([x, x], axes=[1])\n array([[1, 2, 1, 2],\n [3, 4, 3, 4]])\n\n >>> _concatenate2([[x, x], [x, x]], axes=[0, 1])\n array([[1, 2, 1, 2],\n [3, 4, 3, 4],\n [1, 2, 1, 2],\n [3, 4, 3, 4]])\n\n Supports Iterators\n >>> _concatenate2(iter([x, x]), axes=[1])\n array([[1, 2, 1, 2],\n [3, 4, 3, 4]])\n\n Special Case\n >>> _concatenate2([x, x], axes=())\n array([[1, 2],\n [3, 4]])\n \"\"\"\n if axes == ():\n if isinstance(arrays, list):\n return arrays[0]\n else:\n return arrays\n\n if isinstance(arrays, Iterator):\n arrays = list(arrays)\n if not isinstance(arrays, (list, tuple)):\n return arrays\n if len(axes) > 1:\n arrays = [_concatenate2(a, axes=axes[1:]) for a in arrays]\n concatenate = concatenate_lookup.dispatch(\n type(max(arrays, key=lambda x: getattr(x, \"__array_priority__\", 0)))\n )\n return concatenate(arrays, axis=axes[0])\n\n\ndef apply_infer_dtype(func, args, kwargs, funcname, suggest_dtype=\"dtype\", nout=None):\n \"\"\"\n Tries to infer output dtype of ``func`` for a small set of input arguments.\n\n Parameters\n ----------\n func: Callable\n Function for which output dtype is to be determined\n\n args: List of array like\n Arguments to the function, which would usually be used. Only attributes\n ``ndim`` and ``dtype`` are used.\n\n kwargs: dict\n Additional ``kwargs`` to the ``func``\n\n funcname: String\n Name of calling function to improve potential error messages\n\n suggest_dtype: None/False or String\n If not ``None`` adds suggestion to potential error message to specify a dtype\n via the specified kwarg. Defaults to ``'dtype'``.\n\n nout: None or Int\n ``None`` if function returns single output, integer if many.\n Deafults to ``None``.\n\n Returns\n -------\n : dtype or List of dtype\n One or many dtypes (depending on ``nout``)\n \"\"\"\n args = [\n np.ones((1,) * x.ndim, dtype=x.dtype) if isinstance(x, Array) else x\n for x in args\n ]\n try:\n with np.errstate(all=\"ignore\"):\n o = func(*args, **kwargs)\n except Exception as e:\n exc_type, exc_value, exc_traceback = sys.exc_info()\n tb = \"\".join(traceback.format_tb(exc_traceback))\n suggest = (\n (\n \"Please specify the dtype explicitly using the \"\n \"`{dtype}` kwarg.\\n\\n\".format(dtype=suggest_dtype)\n )\n if suggest_dtype\n else \"\"\n )\n msg = (\n \"`dtype` inference failed in `{0}`.\\n\\n\"\n \"{1}\"\n \"Original error is below:\\n\"\n \"------------------------\\n\"\n \"{2}\\n\\n\"\n \"Traceback:\\n\"\n \"---------\\n\"\n \"{3}\"\n ).format(funcname, suggest, repr(e), tb)\n else:\n msg = None\n if msg is not None:\n raise ValueError(msg)\n return o.dtype if nout is None else tuple(e.dtype for e in o)\n\n\ndef normalize_arg(x):\n \"\"\" Normalize user provided arguments to blockwise or map_blocks\n\n We do a few things:\n\n 1. If they are string literals that might collide with blockwise_token then we\n quote them\n 2. 
IF they are large (as defined by sizeof) then we put them into the\n graph on their own by using dask.delayed\n \"\"\"\n if is_dask_collection(x):\n return x\n elif isinstance(x, str) and re.match(r\"_\\d+\", x):\n return delayed(x)\n elif isinstance(x, list) and len(x) >= 10:\n return delayed(x)\n elif sizeof(x) > 1e6:\n return delayed(x)\n else:\n return x\n\n\ndef _pass_extra_kwargs(func, keys, *args, **kwargs):\n \"\"\" Helper for :func:`map_blocks` to pass `block_info` or `block_id`.\n\n For each element of `keys`, a corresponding element of args is changed\n to a keyword argument with that key, before all arguments re passed on\n to `func`.\n \"\"\"\n kwargs.update(zip(keys, args))\n return func(*args[len(keys) :], **kwargs)\n\n\ndef map_blocks(\n func,\n *args,\n name=None,\n token=None,\n dtype=None,\n chunks=None,\n drop_axis=[],\n new_axis=None,\n meta=None,\n **kwargs,\n):\n \"\"\" Map a function across all blocks of a dask array.\n\n Parameters\n ----------\n func : callable\n Function to apply to every block in the array.\n args : dask arrays or other objects\n dtype : np.dtype, optional\n The ``dtype`` of the output array. It is recommended to provide this.\n If not provided, will be inferred by applying the function to a small\n set of fake data.\n chunks : tuple, optional\n Chunk shape of resulting blocks if the function does not preserve\n shape. If not provided, the resulting array is assumed to have the same\n block structure as the first input array.\n drop_axis : number or iterable, optional\n Dimensions lost by the function.\n new_axis : number or iterable, optional\n New dimensions created by the function. Note that these are applied\n after ``drop_axis`` (if present).\n token : string, optional\n The key prefix to use for the output array. If not provided, will be\n determined from the function name.\n name : string, optional\n The key name to use for the output array. Note that this fully\n specifies the output key name, and must be unique. If not provided,\n will be determined by a hash of the arguments.\n **kwargs :\n Other keyword arguments to pass to function. Values must be constants\n (not dask.arrays)\n\n See Also\n --------\n dask.array.blockwise : Generalized operation with control over block alignment.\n\n Examples\n --------\n >>> import dask.array as da\n >>> x = da.arange(6, chunks=3)\n\n >>> x.map_blocks(lambda x: x * 2).compute()\n array([ 0, 2, 4, 6, 8, 10])\n\n The ``da.map_blocks`` function can also accept multiple arrays.\n\n >>> d = da.arange(5, chunks=2)\n >>> e = da.arange(5, chunks=2)\n\n >>> f = map_blocks(lambda a, b: a + b**2, d, e)\n >>> f.compute()\n array([ 0, 2, 6, 12, 20])\n\n If the function changes shape of the blocks then you must provide chunks\n explicitly.\n\n >>> y = x.map_blocks(lambda x: x[::2], chunks=((2, 2),))\n\n You have a bit of freedom in specifying chunks. If all of the output chunk\n sizes are the same, you can provide just that chunk size as a single tuple.\n\n >>> a = da.arange(18, chunks=(6,))\n >>> b = a.map_blocks(lambda x: x[:3], chunks=(3,))\n\n If the function changes the dimension of the blocks you must specify the\n created or destroyed dimensions.\n\n >>> b = a.map_blocks(lambda x: x[None, :, None], chunks=(1, 6, 1),\n ... new_axis=[0, 2])\n\n If ``chunks`` is specified but ``new_axis`` is not, then it is inferred to\n add the necessary number of axes on the left.\n\n Map_blocks aligns blocks by block positions without regard to shape. 
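# A hedged sketch of the dtype-inference idea used by ``apply_infer_dtype``
# (and relied on by ``map_blocks`` when ``dtype=`` is omitted): call the
# function on throwaway one-element arrays that share the inputs' ndim and
# dtype, then read the dtype off the result. ``infer_dtype`` is an
# illustrative name; error handling is omitted for brevity.
import numpy as np

def infer_dtype(func, *arrays):
    probes = [np.ones((1,) * a.ndim, dtype=a.dtype) for a in arrays]
    with np.errstate(all="ignore"):      # silence warnings from the tiny probe
        return func(*probes).dtype

print(infer_dtype(np.add, np.empty(3, dtype="i4"), np.empty(3, dtype="f8")))
# float64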
In the\n following example we have two arrays with the same number of blocks but\n with different shape and chunk sizes.\n\n >>> x = da.arange(1000, chunks=(100,))\n >>> y = da.arange(100, chunks=(10,))\n\n The relevant attribute to match is numblocks.\n\n >>> x.numblocks\n (10,)\n >>> y.numblocks\n (10,)\n\n If these match (up to broadcasting rules) then we can map arbitrary\n functions across blocks\n\n >>> def func(a, b):\n ... return np.array([a.max(), b.max()])\n\n >>> da.map_blocks(func, x, y, chunks=(2,), dtype='i8')\n dask.array<func, shape=(20,), dtype=int64, chunksize=(2,), chunktype=numpy.ndarray>\n\n >>> _.compute()\n array([ 99, 9, 199, 19, 299, 29, 399, 39, 499, 49, 599, 59, 699,\n 69, 799, 79, 899, 89, 999, 99])\n\n Your block function get information about where it is in the array by\n accepting a special ``block_info`` keyword argument.\n\n >>> def func(block, block_info=None):\n ... pass\n\n This will receive the following information:\n\n >>> block_info # doctest: +SKIP\n {0: {'shape': (1000,),\n 'num-chunks': (10,),\n 'chunk-location': (4,),\n 'array-location': [(400, 500)]},\n None: {'shape': (1000,),\n 'num-chunks': (10,),\n 'chunk-location': (4,),\n 'array-location': [(400, 500)],\n 'chunk-shape': (100,),\n 'dtype': dtype('float64')}}\n\n For each argument and keyword arguments that are dask arrays (the positions\n of which are the first index), you will receive the shape of the full\n array, the number of chunks of the full array in each dimension, the chunk\n location (for example the fourth chunk over in the first dimension), and\n the array location (for example the slice corresponding to ``40:50``). The\n same information is provided for the output, with the key ``None``, plus\n the shape and dtype that should be returned.\n\n These features can be combined to synthesize an array from scratch, for\n example:\n\n >>> def func(block_info=None):\n ... loc = block_info[None]['array-location'][0]\n ... 
return np.arange(loc[0], loc[1])\n\n >>> da.map_blocks(func, chunks=((4, 4),), dtype=np.float_)\n dask.array<func, shape=(8,), dtype=float64, chunksize=(4,), chunktype=numpy.ndarray>\n\n >>> _.compute()\n array([0, 1, 2, 3, 4, 5, 6, 7])\n\n You may specify the key name prefix of the resulting task in the graph with\n the optional ``token`` keyword argument.\n\n >>> x.map_blocks(lambda x: x + 1, name='increment') # doctest: +SKIP\n dask.array<increment, shape=(100,), dtype=int64, chunksize=(10,), chunktype=numpy.ndarray>\n \"\"\"\n if not callable(func):\n msg = (\n \"First argument must be callable function, not %s\\n\"\n \"Usage: da.map_blocks(function, x)\\n\"\n \" or: da.map_blocks(function, x, y, z)\"\n )\n raise TypeError(msg % type(func).__name__)\n if token:\n warnings.warn(\"The token= keyword to map_blocks has been moved to name=\")\n name = token\n\n name = \"%s-%s\" % (name or funcname(func), tokenize(func, *args, **kwargs))\n new_axes = {}\n\n if isinstance(drop_axis, Number):\n drop_axis = [drop_axis]\n if isinstance(new_axis, Number):\n new_axis = [new_axis] # TODO: handle new_axis\n\n arrs = [a for a in args if isinstance(a, Array)]\n\n argpairs = [\n (a, tuple(range(a.ndim))[::-1]) if isinstance(a, Array) else (a, None)\n for a in args\n ]\n if arrs:\n out_ind = tuple(range(max(a.ndim for a in arrs)))[::-1]\n else:\n out_ind = ()\n\n original_kwargs = kwargs\n\n if dtype is None and meta is None:\n dtype = apply_infer_dtype(func, args, original_kwargs, \"map_blocks\")\n\n if drop_axis:\n out_ind = tuple(x for i, x in enumerate(out_ind) if i not in drop_axis)\n if new_axis is None and chunks is not None and len(out_ind) < len(chunks):\n new_axis = range(len(chunks) - len(out_ind))\n if new_axis:\n # new_axis = [x + len(drop_axis) for x in new_axis]\n out_ind = list(out_ind)\n for ax in sorted(new_axis):\n n = len(out_ind) + len(drop_axis)\n out_ind.insert(ax, n)\n if chunks is not None:\n new_axes[n] = chunks[ax]\n else:\n new_axes[n] = 1\n out_ind = tuple(out_ind)\n if max(new_axis) > max(out_ind):\n raise ValueError(\"New_axis values do not fill in all dimensions\")\n\n if chunks is not None:\n if len(chunks) != len(out_ind):\n raise ValueError(\n \"Provided chunks have {0} dims, expected {1} \"\n \"dims.\".format(len(chunks), len(out_ind))\n )\n adjust_chunks = dict(zip(out_ind, chunks))\n else:\n adjust_chunks = None\n\n out = blockwise(\n func,\n out_ind,\n *concat(argpairs),\n name=name,\n new_axes=new_axes,\n dtype=dtype,\n concatenate=True,\n align_arrays=False,\n adjust_chunks=adjust_chunks,\n meta=meta,\n **kwargs,\n )\n\n extra_argpairs = []\n extra_names = []\n # If func has block_id as an argument, construct an array of block IDs and\n # prepare to inject it.\n if has_keyword(func, \"block_id\"):\n block_id_name = \"block-id-\" + out.name\n block_id_dsk = {\n (block_id_name,) + block_id: block_id\n for block_id in product(*(range(len(c)) for c in out.chunks))\n }\n block_id_array = Array(\n block_id_dsk,\n block_id_name,\n chunks=tuple((1,) * len(c) for c in out.chunks),\n dtype=np.object_,\n )\n extra_argpairs.append((block_id_array, out_ind))\n extra_names.append(\"block_id\")\n\n # If func has block_info as an argument, construct an array of block info\n # objects and prepare to inject it.\n if has_keyword(func, \"block_info\"):\n starts = {}\n num_chunks = {}\n shapes = {}\n\n for i, (arg, in_ind) in enumerate(argpairs):\n if in_ind is not None:\n shapes[i] = arg.shape\n if drop_axis:\n # We concatenate along dropped axes, so we need to treat them\n # as if 
there is only a single chunk.\n starts[i] = [\n (\n cached_cumsum(arg.chunks[j], initial_zero=True)\n if ind in out_ind\n else [0, arg.shape[j]]\n )\n for j, ind in enumerate(in_ind)\n ]\n num_chunks[i] = tuple(len(s) - 1 for s in starts[i])\n else:\n starts[i] = [\n cached_cumsum(c, initial_zero=True) for c in arg.chunks\n ]\n num_chunks[i] = arg.numblocks\n out_starts = [cached_cumsum(c, initial_zero=True) for c in out.chunks]\n\n block_info_name = \"block-info-\" + out.name\n block_info_dsk = {}\n for block_id in product(*(range(len(c)) for c in out.chunks)):\n # Get position of chunk, indexed by axis labels\n location = {out_ind[i]: loc for i, loc in enumerate(block_id)}\n info = {}\n for i, shape in shapes.items():\n # Compute chunk key in the array, taking broadcasting into\n # account. We don't directly know which dimensions are\n # broadcast, but any dimension with only one chunk can be\n # treated as broadcast.\n arr_k = tuple(\n location.get(ind, 0) if num_chunks[i][j] > 1 else 0\n for j, ind in enumerate(argpairs[i][1])\n )\n info[i] = {\n \"shape\": shape,\n \"num-chunks\": num_chunks[i],\n \"array-location\": [\n (starts[i][ij][j], starts[i][ij][j + 1])\n for ij, j in enumerate(arr_k)\n ],\n \"chunk-location\": arr_k,\n }\n\n info[None] = {\n \"shape\": out.shape,\n \"num-chunks\": out.numblocks,\n \"array-location\": [\n (out_starts[ij][j], out_starts[ij][j + 1])\n for ij, j in enumerate(block_id)\n ],\n \"chunk-location\": block_id,\n \"chunk-shape\": tuple(\n out.chunks[ij][j] for ij, j in enumerate(block_id)\n ),\n \"dtype\": dtype,\n }\n block_info_dsk[(block_info_name,) + block_id] = info\n\n block_info = Array(\n block_info_dsk,\n block_info_name,\n chunks=tuple((1,) * len(c) for c in out.chunks),\n dtype=np.object_,\n )\n extra_argpairs.append((block_info, out_ind))\n extra_names.append(\"block_info\")\n\n if extra_argpairs:\n # Rewrite the Blockwise layer. It would be nice to find a way to\n # avoid doing it twice, but it's currently needed to determine\n # out.chunks from the first pass. 
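# Usage sketches for the two injection hooks assembled above: a function that
# accepts ``block_id`` receives its chunk coordinates, and one that accepts
# ``block_info`` receives shapes, chunk counts and array locations for each
# dask-array argument plus the output (under the key ``None``). The helper
# names ``tag`` and ``locate`` are only for illustration; assumes dask.array
# is importable as ``da``.
import numpy as np
import dask.array as da

def tag(x, block_id=None):
    # Shift each chunk by 100 times its block index along axis 0.
    return x + 100 * block_id[0]

def locate(x, block_info=None):
    # Replace the chunk's data with its global start offset in the output.
    start, _stop = block_info[None]["array-location"][0]
    return np.full_like(x, start)

x = da.arange(6, chunks=3)
print(x.map_blocks(tag, dtype=x.dtype).compute())     # [  0   1   2 103 104 105]
print(x.map_blocks(locate, dtype=x.dtype).compute())  # [0 0 0 3 3 3]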
Since it constructs a Blockwise\n # rather than an expanded graph, it shouldn't be too expensive.\n out = blockwise(\n _pass_extra_kwargs,\n out_ind,\n func,\n None,\n tuple(extra_names),\n None,\n *concat(extra_argpairs),\n *concat(argpairs),\n name=out.name,\n dtype=out.dtype,\n concatenate=True,\n align_arrays=False,\n adjust_chunks=dict(zip(out_ind, out.chunks)),\n meta=meta,\n **kwargs,\n )\n\n return out\n\n\ndef broadcast_chunks(*chunkss):\n \"\"\" Construct a chunks tuple that broadcasts many chunks tuples\n\n >>> a = ((5, 5),)\n >>> b = ((5, 5),)\n >>> broadcast_chunks(a, b)\n ((5, 5),)\n\n >>> a = ((10, 10, 10), (5, 5),)\n >>> b = ((5, 5),)\n >>> broadcast_chunks(a, b)\n ((10, 10, 10), (5, 5))\n\n >>> a = ((10, 10, 10), (5, 5),)\n >>> b = ((1,), (5, 5),)\n >>> broadcast_chunks(a, b)\n ((10, 10, 10), (5, 5))\n\n >>> a = ((10, 10, 10), (5, 5),)\n >>> b = ((3, 3,), (5, 5),)\n >>> broadcast_chunks(a, b)\n Traceback (most recent call last):\n ...\n ValueError: Chunks do not align: [(10, 10, 10), (3, 3)]\n \"\"\"\n if not chunkss:\n return ()\n elif len(chunkss) == 1:\n return chunkss[0]\n n = max(map(len, chunkss))\n chunkss2 = [((1,),) * (n - len(c)) + c for c in chunkss]\n result = []\n for i in range(n):\n step1 = [c[i] for c in chunkss2]\n if all(c == (1,) for c in step1):\n step2 = step1\n else:\n step2 = [c for c in step1 if c != (1,)]\n if len(set(step2)) != 1:\n raise ValueError(\"Chunks do not align: %s\" % str(step2))\n result.append(step2[0])\n return tuple(result)\n\n\ndef store(\n sources,\n targets,\n lock=True,\n regions=None,\n compute=True,\n return_stored=False,\n **kwargs,\n):\n \"\"\" Store dask arrays in array-like objects, overwrite data in target\n\n This stores dask arrays into object that supports numpy-style setitem\n indexing. It stores values chunk by chunk so that it does not have to\n fill up memory. For best performance you can align the block size of\n the storage target with the block size of your array.\n\n If your data fits in memory then you may prefer calling\n ``np.array(myarray)`` instead.\n\n Parameters\n ----------\n\n sources: Array or iterable of Arrays\n targets: array-like or Delayed or iterable of array-likes and/or Delayeds\n These should support setitem syntax ``target[10:20] = ...``\n lock: boolean or threading.Lock, optional\n Whether or not to lock the data stores while storing.\n Pass True (lock each file individually), False (don't lock) or a\n particular ``threading.Lock`` object to be shared among all writes.\n regions: tuple of slices or list of tuples of slices\n Each ``region`` tuple in ``regions`` should be such that\n ``target[region].shape = source.shape``\n for the corresponding source and target in sources and targets,\n respectively. If this is a tuple, the contents will be assumed to be\n slices, so do not provide a tuple of tuples.\n compute: boolean, optional\n If true compute immediately, return ``dask.delayed.Delayed`` otherwise\n return_stored: boolean, optional\n Optionally return the stored result (default False).\n\n Examples\n --------\n >>> x = ... # doctest: +SKIP\n\n >>> import h5py # doctest: +SKIP\n >>> f = h5py.File('myfile.hdf5', mode='a') # doctest: +SKIP\n >>> dset = f.create_dataset('/data', shape=x.shape,\n ... chunks=x.chunks,\n ... 
dtype='f8') # doctest: +SKIP\n\n >>> store(x, dset) # doctest: +SKIP\n\n Alternatively store many arrays at the same time\n\n >>> store([x, y, z], [dset1, dset2, dset3]) # doctest: +SKIP\n \"\"\"\n\n if isinstance(sources, Array):\n sources = [sources]\n targets = [targets]\n\n if any(not isinstance(s, Array) for s in sources):\n raise ValueError(\"All sources must be dask array objects\")\n\n if len(sources) != len(targets):\n raise ValueError(\n \"Different number of sources [%d] and targets [%d]\"\n % (len(sources), len(targets))\n )\n\n if isinstance(regions, tuple) or regions is None:\n regions = [regions]\n\n if len(sources) > 1 and len(regions) == 1:\n regions *= len(sources)\n\n if len(sources) != len(regions):\n raise ValueError(\n \"Different number of sources [%d] and targets [%d] than regions [%d]\"\n % (len(sources), len(targets), len(regions))\n )\n\n # Optimize all sources together\n sources_dsk = HighLevelGraph.merge(*[e.__dask_graph__() for e in sources])\n sources_dsk = Array.__dask_optimize__(\n sources_dsk, list(core.flatten([e.__dask_keys__() for e in sources]))\n )\n sources2 = [Array(sources_dsk, e.name, e.chunks, meta=e) for e in sources]\n\n # Optimize all targets together\n targets2 = []\n targets_keys = []\n targets_dsk = []\n for e in targets:\n if isinstance(e, Delayed):\n targets2.append(e.key)\n targets_keys.extend(e.__dask_keys__())\n targets_dsk.append(e.__dask_graph__())\n elif is_dask_collection(e):\n raise TypeError(\"Targets must be either Delayed objects or array-likes\")\n else:\n targets2.append(e)\n\n targets_dsk = HighLevelGraph.merge(*targets_dsk)\n targets_dsk = Delayed.__dask_optimize__(targets_dsk, targets_keys)\n\n load_stored = return_stored and not compute\n toks = [str(uuid.uuid1()) for _ in range(len(sources))]\n store_dsk = HighLevelGraph.merge(\n *[\n insert_to_ooc(s, t, lock, r, return_stored, load_stored, tok)\n for s, t, r, tok in zip(sources2, targets2, regions, toks)\n ]\n )\n store_keys = list(store_dsk.keys())\n\n store_dsk = HighLevelGraph.merge(store_dsk, targets_dsk, sources_dsk)\n\n if return_stored:\n load_store_dsk = store_dsk\n if compute:\n store_dlyds = [Delayed(k, store_dsk) for k in store_keys]\n store_dlyds = persist(*store_dlyds, **kwargs)\n store_dsk_2 = HighLevelGraph.merge(*[e.dask for e in store_dlyds])\n\n load_store_dsk = retrieve_from_ooc(store_keys, store_dsk, store_dsk_2)\n\n result = tuple(\n Array(load_store_dsk, \"load-store-%s\" % t, s.chunks, meta=s)\n for s, t in zip(sources, toks)\n )\n\n return result\n else:\n name = \"store-\" + str(uuid.uuid1())\n dsk = HighLevelGraph.merge({name: store_keys}, store_dsk)\n result = Delayed(name, dsk)\n\n if compute:\n result.compute(**kwargs)\n return None\n else:\n return result\n\n\ndef blockdims_from_blockshape(shape, chunks):\n \"\"\"\n\n >>> blockdims_from_blockshape((10, 10), (4, 3))\n ((4, 4, 2), (3, 3, 3, 1))\n >>> blockdims_from_blockshape((10, 0), (4, 0))\n ((4, 4, 2), (0,))\n \"\"\"\n if chunks is None:\n raise TypeError(\"Must supply chunks= keyword argument\")\n if shape is None:\n raise TypeError(\"Must supply shape= keyword argument\")\n if np.isnan(sum(shape)) or np.isnan(sum(chunks)):\n raise ValueError(\n \"Array chunk sizes are unknown. 
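# A hedged usage sketch for ``store`` above: any target supporting NumPy-style
# ``__setitem__`` works, so a plain ndarray stands in here for an on-disk
# dataset such as h5py or zarr. Assumes dask.array is importable as ``da``.
import numpy as np
import dask.array as da

x = da.ones((4, 4), chunks=(2, 2))
target = np.zeros((4, 4))
da.store(x, target)      # writes chunk by chunk; returns None by default
print(target.sum())      # 16.0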
shape: %s, chunks: %s%s\"\n % (shape, chunks, unknown_chunk_message)\n )\n if not all(map(is_integer, chunks)):\n raise ValueError(\"chunks can only contain integers.\")\n if not all(map(is_integer, shape)):\n raise ValueError(\"shape can only contain integers.\")\n shape = tuple(map(int, shape))\n chunks = tuple(map(int, chunks))\n return tuple(\n ((bd,) * (d // bd) + ((d % bd,) if d % bd else ()) if d else (0,))\n for d, bd in zip(shape, chunks)\n )\n\n\ndef finalize(results):\n if not results:\n return concatenate3(results)\n results2 = results\n while isinstance(results2, (tuple, list)):\n if len(results2) > 1:\n return concatenate3(results)\n else:\n results2 = results2[0]\n return unpack_singleton(results)\n\n\nCHUNKS_NONE_ERROR_MESSAGE = \"\"\"\nYou must specify a chunks= keyword argument.\nThis specifies the chunksize of your array blocks.\n\nSee the following documentation page for details:\n https://docs.dask.org/en/latest/array-creation.html#chunks\n\"\"\".strip()\n\n\nclass Array(DaskMethodsMixin):\n \"\"\" Parallel Dask Array\n\n A parallel nd-array comprised of many numpy arrays arranged in a grid.\n\n This constructor is for advanced uses only. For normal use see the\n ``da.from_array`` function.\n\n Parameters\n ----------\n dask : dict\n Task dependency graph\n name : string\n Name of array in dask\n shape : tuple of ints\n Shape of the entire array\n chunks: iterable of tuples\n block sizes along each dimension\n dtype : str or dtype\n Typecode or data-type for the new Dask Array\n meta : empty ndarray\n empty ndarray created with same NumPy backend, ndim and dtype as the\n Dask Array being created (overrides dtype)\n\n See Also\n --------\n dask.array.from_array\n \"\"\"\n\n __slots__ = \"dask\", \"_name\", \"_cached_keys\", \"_chunks\", \"_meta\"\n\n def __new__(cls, dask, name, chunks, dtype=None, meta=None, shape=None):\n self = super(Array, cls).__new__(cls)\n assert isinstance(dask, Mapping)\n if not isinstance(dask, HighLevelGraph):\n dask = HighLevelGraph.from_collections(name, dask, dependencies=())\n self.dask = dask\n self.name = str(name)\n meta = meta_from_array(meta, dtype=dtype)\n\n if (\n isinstance(chunks, str)\n or isinstance(chunks, tuple)\n and chunks\n and any(isinstance(c, str) for c in chunks)\n ):\n dt = meta.dtype\n else:\n dt = None\n self._chunks = normalize_chunks(chunks, shape, dtype=dt)\n if self._chunks is None:\n raise ValueError(CHUNKS_NONE_ERROR_MESSAGE)\n\n self._meta = meta_from_array(meta, ndim=self.ndim, dtype=dtype)\n\n for plugin in config.get(\"array_plugins\", ()):\n result = plugin(self)\n if result is not None:\n self = result\n\n return self\n\n def __reduce__(self):\n return (Array, (self.dask, self.name, self.chunks, self.dtype))\n\n def __dask_graph__(self):\n return self.dask\n\n def __dask_layers__(self):\n return (self.name,)\n\n def __dask_keys__(self):\n if self._cached_keys is not None:\n return self._cached_keys\n\n name, chunks, numblocks = self.name, self.chunks, self.numblocks\n\n def keys(*args):\n if not chunks:\n return [(name,)]\n ind = len(args)\n if ind + 1 == len(numblocks):\n result = [(name,) + args + (i,) for i in range(numblocks[ind])]\n else:\n result = [keys(*(args + (i,))) for i in range(numblocks[ind])]\n return result\n\n self._cached_keys = result = keys()\n return result\n\n def __dask_tokenize__(self):\n return self.name\n\n __dask_optimize__ = globalmethod(\n optimize, key=\"array_optimize\", falsey=dont_optimize\n )\n __dask_scheduler__ = staticmethod(threaded.get)\n\n def 
__dask_postcompute__(self):\n return finalize, ()\n\n def __dask_postpersist__(self):\n return Array, (self.name, self.chunks, self.dtype, self._meta)\n\n @property\n def numblocks(self):\n return tuple(map(len, self.chunks))\n\n @property\n def npartitions(self):\n return reduce(mul, self.numblocks, 1)\n\n def compute_chunk_sizes(self):\n \"\"\"\n Compute the chunk sizes for a Dask array. This is especially useful\n when the chunk sizes are unknown (e.g., when indexing one Dask array\n with another).\n\n Notes\n -----\n This function modifies the Dask array in-place.\n\n Examples\n --------\n >>> import dask.array as da\n >>> import numpy as np\n >>> x = da.from_array([-2, -1, 0, 1, 2], chunks=2)\n >>> x.chunks\n ((2, 2, 1),)\n >>> y = x[x <= 0]\n >>> y.chunks\n ((nan, nan, nan),)\n >>> y.compute_chunk_sizes() # in-place computation\n dask.array<getitem, shape=(3,), dtype=int64, chunksize=(2,), chunktype=numpy.ndarray>\n >>> y.chunks\n ((2, 1, 0),)\n\n \"\"\"\n x = self\n chunk_shapes = x.map_blocks(\n _get_chunk_shape,\n dtype=int,\n chunks=tuple(len(c) * (1,) for c in x.chunks) + ((x.ndim,),),\n new_axis=x.ndim,\n )\n\n c = []\n for i in range(x.ndim):\n s = x.ndim * [0] + [i]\n s[i] = slice(None)\n s = tuple(s)\n\n c.append(tuple(chunk_shapes[s]))\n\n # `map_blocks` assigns numpy dtypes\n # cast chunk dimensions back to python int before returning\n x._chunks = tuple(\n [tuple([int(chunk) for chunk in chunks]) for chunks in compute(tuple(c))[0]]\n )\n return x\n\n @property\n def shape(self):\n return tuple(cached_cumsum(c, initial_zero=True)[-1] for c in self.chunks)\n\n @property\n def chunksize(self):\n return tuple(max(c) for c in self.chunks)\n\n @property\n def dtype(self):\n return self._meta.dtype\n\n def _get_chunks(self):\n return self._chunks\n\n def _set_chunks(self, chunks):\n msg = (\n \"Can not set chunks directly\\n\\n\"\n \"Please use the rechunk method instead:\\n\"\n \" x.rechunk({})\\n\\n\"\n \"If trying to avoid unknown chunks, use\\n\"\n \" x.compute_chunk_sizes()\"\n )\n raise TypeError(msg.format(chunks))\n\n chunks = property(_get_chunks, _set_chunks, \"chunks property\")\n\n def __len__(self):\n if not self.chunks:\n raise TypeError(\"len() of unsized object\")\n return sum(self.chunks[0])\n\n def __array_ufunc__(self, numpy_ufunc, method, *inputs, **kwargs):\n out = kwargs.get(\"out\", ())\n for x in inputs + out:\n if not isinstance(x, (np.ndarray, Number, Array)):\n return NotImplemented\n\n if method == \"__call__\":\n if numpy_ufunc is np.matmul:\n from .routines import matmul\n\n # special case until apply_gufunc handles optional dimensions\n return matmul(*inputs, **kwargs)\n if numpy_ufunc.signature is not None:\n from .gufunc import apply_gufunc\n\n return apply_gufunc(\n numpy_ufunc, numpy_ufunc.signature, *inputs, **kwargs\n )\n if numpy_ufunc.nout > 1:\n from . import ufunc\n\n try:\n da_ufunc = getattr(ufunc, numpy_ufunc.__name__)\n except AttributeError:\n return NotImplemented\n return da_ufunc(*inputs, **kwargs)\n else:\n return elemwise(numpy_ufunc, *inputs, **kwargs)\n elif method == \"outer\":\n from . 
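# A quick usage sketch of the chunk metadata exposed by the properties above
# (``chunks``, ``numblocks``, ``npartitions``, ``chunksize``, ``__len__``).
# Assumes dask.array is importable as ``da``.
import dask.array as da

x = da.ones((10, 6), chunks=(4, 3))
print(x.chunks)       # ((4, 4, 2), (3, 3))
print(x.numblocks)    # (3, 2)
print(x.npartitions)  # 6
print(x.chunksize)    # (4, 3)
print(len(x))         # 10 -- the sum of the chunks along the first axis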
import ufunc\n\n try:\n da_ufunc = getattr(ufunc, numpy_ufunc.__name__)\n except AttributeError:\n return NotImplemented\n return da_ufunc.outer(*inputs, **kwargs)\n else:\n return NotImplemented\n\n def __repr__(self):\n \"\"\"\n\n >>> import dask.array as da\n >>> da.ones((10, 10), chunks=(5, 5), dtype='i4')\n dask.array<..., shape=(10, 10), dtype=int32, chunksize=(5, 5), chunktype=numpy.ndarray>\n \"\"\"\n chunksize = str(self.chunksize)\n name = self.name.rsplit(\"-\", 1)[0]\n return \"dask.array<%s, shape=%s, dtype=%s, chunksize=%s, chunktype=%s.%s>\" % (\n name,\n self.shape,\n self.dtype,\n chunksize,\n type(self._meta).__module__.split(\".\")[0],\n type(self._meta).__name__,\n )\n\n def _repr_html_(self):\n table = self._repr_html_table()\n try:\n grid = self.to_svg(size=config.get(\"array.svg.size\", 120))\n except NotImplementedError:\n grid = \"\"\n\n both = [\n \"<table>\",\n \"<tr>\",\n \"<td>\",\n table,\n \"</td>\",\n \"<td>\",\n grid,\n \"</td>\",\n \"</tr>\",\n \"</table>\",\n ]\n return \"\\n\".join(both)\n\n def _repr_html_table(self):\n if \"sparse\" in typename(type(self._meta)):\n nbytes = None\n cbytes = None\n elif not math.isnan(self.nbytes):\n nbytes = format_bytes(self.nbytes)\n cbytes = format_bytes(np.prod(self.chunksize) * self.dtype.itemsize)\n else:\n nbytes = \"unknown\"\n cbytes = \"unknown\"\n\n table = [\n \"<table>\",\n \" <thead>\",\n \" <tr><td> </td><th> Array </th><th> Chunk </th></tr>\",\n \" </thead>\",\n \" <tbody>\",\n \" <tr><th> Bytes </th><td> %s </td> <td> %s </td></tr>\"\n % (nbytes, cbytes)\n if nbytes is not None\n else \"\",\n \" <tr><th> Shape </th><td> %s </td> <td> %s </td></tr>\"\n % (str(self.shape), str(self.chunksize)),\n \" <tr><th> Count </th><td> %d Tasks </td><td> %d Chunks </td></tr>\"\n % (len(self.__dask_graph__()), self.npartitions),\n \" <tr><th> Type </th><td> %s </td><td> %s.%s </td></tr>\"\n % (\n self.dtype,\n type(self._meta).__module__.split(\".\")[0],\n type(self._meta).__name__,\n ),\n \" </tbody>\",\n \"</table>\",\n ]\n return \"\\n\".join(table)\n\n @property\n def ndim(self):\n return len(self.shape)\n\n @property\n def size(self):\n \"\"\" Number of elements in array \"\"\"\n return reduce(mul, self.shape, 1)\n\n @property\n def nbytes(self):\n \"\"\" Number of bytes in array \"\"\"\n return self.size * self.dtype.itemsize\n\n @property\n def itemsize(self):\n \"\"\" Length of one array element in bytes \"\"\"\n return self.dtype.itemsize\n\n @property\n def name(self):\n return self._name\n\n @name.setter\n def name(self, val):\n self._name = val\n # Clear the key cache when the name is reset\n self._cached_keys = None\n\n __array_priority__ = 11 # higher than numpy.ndarray and numpy.matrix\n\n def __array__(self, dtype=None, **kwargs):\n x = self.compute()\n if dtype and x.dtype != dtype:\n x = x.astype(dtype)\n if not isinstance(x, np.ndarray):\n x = np.array(x)\n return x\n\n def __array_function__(self, func, types, args, kwargs):\n import dask.array as module\n\n def handle_nonmatching_names(func, args, kwargs):\n if func not in _HANDLED_FUNCTIONS:\n warnings.warn(\n \"The `{}` function is not implemented by Dask array. \"\n \"You may want to use the da.map_blocks function \"\n \"or something similar to silence this warning. \"\n \"Your code may stop working in a future release.\".format(\n func.__module__ + \".\" + func.__name__\n ),\n FutureWarning,\n )\n # Need to convert to array object (e.g. 
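# A hedged sketch of the NumPy interop wired up above: ``__array__`` lets
# ``np.asarray`` materialise the dask array, while ``__array_function__``
# forwards whole NumPy functions to their dask counterparts (NumPy >= 1.17),
# keeping the result lazy. Assumes dask.array is importable as ``da``.
import numpy as np
import dask.array as da

x = da.ones((4, 4), chunks=2)
print(type(np.transpose(x)))   # a dask Array -- stays lazy via __array_function__
print(np.asarray(x).shape)     # (4, 4)    -- a concrete ndarray via __array__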
numpy.ndarray or\n # cupy.ndarray) as needed, so we can call the NumPy function\n # again and it gets the chance to dispatch to the right\n # implementation.\n args, kwargs = compute(args, kwargs)\n return func(*args, **kwargs)\n\n return _HANDLED_FUNCTIONS[func](*args, **kwargs)\n\n # First try to find a matching function name. If that doesn't work, we may\n # be dealing with an alias or a function that's simply not in the Dask API.\n # Handle aliases via the _HANDLED_FUNCTIONS dict mapping, and warn otherwise.\n for submodule in func.__module__.split(\".\")[1:]:\n try:\n module = getattr(module, submodule)\n except AttributeError:\n return handle_nonmatching_names(func, args, kwargs)\n\n if not hasattr(module, func.__name__):\n return handle_nonmatching_names(func, args, kwargs)\n\n da_func = getattr(module, func.__name__)\n if da_func is func:\n return handle_nonmatching_names(func, args, kwargs)\n return da_func(*args, **kwargs)\n\n @property\n def _elemwise(self):\n return elemwise\n\n @wraps(store)\n def store(self, target, **kwargs):\n r = store([self], [target], **kwargs)\n\n if kwargs.get(\"return_stored\", False):\n r = r[0]\n\n return r\n\n def to_svg(self, size=500):\n \"\"\" Convert chunks from Dask Array into an SVG Image\n\n Parameters\n ----------\n chunks: tuple\n size: int\n Rough size of the image\n\n Examples\n --------\n >>> x.to_svg(size=500) # doctest: +SKIP\n\n Returns\n -------\n text: An svg string depicting the array as a grid of chunks\n \"\"\"\n from .svg import svg\n\n return svg(self.chunks, size=size)\n\n def to_hdf5(self, filename, datapath, **kwargs):\n \"\"\" Store array in HDF5 file\n\n >>> x.to_hdf5('myfile.hdf5', '/x') # doctest: +SKIP\n\n Optionally provide arguments as though to ``h5py.File.create_dataset``\n\n >>> x.to_hdf5('myfile.hdf5', '/x', compression='lzf', shuffle=True) # doctest: +SKIP\n\n See Also\n --------\n da.store\n h5py.File.create_dataset\n \"\"\"\n return to_hdf5(filename, datapath, self, **kwargs)\n\n def to_dask_dataframe(self, columns=None, index=None, meta=None):\n \"\"\" Convert dask Array to dask Dataframe\n\n Parameters\n ----------\n columns: list or string\n list of column names if DataFrame, single string if Series\n index : dask.dataframe.Index, optional\n An optional *dask* Index to use for the output Series or DataFrame.\n\n The default output index depends on whether the array has any unknown\n chunks. If there are any unknown chunks, the output has ``None``\n for all the divisions (one per chunk). If all the chunks are known,\n a default index with known divsions is created.\n\n Specifying ``index`` can be useful if you're conforming a Dask Array\n to an existing dask Series or DataFrame, and you would like the\n indices to match.\n meta : object, optional\n An optional `meta` parameter can be passed for dask\n to specify the concrete dataframe type to use for partitions of\n the Dask dataframe. By default, pandas DataFrame is used.\n\n See Also\n --------\n dask.dataframe.from_dask_array\n \"\"\"\n from ..dataframe import from_dask_array\n\n return from_dask_array(self, columns=columns, index=index, meta=meta)\n\n def __bool__(self):\n if self.size > 1:\n raise ValueError(\n \"The truth value of a {0} is ambiguous. 
\"\n \"Use a.any() or a.all().\".format(self.__class__.__name__)\n )\n else:\n return bool(self.compute())\n\n __nonzero__ = __bool__ # python 2\n\n def _scalarfunc(self, cast_type):\n if self.size > 1:\n raise TypeError(\"Only length-1 arrays can be converted to Python scalars\")\n else:\n return cast_type(self.compute())\n\n def __int__(self):\n return self._scalarfunc(int)\n\n __long__ = __int__ # python 2\n\n def __float__(self):\n return self._scalarfunc(float)\n\n def __complex__(self):\n return self._scalarfunc(complex)\n\n def __setitem__(self, key, value):\n from .routines import where\n\n if isinstance(key, Array):\n if isinstance(value, Array) and value.ndim > 1:\n raise ValueError(\"boolean index array should have 1 dimension\")\n y = where(key, value, self)\n self._meta = y._meta\n self.dask = y.dask\n self.name = y.name\n self._chunks = y.chunks\n return self\n else:\n raise NotImplementedError(\n \"Item assignment with %s not supported\" % type(key)\n )\n\n def __getitem__(self, index):\n # Field access, e.g. x['a'] or x[['a', 'b']]\n if isinstance(index, str) or (\n isinstance(index, list) and index and all(isinstance(i, str) for i in index)\n ):\n if isinstance(index, str):\n dt = self.dtype[index]\n else:\n dt = _make_sliced_dtype(self.dtype, index)\n\n if dt.shape:\n new_axis = list(range(self.ndim, self.ndim + len(dt.shape)))\n chunks = self.chunks + tuple((i,) for i in dt.shape)\n return self.map_blocks(\n getitem, index, dtype=dt.base, chunks=chunks, new_axis=new_axis\n )\n else:\n return self.map_blocks(getitem, index, dtype=dt)\n\n if not isinstance(index, tuple):\n index = (index,)\n\n from .slicing import (\n normalize_index,\n slice_with_int_dask_array,\n slice_with_bool_dask_array,\n )\n\n index2 = normalize_index(index, self.shape)\n\n dependencies = {self.name}\n for i in index2:\n if isinstance(i, Array):\n dependencies.add(i.name)\n\n if any(isinstance(i, Array) and i.dtype.kind in \"iu\" for i in index2):\n self, index2 = slice_with_int_dask_array(self, index2)\n if any(isinstance(i, Array) and i.dtype == bool for i in index2):\n self, index2 = slice_with_bool_dask_array(self, index2)\n\n if all(isinstance(i, slice) and i == slice(None) for i in index2):\n return self\n\n out = \"getitem-\" + tokenize(self, index2)\n dsk, chunks = slice_array(out, self.name, self.chunks, index2)\n\n graph = HighLevelGraph.from_collections(out, dsk, dependencies=[self])\n\n meta = meta_from_array(self._meta, ndim=len(chunks))\n if np.isscalar(meta):\n meta = np.array(meta)\n\n return Array(graph, out, chunks, meta=meta)\n\n def _vindex(self, key):\n if not isinstance(key, tuple):\n key = (key,)\n if any(k is None for k in key):\n raise IndexError(\n \"vindex does not support indexing with None (np.newaxis), \"\n \"got {}\".format(key)\n )\n if all(isinstance(k, slice) for k in key):\n if all(\n k.indices(d) == slice(0, d).indices(d) for k, d in zip(key, self.shape)\n ):\n return self\n raise IndexError(\n \"vindex requires at least one non-slice to vectorize over \"\n \"when the slices are not over the entire array (i.e, x[:]). \"\n \"Use normal slicing instead when only using slices. Got: {}\".format(key)\n )\n return _vindex(self, *key)\n\n @property\n def vindex(self):\n \"\"\"Vectorized indexing with broadcasting.\n\n This is equivalent to numpy's advanced indexing, using arrays that are\n broadcast against each other. 
This allows for pointwise indexing:\n\n >>> x = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])\n >>> x = from_array(x, chunks=2)\n >>> x.vindex[[0, 1, 2], [0, 1, 2]].compute()\n array([1, 5, 9])\n\n Mixed basic/advanced indexing with slices/arrays is also supported. The\n order of dimensions in the result follows those proposed for\n `ndarray.vindex <https://github.com/numpy/numpy/pull/6256>`_:\n the subspace spanned by arrays is followed by all slices.\n\n Note: ``vindex`` provides more general functionality than standard\n indexing, but it also has fewer optimizations and can be significantly\n slower.\n \"\"\"\n return IndexCallable(self._vindex)\n\n def _blocks(self, index):\n from .slicing import normalize_index\n\n if not isinstance(index, tuple):\n index = (index,)\n if sum(isinstance(ind, (np.ndarray, list)) for ind in index) > 1:\n raise ValueError(\"Can only slice with a single list\")\n if any(ind is None for ind in index):\n raise ValueError(\"Slicing with np.newaxis or None is not supported\")\n index = normalize_index(index, self.numblocks)\n index = tuple(slice(k, k + 1) if isinstance(k, Number) else k for k in index)\n\n name = \"blocks-\" + tokenize(self, index)\n\n new_keys = np.array(self.__dask_keys__(), dtype=object)[index]\n\n chunks = tuple(\n tuple(np.array(c)[i].tolist()) for c, i in zip(self.chunks, index)\n )\n\n keys = product(*(range(len(c)) for c in chunks))\n\n layer = {(name,) + key: tuple(new_keys[key].tolist()) for key in keys}\n\n graph = HighLevelGraph.from_collections(name, layer, dependencies=[self])\n return Array(graph, name, chunks, meta=self)\n\n @property\n def blocks(self):\n \"\"\" Slice an array by blocks\n\n This allows blockwise slicing of a Dask array. You can perform normal\n Numpy-style slicing but now rather than slice elements of the array you\n slice along blocks so, for example, ``x.blocks[0, ::2]`` produces a new\n dask array with every other block in the first row of blocks.\n\n You can index blocks in any way that could index a numpy array of shape\n equal to the number of blocks in each dimension, (available as\n array.numblocks). The dimension of the output array will be the same\n as the dimension of this array, even if integer indices are passed.\n This does not support slicing with ``np.newaxis`` or multiple lists.\n\n Examples\n --------\n >>> import dask.array as da\n >>> x = da.arange(10, chunks=2)\n >>> x.blocks[0].compute()\n array([0, 1])\n >>> x.blocks[:3].compute()\n array([0, 1, 2, 3, 4, 5])\n >>> x.blocks[::2].compute()\n array([0, 1, 4, 5, 8, 9])\n >>> x.blocks[[-1, 0]].compute()\n array([8, 9, 0, 1])\n\n Returns\n -------\n A Dask array\n \"\"\"\n return IndexCallable(self._blocks)\n\n @property\n def partitions(self):\n \"\"\"Slice an array by partitions. Alias of dask array .blocks attribute.\n\n This alias allows you to write agnostic code that works with both\n dask arrays and dask dataframes.\n\n This allows blockwise slicing of a Dask array. You can perform normal\n Numpy-style slicing but now rather than slice elements of the array you\n slice along blocks so, for example, ``x.blocks[0, ::2]`` produces a new\n dask array with every other block in the first row of blocks.\n\n You can index blocks in any way that could index a numpy array of shape\n equal to the number of blocks in each dimension, (available as\n array.numblocks). 
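# A usage sketch for the ``vindex`` and ``blocks`` accessors described above:
# ``vindex`` performs NumPy-style pointwise (advanced) indexing, while
# ``blocks`` slices by chunk position rather than by element. Assumes
# dask.array is importable as ``da``.
import numpy as np
import dask.array as da

x = da.from_array(np.arange(9).reshape(3, 3), chunks=2)
print(x.vindex[[0, 1, 2], [0, 1, 2]].compute())   # diagonal -> [0 4 8]
print(x.blocks[0, 0].compute())                   # the top-left 2x2 chunk
# [[0 1]
#  [3 4]]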
The dimension of the output array will be the same\n as the dimension of this array, even if integer indices are passed.\n This does not support slicing with ``np.newaxis`` or multiple lists.\n\n Examples\n --------\n >>> import dask.array as da\n >>> x = da.arange(10, chunks=2)\n >>> x.partitions[0].compute()\n array([0, 1])\n >>> x.partitions[:3].compute()\n array([0, 1, 2, 3, 4, 5])\n >>> x.partitions[::2].compute()\n array([0, 1, 4, 5, 8, 9])\n >>> x.partitions[[-1, 0]].compute()\n array([8, 9, 0, 1])\n >>> all(x.partitions[:].compute() == x.blocks[:].compute())\n True\n\n Returns\n -------\n A Dask array\n \"\"\"\n return self.blocks\n\n @derived_from(np.ndarray)\n def dot(self, other):\n from .routines import tensordot\n\n return tensordot(self, other, axes=((self.ndim - 1,), (other.ndim - 2,)))\n\n @property\n def A(self):\n return self\n\n @property\n def T(self):\n return self.transpose()\n\n @derived_from(np.ndarray)\n def transpose(self, *axes):\n from .routines import transpose\n\n if not axes:\n axes = None\n elif len(axes) == 1 and isinstance(axes[0], Iterable):\n axes = axes[0]\n if (axes == tuple(range(self.ndim))) or (axes == tuple(range(-self.ndim, 0))):\n # no transpose necessary\n return self\n else:\n return transpose(self, axes=axes)\n\n @derived_from(np.ndarray)\n def ravel(self):\n from .routines import ravel\n\n return ravel(self)\n\n flatten = ravel\n\n @derived_from(np.ndarray)\n def choose(self, choices):\n from .routines import choose\n\n return choose(self, choices)\n\n @derived_from(np.ndarray)\n def reshape(self, *shape):\n from .reshape import reshape\n\n if len(shape) == 1 and not isinstance(shape[0], Number):\n shape = shape[0]\n return reshape(self, shape)\n\n def topk(self, k, axis=-1, split_every=None):\n \"\"\"The top k elements of an array.\n\n See ``da.topk`` for docstring\"\"\"\n from .reductions import topk\n\n return topk(self, k, axis=axis, split_every=split_every)\n\n def argtopk(self, k, axis=-1, split_every=None):\n \"\"\"The indices of the top k elements of an array.\n\n See ``da.argtopk`` for docstring\"\"\"\n from .reductions import argtopk\n\n return argtopk(self, k, axis=axis, split_every=split_every)\n\n def astype(self, dtype, **kwargs):\n \"\"\"Copy of the array, cast to a specified type.\n\n Parameters\n ----------\n dtype : str or dtype\n Typecode or data-type to which the array is cast.\n casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional\n Controls what kind of data casting may occur. Defaults to 'unsafe'\n for backwards compatibility.\n\n * 'no' means the data types should not be cast at all.\n * 'equiv' means only byte-order changes are allowed.\n * 'safe' means only casts which can preserve values are allowed.\n * 'same_kind' means only safe casts or casts within a kind,\n like float64 to float32, are allowed.\n * 'unsafe' means any data conversions may be done.\n copy : bool, optional\n By default, astype always returns a newly allocated array. 
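# A usage sketch for a few of the ndarray-style methods defined above
# (``reshape``, ``transpose``/``T``, ``topk``); each returns a new lazy dask
# array. Assumes dask.array is importable as ``da``.
import dask.array as da

x = da.arange(6, chunks=2)
print(x.reshape(2, 3).T.compute())   # [[0 3] [1 4] [2 5]]
print(x.topk(2).compute())           # [5 4] -- the k largest along the last axis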
If this\n is set to False and the `dtype` requirement is satisfied, the input\n array is returned instead of a copy.\n \"\"\"\n # Scalars don't take `casting` or `copy` kwargs - as such we only pass\n # them to `map_blocks` if specified by user (different than defaults).\n extra = set(kwargs) - {\"casting\", \"copy\"}\n if extra:\n raise TypeError(\n \"astype does not take the following keyword \"\n \"arguments: {0!s}\".format(list(extra))\n )\n casting = kwargs.get(\"casting\", \"unsafe\")\n dtype = np.dtype(dtype)\n if self.dtype == dtype:\n return self\n elif not np.can_cast(self.dtype, dtype, casting=casting):\n raise TypeError(\n \"Cannot cast array from {0!r} to {1!r}\"\n \" according to the rule \"\n \"{2!r}\".format(self.dtype, dtype, casting)\n )\n return self.map_blocks(chunk.astype, dtype=dtype, astype_dtype=dtype, **kwargs)\n\n def __abs__(self):\n return elemwise(operator.abs, self)\n\n def __add__(self, other):\n return elemwise(operator.add, self, other)\n\n def __radd__(self, other):\n return elemwise(operator.add, other, self)\n\n def __and__(self, other):\n return elemwise(operator.and_, self, other)\n\n def __rand__(self, other):\n return elemwise(operator.and_, other, self)\n\n def __div__(self, other):\n return elemwise(operator.div, self, other)\n\n def __rdiv__(self, other):\n return elemwise(operator.div, other, self)\n\n def __eq__(self, other):\n return elemwise(operator.eq, self, other)\n\n def __gt__(self, other):\n return elemwise(operator.gt, self, other)\n\n def __ge__(self, other):\n return elemwise(operator.ge, self, other)\n\n def __invert__(self):\n return elemwise(operator.invert, self)\n\n def __lshift__(self, other):\n return elemwise(operator.lshift, self, other)\n\n def __rlshift__(self, other):\n return elemwise(operator.lshift, other, self)\n\n def __lt__(self, other):\n return elemwise(operator.lt, self, other)\n\n def __le__(self, other):\n return elemwise(operator.le, self, other)\n\n def __mod__(self, other):\n return elemwise(operator.mod, self, other)\n\n def __rmod__(self, other):\n return elemwise(operator.mod, other, self)\n\n def __mul__(self, other):\n return elemwise(operator.mul, self, other)\n\n def __rmul__(self, other):\n return elemwise(operator.mul, other, self)\n\n def __ne__(self, other):\n return elemwise(operator.ne, self, other)\n\n def __neg__(self):\n return elemwise(operator.neg, self)\n\n def __or__(self, other):\n return elemwise(operator.or_, self, other)\n\n def __pos__(self):\n return self\n\n def __ror__(self, other):\n return elemwise(operator.or_, other, self)\n\n def __pow__(self, other):\n return elemwise(operator.pow, self, other)\n\n def __rpow__(self, other):\n return elemwise(operator.pow, other, self)\n\n def __rshift__(self, other):\n return elemwise(operator.rshift, self, other)\n\n def __rrshift__(self, other):\n return elemwise(operator.rshift, other, self)\n\n def __sub__(self, other):\n return elemwise(operator.sub, self, other)\n\n def __rsub__(self, other):\n return elemwise(operator.sub, other, self)\n\n def __truediv__(self, other):\n return elemwise(operator.truediv, self, other)\n\n def __rtruediv__(self, other):\n return elemwise(operator.truediv, other, self)\n\n def __floordiv__(self, other):\n return elemwise(operator.floordiv, self, other)\n\n def __rfloordiv__(self, other):\n return elemwise(operator.floordiv, other, self)\n\n def __xor__(self, other):\n return elemwise(operator.xor, self, other)\n\n def __rxor__(self, other):\n return elemwise(operator.xor, other, self)\n\n def 
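# A small sketch of the operator overloads above: arithmetic on dask arrays
# builds a lazy elementwise graph, and ``astype`` is a no-op when the target
# dtype already matches. Assumes dask.array is importable as ``da``.
import dask.array as da

x = da.arange(4, chunks=2)
y = (x + 1) * 2 - x % 3            # all lazy; nothing is computed yet
print(y.compute())                 # [2 3 4 8]
print(x.astype("f8").dtype)        # float64
print(x.astype(x.dtype) is x)      # True -- same dtype returns self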
__matmul__(self, other):\n from .routines import matmul\n\n return matmul(self, other)\n\n def __rmatmul__(self, other):\n from .routines import matmul\n\n return matmul(other, self)\n\n def __divmod__(self, other):\n from .ufunc import divmod\n\n return divmod(self, other)\n\n def __rdivmod__(self, other):\n from .ufunc import divmod\n\n return divmod(other, self)\n\n @derived_from(np.ndarray)\n def any(self, axis=None, keepdims=False, split_every=None, out=None):\n from .reductions import any\n\n return any(self, axis=axis, keepdims=keepdims, split_every=split_every, out=out)\n\n @derived_from(np.ndarray)\n def all(self, axis=None, keepdims=False, split_every=None, out=None):\n from .reductions import all\n\n return all(self, axis=axis, keepdims=keepdims, split_every=split_every, out=out)\n\n @derived_from(np.ndarray)\n def min(self, axis=None, keepdims=False, split_every=None, out=None):\n from .reductions import min\n\n return min(self, axis=axis, keepdims=keepdims, split_every=split_every, out=out)\n\n @derived_from(np.ndarray)\n def max(self, axis=None, keepdims=False, split_every=None, out=None):\n from .reductions import max\n\n return max(self, axis=axis, keepdims=keepdims, split_every=split_every, out=out)\n\n @derived_from(np.ndarray)\n def argmin(self, axis=None, split_every=None, out=None):\n from .reductions import argmin\n\n return argmin(self, axis=axis, split_every=split_every, out=out)\n\n @derived_from(np.ndarray)\n def argmax(self, axis=None, split_every=None, out=None):\n from .reductions import argmax\n\n return argmax(self, axis=axis, split_every=split_every, out=out)\n\n @derived_from(np.ndarray)\n def sum(self, axis=None, dtype=None, keepdims=False, split_every=None, out=None):\n from .reductions import sum\n\n return sum(\n self,\n axis=axis,\n dtype=dtype,\n keepdims=keepdims,\n split_every=split_every,\n out=out,\n )\n\n @derived_from(np.ndarray)\n def trace(self, offset=0, axis1=0, axis2=1, dtype=None):\n from .reductions import trace\n\n return trace(self, offset=offset, axis1=axis1, axis2=axis2, dtype=dtype)\n\n @derived_from(np.ndarray)\n def prod(self, axis=None, dtype=None, keepdims=False, split_every=None, out=None):\n from .reductions import prod\n\n return prod(\n self,\n axis=axis,\n dtype=dtype,\n keepdims=keepdims,\n split_every=split_every,\n out=out,\n )\n\n @derived_from(np.ndarray)\n def mean(self, axis=None, dtype=None, keepdims=False, split_every=None, out=None):\n from .reductions import mean\n\n return mean(\n self,\n axis=axis,\n dtype=dtype,\n keepdims=keepdims,\n split_every=split_every,\n out=out,\n )\n\n @derived_from(np.ndarray)\n def std(\n self, axis=None, dtype=None, keepdims=False, ddof=0, split_every=None, out=None\n ):\n from .reductions import std\n\n return std(\n self,\n axis=axis,\n dtype=dtype,\n keepdims=keepdims,\n ddof=ddof,\n split_every=split_every,\n out=out,\n )\n\n @derived_from(np.ndarray)\n def var(\n self, axis=None, dtype=None, keepdims=False, ddof=0, split_every=None, out=None\n ):\n from .reductions import var\n\n return var(\n self,\n axis=axis,\n dtype=dtype,\n keepdims=keepdims,\n ddof=ddof,\n split_every=split_every,\n out=out,\n )\n\n def moment(\n self,\n order,\n axis=None,\n dtype=None,\n keepdims=False,\n ddof=0,\n split_every=None,\n out=None,\n ):\n \"\"\"Calculate the nth centralized moment.\n\n Parameters\n ----------\n order : int\n Order of the moment that is returned, must be >= 2.\n axis : int, optional\n Axis along which the central moment is computed. 
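# A usage sketch for the reduction methods defined above; they mirror the
# NumPy signatures (``axis``, ``keepdims``, ...) and stay lazy until computed.
# Assumes dask.array is importable as ``da``.
import numpy as np
import dask.array as da

x = da.from_array(np.arange(12).reshape(3, 4), chunks=(2, 2))
print(x.sum().compute())                    # 66
print(x.mean(axis=0).compute())             # [4. 5. 6. 7.]
print(x.max(axis=1, keepdims=True).shape)   # (3, 1)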
The default is to\n compute the moment of the flattened array.\n dtype : data-type, optional\n Type to use in computing the moment. For arrays of integer type the\n default is float64; for arrays of float types it is the same as the\n array type.\n keepdims : bool, optional\n If this is set to True, the axes which are reduced are left in the\n result as dimensions with size one. With this option, the result\n will broadcast correctly against the original array.\n ddof : int, optional\n \"Delta Degrees of Freedom\": the divisor used in the calculation is\n N - ddof, where N represents the number of elements. By default\n ddof is zero.\n\n Returns\n -------\n moment : ndarray\n\n References\n ----------\n .. [1] Pebay, Philippe (2008), \"Formulas for Robust, One-Pass Parallel\n Computation of Covariances and Arbitrary-Order Statistical Moments\",\n Technical Report SAND2008-6212, Sandia National Laboratories.\n\n \"\"\"\n\n from .reductions import moment\n\n return moment(\n self,\n order,\n axis=axis,\n dtype=dtype,\n keepdims=keepdims,\n ddof=ddof,\n split_every=split_every,\n out=out,\n )\n\n @wraps(map_blocks)\n def map_blocks(self, func, *args, **kwargs):\n return map_blocks(func, self, *args, **kwargs)\n\n def map_overlap(self, func, depth, boundary=None, trim=True, **kwargs):\n \"\"\" Map a function over blocks of the array with some overlap\n\n We share neighboring zones between blocks of the array, then map a\n function, then trim away the neighboring strips.\n\n Parameters\n ----------\n func: function\n The function to apply to each extended block\n depth: int, tuple, or dict\n The number of elements that each block should share with its neighbors\n If a tuple or dict then this can be different per axis\n boundary: str, tuple, dict\n How to handle the boundaries.\n Values include 'reflect', 'periodic', 'nearest', 'none',\n or any constant value like 0 or np.nan\n trim: bool\n Whether or not to trim ``depth`` elements from each block after\n calling the map function.\n Set this to False if your mapping function already does this for you\n **kwargs:\n Other keyword arguments valid in ``map_blocks``\n\n Examples\n --------\n >>> x = np.array([1, 1, 2, 3, 3, 3, 2, 1, 1])\n >>> x = from_array(x, chunks=5)\n >>> def derivative(x):\n ... 
return x - np.roll(x, 1)\n\n >>> y = x.map_overlap(derivative, depth=1, boundary=0)\n >>> y.compute()\n array([ 1, 0, 1, 1, 0, 0, -1, -1, 0])\n\n >>> import dask.array as da\n >>> x = np.arange(16).reshape((4, 4))\n >>> d = da.from_array(x, chunks=(2, 2))\n >>> d.map_overlap(lambda x: x + x.size, depth=1).compute()\n array([[16, 17, 18, 19],\n [20, 21, 22, 23],\n [24, 25, 26, 27],\n [28, 29, 30, 31]])\n\n >>> func = lambda x: x + x.size\n >>> depth = {0: 1, 1: 1}\n >>> boundary = {0: 'reflect', 1: 'none'}\n >>> d.map_overlap(func, depth, boundary).compute() # doctest: +NORMALIZE_WHITESPACE\n array([[12, 13, 14, 15],\n [16, 17, 18, 19],\n [20, 21, 22, 23],\n [24, 25, 26, 27]])\n \"\"\"\n from .overlap import map_overlap\n\n return map_overlap(\n func, self, depth=depth, boundary=boundary, trim=trim, **kwargs\n )\n\n @derived_from(np.ndarray)\n def cumsum(self, axis, dtype=None, out=None):\n from .reductions import cumsum\n\n return cumsum(self, axis, dtype, out=out)\n\n @derived_from(np.ndarray)\n def cumprod(self, axis, dtype=None, out=None):\n from .reductions import cumprod\n\n return cumprod(self, axis, dtype, out=out)\n\n @derived_from(np.ndarray)\n def squeeze(self, axis=None):\n from .routines import squeeze\n\n return squeeze(self, axis)\n\n def rechunk(self, chunks=\"auto\", threshold=None, block_size_limit=None):\n \"\"\" See da.rechunk for docstring \"\"\"\n from . import rechunk # avoid circular import\n\n return rechunk(self, chunks, threshold, block_size_limit)\n\n @property\n def real(self):\n from .ufunc import real\n\n return real(self)\n\n @property\n def imag(self):\n from .ufunc import imag\n\n return imag(self)\n\n def conj(self):\n from .ufunc import conj\n\n return conj(self)\n\n @derived_from(np.ndarray)\n def clip(self, min=None, max=None):\n from .ufunc import clip\n\n return clip(self, min, max)\n\n def view(self, dtype=None, order=\"C\"):\n \"\"\" Get a view of the array as a new data type\n\n Parameters\n ----------\n dtype:\n The dtype by which to view the array.\n The default, None, results in the view having the same data-type\n as the original array.\n order: string\n 'C' or 'F' (Fortran) ordering\n\n This reinterprets the bytes of the array under a new dtype. If that\n dtype does not have the same size as the original array then the shape\n will change.\n\n Beware that both numpy and dask.array can behave oddly when taking\n shape-changing views of arrays under Fortran ordering. Under some\n versions of NumPy this function will fail when taking shape-changing\n views of Fortran ordered arrays if the first dimension has chunks of\n size one.\n \"\"\"\n if dtype is None:\n dtype = self.dtype\n else:\n dtype = np.dtype(dtype)\n mult = self.dtype.itemsize / dtype.itemsize\n\n if order == \"C\":\n chunks = self.chunks[:-1] + (\n tuple(ensure_int(c * mult) for c in self.chunks[-1]),\n )\n elif order == \"F\":\n chunks = (\n tuple(ensure_int(c * mult) for c in self.chunks[0]),\n ) + self.chunks[1:]\n else:\n raise ValueError(\"Order must be one of 'C' or 'F'\")\n\n return self.map_blocks(\n chunk.view, dtype, order=order, dtype=dtype, chunks=chunks\n )\n\n @derived_from(np.ndarray)\n def swapaxes(self, axis1, axis2):\n from .routines import swapaxes\n\n return swapaxes(self, axis1, axis2)\n\n @derived_from(np.ndarray)\n def round(self, decimals=0):\n from .routines import round\n\n return round(self, decimals=decimals)\n\n def copy(self):\n \"\"\"\n Copy array. 
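# A usage sketch for the cumulative reductions and ``rechunk`` above. Assumes
# dask.array is importable as ``da``.
import dask.array as da

x = da.arange(10, chunks=2)
print(x.cumsum(axis=0).compute())   # [ 0  1  3  6 10 15 21 28 36 45]
y = x.rechunk(5)                    # same data, new block structure
print(y.chunks)                     # ((5, 5),)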
This is a no-op for dask.arrays, which are immutable\n \"\"\"\n if self.npartitions == 1:\n return self.map_blocks(M.copy)\n else:\n return Array(self.dask, self.name, self.chunks, meta=self)\n\n def __deepcopy__(self, memo):\n c = self.copy()\n memo[id(self)] = c\n return c\n\n def to_delayed(self, optimize_graph=True):\n \"\"\"Convert into an array of ``dask.delayed`` objects, one per chunk.\n\n Parameters\n ----------\n optimize_graph : bool, optional\n If True [default], the graph is optimized before converting into\n ``dask.delayed`` objects.\n\n See Also\n --------\n dask.array.from_delayed\n \"\"\"\n keys = self.__dask_keys__()\n graph = self.__dask_graph__()\n if optimize_graph:\n graph = self.__dask_optimize__(graph, keys) # TODO, don't collape graph\n name = \"delayed-\" + self.name\n graph = HighLevelGraph.from_collections(name, graph, dependencies=())\n L = ndeepmap(self.ndim, lambda k: Delayed(k, graph), keys)\n return np.array(L, dtype=object)\n\n @derived_from(np.ndarray)\n def repeat(self, repeats, axis=None):\n from .creation import repeat\n\n return repeat(self, repeats, axis=axis)\n\n @derived_from(np.ndarray)\n def nonzero(self):\n from .routines import nonzero\n\n return nonzero(self)\n\n def to_zarr(self, *args, **kwargs):\n \"\"\"Save array to the zarr storage format\n\n See https://zarr.readthedocs.io for details about the format.\n\n See function ``to_zarr()`` for parameters.\n \"\"\"\n return to_zarr(self, *args, **kwargs)\n\n def to_tiledb(self, uri, *args, **kwargs):\n \"\"\"Save array to the TileDB storage manager\n\n See function ``to_tiledb()`` for argument documentation.\n\n See https://docs.tiledb.io for details about the format and engine.\n \"\"\"\n from .tiledb_io import to_tiledb\n\n return to_tiledb(self, uri, *args, **kwargs)\n\n\ndef ensure_int(f):\n i = int(f)\n if i != f:\n raise ValueError(\"Could not coerce %f to integer\" % f)\n return i\n\n\ndef normalize_chunks(chunks, shape=None, limit=None, dtype=None, previous_chunks=None):\n \"\"\" Normalize chunks to tuple of tuples\n\n This takes in a variety of input types and information and produces a full\n tuple-of-tuples result for chunks, suitable to be passed to Array or\n rechunk or any other operation that creates a Dask array.\n\n Parameters\n ----------\n chunks: tuple, int, dict, or string\n The chunks to be normalized. See examples below for more details\n shape: Tuple[int]\n The shape of the array\n limit: int (optional)\n The maximum block size to target in bytes,\n if freedom is given to choose\n dtype: np.dtype\n previous_chunks: Tuple[Tuple[int]] optional\n Chunks from a previous array that we should use for inspiration when\n rechunking auto dimensions. 
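# A usage sketch for ``to_delayed`` above: each chunk becomes one
# ``dask.delayed`` object, arranged in a NumPy object array with one entry
# per block. Assumes dask.array is importable as ``da``.
import dask.array as da

x = da.arange(6, chunks=2)
parts = x.to_delayed()        # object ndarray of Delayed, one per block
print(parts.shape)            # (3,)
print(parts[1].compute())     # [2 3]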
If not provided but auto-chunking exists\n then auto-dimensions will prefer square-like chunk shapes.\n\n Examples\n --------\n Specify uniform chunk sizes\n\n >>> normalize_chunks((2, 2), shape=(5, 6))\n ((2, 2, 1), (2, 2, 2))\n\n Also passes through fully explicit tuple-of-tuples\n\n >>> normalize_chunks(((2, 2, 1), (2, 2, 2)), shape=(5, 6))\n ((2, 2, 1), (2, 2, 2))\n\n Cleans up lists to tuples\n\n >>> normalize_chunks([[2, 2], [3, 3]])\n ((2, 2), (3, 3))\n\n Expands integer inputs 10 -> (10, 10)\n\n >>> normalize_chunks(10, shape=(30, 5))\n ((10, 10, 10), (5,))\n\n Expands dict inputs\n\n >>> normalize_chunks({0: 2, 1: 3}, shape=(6, 6))\n ((2, 2, 2), (3, 3))\n\n The values -1 and None get mapped to full size\n\n >>> normalize_chunks((5, -1), shape=(10, 10))\n ((5, 5), (10,))\n\n Use the value \"auto\" to automatically determine chunk sizes along certain\n dimensions. This uses the ``limit=`` and ``dtype=`` keywords to\n determine how large to make the chunks. The term \"auto\" can be used\n anywhere an integer can be used. See array chunking documentation for more\n information.\n\n >>> normalize_chunks((\"auto\",), shape=(20,), limit=5, dtype='uint8')\n ((5, 5, 5, 5),)\n\n You can also use byte sizes (see ``dask.utils.parse_bytes``) in place of\n \"auto\" to ask for a particular size\n\n >>> normalize_chunks(\"1kiB\", shape=(2000,), dtype='float32')\n ((250, 250, 250, 250, 250, 250, 250, 250),)\n\n Respects null dimensions\n\n >>> normalize_chunks((), shape=(0, 0))\n ((0,), (0,))\n \"\"\"\n if dtype and not isinstance(dtype, np.dtype):\n dtype = np.dtype(dtype)\n if chunks is None:\n raise ValueError(CHUNKS_NONE_ERROR_MESSAGE)\n if isinstance(chunks, list):\n chunks = tuple(chunks)\n if isinstance(chunks, (Number, str)):\n chunks = (chunks,) * len(shape)\n if isinstance(chunks, dict):\n chunks = tuple(chunks.get(i, None) for i in range(len(shape)))\n if isinstance(chunks, np.ndarray):\n chunks = chunks.tolist()\n if not chunks and shape and all(s == 0 for s in shape):\n chunks = ((0,),) * len(shape)\n\n if (\n shape\n and len(shape) == 1\n and len(chunks) > 1\n and all(isinstance(c, (Number, str)) for c in chunks)\n ):\n chunks = (chunks,)\n\n if shape and len(chunks) != len(shape):\n raise ValueError(\n \"Chunks and shape must be of the same length/dimension. 
\"\n \"Got chunks=%s, shape=%s\" % (chunks, shape)\n )\n if -1 in chunks or None in chunks:\n chunks = tuple(s if c == -1 or c is None else c for c, s in zip(chunks, shape))\n\n # If specifying chunk size in bytes, use that value to set the limit.\n # Verify there is only one consistent value of limit or chunk-bytes used.\n for c in chunks:\n if isinstance(c, str) and c != \"auto\":\n parsed = parse_bytes(c)\n if limit is None:\n limit = parsed\n elif parsed != limit:\n raise ValueError(\n \"Only one consistent value of limit or chunk is allowed.\"\n \"Used %s != %s\" % (parsed, limit)\n )\n # Substitute byte limits with 'auto' now that limit is set.\n chunks = tuple(\"auto\" if isinstance(c, str) and c != \"auto\" else c for c in chunks)\n\n if any(c == \"auto\" for c in chunks):\n chunks = auto_chunks(chunks, shape, limit, dtype, previous_chunks)\n\n if shape is not None:\n chunks = tuple(c if c not in {None, -1} else s for c, s in zip(chunks, shape))\n\n if chunks and shape is not None:\n chunks = sum(\n (\n blockdims_from_blockshape((s,), (c,))\n if not isinstance(c, (tuple, list))\n else (c,)\n for s, c in zip(shape, chunks)\n ),\n (),\n )\n for c in chunks:\n if not c:\n raise ValueError(\n \"Empty tuples are not allowed in chunks. Express \"\n \"zero length dimensions with 0(s) in chunks\"\n )\n\n if shape is not None:\n if len(chunks) != len(shape):\n raise ValueError(\n \"Input array has %d dimensions but the supplied \"\n \"chunks has only %d dimensions\" % (len(shape), len(chunks))\n )\n if not all(\n c == s or (math.isnan(c) or math.isnan(s))\n for c, s in zip(map(sum, chunks), shape)\n ):\n raise ValueError(\n \"Chunks do not add up to shape. \"\n \"Got chunks=%s, shape=%s\" % (chunks, shape)\n )\n\n return tuple(tuple(int(x) if not math.isnan(x) else x for x in c) for c in chunks)\n\n\ndef _compute_multiplier(limit: int, dtype, largest_block: int, result):\n \"\"\"\n Utility function for auto_chunk, to fin how much larger or smaller the ideal\n chunk size is relative to what we have now.\n \"\"\"\n return (\n limit\n / dtype.itemsize\n / largest_block\n / np.prod(list(r if r != 0 else 1 for r in result.values()))\n )\n\n\ndef auto_chunks(chunks, shape, limit, dtype, previous_chunks=None):\n \"\"\" Determine automatic chunks\n\n This takes in a chunks value that contains ``\"auto\"`` values in certain\n dimensions and replaces those values with concrete dimension sizes that try\n to get chunks to be of a certain size in bytes, provided by the ``limit=``\n keyword. 
If multiple dimensions are marked as ``\"auto\"`` then they will\n all respond to meet the desired byte limit, trying to respect the aspect\n ratio of their dimensions in ``previous_chunks=``, if given.\n\n Parameters\n ----------\n chunks: Tuple\n A tuple of either dimensions or tuples of explicit chunk dimensions\n Some entries should be \"auto\"\n shape: Tuple[int]\n limit: int, str\n The maximum allowable size of a chunk in bytes\n previous_chunks: Tuple[Tuple[int]]\n\n See also\n --------\n normalize_chunks: for full docstring and parameters\n \"\"\"\n if previous_chunks is not None:\n previous_chunks = tuple(\n c if isinstance(c, tuple) else (c,) for c in previous_chunks\n )\n chunks = list(chunks)\n\n autos = {i for i, c in enumerate(chunks) if c == \"auto\"}\n if not autos:\n return tuple(chunks)\n\n if limit is None:\n limit = config.get(\"array.chunk-size\")\n if isinstance(limit, str):\n limit = parse_bytes(limit)\n\n if dtype is None:\n raise TypeError(\"DType must be known for auto-chunking\")\n\n if dtype.hasobject:\n raise NotImplementedError(\n \"Can not use auto rechunking with object dtype. \"\n \"We are unable to estimate the size in bytes of object data\"\n )\n\n for x in tuple(chunks) + tuple(shape):\n if (\n isinstance(x, Number)\n and np.isnan(x)\n or isinstance(x, tuple)\n and np.isnan(x).any()\n ):\n raise ValueError(\n \"Can not perform automatic rechunking with unknown \"\n \"(nan) chunk sizes.%s\" % unknown_chunk_message\n )\n\n limit = max(1, limit)\n\n largest_block = np.prod(\n [cs if isinstance(cs, Number) else max(cs) for cs in chunks if cs != \"auto\"]\n )\n\n if previous_chunks:\n # Base ideal ratio on the median chunk size of the previous chunks\n result = {a: np.median(previous_chunks[a]) for a in autos}\n\n ideal_shape = []\n for i, s in enumerate(shape):\n chunk_frequencies = frequencies(previous_chunks[i])\n mode, count = max(chunk_frequencies.items(), key=lambda kv: kv[1])\n if mode > 1 and count >= len(previous_chunks[i]) / 2:\n ideal_shape.append(mode)\n else:\n ideal_shape.append(s)\n\n # How much larger or smaller the ideal chunk size is relative to what we have now\n multiplier = _compute_multiplier(limit, dtype, largest_block, result)\n\n last_multiplier = 0\n last_autos = set()\n while (\n multiplier != last_multiplier or autos != last_autos\n ): # while things change\n last_multiplier = multiplier # record previous values\n last_autos = set(autos) # record previous values\n\n # Expand or contract each of the dimensions appropriately\n for a in sorted(autos):\n if ideal_shape[a] == 0:\n result[a] = 0\n continue\n proposed = result[a] * multiplier ** (1 / len(autos))\n if proposed > shape[a]: # we've hit the shape boundary\n autos.remove(a)\n largest_block *= shape[a]\n chunks[a] = shape[a]\n del result[a]\n else:\n result[a] = round_to(proposed, ideal_shape[a])\n\n # recompute how much multiplier we have left, repeat\n multiplier = _compute_multiplier(limit, dtype, largest_block, result)\n\n for k, v in result.items():\n chunks[k] = v\n return tuple(chunks)\n\n else:\n size = (limit / dtype.itemsize / largest_block) ** (1 / len(autos))\n small = [i for i in autos if shape[i] < size]\n if small:\n for i in small:\n chunks[i] = (shape[i],)\n return auto_chunks(chunks, shape, limit, dtype)\n\n for i in autos:\n chunks[i] = round_to(size, shape[i])\n\n return tuple(chunks)\n\n\ndef round_to(c, s):\n \"\"\" Return a chunk dimension that is close to an even multiple or factor\n\n We want values for c that are nicely aligned with s.\n\n If c is 
smaller than s then we want the largest factor of s that is less than the\n desired chunk size, but not less than half, which is too much. If no such\n factor exists then we just go with the original chunk size and accept an\n uneven chunk at the end.\n\n If c is larger than s then we want the largest multiple of s that is still\n smaller than c.\n \"\"\"\n if c <= s:\n try:\n return max(f for f in factors(s) if c / 2 <= f <= c)\n except ValueError: # no matching factors within factor of two\n return max(1, int(c))\n else:\n return c // s * s\n\n\ndef _get_chunk_shape(a):\n s = np.asarray(a.shape, dtype=int)\n return s[len(s) * (None,) + (slice(None),)]\n\n\ndef from_array(\n x,\n chunks=\"auto\",\n name=None,\n lock=False,\n asarray=None,\n fancy=True,\n getitem=None,\n meta=None,\n):\n \"\"\" Create dask array from something that looks like an array\n\n Input must have a ``.shape``, ``.ndim``, ``.dtype`` and support numpy-style slicing.\n\n Parameters\n ----------\n x : array_like\n chunks : int, tuple\n How to chunk the array. Must be one of the following forms:\n\n - A blocksize like 1000.\n - A blockshape like (1000, 1000).\n - Explicit sizes of all blocks along all dimensions like\n ((1000, 1000, 500), (400, 400)).\n - A size in bytes, like \"100 MiB\" which will choose a uniform\n block-like shape\n - The word \"auto\" which acts like the above, but uses a configuration\n value ``array.chunk-size`` for the chunk size\n\n -1 or None as a blocksize indicate the size of the corresponding\n dimension.\n name : str, optional\n The key name to use for the array. Defaults to a hash of ``x``.\n By default, hash uses python's standard sha1. This behaviour can be\n changed by installing cityhash, xxhash or murmurhash. If installed,\n a large-factor speedup can be obtained in the tokenisation step.\n Use ``name=False`` to generate a random name instead of hashing (fast)\n\n .. note::\n\n Because this ``name`` is used as the key in task graphs, you should\n ensure that it uniquely identifies the data contained within. If\n you'd like to provide a descriptive name that is still unique, combine\n the descriptive name with :func:`dask.base.tokenize` of the\n ``array_like``. See :ref:`graphs` for more.\n\n lock : bool or Lock, optional\n If ``x`` doesn't support concurrent reads then provide a lock here, or\n pass in True to have dask.array create one for you.\n asarray : bool, optional\n If True then call np.asarray on chunks to convert them to numpy arrays.\n If False then chunks are passed through unchanged.\n If None (default) then we use True if the ``__array_function__`` method\n is undefined.\n fancy : bool, optional\n If ``x`` doesn't support fancy indexing (e.g. indexing with lists or\n arrays) then set to False. Default is True.\n meta : Array-like, optional\n The metadata for the resulting dask array. 
This is the kind of array\n that will result from slicing the input array.\n Defaults to the input array.\n\n Examples\n --------\n\n >>> x = h5py.File('...')['/data/path'] # doctest: +SKIP\n >>> a = da.from_array(x, chunks=(1000, 1000)) # doctest: +SKIP\n\n If your underlying datastore does not support concurrent reads then include\n the ``lock=True`` keyword argument or ``lock=mylock`` if you want multiple\n arrays to coordinate around the same lock.\n\n >>> a = da.from_array(x, chunks=(1000, 1000), lock=True) # doctest: +SKIP\n\n If your underlying datastore has a ``.chunks`` attribute (as h5py and zarr\n datasets do) then a multiple of that chunk shape will be used if you\n do not provide a chunk shape.\n\n >>> a = da.from_array(x, chunks='auto') # doctest: +SKIP\n >>> a = da.from_array(x, chunks='100 MiB') # doctest: +SKIP\n >>> a = da.from_array(x) # doctest: +SKIP\n\n If providing a name, ensure that it is unique\n\n >>> import dask.base\n >>> token = dask.base.tokenize(x) # doctest: +SKIP\n >>> a = da.from_array('myarray-' + token) # doctest: +SKIP\n \"\"\"\n if isinstance(x, Array):\n raise ValueError(\n \"Array is already a dask array. Use 'asarray' or \" \"'rechunk' instead.\"\n )\n elif is_dask_collection(x):\n warnings.warn(\n \"Passing an object to dask.array.from_array which is already a \"\n \"Dask collection. This can lead to unexpected behavior.\"\n )\n\n if isinstance(x, (list, tuple, memoryview) + np.ScalarType):\n x = np.array(x)\n\n if asarray is None:\n asarray = not hasattr(x, \"__array_function__\")\n\n previous_chunks = getattr(x, \"chunks\", None)\n\n chunks = normalize_chunks(\n chunks, x.shape, dtype=x.dtype, previous_chunks=previous_chunks\n )\n\n if name in (None, True):\n token = tokenize(x, chunks)\n original_name = \"array-original-\" + token\n name = name or \"array-\" + token\n elif name is False:\n original_name = name = \"array-\" + str(uuid.uuid1())\n else:\n original_name = name\n\n if lock is True:\n lock = SerializableLock()\n\n # Always use the getter for h5py etc. Not using isinstance(x, np.ndarray)\n # because np.matrix is a subclass of np.ndarray.\n if type(x) is np.ndarray and all(len(c) == 1 for c in chunks):\n # No slicing needed\n dsk = {(name,) + (0,) * x.ndim: x}\n else:\n if getitem is None:\n if type(x) is np.ndarray and not lock:\n # simpler and cleaner, but missing all the nuances of getter\n getitem = operator.getitem\n elif fancy:\n getitem = getter\n else:\n getitem = getter_nofancy\n\n dsk = getem(\n original_name,\n chunks,\n getitem=getitem,\n shape=x.shape,\n out_name=name,\n lock=lock,\n asarray=asarray,\n dtype=x.dtype,\n )\n dsk[original_name] = x\n\n # Workaround for TileDB, its indexing is 1-based,\n # and doesn't seems to support 0-length slicing\n if x.__class__.__module__.split(\".\")[0] == \"tiledb\" and hasattr(x, \"_ctx_\"):\n return Array(dsk, name, chunks, dtype=x.dtype)\n\n if meta is None:\n meta = x\n\n return Array(dsk, name, chunks, meta=meta, dtype=getattr(x, \"dtype\", None))\n\n\ndef from_zarr(\n url, component=None, storage_options=None, chunks=None, name=None, **kwargs\n):\n \"\"\"Load array from the zarr storage format\n\n See https://zarr.readthedocs.io for details about the format.\n\n Parameters\n ----------\n url: Zarr Array or str or MutableMapping\n Location of the data. A URL can include a protocol specifier like s3://\n for remote data. 
Can also be any MutableMapping instance, which should\n be serializable if used in multiple processes.\n component: str or None\n If the location is a zarr group rather than an array, this is the\n subcomponent that should be loaded, something like ``'foo/bar'``.\n storage_options: dict\n Any additional parameters for the storage backend (ignored for local\n paths)\n chunks: tuple of ints or tuples of ints\n Passed to ``da.from_array``, allows setting the chunks on\n initialisation, if the chunking scheme in the on-disc dataset is not\n optimal for the calculations to follow.\n name : str, optional\n An optional keyname for the array. Defaults to hashing the input\n kwargs: passed to ``zarr.Array``.\n \"\"\"\n import zarr\n\n storage_options = storage_options or {}\n if isinstance(url, zarr.Array):\n z = url\n elif isinstance(url, str):\n from ..bytes.core import get_mapper\n\n mapper = get_mapper(url, **storage_options)\n z = zarr.Array(mapper, read_only=True, path=component, **kwargs)\n else:\n mapper = url\n z = zarr.Array(mapper, read_only=True, path=component, **kwargs)\n chunks = chunks if chunks is not None else z.chunks\n if name is None:\n name = \"from-zarr-\" + tokenize(z, component, storage_options, chunks, **kwargs)\n return from_array(z, chunks, name=name)\n\n\ndef to_zarr(\n arr,\n url,\n component=None,\n storage_options=None,\n overwrite=False,\n compute=True,\n return_stored=False,\n **kwargs,\n):\n \"\"\"Save array to the zarr storage format\n\n See https://zarr.readthedocs.io for details about the format.\n\n Parameters\n ----------\n arr: dask.array\n Data to store\n url: Zarr Array or str or MutableMapping\n Location of the data. A URL can include a protocol specifier like s3://\n for remote data. Can also be any MutableMapping instance, which should\n be serializable if used in multiple processes.\n component: str or None\n If the location is a zarr group rather than an array, this is the\n subcomponent that should be created/over-written.\n storage_options: dict\n Any additional parameters for the storage backend (ignored for local\n paths)\n overwrite: bool\n If given array already exists, overwrite=False will cause an error,\n where overwrite=True will replace the existing data. 
Note that this\n check is done at computation time, not during graph creation.\n compute, return_stored: see ``store()``\n kwargs: passed to the ``zarr.create()`` function, e.g., compression options\n\n Raises\n ------\n ValueError\n If ``arr`` has unknown chunk sizes, which is not supported by Zarr.\n\n See Also\n --------\n dask.array.Array.compute_chunk_sizes\n\n \"\"\"\n import zarr\n\n if np.isnan(arr.shape).any():\n raise ValueError(\n \"Saving a dask array with unknown chunk sizes is not \"\n \"currently supported by Zarr.%s\" % unknown_chunk_message\n )\n\n if isinstance(url, zarr.Array):\n z = url\n if isinstance(z.store, (dict, zarr.DictStore)) and \"distributed\" in config.get(\n \"scheduler\", \"\"\n ):\n raise RuntimeError(\n \"Cannot store into in memory Zarr Array using \"\n \"the Distributed Scheduler.\"\n )\n arr = arr.rechunk(z.chunks)\n return arr.store(z, lock=False, compute=compute, return_stored=return_stored)\n\n if not _check_regular_chunks(arr.chunks):\n raise ValueError(\n \"Attempt to save array to zarr with irregular \"\n \"chunking, please call `arr.rechunk(...)` first.\"\n )\n\n storage_options = storage_options or {}\n\n if isinstance(url, str):\n from ..bytes.core import get_mapper\n\n mapper = get_mapper(url, **storage_options)\n else:\n # assume the object passed is already a mapper\n mapper = url\n\n chunks = [c[0] for c in arr.chunks]\n\n # The zarr.create function has the side-effect of immediately\n # creating metadata on disk. This may not be desired,\n # particularly if compute=False. The caller may be creating many\n # arrays on a slow filesystem, with the desire that any I/O be\n # sharded across workers (not done serially on the originating\n # machine). Or the caller may decide later to not to do this\n # computation, and so nothing should be written to disk.\n z = delayed(zarr.create)(\n shape=arr.shape,\n chunks=chunks,\n dtype=arr.dtype,\n store=mapper,\n path=component,\n overwrite=overwrite,\n **kwargs,\n )\n return arr.store(z, lock=False, compute=compute, return_stored=return_stored)\n\n\ndef _check_regular_chunks(chunkset):\n \"\"\"Check if the chunks are regular\n\n \"Regular\" in this context means that along every axis, the chunks all\n have the same size, except the last one, which may be smaller\n\n Parameters\n ----------\n chunkset: tuple of tuples of ints\n From the ``.chunks`` attribute of an ``Array``\n\n Returns\n -------\n True if chunkset passes, else False\n\n Examples\n --------\n >>> import dask.array as da\n >>> arr = da.zeros(10, chunks=(5, ))\n >>> _check_regular_chunks(arr.chunks)\n True\n\n >>> arr = da.zeros(10, chunks=((3, 3, 3, 1), ))\n >>> _check_regular_chunks(arr.chunks)\n True\n\n >>> arr = da.zeros(10, chunks=((3, 1, 3, 3), ))\n >>> _check_regular_chunks(arr.chunks)\n False\n \"\"\"\n for chunks in chunkset:\n if len(chunks) == 1:\n continue\n if len(set(chunks[:-1])) > 1:\n return False\n if chunks[-1] > chunks[0]:\n return False\n return True\n\n\ndef from_delayed(value, shape, dtype=None, meta=None, name=None):\n \"\"\" Create a dask array from a dask delayed value\n\n This routine is useful for constructing dask arrays in an ad-hoc fashion\n using dask delayed, particularly when combined with stack and concatenate.\n\n The dask array will consist of a single chunk.\n\n Examples\n --------\n >>> import dask\n >>> import dask.array as da\n >>> value = dask.delayed(np.ones)(5)\n >>> array = da.from_delayed(value, (5,), dtype=float)\n >>> array\n dask.array<from-value, shape=(5,), dtype=float64, 
chunksize=(5,), chunktype=numpy.ndarray>\n >>> array.compute()\n array([1., 1., 1., 1., 1.])\n \"\"\"\n from ..delayed import delayed, Delayed\n\n if not isinstance(value, Delayed) and hasattr(value, \"key\"):\n value = delayed(value)\n\n name = name or \"from-value-\" + tokenize(value, shape, dtype, meta)\n dsk = {(name,) + (0,) * len(shape): value.key}\n chunks = tuple((d,) for d in shape)\n # TODO: value._key may not be the name of the layer in value.dask\n # This should be fixed after we build full expression graphs\n graph = HighLevelGraph.from_collections(name, dsk, dependencies=[value])\n return Array(graph, name, chunks, dtype=dtype, meta=meta)\n\n\ndef from_func(func, shape, dtype=None, name=None, args=(), kwargs={}):\n \"\"\" Create dask array in a single block by calling a function\n\n Calling the provided function with func(*args, **kwargs) should return a\n NumPy array of the indicated shape and dtype.\n\n Examples\n --------\n\n >>> a = from_func(np.arange, (3,), dtype='i8', args=(3,))\n >>> a.compute()\n array([0, 1, 2])\n\n This works particularly well when coupled with dask.array functions like\n concatenate and stack:\n\n >>> arrays = [from_func(np.array, (), dtype='i8', args=(n,)) for n in range(5)]\n >>> stack(arrays).compute()\n array([0, 1, 2, 3, 4])\n \"\"\"\n name = name or \"from_func-\" + tokenize(func, shape, dtype, args, kwargs)\n if args or kwargs:\n func = partial(func, *args, **kwargs)\n dsk = {(name,) + (0,) * len(shape): (func,)}\n chunks = tuple((i,) for i in shape)\n return Array(dsk, name, chunks, dtype)\n\n\ndef common_blockdim(blockdims):\n \"\"\" Find the common block dimensions from the list of block dimensions\n\n Currently only implements the simplest possible heuristic: the common\n block-dimension is the only one that does not span fully span a dimension.\n This is a conservative choice that allows us to avoid potentially very\n expensive rechunking.\n\n Assumes that each element of the input block dimensions has all the same\n sum (i.e., that they correspond to dimensions of the same size).\n\n Examples\n --------\n >>> common_blockdim([(3,), (2, 1)])\n (2, 1)\n >>> common_blockdim([(1, 2), (2, 1)])\n (1, 1, 1)\n >>> common_blockdim([(2, 2), (3, 1)]) # doctest: +SKIP\n Traceback (most recent call last):\n ...\n ValueError: Chunks do not align\n \"\"\"\n if not any(blockdims):\n return ()\n non_trivial_dims = set([d for d in blockdims if len(d) > 1])\n if len(non_trivial_dims) == 1:\n return first(non_trivial_dims)\n if len(non_trivial_dims) == 0:\n return max(blockdims, key=first)\n\n if np.isnan(sum(map(sum, blockdims))):\n raise ValueError(\n \"Arrays chunk sizes (%s) are unknown.\\n\\n\"\n \"A possible solution:\\n\"\n \" x.compute_chunk_sizes()\" % blockdims\n )\n\n if len(set(map(sum, non_trivial_dims))) > 1:\n raise ValueError(\"Chunks do not add up to same value\", blockdims)\n\n # We have multiple non-trivial chunks on this axis\n # e.g. (5, 2) and (4, 3)\n\n # We create a single chunk tuple with the same total length\n # that evenly divides both, e.g. (4, 1, 2)\n\n # To accomplish this we walk down all chunk tuples together, finding the\n # smallest element, adding it to the output, and subtracting it from all\n # other elements and remove the element itself. 
We stop once we have\n # burned through all of the chunk tuples.\n # For efficiency's sake we reverse the lists so that we can pop off the end\n rchunks = [list(ntd)[::-1] for ntd in non_trivial_dims]\n total = sum(first(non_trivial_dims))\n i = 0\n\n out = []\n while i < total:\n m = min(c[-1] for c in rchunks)\n out.append(m)\n for c in rchunks:\n c[-1] -= m\n if c[-1] == 0:\n c.pop()\n i += m\n\n return tuple(out)\n\n\ndef unify_chunks(*args, **kwargs):\n \"\"\"\n Unify chunks across a sequence of arrays\n\n This utility function is used within other common operations like\n ``map_blocks`` and ``blockwise``. It is not commonly used by end-users\n directly.\n\n Parameters\n ----------\n *args: sequence of Array, index pairs\n Sequence like (x, 'ij', y, 'jk', z, 'i')\n\n Examples\n --------\n >>> import dask.array as da\n >>> x = da.ones(10, chunks=((5, 2, 3),))\n >>> y = da.ones(10, chunks=((2, 3, 5),))\n >>> chunkss, arrays = unify_chunks(x, 'i', y, 'i')\n >>> chunkss\n {'i': (2, 3, 2, 3)}\n\n >>> x = da.ones((100, 10), chunks=(20, 5))\n >>> y = da.ones((10, 100), chunks=(4, 50))\n >>> chunkss, arrays = unify_chunks(x, 'ij', y, 'jk', 'constant', None)\n >>> chunkss # doctest: +SKIP\n {'k': (50, 50), 'i': (20, 20, 20, 20, 20), 'j': (4, 1, 3, 2)}\n\n >>> unify_chunks(0, None)\n ({}, [0])\n\n Returns\n -------\n chunkss : dict\n Map like {index: chunks}.\n arrays : list\n List of rechunked arrays.\n\n See Also\n --------\n common_blockdim\n \"\"\"\n if not args:\n return {}, []\n\n arginds = [\n (asanyarray(a) if ind is not None else a, ind) for a, ind in partition(2, args)\n ] # [x, ij, y, jk]\n args = list(concat(arginds)) # [(x, ij), (y, jk)]\n warn = kwargs.get(\"warn\", True)\n\n arrays, inds = zip(*arginds)\n if all(ind is None for ind in inds):\n return {}, list(arrays)\n if all(ind == inds[0] for ind in inds) and all(\n a.chunks == arrays[0].chunks for a in arrays\n ):\n return dict(zip(inds[0], arrays[0].chunks)), arrays\n\n nameinds = []\n blockdim_dict = dict()\n max_parts = 0\n for a, ind in arginds:\n if ind is not None:\n nameinds.append((a.name, ind))\n blockdim_dict[a.name] = a.chunks\n max_parts = max(max_parts, a.npartitions)\n else:\n nameinds.append((a, ind))\n\n chunkss = broadcast_dimensions(nameinds, blockdim_dict, consolidate=common_blockdim)\n nparts = np.prod(list(map(len, chunkss.values())))\n\n if warn and nparts and nparts >= max_parts * 10:\n warnings.warn(\n \"Increasing number of chunks by factor of %d\" % (nparts / max_parts),\n PerformanceWarning,\n stacklevel=3,\n )\n\n arrays = []\n for a, i in arginds:\n if i is None:\n arrays.append(a)\n else:\n chunks = tuple(\n chunkss[j]\n if a.shape[n] > 1\n else a.shape[n]\n if not np.isnan(sum(chunkss[j]))\n else None\n for n, j in enumerate(i)\n )\n if chunks != a.chunks and all(a.chunks):\n arrays.append(a.rechunk(chunks))\n else:\n arrays.append(a)\n return chunkss, arrays\n\n\ndef unpack_singleton(x):\n \"\"\"\n\n >>> unpack_singleton([[[[1]]]])\n 1\n >>> unpack_singleton(np.array(np.datetime64('2000-01-01')))\n array('2000-01-01', dtype='datetime64[D]')\n \"\"\"\n while isinstance(x, (list, tuple)):\n try:\n x = x[0]\n except (IndexError, TypeError, KeyError):\n break\n return x\n\n\ndef block(arrays, allow_unknown_chunksizes=False):\n \"\"\"\n Assemble an nd-array from nested lists of blocks.\n\n Blocks in the innermost lists are concatenated along the last\n dimension (-1), then these are concatenated along the second-last\n dimension (-2), and so on until the outermost list is reached\n\n Blocks can be of 
any dimension, but will not be broadcasted using the normal\n rules. Instead, leading axes of size 1 are inserted, to make ``block.ndim``\n the same for all blocks. This is primarily useful for working with scalars,\n and means that code like ``block([v, 1])`` is valid, where\n ``v.ndim == 1``.\n\n When the nested list is two levels deep, this allows block matrices to be\n constructed from their components.\n\n Parameters\n ----------\n arrays : nested list of array_like or scalars (but not tuples)\n If passed a single ndarray or scalar (a nested list of depth 0), this\n is returned unmodified (and not copied).\n\n Elements shapes must match along the appropriate axes (without\n broadcasting), but leading 1s will be prepended to the shape as\n necessary to make the dimensions match.\n\n allow_unknown_chunksizes: bool\n Allow unknown chunksizes, such as come from converting from dask\n dataframes. Dask.array is unable to verify that chunks line up. If\n data comes from differently aligned sources then this can cause\n unexpected results.\n\n Returns\n -------\n block_array : ndarray\n The array assembled from the given blocks.\n\n The dimensionality of the output is equal to the greatest of:\n * the dimensionality of all the inputs\n * the depth to which the input list is nested\n\n Raises\n ------\n ValueError\n * If list depths are mismatched - for instance, ``[[a, b], c]`` is\n illegal, and should be spelt ``[[a, b], [c]]``\n * If lists are empty - for instance, ``[[a, b], []]``\n\n See Also\n --------\n concatenate : Join a sequence of arrays together.\n stack : Stack arrays in sequence along a new dimension.\n hstack : Stack arrays in sequence horizontally (column wise).\n vstack : Stack arrays in sequence vertically (row wise).\n dstack : Stack arrays in sequence depth wise (along third dimension).\n vsplit : Split array into a list of multiple sub-arrays vertically.\n\n Notes\n -----\n\n When called with only scalars, ``block`` is equivalent to an ndarray\n call. 
So ``block([[1, 2], [3, 4]])`` is equivalent to\n ``array([[1, 2], [3, 4]])``.\n\n This function does not enforce that the blocks lie on a fixed grid.\n ``block([[a, b], [c, d]])`` is not restricted to arrays of the form::\n\n AAAbb\n AAAbb\n cccDD\n\n But is also allowed to produce, for some ``a, b, c, d``::\n\n AAAbb\n AAAbb\n cDDDD\n\n Since concatenation happens along the last axis first, `block` is _not_\n capable of producing the following directly::\n\n AAAbb\n cccbb\n cccDD\n\n Matlab's \"square bracket stacking\", ``[A, B, ...; p, q, ...]``, is\n equivalent to ``block([[A, B, ...], [p, q, ...]])``.\n \"\"\"\n\n # This was copied almost verbatim from numpy.core.shape_base.block\n # See numpy license at https://github.com/numpy/numpy/blob/master/LICENSE.txt\n # or NUMPY_LICENSE.txt within this directory\n\n def atleast_nd(x, ndim):\n x = asanyarray(x)\n diff = max(ndim - x.ndim, 0)\n if diff == 0:\n return x\n else:\n return x[(None,) * diff + (Ellipsis,)]\n\n def format_index(index):\n return \"arrays\" + \"\".join(\"[{}]\".format(i) for i in index)\n\n rec = _Recurser(recurse_if=lambda x: type(x) is list)\n\n # ensure that the lists are all matched in depth\n list_ndim = None\n any_empty = False\n for index, value, entering in rec.walk(arrays):\n if type(value) is tuple:\n # not strictly necessary, but saves us from:\n # - more than one way to do things - no point treating tuples like\n # lists\n # - horribly confusing behaviour that results when tuples are\n # treated like ndarray\n raise TypeError(\n \"{} is a tuple. \"\n \"Only lists can be used to arrange blocks, and np.block does \"\n \"not allow implicit conversion from tuple to ndarray.\".format(\n format_index(index)\n )\n )\n if not entering:\n curr_depth = len(index)\n elif len(value) == 0:\n curr_depth = len(index) + 1\n any_empty = True\n else:\n continue\n\n if list_ndim is not None and list_ndim != curr_depth:\n raise ValueError(\n \"List depths are mismatched. First element was at depth {}, \"\n \"but there is an element at depth {} ({})\".format(\n list_ndim, curr_depth, format_index(index)\n )\n )\n list_ndim = curr_depth\n\n # do this here so we catch depth mismatches first\n if any_empty:\n raise ValueError(\"Lists cannot be empty\")\n\n # convert all the arrays to ndarrays\n arrays = rec.map_reduce(arrays, f_map=asanyarray, f_reduce=list)\n\n # determine the maximum dimension of the elements\n elem_ndim = rec.map_reduce(arrays, f_map=lambda xi: xi.ndim, f_reduce=max)\n ndim = max(list_ndim, elem_ndim)\n\n # first axis to concatenate along\n first_axis = ndim - list_ndim\n\n # Make all the elements the same dimension\n arrays = rec.map_reduce(\n arrays, f_map=lambda xi: atleast_nd(xi, ndim), f_reduce=list\n )\n\n # concatenate innermost lists on the right, outermost on the left\n return rec.map_reduce(\n arrays,\n f_reduce=lambda xs, axis: concatenate(\n list(xs), axis=axis, allow_unknown_chunksizes=allow_unknown_chunksizes\n ),\n f_kwargs=lambda axis: dict(axis=(axis + 1)),\n axis=first_axis,\n )\n\n\ndef concatenate(seq, axis=0, allow_unknown_chunksizes=False):\n \"\"\"\n Concatenate arrays along an existing axis\n\n Given a sequence of dask Arrays form a new dask Array by stacking them\n along an existing dimension (axis=0 by default)\n\n Parameters\n ----------\n seq: list of dask.arrays\n axis: int\n Dimension along which to align all of the arrays\n allow_unknown_chunksizes: bool\n Allow unknown chunksizes, such as come from converting from dask\n dataframes. 
Dask.array is unable to verify that chunks line up. If\n data comes from differently aligned sources then this can cause\n unexpected results.\n\n Examples\n --------\n\n Create slices\n\n >>> import dask.array as da\n >>> import numpy as np\n\n >>> data = [from_array(np.ones((4, 4)), chunks=(2, 2))\n ... for i in range(3)]\n\n >>> x = da.concatenate(data, axis=0)\n >>> x.shape\n (12, 4)\n\n >>> da.concatenate(data, axis=1).shape\n (4, 12)\n\n Result is a new dask Array\n\n See Also\n --------\n stack\n \"\"\"\n from . import wrap\n\n seq = [asarray(a) for a in seq]\n\n if not seq:\n raise ValueError(\"Need array(s) to concatenate\")\n\n seq_metas = [meta_from_array(s) for s in seq]\n _concatenate = concatenate_lookup.dispatch(\n type(max(seq_metas, key=lambda x: getattr(x, \"__array_priority__\", 0)))\n )\n meta = _concatenate(seq_metas, axis=axis)\n\n # Promote types to match meta\n seq = [a.astype(meta.dtype) for a in seq]\n\n # Find output array shape\n ndim = len(seq[0].shape)\n shape = tuple(\n sum((a.shape[i] for a in seq)) if i == axis else seq[0].shape[i]\n for i in range(ndim)\n )\n\n # Drop empty arrays\n seq2 = [a for a in seq if a.size]\n if not seq2:\n seq2 = seq\n\n if axis < 0:\n axis = ndim + axis\n if axis >= ndim:\n msg = (\n \"Axis must be less than than number of dimensions\"\n \"\\nData has %d dimensions, but got axis=%d\"\n )\n raise ValueError(msg % (ndim, axis))\n\n n = len(seq2)\n if n == 0:\n try:\n return wrap.empty_like(meta, shape=shape, chunks=shape, dtype=meta.dtype)\n except TypeError:\n return wrap.empty(shape, chunks=shape, dtype=meta.dtype)\n elif n == 1:\n return seq2[0]\n\n if not allow_unknown_chunksizes and not all(\n i == axis or all(x.shape[i] == seq2[0].shape[i] for x in seq2)\n for i in range(ndim)\n ):\n if any(map(np.isnan, seq2[0].shape)):\n raise ValueError(\n \"Tried to concatenate arrays with unknown\"\n \" shape %s.\\n\\nTwo solutions:\\n\"\n \" 1. Force concatenation pass\"\n \" allow_unknown_chunksizes=True.\\n\"\n \" 2. 
Compute shapes with \"\n \"[x.compute_chunk_sizes() for x in seq]\" % str(seq2[0].shape)\n )\n raise ValueError(\"Shapes do not align: %s\", [x.shape for x in seq2])\n\n inds = [list(range(ndim)) for i in range(n)]\n for i, ind in enumerate(inds):\n ind[axis] = -(i + 1)\n\n uc_args = list(concat(zip(seq2, inds)))\n _, seq2 = unify_chunks(*uc_args, warn=False)\n\n bds = [a.chunks for a in seq2]\n\n chunks = (\n seq2[0].chunks[:axis]\n + (sum([bd[axis] for bd in bds], ()),)\n + seq2[0].chunks[axis + 1 :]\n )\n\n cum_dims = [0] + list(accumulate(add, [len(a.chunks[axis]) for a in seq2]))\n\n names = [a.name for a in seq2]\n\n name = \"concatenate-\" + tokenize(names, axis)\n keys = list(product([name], *[range(len(bd)) for bd in chunks]))\n\n values = [\n (names[bisect(cum_dims, key[axis + 1]) - 1],)\n + key[1 : axis + 1]\n + (key[axis + 1] - cum_dims[bisect(cum_dims, key[axis + 1]) - 1],)\n + key[axis + 2 :]\n for key in keys\n ]\n\n dsk = dict(zip(keys, values))\n graph = HighLevelGraph.from_collections(name, dsk, dependencies=seq2)\n\n return Array(graph, name, chunks, meta=meta)\n\n\ndef load_store_chunk(x, out, index, lock, return_stored, load_stored):\n \"\"\"\n A function inserted in a Dask graph for storing a chunk.\n\n Parameters\n ----------\n x: array-like\n An array (potentially a NumPy one)\n out: array-like\n Where to store results too.\n index: slice-like\n Where to store result from ``x`` in ``out``.\n lock: Lock-like or False\n Lock to use before writing to ``out``.\n return_stored: bool\n Whether to return ``out``.\n load_stored: bool\n Whether to return the array stored in ``out``.\n Ignored if ``return_stored`` is not ``True``.\n\n Examples\n --------\n\n >>> a = np.ones((5, 6))\n >>> b = np.empty(a.shape)\n >>> load_store_chunk(a, b, (slice(None), slice(None)), False, False, False)\n \"\"\"\n\n result = None\n if return_stored and not load_stored:\n result = out\n\n if lock:\n lock.acquire()\n try:\n if x is not None:\n out[index] = np.asanyarray(x)\n if return_stored and load_stored:\n result = out[index]\n finally:\n if lock:\n lock.release()\n\n return result\n\n\ndef store_chunk(x, out, index, lock, return_stored):\n return load_store_chunk(x, out, index, lock, return_stored, False)\n\n\ndef load_chunk(out, index, lock):\n return load_store_chunk(None, out, index, lock, True, True)\n\n\ndef insert_to_ooc(\n arr, out, lock=True, region=None, return_stored=False, load_stored=False, tok=None\n):\n \"\"\"\n Creates a Dask graph for storing chunks from ``arr`` in ``out``.\n\n Parameters\n ----------\n arr: da.Array\n A dask array\n out: array-like\n Where to store results too.\n lock: Lock-like or bool, optional\n Whether to lock or with what (default is ``True``,\n which means a ``threading.Lock`` instance).\n region: slice-like, optional\n Where in ``out`` to store ``arr``'s results\n (default is ``None``, meaning all of ``out``).\n return_stored: bool, optional\n Whether to return ``out``\n (default is ``False``, meaning ``None`` is returned).\n load_stored: bool, optional\n Whether to handling loading from ``out`` at the same time.\n Ignored if ``return_stored`` is not ``True``.\n (default is ``False``, meaning defer to ``return_stored``).\n tok: str, optional\n Token to use when naming keys\n\n Examples\n --------\n >>> import dask.array as da\n >>> d = da.ones((5, 6), chunks=(2, 3))\n >>> a = np.empty(d.shape)\n >>> insert_to_ooc(d, a) # doctest: +SKIP\n \"\"\"\n\n if lock is True:\n lock = Lock()\n\n slices = slices_from_chunks(arr.chunks)\n if region:\n slices = 
[fuse_slice(region, slc) for slc in slices]\n\n name = \"store-%s\" % (tok or str(uuid.uuid1()))\n func = store_chunk\n args = ()\n if return_stored and load_stored:\n name = \"load-%s\" % name\n func = load_store_chunk\n args = args + (load_stored,)\n\n dsk = {\n (name,) + t[1:]: (func, t, out, slc, lock, return_stored) + args\n for t, slc in zip(core.flatten(arr.__dask_keys__()), slices)\n }\n\n return dsk\n\n\ndef retrieve_from_ooc(keys, dsk_pre, dsk_post=None):\n \"\"\"\n Creates a Dask graph for loading stored ``keys`` from ``dsk``.\n\n Parameters\n ----------\n keys: Sequence\n A sequence containing Dask graph keys to load\n dsk_pre: Mapping\n A Dask graph corresponding to a Dask Array before computation\n dsk_post: Mapping, optional\n A Dask graph corresponding to a Dask Array after computation\n\n Examples\n --------\n >>> import dask.array as da\n >>> d = da.ones((5, 6), chunks=(2, 3))\n >>> a = np.empty(d.shape)\n >>> g = insert_to_ooc(d, a)\n >>> retrieve_from_ooc(g.keys(), g) # doctest: +SKIP\n \"\"\"\n\n if not dsk_post:\n dsk_post = {k: k for k in keys}\n\n load_dsk = {\n (\"load-\" + k[0],) + k[1:]: (load_chunk, dsk_post[k]) + dsk_pre[k][3:-1]\n for k in keys\n }\n\n return load_dsk\n\n\ndef asarray(a, **kwargs):\n \"\"\"Convert the input to a dask array.\n\n Parameters\n ----------\n a : array-like\n Input data, in any form that can be converted to a dask array.\n\n Returns\n -------\n out : dask array\n Dask array interpretation of a.\n\n Examples\n --------\n >>> import dask.array as da\n >>> import numpy as np\n >>> x = np.arange(3)\n >>> da.asarray(x)\n dask.array<array, shape=(3,), dtype=int64, chunksize=(3,), chunktype=numpy.ndarray>\n\n >>> y = [[1, 2, 3], [4, 5, 6]]\n >>> da.asarray(y)\n dask.array<array, shape=(2, 3), dtype=int64, chunksize=(2, 3), chunktype=numpy.ndarray>\n \"\"\"\n if isinstance(a, Array):\n return a\n elif hasattr(a, \"to_dask_array\"):\n return a.to_dask_array()\n elif type(a).__module__.startswith(\"xarray.\") and hasattr(a, \"data\"):\n return asarray(a.data)\n elif isinstance(a, (list, tuple)) and any(isinstance(i, Array) for i in a):\n return stack(a)\n elif not isinstance(getattr(a, \"shape\", None), Iterable):\n a = np.asarray(a)\n return from_array(a, getitem=getter_inline, **kwargs)\n\n\ndef asanyarray(a):\n \"\"\"Convert the input to a dask array.\n\n Subclasses of ``np.ndarray`` will be passed through as chunks unchanged.\n\n Parameters\n ----------\n a : array-like\n Input data, in any form that can be converted to a dask array.\n\n Returns\n -------\n out : dask array\n Dask array interpretation of a.\n\n Examples\n --------\n >>> import dask.array as da\n >>> import numpy as np\n >>> x = np.arange(3)\n >>> da.asanyarray(x)\n dask.array<array, shape=(3,), dtype=int64, chunksize=(3,), chunktype=numpy.ndarray>\n\n >>> y = [[1, 2, 3], [4, 5, 6]]\n >>> da.asanyarray(y)\n dask.array<array, shape=(2, 3), dtype=int64, chunksize=(2, 3), chunktype=numpy.ndarray>\n \"\"\"\n if isinstance(a, Array):\n return a\n elif hasattr(a, \"to_dask_array\"):\n return a.to_dask_array()\n elif type(a).__module__.startswith(\"xarray.\") and hasattr(a, \"data\"):\n return asanyarray(a.data)\n elif isinstance(a, (list, tuple)) and any(isinstance(i, Array) for i in a):\n a = stack(a)\n elif not isinstance(getattr(a, \"shape\", None), Iterable):\n a = np.asanyarray(a)\n return from_array(a, chunks=a.shape, getitem=getter_inline, asarray=False)\n\n\ndef is_scalar_for_elemwise(arg):\n \"\"\"\n\n >>> is_scalar_for_elemwise(42)\n True\n >>> 
is_scalar_for_elemwise('foo')\n True\n >>> is_scalar_for_elemwise(True)\n True\n >>> is_scalar_for_elemwise(np.array(42))\n True\n >>> is_scalar_for_elemwise([1, 2, 3])\n True\n >>> is_scalar_for_elemwise(np.array([1, 2, 3]))\n False\n >>> is_scalar_for_elemwise(from_array(np.array(0), chunks=()))\n False\n >>> is_scalar_for_elemwise(np.dtype('i4'))\n True\n \"\"\"\n # the second half of shape_condition is essentially just to ensure that\n # dask series / frame are treated as scalars in elemwise.\n maybe_shape = getattr(arg, \"shape\", None)\n shape_condition = not isinstance(maybe_shape, Iterable) or any(\n is_dask_collection(x) for x in maybe_shape\n )\n\n return (\n np.isscalar(arg)\n or shape_condition\n or isinstance(arg, np.dtype)\n or (isinstance(arg, np.ndarray) and arg.ndim == 0)\n )\n\n\ndef broadcast_shapes(*shapes):\n \"\"\"\n Determines output shape from broadcasting arrays.\n\n Parameters\n ----------\n shapes : tuples\n The shapes of the arguments.\n\n Returns\n -------\n output_shape : tuple\n\n Raises\n ------\n ValueError\n If the input shapes cannot be successfully broadcast together.\n \"\"\"\n if len(shapes) == 1:\n return shapes[0]\n out = []\n for sizes in zip_longest(*map(reversed, shapes), fillvalue=-1):\n if np.isnan(sizes).any():\n dim = np.nan\n else:\n dim = 0 if 0 in sizes else np.max(sizes)\n if any(i not in [-1, 0, 1, dim] and not np.isnan(i) for i in sizes):\n raise ValueError(\n \"operands could not be broadcast together with \"\n \"shapes {0}\".format(\" \".join(map(str, shapes)))\n )\n out.append(dim)\n return tuple(reversed(out))\n\n\ndef elemwise(op, *args, **kwargs):\n \"\"\" Apply elementwise function across arguments\n\n Respects broadcasting rules\n\n Examples\n --------\n >>> elemwise(add, x, y) # doctest: +SKIP\n >>> elemwise(sin, x) # doctest: +SKIP\n\n See Also\n --------\n blockwise\n \"\"\"\n out = kwargs.pop(\"out\", None)\n if not set([\"name\", \"dtype\"]).issuperset(kwargs):\n msg = \"%s does not take the following keyword arguments %s\"\n raise TypeError(\n msg % (op.__name__, str(sorted(set(kwargs) - set([\"name\", \"dtype\"]))))\n )\n\n args = [np.asarray(a) if isinstance(a, (list, tuple)) else a for a in args]\n\n shapes = []\n for arg in args:\n shape = getattr(arg, \"shape\", ())\n if any(is_dask_collection(x) for x in shape):\n # Want to excluded Delayed shapes and dd.Scalar\n shape = ()\n shapes.append(shape)\n\n shapes = [s if isinstance(s, Iterable) else () for s in shapes]\n out_ndim = len(\n broadcast_shapes(*shapes)\n ) # Raises ValueError if dimensions mismatch\n expr_inds = tuple(range(out_ndim))[::-1]\n\n need_enforce_dtype = False\n if \"dtype\" in kwargs:\n dt = kwargs[\"dtype\"]\n else:\n # We follow NumPy's rules for dtype promotion, which special cases\n # scalars and 0d ndarrays (which it considers equivalent) by using\n # their values to compute the result dtype:\n # https://github.com/numpy/numpy/issues/6240\n # We don't inspect the values of 0d dask arrays, because these could\n # hold potentially very expensive calculations. 
Instead, we treat\n # them just like other arrays, and if necessary cast the result of op\n # to match.\n vals = [\n np.empty((1,) * max(1, a.ndim), dtype=a.dtype)\n if not is_scalar_for_elemwise(a)\n else a\n for a in args\n ]\n try:\n dt = apply_infer_dtype(op, vals, {}, \"elemwise\", suggest_dtype=False)\n except Exception:\n return NotImplemented\n need_enforce_dtype = any(\n not is_scalar_for_elemwise(a) and a.ndim == 0 for a in args\n )\n\n name = kwargs.get(\"name\", None) or \"%s-%s\" % (funcname(op), tokenize(op, dt, *args))\n\n blockwise_kwargs = dict(dtype=dt, name=name, token=funcname(op).strip(\"_\"))\n if need_enforce_dtype:\n blockwise_kwargs[\"enforce_dtype\"] = dt\n blockwise_kwargs[\"enforce_dtype_function\"] = op\n op = _enforce_dtype\n result = blockwise(\n op,\n expr_inds,\n *concat(\n (a, tuple(range(a.ndim)[::-1]) if not is_scalar_for_elemwise(a) else None)\n for a in args\n ),\n **blockwise_kwargs,\n )\n\n return handle_out(out, result)\n\n\ndef handle_out(out, result):\n \"\"\" Handle out parameters\n\n If out is a dask.array then this overwrites the contents of that array with\n the result\n \"\"\"\n if isinstance(out, tuple):\n if len(out) == 1:\n out = out[0]\n elif len(out) > 1:\n raise NotImplementedError(\"The out parameter is not fully supported\")\n else:\n out = None\n if isinstance(out, Array):\n if out.shape != result.shape:\n raise ValueError(\n \"Mismatched shapes between result and out parameter. \"\n \"out=%s, result=%s\" % (str(out.shape), str(result.shape))\n )\n out._chunks = result.chunks\n out.dask = result.dask\n out._meta = result._meta\n out.name = result.name\n elif out is not None:\n msg = (\n \"The out parameter is not fully supported.\"\n \" Received type %s, expected Dask Array\" % type(out).__name__\n )\n raise NotImplementedError(msg)\n else:\n return result\n\n\ndef _enforce_dtype(*args, **kwargs):\n \"\"\"Calls a function and converts its result to the given dtype.\n\n The parameters have deliberately been given unwieldy names to avoid\n clashes with keyword arguments consumed by blockwise\n\n A dtype of `object` is treated as a special case and not enforced,\n because it is used as a dummy value in some places when the result will\n not be a block in an Array.\n\n Parameters\n ----------\n enforce_dtype : dtype\n Result dtype\n enforce_dtype_function : callable\n The wrapped function, which will be passed the remaining arguments\n \"\"\"\n dtype = kwargs.pop(\"enforce_dtype\")\n function = kwargs.pop(\"enforce_dtype_function\")\n\n result = function(*args, **kwargs)\n if hasattr(result, \"dtype\") and dtype != result.dtype and dtype != object:\n if not np.can_cast(result, dtype, casting=\"same_kind\"):\n raise ValueError(\n \"Inferred dtype from function %r was %r \"\n \"but got %r, which can't be cast using \"\n \"casting='same_kind'\"\n % (funcname(function), str(dtype), str(result.dtype))\n )\n if np.isscalar(result):\n # scalar astype method doesn't take the keyword arguments, so\n # have to convert via 0-dimensional array and back.\n result = result.astype(dtype)\n else:\n try:\n result = result.astype(dtype, copy=False)\n except TypeError:\n # Missing copy kwarg\n result = result.astype(dtype)\n return result\n\n\ndef broadcast_to(x, shape, chunks=None):\n \"\"\"Broadcast an array to a new shape.\n\n Parameters\n ----------\n x : array_like\n The array to broadcast.\n shape : tuple\n The shape of the desired array.\n chunks : tuple, optional\n If provided, then the result will use these chunks instead of the same\n chunks as 
the source array. Setting chunks explicitly as part of\n broadcast_to is more efficient than rechunking afterwards. Chunks are\n only allowed to differ from the original shape along dimensions that\n are new on the result or have size 1 the input array.\n\n Returns\n -------\n broadcast : dask array\n\n See Also\n --------\n :func:`numpy.broadcast_to`\n \"\"\"\n x = asarray(x)\n shape = tuple(shape)\n\n if x.shape == shape and (chunks is None or chunks == x.chunks):\n return x\n\n ndim_new = len(shape) - x.ndim\n if ndim_new < 0 or any(\n new != old for new, old in zip(shape[ndim_new:], x.shape) if old != 1\n ):\n raise ValueError(\"cannot broadcast shape %s to shape %s\" % (x.shape, shape))\n\n if chunks is None:\n chunks = tuple((s,) for s in shape[:ndim_new]) + tuple(\n bd if old > 1 else (new,)\n for bd, old, new in zip(x.chunks, x.shape, shape[ndim_new:])\n )\n else:\n chunks = normalize_chunks(\n chunks, shape, dtype=x.dtype, previous_chunks=x.chunks\n )\n for old_bd, new_bd in zip(x.chunks, chunks[ndim_new:]):\n if old_bd != new_bd and old_bd != (1,):\n raise ValueError(\n \"cannot broadcast chunks %s to chunks %s: \"\n \"new chunks must either be along a new \"\n \"dimension or a dimension of size 1\" % (x.chunks, chunks)\n )\n\n name = \"broadcast_to-\" + tokenize(x, shape, chunks)\n dsk = {}\n\n enumerated_chunks = product(*(enumerate(bds) for bds in chunks))\n for new_index, chunk_shape in (zip(*ec) for ec in enumerated_chunks):\n old_index = tuple(\n 0 if bd == (1,) else i for bd, i in zip(x.chunks, new_index[ndim_new:])\n )\n old_key = (x.name,) + old_index\n new_key = (name,) + new_index\n dsk[new_key] = (np.broadcast_to, old_key, quote(chunk_shape))\n\n graph = HighLevelGraph.from_collections(name, dsk, dependencies=[x])\n return Array(graph, name, chunks, dtype=x.dtype)\n\n\n@derived_from(np)\ndef broadcast_arrays(*args, **kwargs):\n subok = bool(kwargs.pop(\"subok\", False))\n\n to_array = asanyarray if subok else asarray\n args = tuple(to_array(e) for e in args)\n\n if kwargs:\n raise TypeError(\"unsupported keyword argument(s) provided\")\n\n # Unify uneven chunking\n inds = [list(reversed(range(x.ndim))) for x in args]\n uc_args = concat(zip(args, inds))\n _, args = unify_chunks(*uc_args, warn=False)\n\n shape = broadcast_shapes(*(e.shape for e in args))\n chunks = broadcast_chunks(*(e.chunks for e in args))\n\n result = [broadcast_to(e, shape=shape, chunks=chunks) for e in args]\n\n return result\n\n\ndef offset_func(func, offset, *args):\n \"\"\" Offsets inputs by offset\n\n >>> double = lambda x: x * 2\n >>> f = offset_func(double, (10,))\n >>> f(1)\n 22\n >>> f(300)\n 620\n \"\"\"\n\n def _offset(*args):\n args2 = list(map(add, args, offset))\n return func(*args2)\n\n with ignoring(Exception):\n _offset.__name__ = \"offset_\" + func.__name__\n\n return _offset\n\n\ndef chunks_from_arrays(arrays):\n \"\"\" Chunks tuple from nested list of arrays\n\n >>> x = np.array([1, 2])\n >>> chunks_from_arrays([x, x])\n ((2, 2),)\n\n >>> x = np.array([[1, 2]])\n >>> chunks_from_arrays([[x], [x]])\n ((1, 1), (2,))\n\n >>> x = np.array([[1, 2]])\n >>> chunks_from_arrays([[x, x]])\n ((1,), (2, 2))\n\n >>> chunks_from_arrays([1, 1])\n ((1, 1),)\n \"\"\"\n if not arrays:\n return ()\n result = []\n dim = 0\n\n def shape(x):\n try:\n return x.shape\n except AttributeError:\n return (1,)\n\n while isinstance(arrays, (list, tuple)):\n result.append(tuple([shape(deepfirst(a))[dim] for a in arrays]))\n arrays = arrays[0]\n dim += 1\n return tuple(result)\n\n\ndef deepfirst(seq):\n \"\"\" 
First element in a nested list\n\n >>> deepfirst([[[1, 2], [3, 4]], [5, 6], [7, 8]])\n 1\n \"\"\"\n if not isinstance(seq, (list, tuple)):\n return seq\n else:\n return deepfirst(seq[0])\n\n\ndef shapelist(a):\n \"\"\" Get the shape of nested list \"\"\"\n if type(a) is list:\n return tuple([len(a)] + list(shapelist(a[0])))\n else:\n return ()\n\n\ndef reshapelist(shape, seq):\n \"\"\" Reshape iterator to nested shape\n\n >>> reshapelist((2, 3), range(6))\n [[0, 1, 2], [3, 4, 5]]\n \"\"\"\n if len(shape) == 1:\n return list(seq)\n else:\n n = int(len(seq) / shape[0])\n return [reshapelist(shape[1:], part) for part in partition(n, seq)]\n\n\ndef transposelist(arrays, axes, extradims=0):\n \"\"\" Permute axes of nested list\n\n >>> transposelist([[1,1,1],[1,1,1]], [2,1])\n [[[1, 1], [1, 1], [1, 1]]]\n\n >>> transposelist([[1,1,1],[1,1,1]], [2,1], extradims=1)\n [[[[1], [1]], [[1], [1]], [[1], [1]]]]\n \"\"\"\n if len(axes) != ndimlist(arrays):\n raise ValueError(\"Length of axes should equal depth of nested arrays\")\n if extradims < 0:\n raise ValueError(\"`newdims` should be positive\")\n if len(axes) > len(set(axes)):\n raise ValueError(\"`axes` should be unique\")\n\n ndim = max(axes) + 1\n shape = shapelist(arrays)\n newshape = [\n shape[axes.index(i)] if i in axes else 1 for i in range(ndim + extradims)\n ]\n\n result = list(core.flatten(arrays))\n return reshapelist(newshape, result)\n\n\ndef stack(seq, axis=0, allow_unknown_chunksizes=False):\n \"\"\"\n Stack arrays along a new axis\n\n Given a sequence of dask arrays, form a new dask array by stacking them\n along a new dimension (axis=0 by default)\n\n Parameters\n ----------\n seq: list of dask.arrays\n axis: int\n Dimension along which to align all of the arrays\n allow_unknown_chunksizes: bool\n Allow unknown chunksizes, such as come from converting from dask\n dataframes. Dask.array is unable to verify that chunks line up. If\n data comes from differently aligned sources then this can cause\n unexpected results.\n\n Examples\n --------\n\n Create slices\n\n >>> import dask.array as da\n >>> import numpy as np\n\n >>> data = [from_array(np.ones((4, 4)), chunks=(2, 2))\n ... for i in range(3)]\n\n >>> x = da.stack(data, axis=0)\n >>> x.shape\n (3, 4, 4)\n\n >>> da.stack(data, axis=1).shape\n (4, 3, 4)\n\n >>> da.stack(data, axis=-1).shape\n (4, 4, 3)\n\n Result is a new dask Array\n\n See Also\n --------\n concatenate\n \"\"\"\n from . import wrap\n\n seq = [asarray(a) for a in seq]\n\n if not seq:\n raise ValueError(\"Need array(s) to stack\")\n if not allow_unknown_chunksizes and not all(x.shape == seq[0].shape for x in seq):\n idx = first(i for i in enumerate(seq) if i[1].shape != seq[0].shape)\n raise ValueError(\n \"Stacked arrays must have the same shape. 
\"\n \"The first array had shape {0}, while array \"\n \"{1} has shape {2}.\".format(seq[0].shape, idx[0] + 1, idx[1].shape)\n )\n\n meta = np.stack([meta_from_array(a) for a in seq], axis=axis)\n seq = [x.astype(meta.dtype) for x in seq]\n\n ndim = meta.ndim - 1\n if axis < 0:\n axis = ndim + axis + 1\n shape = tuple(\n len(seq)\n if i == axis\n else (seq[0].shape[i] if i < axis else seq[0].shape[i - 1])\n for i in range(meta.ndim)\n )\n\n seq2 = [a for a in seq if a.size]\n if not seq2:\n seq2 = seq\n\n n = len(seq2)\n if n == 0:\n try:\n return wrap.empty_like(meta, shape=shape, chunks=shape, dtype=meta.dtype)\n except TypeError:\n return wrap.empty(shape, chunks=shape, dtype=meta.dtype)\n\n ind = list(range(ndim))\n uc_args = list(concat((x, ind) for x in seq2))\n _, seq2 = unify_chunks(*uc_args)\n\n assert len(set(a.chunks for a in seq2)) == 1 # same chunks\n chunks = seq2[0].chunks[:axis] + ((1,) * n,) + seq2[0].chunks[axis:]\n\n names = [a.name for a in seq2]\n name = \"stack-\" + tokenize(names, axis)\n keys = list(product([name], *[range(len(bd)) for bd in chunks]))\n\n inputs = [\n (names[key[axis + 1]],) + key[1 : axis + 1] + key[axis + 2 :] for key in keys\n ]\n values = [\n (\n getitem,\n inp,\n (slice(None, None, None),) * axis\n + (None,)\n + (slice(None, None, None),) * (ndim - axis),\n )\n for inp in inputs\n ]\n\n layer = dict(zip(keys, values))\n graph = HighLevelGraph.from_collections(name, layer, dependencies=seq2)\n\n return Array(graph, name, chunks, meta=meta)\n\n\ndef concatenate3(arrays):\n \"\"\" Recursive np.concatenate\n\n Input should be a nested list of numpy arrays arranged in the order they\n should appear in the array itself. Each array should have the same number\n of dimensions as the desired output and the nesting of the lists.\n\n >>> x = np.array([[1, 2]])\n >>> concatenate3([[x, x, x], [x, x, x]])\n array([[1, 2, 1, 2, 1, 2],\n [1, 2, 1, 2, 1, 2]])\n\n >>> concatenate3([[x, x], [x, x], [x, x]])\n array([[1, 2, 1, 2],\n [1, 2, 1, 2],\n [1, 2, 1, 2]])\n \"\"\"\n from .utils import IS_NEP18_ACTIVE\n\n # We need this as __array_function__ may not exist on older NumPy versions.\n # And to reduce verbosity.\n NDARRAY_ARRAY_FUNCTION = getattr(np.ndarray, \"__array_function__\", None)\n\n arrays = concrete(arrays)\n if not arrays:\n return np.empty(0)\n\n advanced = max(\n core.flatten(arrays, container=(list, tuple)),\n key=lambda x: getattr(x, \"__array_priority__\", 0),\n )\n\n if IS_NEP18_ACTIVE and not all(\n NDARRAY_ARRAY_FUNCTION\n is getattr(arr, \"__array_function__\", NDARRAY_ARRAY_FUNCTION)\n for arr in arrays\n ):\n try:\n x = unpack_singleton(arrays)\n return _concatenate2(arrays, axes=tuple(range(x.ndim)))\n except TypeError:\n pass\n\n if concatenate_lookup.dispatch(type(advanced)) is not np.concatenate:\n x = unpack_singleton(arrays)\n return _concatenate2(arrays, axes=list(range(x.ndim)))\n\n ndim = ndimlist(arrays)\n if not ndim:\n return arrays\n chunks = chunks_from_arrays(arrays)\n shape = tuple(map(sum, chunks))\n\n def dtype(x):\n try:\n return x.dtype\n except AttributeError:\n return type(x)\n\n result = np.empty(shape=shape, dtype=dtype(deepfirst(arrays)))\n\n for (idx, arr) in zip(slices_from_chunks(chunks), core.flatten(arrays)):\n if hasattr(arr, \"ndim\"):\n while arr.ndim < ndim:\n arr = arr[None, ...]\n result[idx] = arr\n\n return result\n\n\ndef concatenate_axes(arrays, axes):\n \"\"\" Recursively call np.concatenate along axes \"\"\"\n if len(axes) != ndimlist(arrays):\n raise ValueError(\"Length of axes should equal depth 
of nested arrays\")\n\n extradims = max(0, deepfirst(arrays).ndim - (max(axes) + 1))\n return concatenate3(transposelist(arrays, axes, extradims=extradims))\n\n\ndef to_hdf5(filename, *args, **kwargs):\n \"\"\" Store arrays in HDF5 file\n\n This saves several dask arrays into several datapaths in an HDF5 file.\n It creates the necessary datasets and handles clean file opening/closing.\n\n >>> da.to_hdf5('myfile.hdf5', '/x', x) # doctest: +SKIP\n\n or\n\n >>> da.to_hdf5('myfile.hdf5', {'/x': x, '/y': y}) # doctest: +SKIP\n\n Optionally provide arguments as though to ``h5py.File.create_dataset``\n\n >>> da.to_hdf5('myfile.hdf5', '/x', x, compression='lzf', shuffle=True) # doctest: +SKIP\n\n This can also be used as a method on a single Array\n\n >>> x.to_hdf5('myfile.hdf5', '/x') # doctest: +SKIP\n\n See Also\n --------\n da.store\n h5py.File.create_dataset\n \"\"\"\n if len(args) == 1 and isinstance(args[0], dict):\n data = args[0]\n elif len(args) == 2 and isinstance(args[0], str) and isinstance(args[1], Array):\n data = {args[0]: args[1]}\n else:\n raise ValueError(\"Please provide {'/data/path': array} dictionary\")\n\n chunks = kwargs.pop(\"chunks\", True)\n\n import h5py\n\n with h5py.File(filename, mode=\"a\") as f:\n dsets = [\n f.require_dataset(\n dp,\n shape=x.shape,\n dtype=x.dtype,\n chunks=tuple([c[0] for c in x.chunks]) if chunks is True else chunks,\n **kwargs,\n )\n for dp, x in data.items()\n ]\n store(list(data.values()), dsets)\n\n\ndef interleave_none(a, b):\n \"\"\"\n\n >>> interleave_none([0, None, 2, None], [1, 3])\n (0, 1, 2, 3)\n \"\"\"\n result = []\n i = j = 0\n n = len(a) + len(b)\n while i + j < n:\n if a[i] is not None:\n result.append(a[i])\n i += 1\n else:\n result.append(b[j])\n i += 1\n j += 1\n return tuple(result)\n\n\ndef keyname(name, i, okey):\n \"\"\"\n\n >>> keyname('x', 3, [None, None, 0, 2])\n ('x', 3, 0, 2)\n \"\"\"\n return (name, i) + tuple(k for k in okey if k is not None)\n\n\ndef _vindex(x, *indexes):\n \"\"\"Point wise indexing with broadcasting.\n\n >>> x = np.arange(56).reshape((7, 8))\n >>> x\n array([[ 0, 1, 2, 3, 4, 5, 6, 7],\n [ 8, 9, 10, 11, 12, 13, 14, 15],\n [16, 17, 18, 19, 20, 21, 22, 23],\n [24, 25, 26, 27, 28, 29, 30, 31],\n [32, 33, 34, 35, 36, 37, 38, 39],\n [40, 41, 42, 43, 44, 45, 46, 47],\n [48, 49, 50, 51, 52, 53, 54, 55]])\n\n >>> d = from_array(x, chunks=(3, 4))\n >>> result = _vindex(d, [0, 1, 6, 0], [0, 1, 0, 7])\n >>> result.compute()\n array([ 0, 9, 48, 7])\n \"\"\"\n indexes = replace_ellipsis(x.ndim, indexes)\n\n nonfancy_indexes = []\n reduced_indexes = []\n for i, ind in enumerate(indexes):\n if isinstance(ind, Number):\n nonfancy_indexes.append(ind)\n elif isinstance(ind, slice):\n nonfancy_indexes.append(ind)\n reduced_indexes.append(slice(None))\n else:\n nonfancy_indexes.append(slice(None))\n reduced_indexes.append(ind)\n\n nonfancy_indexes = tuple(nonfancy_indexes)\n reduced_indexes = tuple(reduced_indexes)\n\n x = x[nonfancy_indexes]\n\n array_indexes = {}\n for i, (ind, size) in enumerate(zip(reduced_indexes, x.shape)):\n if not isinstance(ind, slice):\n ind = np.array(ind, copy=True)\n if ind.dtype.kind == \"b\":\n raise IndexError(\"vindex does not support indexing with boolean arrays\")\n if ((ind >= size) | (ind < -size)).any():\n raise IndexError(\n \"vindex key has entries out of bounds for \"\n \"indexing along axis %s of size %s: %r\" % (i, size, ind)\n )\n ind %= size\n array_indexes[i] = ind\n\n if array_indexes:\n x = _vindex_array(x, array_indexes)\n\n return x\n\n\ndef _vindex_array(x, 
dict_indexes):\n \"\"\"Point wise indexing with only NumPy Arrays.\"\"\"\n\n try:\n broadcast_indexes = np.broadcast_arrays(*dict_indexes.values())\n except ValueError as e:\n # note: error message exactly matches numpy\n shapes_str = \" \".join(str(a.shape) for a in dict_indexes.values())\n raise IndexError(\n \"shape mismatch: indexing arrays could not be \"\n \"broadcast together with shapes \" + shapes_str\n ) from e\n broadcast_shape = broadcast_indexes[0].shape\n\n lookup = dict(zip(dict_indexes, broadcast_indexes))\n flat_indexes = [\n lookup[i].ravel().tolist() if i in lookup else None for i in range(x.ndim)\n ]\n flat_indexes.extend([None] * (x.ndim - len(flat_indexes)))\n\n flat_indexes = [\n list(index) if index is not None else index for index in flat_indexes\n ]\n bounds = [list(accumulate(add, (0,) + c)) for c in x.chunks]\n bounds2 = [b for i, b in zip(flat_indexes, bounds) if i is not None]\n axis = _get_axis(flat_indexes)\n token = tokenize(x, flat_indexes)\n out_name = \"vindex-merge-\" + token\n\n points = list()\n for i, idx in enumerate(zip(*[i for i in flat_indexes if i is not None])):\n block_idx = [\n np.searchsorted(b, ind, \"right\") - 1 for b, ind in zip(bounds2, idx)\n ]\n inblock_idx = [\n ind - bounds2[k][j] for k, (ind, j) in enumerate(zip(idx, block_idx))\n ]\n points.append((i, tuple(block_idx), tuple(inblock_idx)))\n\n chunks = [c for i, c in zip(flat_indexes, x.chunks) if i is None]\n chunks.insert(0, (len(points),) if points else (0,))\n chunks = tuple(chunks)\n\n if points:\n per_block = groupby(1, points)\n per_block = dict((k, v) for k, v in per_block.items() if v)\n\n other_blocks = list(\n product(\n *[\n list(range(len(c))) if i is None else [None]\n for i, c in zip(flat_indexes, x.chunks)\n ]\n )\n )\n\n full_slices = [slice(None, None) if i is None else None for i in flat_indexes]\n\n name = \"vindex-slice-\" + token\n vindex_merge_name = \"vindex-merge-\" + token\n dsk = {}\n for okey in other_blocks:\n for i, key in enumerate(per_block):\n dsk[keyname(name, i, okey)] = (\n _vindex_transpose,\n (\n _vindex_slice,\n (x.name,) + interleave_none(okey, key),\n interleave_none(\n full_slices, list(zip(*pluck(2, per_block[key])))\n ),\n ),\n axis,\n )\n dsk[keyname(vindex_merge_name, 0, okey)] = (\n _vindex_merge,\n [list(pluck(0, per_block[key])) for key in per_block],\n [keyname(name, i, okey) for i in range(len(per_block))],\n )\n\n result_1d = Array(\n HighLevelGraph.from_collections(out_name, dsk, dependencies=[x]),\n out_name,\n chunks,\n x.dtype,\n )\n return result_1d.reshape(broadcast_shape + result_1d.shape[1:])\n\n # output has a zero dimension, just create a new zero-shape array with the\n # same dtype\n from .wrap import empty\n\n result_1d = empty(\n tuple(map(sum, chunks)), chunks=chunks, dtype=x.dtype, name=out_name\n )\n return result_1d.reshape(broadcast_shape + result_1d.shape[1:])\n\n\ndef _get_axis(indexes):\n \"\"\" Get axis along which point-wise slicing results lie\n\n This is mostly a hack because I can't figure out NumPy's rule on this and\n can't be bothered to go reading.\n\n >>> _get_axis([[1, 2], None, [1, 2], None])\n 0\n >>> _get_axis([None, [1, 2], [1, 2], None])\n 1\n >>> _get_axis([None, None, [1, 2], [1, 2]])\n 2\n \"\"\"\n ndim = len(indexes)\n indexes = [slice(None, None) if i is None else [0] for i in indexes]\n x = np.empty((2,) * ndim)\n x2 = x[tuple(indexes)]\n return x2.shape.index(1)\n\n\ndef _vindex_slice(block, points):\n \"\"\" Pull out point-wise slices from block \"\"\"\n points = [p if isinstance(p, 
slice) else list(p) for p in points]\n return block[tuple(points)]\n\n\ndef _vindex_transpose(block, axis):\n \"\"\" Rotate block so that points are on the first dimension \"\"\"\n axes = [axis] + list(range(axis)) + list(range(axis + 1, block.ndim))\n return block.transpose(axes)\n\n\ndef _vindex_merge(locations, values):\n \"\"\"\n\n >>> locations = [0], [2, 1]\n >>> values = [np.array([[1, 2, 3]]),\n ... np.array([[10, 20, 30], [40, 50, 60]])]\n\n >>> _vindex_merge(locations, values)\n array([[ 1, 2, 3],\n [40, 50, 60],\n [10, 20, 30]])\n \"\"\"\n locations = list(map(list, locations))\n values = list(values)\n\n n = sum(map(len, locations))\n\n shape = list(values[0].shape)\n shape[0] = n\n shape = tuple(shape)\n\n dtype = values[0].dtype\n\n x = np.empty(shape, dtype=dtype)\n\n ind = [slice(None, None) for i in range(x.ndim)]\n for loc, val in zip(locations, values):\n ind[0] = loc\n x[tuple(ind)] = val\n\n return x\n\n\ndef to_npy_stack(dirname, x, axis=0):\n \"\"\" Write dask array to a stack of .npy files\n\n This partitions the dask.array along one axis and stores each block along\n that axis as a single .npy file in the specified directory\n\n Examples\n --------\n >>> x = da.ones((5, 10, 10), chunks=(2, 4, 4)) # doctest: +SKIP\n >>> da.to_npy_stack('data/', x, axis=0) # doctest: +SKIP\n\n The ``.npy`` files store numpy arrays for ``x[0:2], x[2:4], and x[4:5]``\n respectively, as is specified by the chunk size along the zeroth axis::\n\n $ tree data/\n data/\n |-- 0.npy\n |-- 1.npy\n |-- 2.npy\n |-- info\n\n The ``info`` file stores the dtype, chunks, and axis information of the array.\n You can load these stacks with the ``da.from_npy_stack`` function.\n\n >>> y = da.from_npy_stack('data/') # doctest: +SKIP\n\n See Also\n --------\n from_npy_stack\n \"\"\"\n\n chunks = tuple((c if i == axis else (sum(c),)) for i, c in enumerate(x.chunks))\n xx = x.rechunk(chunks)\n\n if not os.path.exists(dirname):\n os.mkdir(dirname)\n\n meta = {\"chunks\": chunks, \"dtype\": x.dtype, \"axis\": axis}\n\n with open(os.path.join(dirname, \"info\"), \"wb\") as f:\n pickle.dump(meta, f)\n\n name = \"to-npy-stack-\" + str(uuid.uuid1())\n dsk = {\n (name, i): (np.save, os.path.join(dirname, \"%d.npy\" % i), key)\n for i, key in enumerate(core.flatten(xx.__dask_keys__()))\n }\n\n graph = HighLevelGraph.from_collections(name, dsk, dependencies=[xx])\n compute_as_if_collection(Array, graph, list(dsk))\n\n\ndef from_npy_stack(dirname, mmap_mode=\"r\"):\n \"\"\" Load dask array from stack of npy files\n\n See ``da.to_npy_stack`` for docstring\n\n Parameters\n ----------\n dirname: string\n Directory of .npy files\n mmap_mode: (None or 'r')\n Read data in memory map mode\n \"\"\"\n with open(os.path.join(dirname, \"info\"), \"rb\") as f:\n info = pickle.load(f)\n\n dtype = info[\"dtype\"]\n chunks = info[\"chunks\"]\n axis = info[\"axis\"]\n\n name = \"from-npy-stack-%s\" % dirname\n keys = list(product([name], *[range(len(c)) for c in chunks]))\n values = [\n (np.load, os.path.join(dirname, \"%d.npy\" % i), mmap_mode)\n for i in range(len(chunks[axis]))\n ]\n dsk = dict(zip(keys, values))\n\n return Array(dsk, name, chunks, dtype)\n\n\nfrom .utils import meta_from_array\n" ]
[ [ "numpy.ones", "numpy.empty", "numpy.searchsorted", "numpy.dtype", "numpy.asarray", "numpy.median", "numpy.errstate", "numpy.asanyarray", "numpy.can_cast", "numpy.prod", "numpy.max", "numpy.isnan", "numpy.array", "numpy.isscalar" ] ]
cophus/PhaseContrastTomographySolver
[ "e75cfd5af5cc0fdf363d3754c22d91f4c2dec8e8" ]
[ "transform.py" ]
[ "\"\"\"\nTransform functions for Tomography in Numpy, Scipy, Torch, and Skimage\nEstimates affine transform between measured image and predicted image\nhttps://github.com/scikit-image/scikit-image\n\nDavid Ren [email protected]\n\nDec 28, 2020\n\"\"\"\n\nimport numpy as np\nimport torch\nfrom skimage.registration import optical_flow_tvl1\nfrom skimage import transform\nimport scipy.optimize as sop\n\nclass ImageTransformOpticalFlow():\n \"\"\"\n Class written to register stack of images for AET.\n Uses correlation based method to determine subpixel shift between predicted and measured images.\n Input parameters:\n - shape: shape of the image\n \"\"\" \n def __init__(self, shape, method=\"optical_flow\"):\n self.shape = shape\n self.x_lin, self.y_lin = np.meshgrid(np.arange(self.shape[1]), np.arange(self.shape[0]))\n self.xy_lin = np.concatenate((self.x_lin[np.newaxis,], self.y_lin[np.newaxis,])).astype('float32')\n \n\n def _coordinate_warp(self, transform_vec, xy_lin, xy_flow):\n transform_vec = transform_vec.astype('float32')\n rot_mat = [np.cos(transform_vec[0]), \\\n -np.sin(transform_vec[0]), \\\n np.sin(transform_vec[0]), \\\n np.cos(transform_vec[0])]\n xy_predict = np.zeros_like(xy_lin)\n xy_predict[0,] = rot_mat[0] * xy_lin[0,] + rot_mat[1] * xy_lin[1,] + transform_vec[1]\n xy_predict[1,] = rot_mat[2] * xy_lin[0,] + rot_mat[3] * xy_lin[1,] + transform_vec[2]\n resid = xy_predict - xy_flow\n f_val = 0.5 * np.sum(resid.transpose((1,2,0)).flatten() ** 2)\n f_grad = []\n #theta\n f_grad.append(np.sum((rot_mat[1] * xy_lin[0,] * resid[0,]).flatten()) +\\\n np.sum((-rot_mat[0] * xy_lin[1,] * resid[0,]).flatten()) + \\\n np.sum((rot_mat[0] * xy_lin[0,] * resid[1,]).flatten()) + \\\n np.sum((rot_mat[1] * xy_lin[1,] * resid[1,]).flatten()))\n #dx\n f_grad.append(np.sum((resid[0,]).flatten()))\n #dy\n f_grad.append(np.sum((resid[1,]).flatten()))\n f_grad = np.array(f_grad)\n return f_val.astype('float64'), np.array(f_grad).astype('float64')\n\n def _estimate_single(self, predicted, measured):\n assert predicted.shape == self.shape\n assert measured.shape == self.shape\n flow = optical_flow_tvl1(predicted, measured)\n flow[[1,0],] = flow[[0,1],]\n xy_flow = self.xy_lin - flow\n _Afunc_coord_warp = lambda transform_vec: self._coordinate_warp(transform_vec, self.xy_lin, xy_flow) \n\n #estimate transform matrix from optical flow\n results = sop.fmin_l_bfgs_b(_Afunc_coord_warp, np.array([0.0,0,0]))\n transform_final = results[0]\n if results[2][\"warnflag\"]:\n transform_final *= 0.0\n print(\"Transform estimation not converged\")\n\n #inverse warp measured image\n transform_mat = np.array([np.cos(transform_final[0]), \\\n -np.sin(transform_final[0]), \\\n np.sin(transform_final[0]), \\\n np.cos(transform_final[0]), \\\n transform_final[1], \\\n transform_final[2]]) \n aff_mat = np.array([transform_mat[[0,1,4]], transform_mat[[2,3,5]],[0,0,1]])\n tform = transform.AffineTransform(matrix = aff_mat)\n measured_warp = transform.warp(measured, tform.inverse, cval = 1.0)\n\n return measured_warp, transform_final\n\n def estimate(self, predicted_stack, measured_stack):\n assert predicted_stack.shape == measured_stack.shape\n transform_vec_list = np.zeros((3,measured_stack.shape[2]), dtype=\"float32\")\n\n #Change from torch array to numpy array\n flag_predicted_gpu = predicted_stack.is_cuda\n if flag_predicted_gpu:\n predicted_stack = predicted_stack.cpu()\n\n flag_measured_gpu = measured_stack.is_cuda\n if flag_measured_gpu:\n measured_stack = measured_stack.cpu() \n \n predicted_np = 
np.array(predicted_stack.detach())\n measured_np = np.array(measured_stack.detach())\n \n #For each image, estimate the affine transform error\n for img_idx in range(measured_np.shape[2]):\n measured_np[...,img_idx], transform_vec = self._estimate_single(predicted_np[...,img_idx], \\\n measured_np[...,img_idx])\n transform_vec_list[...,img_idx] = transform_vec\n \n #Change data back to torch tensor format\n if flag_predicted_gpu:\n predicted_stack = predicted_stack.cuda()\n\n measured_np = torch.tensor(measured_np)\n if flag_measured_gpu:\n measured_stack = measured_stack.cuda() \n measured_np = measured_np.cuda()\n\n return measured_np, torch.tensor(transform_vec_list)\n" ]
[ [ "numpy.zeros_like", "numpy.zeros", "torch.tensor", "numpy.cos", "numpy.arange", "numpy.array", "numpy.sin", "numpy.concatenate" ] ]
mnassar/deep-learning
[ "b69617993b2e67cfd5635460d1a295e91b6c66d6" ]
[ "tv-script-generation/problem_unittests.py" ]
[ "import numpy as np\nimport tensorflow as tf\nfrom tensorflow.contrib import rnn\n\n\ndef _print_success_message():\n print('Tests Passed')\n\n\ndef test_create_lookup_tables(create_lookup_tables):\n with tf.Graph().as_default():\n test_text = '''\n Moe_Szyslak Moe's Tavern Where the elite meet to drink\n Bart_Simpson Eh yeah hello is Mike there Last name Rotch\n Moe_Szyslak Hold on I'll check Mike Rotch Mike Rotch Hey has anybody seen Mike Rotch lately\n Moe_Szyslak Listen you little puke One of these days I'm gonna catch you and I'm gonna carve my name on your back with an ice pick\n Moe_Szyslak Whats the matter Homer You're not your normal effervescent self\n Homer_Simpson I got my problems Moe Give me another one\n Moe_Szyslak Homer hey you should not drink to forget your problems\n Barney_Gumble Yeah you should only drink to enhance your social skills'''\n\n test_text = test_text.lower()\n test_text = test_text.split()\n\n vocab_to_int, int_to_vocab = create_lookup_tables(test_text)\n\n # Check types\n assert isinstance(vocab_to_int, dict),\\\n 'vocab_to_int is not a dictionary.'\n assert isinstance(int_to_vocab, dict),\\\n 'int_to_vocab is not a dictionary.'\n\n # Compare lengths of dicts\n assert len(vocab_to_int) == len(int_to_vocab),\\\n 'Length of vocab_to_int and int_to_vocab don\\'t match. ' \\\n 'vocab_to_int is length {}. int_to_vocab is length {}'.format(len(vocab_to_int), len(int_to_vocab))\n\n # Make sure the dicts have the same words\n vocab_to_int_word_set = set(vocab_to_int.keys())\n int_to_vocab_word_set = set(int_to_vocab.values())\n\n assert not (vocab_to_int_word_set - int_to_vocab_word_set),\\\n 'vocab_to_int and int_to_vocab don\\'t have the same words.' \\\n '{} found in vocab_to_int, but not in int_to_vocab'.format(vocab_to_int_word_set - int_to_vocab_word_set)\n assert not (int_to_vocab_word_set - vocab_to_int_word_set),\\\n 'vocab_to_int and int_to_vocab don\\'t have the same words.' \\\n '{} found in int_to_vocab, but not in vocab_to_int'.format(int_to_vocab_word_set - vocab_to_int_word_set)\n\n # Make sure the dicts have the same word ids\n vocab_to_int_word_id_set = set(vocab_to_int.values())\n int_to_vocab_word_id_set = set(int_to_vocab.keys())\n\n assert not (vocab_to_int_word_id_set - int_to_vocab_word_id_set),\\\n 'vocab_to_int and int_to_vocab don\\'t contain the same word ids.' \\\n '{} found in vocab_to_int, but not in int_to_vocab'.format(vocab_to_int_word_id_set - int_to_vocab_word_id_set)\n assert not (int_to_vocab_word_id_set - vocab_to_int_word_id_set),\\\n 'vocab_to_int and int_to_vocab don\\'t contain the same word ids.' \\\n '{} found in int_to_vocab, but not in vocab_to_int'.format(int_to_vocab_word_id_set - vocab_to_int_word_id_set)\n\n # Make sure the dicts make the same lookup\n missmatches = [(word, id, id, int_to_vocab[id]) for word, id in vocab_to_int.items() if int_to_vocab[id] != word]\n\n assert not missmatches,\\\n 'Found {} missmatche(s). First missmatch: vocab_to_int[{}] = {} and int_to_vocab[{}] = {}'.format(\n len(missmatches),\n *missmatches[0])\n\n assert len(vocab_to_int) > len(set(test_text))/2,\\\n 'The length of vocab seems too small. 
Found a length of {}'.format(len(vocab_to_int))\n\n _print_success_message()\n\n\ndef test_get_batches(get_batches):\n with tf.Graph().as_default():\n test_batch_size = 128\n test_seq_length = 5\n test_int_text = list(range(1000*test_seq_length))\n batches = get_batches(test_int_text, test_batch_size, test_seq_length)\n\n # Check type\n assert isinstance(batches, np.ndarray),\\\n 'Batches is not a Numpy array'\n\n # Check shape\n assert batches.shape == (7, 2, 128, 5),\\\n 'Batches returned wrong shape. Found {}'.format(batches.shape)\n\n for x in range(batches.shape[2]):\n assert np.array_equal(batches[0,0,x], np.array(range(x * 35, x * 35 + batches.shape[3]))),\\\n 'Batches returned wrong contents. For example, input sequence {} in the first batch was {}'.format(x, batches[0,0,x])\n assert np.array_equal(batches[0,1,x], np.array(range(x * 35 + 1, x * 35 + 1 + batches.shape[3]))),\\\n 'Batches returned wrong contents. For example, target sequence {} in the first batch was {}'.format(x, batches[0,1,x])\n\n\n last_seq_target = (test_batch_size-1) * 35 + 31\n last_seq = np.array(range(last_seq_target, last_seq_target+ batches.shape[3]))\n last_seq[-1] = batches[0,0,0,0]\n\n assert np.array_equal(batches[-1,1,-1], last_seq),\\\n 'The last target of the last batch should be the first input of the first batch. Found {} but expected {}'.format(batches[-1,1,-1], last_seq)\n\n _print_success_message()\n\n\ndef test_tokenize(token_lookup):\n with tf.Graph().as_default():\n symbols = set(['.', ',', '\"', ';', '!', '?', '(', ')', '--', '\\n'])\n token_dict = token_lookup()\n\n # Check type\n assert isinstance(token_dict, dict), \\\n 'Returned type is {}.'.format(type(token_dict))\n\n # Check symbols\n missing_symbols = symbols - set(token_dict.keys())\n unknown_symbols = set(token_dict.keys()) - symbols\n\n assert not missing_symbols, \\\n 'Missing symbols: {}'.format(missing_symbols)\n assert not unknown_symbols, \\\n 'Unknown symbols: {}'.format(unknown_symbols)\n\n # Check values type\n bad_value_type = [type(val) for val in token_dict.values() if not isinstance(val, str)]\n\n assert not bad_value_type,\\\n 'Found token as {} type.'.format(bad_value_type[0])\n\n # Check for spaces\n key_has_spaces = [k for k in token_dict.keys() if ' ' in k]\n val_has_spaces = [val for val in token_dict.values() if ' ' in val]\n\n assert not key_has_spaces,\\\n 'The key \"{}\" includes spaces. Remove spaces from keys and values'.format(key_has_spaces[0])\n assert not val_has_spaces,\\\n 'The value \"{}\" includes spaces. Remove spaces from keys and values'.format(val_has_spaces[0])\n\n # Check for symbols in values\n symbol_val = ()\n for symbol in symbols:\n for val in token_dict.values():\n if symbol in val:\n symbol_val = (symbol, val)\n\n assert not symbol_val,\\\n 'Don\\'t use a symbol that will be replaced in your tokens. Found the symbol {} in value {}'.format(*symbol_val)\n\n _print_success_message()\n\n\ndef test_get_inputs(get_inputs):\n with tf.Graph().as_default():\n input_data, targets, lr = get_inputs()\n\n # Check type\n assert input_data.op.type == 'Placeholder',\\\n 'Input not a Placeholder.'\n assert targets.op.type == 'Placeholder',\\\n 'Targets not a Placeholder.'\n assert lr.op.type == 'Placeholder',\\\n 'Learning Rate not a Placeholder.'\n\n # Check name\n assert input_data.name == 'input:0',\\\n 'Input has bad name. 
Found name {}'.format(input_data.name)\n\n # Check rank\n input_rank = 0 if input_data.get_shape() == None else len(input_data.get_shape())\n targets_rank = 0 if targets.get_shape() == None else len(targets.get_shape())\n lr_rank = 0 if lr.get_shape() == None else len(lr.get_shape())\n\n assert input_rank == 2,\\\n 'Input has wrong rank. Rank {} found.'.format(input_rank)\n assert targets_rank == 2,\\\n 'Targets has wrong rank. Rank {} found.'.format(targets_rank)\n assert lr_rank == 0,\\\n 'Learning Rate has wrong rank. Rank {} found'.format(lr_rank)\n\n _print_success_message()\n\n\ndef test_get_init_cell(get_init_cell):\n with tf.Graph().as_default():\n test_batch_size_ph = tf.placeholder(tf.int32, [])\n test_rnn_size = 256\n\n cell, init_state = get_init_cell(test_batch_size_ph, test_rnn_size)\n\n # Check type\n assert isinstance(cell, tf.contrib.rnn.MultiRNNCell),\\\n 'Cell is wrong type. Found {} type'.format(type(cell))\n\n # Check for name attribute\n assert hasattr(init_state, 'name'),\\\n 'Initial state doesn\\'t have the \"name\" attribute. Try using `tf.identity` to set the name.'\n\n # Check name\n assert init_state.name == 'initial_state:0',\\\n 'Initial state doesn\\'t have the correct name. Found the name {}'.format(init_state.name)\n\n _print_success_message()\n\n\ndef test_get_embed(get_embed):\n with tf.Graph().as_default():\n embed_shape = [50, 5, 256]\n test_input_data = tf.placeholder(tf.int32, embed_shape[:2])\n test_vocab_size = 27\n test_embed_dim = embed_shape[2]\n\n embed = get_embed(test_input_data, test_vocab_size, test_embed_dim)\n\n # Check shape\n assert embed.shape == embed_shape,\\\n 'Wrong shape. Found shape {}'.format(embed.shape)\n\n _print_success_message()\n\n\ndef test_build_rnn(build_rnn):\n with tf.Graph().as_default():\n test_rnn_size = 256\n test_rnn_layer_size = 2\n test_cell = rnn.MultiRNNCell([rnn.BasicLSTMCell(test_rnn_size) for _ in range(test_rnn_layer_size)])\n\n test_inputs = tf.placeholder(tf.float32, [None, None, test_rnn_size])\n outputs, final_state = build_rnn(test_cell, test_inputs)\n\n # Check name\n assert hasattr(final_state, 'name'),\\\n 'Final state doesn\\'t have the \"name\" attribute. Try using `tf.identity` to set the name.'\n assert final_state.name == 'final_state:0',\\\n 'Final state doesn\\'t have the correct name. Found the name {}'.format(final_state.name)\n\n # Check shape\n assert outputs.get_shape().as_list() == [None, None, test_rnn_size],\\\n 'Outputs has wrong shape. Found shape {}'.format(outputs.get_shape())\n assert final_state.get_shape().as_list() == [test_rnn_layer_size, 2, None, test_rnn_size],\\\n 'Final state wrong shape. Found shape {}'.format(final_state.get_shape())\n\n _print_success_message()\n\n\ndef test_build_nn(build_nn):\n with tf.Graph().as_default():\n test_input_data_shape = [None, 5]\n test_input_data = tf.placeholder(tf.int32, test_input_data_shape)\n test_rnn_size = 256\n test_embed_dim = 300\n test_rnn_layer_size = 2\n test_vocab_size = 27\n test_cell = rnn.MultiRNNCell([rnn.BasicLSTMCell(test_rnn_size) for _ in range(test_rnn_layer_size)])\n\n logits, final_state = build_nn(test_cell, test_rnn_size, test_input_data, test_vocab_size, test_embed_dim)\n\n # Check name\n assert hasattr(final_state, 'name'), \\\n 'Final state doesn\\'t have the \"name\" attribute. Are you using build_rnn?'\n assert final_state.name == 'final_state:0', \\\n 'Final state doesn\\'t have the correct name. Found the name {}. 
Are you using build_rnn?'.format(final_state.name)\n\n # Check Shape\n assert logits.get_shape().as_list() == test_input_data_shape + [test_vocab_size], \\\n 'Outputs has wrong shape. Found shape {}'.format(logits.get_shape())\n assert final_state.get_shape().as_list() == [test_rnn_layer_size, 2, None, test_rnn_size], \\\n 'Final state wrong shape. Found shape {}'.format(final_state.get_shape())\n\n _print_success_message()\n\n\ndef test_get_tensors(get_tensors):\n test_graph = tf.Graph()\n with test_graph.as_default():\n test_input = tf.placeholder(tf.int32, name='input')\n test_initial_state = tf.placeholder(tf.int32, name='initial_state')\n test_final_state = tf.placeholder(tf.int32, name='final_state')\n test_probs = tf.placeholder(tf.float32, name='probs')\n\n input_text, initial_state, final_state, probs = get_tensors(test_graph)\n\n # Check correct tensor\n assert input_text == test_input,\\\n 'Test input is wrong tensor'\n assert initial_state == test_initial_state, \\\n 'Initial state is wrong tensor'\n assert final_state == test_final_state, \\\n 'Final state is wrong tensor'\n assert probs == test_probs, \\\n 'Probabilities is wrong tensor'\n\n _print_success_message()\n\n\ndef test_pick_word(pick_word):\n with tf.Graph().as_default():\n test_probabilities = np.array([0.1, 0.8, 0.05, 0.05])\n test_int_to_vocab = {word_i: word for word_i, word in enumerate(['this', 'is', 'a', 'test'])}\n\n pred_word = pick_word(test_probabilities, test_int_to_vocab)\n\n # Check type\n assert isinstance(pred_word, str),\\\n 'Predicted word is wrong type. Found {} type.'.format(type(pred_word))\n\n # Check word is from vocab\n assert pred_word in test_int_to_vocab.values(),\\\n 'Predicted word not found in int_to_vocab.'\n\n\n _print_success_message()\n\n" ]
[ [ "tensorflow.placeholder", "tensorflow.contrib.rnn.BasicLSTMCell", "tensorflow.Graph", "numpy.array_equal", "numpy.array" ] ]
otosense/oplot
[ "5b4b4b96ebfa5486501c02e7051d1c11b1c3b86c" ]
[ "oplot/multiplots.py" ]
[ "\"\"\"Drawing multiple plots in a single figure\"\"\"\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom matplotlib import gridspec\n\n\ndef make_space_above(axes, topmargin=1):\n \"\"\" increase figure size to make topmargin (in inches) space for\n titles, without changing the axes sizes\"\"\"\n\n fig = axes.flatten()[0].figure\n s = fig.subplotpars\n w, h = fig.get_size_inches()\n\n figh = h - (1 - s.top) * h + topmargin\n fig.subplots_adjust(bottom=s.bottom * h / figh, top=1 - topmargin / figh)\n fig.set_figheight(figh)\n\n\ndef ax_func_to_plot(\n list_func_per_ax,\n n_per_row=3,\n title=None,\n title_font_size=10,\n width=15,\n height_row=10,\n saving_path=None,\n x_labels=None,\n y_labels=None,\n outer_axis_labels_only=False,\n dpi=200,\n plot=True,\n h_pad=0,\n w_pad=0,\n title_offset=0,\n):\n \"\"\"\n Draw one grid of plots from the individual plots\n\n :param list_func_per_ax: a list/generator of functions, each taking an ax object as an input and plotting something on it\n :param n_per_row: number of plots per row\n :param title: global title of the plot\n :param title_font_size: font size of the global title\n :param width: width of the global plot\n :param height_row: height of each row\n :param saving_path: path where to save the plot, can be left to none in which case the plot is not saved\n :param x_labels: label of the x axis\n :param y_labels: label of the y axis\n :param outer_axis_labels_only: if set to true, only the axis labels on the left column and bottom row will show\n :return:\n \"\"\"\n\n n_rows = int(np.ceil(len(list_func_per_ax) / n_per_row))\n fig, axes = plt.subplots(\n nrows=n_rows,\n ncols=n_per_row,\n figsize=(width, height_row * n_rows),\n squeeze=False,\n )\n\n # fig.legend(fancybox=True, framealpha=1, shadow=True, borderpad=1)\n fig.suptitle(title, fontsize=title_font_size)\n\n for idx, ax in enumerate(axes.flat):\n if idx < len(list_func_per_ax):\n ax.set(xlabel=x_labels, ylabel=y_labels)\n\n if outer_axis_labels_only:\n for idx, ax in enumerate(axes.flat):\n if idx < len(list_func_per_ax):\n ax.label_outer()\n\n for idx, (ax, func) in enumerate(zip(axes.flatten(), list_func_per_ax)):\n if idx < len(list_func_per_ax):\n func(ax=ax)\n\n # Delete the remaining empty plots if any\n for i in range(len(list_func_per_ax), n_rows * n_per_row):\n fig.delaxes(axes.flatten()[i])\n\n handles, labels = ax.get_legend_handles_labels()\n fig.legend(handles, labels, loc=1)\n plt.tight_layout(h_pad=h_pad, w_pad=w_pad)\n\n make_space_above(axes, topmargin=title_offset)\n\n if saving_path:\n fig.savefig(saving_path, dpi=dpi)\n if plot:\n plt.show()\n\n\ndef multiplot_with_max_size(\n list_func_per_ax,\n max_plot_per_file=60,\n n_per_row=3,\n title=None,\n title_font_size=10,\n width=15,\n height_row=10,\n saving_path_format=None,\n x_labels=None,\n y_labels=None,\n outer_axis_labels_only=False,\n dpi=300,\n plot=True,\n):\n \"\"\"\n Same as ax_func_to_plot but saves on several files\n :param max_plot_per_file: the maximum number of plots per file\n \"\"\"\n\n n_files, n_remainder_rows = divmod(len(list_func_per_ax), max_plot_per_file)\n file_idx = 0\n for file_idx in range(n_files):\n funcs = list_func_per_ax[\n file_idx * max_plot_per_file : (file_idx + 1) * max_plot_per_file\n ]\n if saving_path_format:\n saving_path = saving_path_format.format(file_idx)\n else:\n saving_path = None\n ax_func_to_plot(\n funcs,\n n_per_row=n_per_row,\n title=title,\n title_font_size=title_font_size,\n width=width,\n height_row=height_row,\n saving_path=saving_path,\n 
x_labels=x_labels,\n y_labels=y_labels,\n outer_axis_labels_only=outer_axis_labels_only,\n )\n file_idx += 1\n if saving_path_format:\n saving_path = saving_path_format.format(file_idx)\n else:\n saving_path = None\n funcs = list_func_per_ax[-n_remainder_rows:]\n ax_func_to_plot(\n funcs,\n n_per_row=n_per_row,\n title=title,\n title_font_size=title_font_size,\n width=width,\n height_row=height_row,\n saving_path=saving_path,\n x_labels=x_labels,\n y_labels=y_labels,\n outer_axis_labels_only=outer_axis_labels_only,\n dpi=dpi,\n plot=plot,\n )\n\n\n# # Example of usage\n# if __name__ == '__main__':\n# def ax_func(ax):\n# ax.plot([1, 5, 3])\n# ax.set_title('test_test')\n#\n#\n# ax_func_to_plot([ax_func] * 6, title='Test', x_labels='x_name_here', y_labels='something',\n# outer_axis_labels_only=True)\n" ]
[ [ "matplotlib.pyplot.tight_layout", "matplotlib.pyplot.show", "matplotlib.pyplot.subplots" ] ]
ICASSP-2020-Robustness-Tutorial/Robust-Signal-Processing-Toolbox-Python
[ "293f4281fdbd475549aa42eae9fe615976af27a0" ]
[ "robustsp/Regression/enetpath.py" ]
[ "'''\n enethpath computes the elastic net (EN) regularization path (over grid \n of penalty parameter values). Uses pathwise CCD algorithm. \n INPUT: \n y : Numeric 1darray of size N (output, respones)\n X : Nnumeric 2darray of size N x p. Each row represents one \n observation, and each column represents one predictor (feature). \n intcpt: Logical flag to indicate if intercept is in the model\n alpha : Numeric scalar, elastic net tuning parameter in the range [0,1].\n If not given then use alpha = 1 (Lasso)\n eps: Positive scalar, the ratio of the smallest to the \n largest Lambda value in the grid. Default is eps = 10^-4. \n L : Positive integer, the number of lambda values EN/Lasso uses. \n Default is L=100. \n printitn: print iteration number (default = 0, no printing)\n OUTPUT:\n B : Fitted EN/Lasso regression coefficients, a p-by-(L+1) matrix, \n where p is the number of predictors (columns) in X, and L is \n the number of Lambda values. If intercept is in the model, then\n B is (p+1)-by-(L+1) matrix, with first element the intercept.\n stats : Dictionary with following fields: \n Lambda = lambda parameters in ascending order\n MSE = Mean squared error (MSE)\n BIC = Bayesian information criterion values \n'''\nimport numpy as np\nfrom robustsp.Regression.enet import enet\n\ndef enetpath(yx,Xx,alpha=1,L=120,eps=10**-3,intcpt=True,printitn=0):\n\n # ensure inputs are ndarrays\n Xc = np.copy(np.asarray(Xx))\n y = np.copy(np.asarray(yx))\n if len(y.shape) == 2: y = y.flatten()\n n,p = Xc.shape\n\n # if intercept is in the model, center the data\n if intcpt:\n meanX = np.mean(Xc,axis=0)\n meany = np.mean(y)\n Xc -= meanX\n y -= meany\n \n \n if printitn > 0:\n print('enetpath: using alpha = %.1f \\n' % alpha)\n\n sdX = np.sqrt(np.sum(Xc*np.conj(Xc),axis=0)) \n Xc /= sdX\n \n lam0 = np.linalg.norm(Xc.T @ y,np.inf)/alpha # smallest penalty value giving zero solution\n \n lamgrid = eps**(np.arange(0,L+1,1)/L) * lam0 # grid of penalty values\n\n B = np.zeros([p,L+1])\n\n for jj in range(L):\n B[:,jj+1], b = enet(y,Xc,B[:,jj], lamgrid[jj+1], alpha, printitn)\n\n B[np.abs(B) < 5e-8] = 0\n\n DF = np.sum([np.abs(B)!=0],axis=1) # non-zero values in each column\n\n if n > p:\n MSE = np.sum(np.abs(np.repeat(y[:,np.newaxis],L+1,axis=1)\n -Xc@B)**2,axis=0) *(1/(n-DF-1))\n BIC = n * np.log(MSE) + DF * np.log(n)\n else:\n MSE = []\n BIC = []\n\n B = B / sdX[:,None]\n if intcpt:\n B = np.vstack([meany - meanX @ B, B])\n\n stats = {'MSE':MSE,'BIC':BIC,'Lambda':lamgrid} \n\n\n return B, stats" ]
[ [ "numpy.vstack", "numpy.zeros", "numpy.conj", "numpy.abs", "numpy.asarray", "numpy.repeat", "numpy.arange", "numpy.log", "numpy.linalg.norm", "numpy.mean" ] ]
neuropoly/ivadomed
[ "e5f14c02a6c73d9360eee130ff39f0a364e0a697" ]
[ "ivadomed/uncertainty.py" ]
[ "import nibabel as nib\nfrom tqdm import tqdm\nfrom scipy.ndimage import label, generate_binary_structure\nfrom pathlib import Path\nimport json\nimport numpy as np\nfrom ivadomed import postprocessing as imed_postpro\nfrom typing import List\n\n\ndef run_uncertainty(image_folder):\n \"\"\"Compute uncertainty from model prediction.\n\n This function loops across the model predictions (nifti masks) and estimates the uncertainty from the Monte Carlo\n samples. Both voxel-wise and structure-wise uncertainty are estimates.\n\n Args:\n image_folder (str): Folder containing the Monte Carlo samples.\n \"\"\"\n # list subj_acq prefixes\n subj_acq_lst = [file.name.split('_pred')[0] for file in Path(image_folder).iterdir()\n if file.name.endswith('.nii.gz') and '_pred' in file.name]\n # remove duplicates\n subj_acq_lst = list(set(subj_acq_lst))\n # keep only the images where unc has not been computed yet\n subj_acq_lst = [file for file in subj_acq_lst if not Path(image_folder, file + '_unc-cv.nii.gz').is_file()]\n\n # loop across subj_acq\n for subj_acq in tqdm(subj_acq_lst, desc=\"Uncertainty Computation\"):\n # hard segmentation from MC samples\n fname_pred: Path = Path(image_folder, subj_acq + '_pred.nii.gz')\n # fname for soft segmentation from MC simulations\n fname_soft: Path = Path(image_folder, subj_acq + '_soft.nii.gz')\n # find Monte Carlo simulations\n fname_pred_lst: List[str] = []\n for file in Path(image_folder).iterdir():\n if subj_acq + '_pred_' in file.name and ('_painted' not in file.name) and ('_color' not in file.name):\n fname_pred_lst.append(str(file))\n\n # if final segmentation from Monte Carlo simulations has not been generated yet\n if not fname_pred.is_file() or not fname_soft.is_file():\n # threshold used for the hard segmentation\n thr = 1. / len(fname_pred_lst) # 1 for all voxels where at least on MC sample predicted 1\n # average then argmax\n combine_predictions(fname_pred_lst, str(fname_pred), str(fname_soft), thr=thr)\n\n fname_unc_vox = Path(image_folder, subj_acq + '_unc-vox.nii.gz')\n if not fname_unc_vox.is_file():\n # compute voxel-wise uncertainty map\n voxelwise_uncertainty(fname_pred_lst, str(fname_unc_vox))\n\n fname_unc_struct = Path(image_folder, subj_acq + '_unc.nii.gz')\n if not Path(image_folder, subj_acq + '_unc-cv.nii.gz').is_file():\n # compute structure-wise uncertainty\n structurewise_uncertainty(fname_pred_lst, str(fname_pred), str(fname_unc_vox), str(fname_unc_struct))\n\n\ndef combine_predictions(fname_lst, fname_hard, fname_prob, thr=0.5):\n \"\"\"Combine predictions from Monte Carlo simulations.\n\n Combine predictions from Monte Carlo simulations and save the resulting as:\n (1) `fname_prob`, a soft segmentation obtained by averaging the Monte Carlo samples.\n (2) `fname_hard`, a hard segmentation obtained thresholding with `thr`.\n\n Args:\n fname_lst (list of str): List of the Monte Carlo samples.\n fname_hard (str): Filename for the output hard segmentation.\n fname_prob (str): Filename for the output soft segmentation.\n thr (float): Between 0 and 1. 
Used to threshold the soft segmentation and generate the hard segmentation.\n \"\"\"\n # collect all MC simulations\n mc_data = np.array([nib.load(fname).get_fdata() for fname in fname_lst])\n first_file_header = nib.load(fname_lst[0]).header\n\n # average over all the MC simulations\n data_prob = np.mean(mc_data, axis=0)\n # save prob segmentation\n nib_prob = nib.Nifti1Image(\n dataobj=data_prob,\n affine=first_file_header.get_best_affine(),\n header=first_file_header.copy()\n )\n nib.save(nib_prob, fname_prob)\n\n # argmax operator\n data_hard = imed_postpro.threshold_predictions(data_prob, thr=thr).astype(np.uint8)\n # save hard segmentation\n nib_hard = nib.Nifti1Image(\n dataobj=data_hard,\n affine=first_file_header.get_best_affine(),\n header=first_file_header.copy()\n )\n nib.save(nib_hard, fname_hard)\n\n\ndef voxelwise_uncertainty(fname_lst, fname_out, eps=1e-5):\n \"\"\"Estimate voxel wise uncertainty.\n\n Voxel-wise uncertainty is estimated as entropy over all N MC probability maps, and saved in `fname_out`.\n\n Args:\n fname_lst (list of str): List of the Monte Carlo samples.\n fname_out (str): Output filename.\n eps (float): Epsilon value to deal with np.log(0).\n \"\"\"\n # collect all MC simulations\n mc_data = np.array([nib.load(fname).get_fdata() for fname in fname_lst])\n affine = nib.load(fname_lst[0]).header.get_best_affine()\n\n # entropy\n unc = np.repeat(np.expand_dims(mc_data, -1), 2, -1) # n_it, x, y, z, 2\n unc[..., 0] = 1 - unc[..., 1]\n unc = -np.sum(np.mean(unc, 0) * np.log(np.mean(unc, 0) + eps), -1)\n\n # Clip values to 0\n unc[unc < 0] = 0\n\n # save uncertainty map\n nib_unc = nib.Nifti1Image(unc, affine)\n nib.save(nib_unc, fname_out)\n\n\ndef structurewise_uncertainty(fname_lst, fname_hard, fname_unc_vox, fname_out):\n \"\"\"Estimate structure wise uncertainty.\n\n Structure-wise uncertainty from N MC probability maps (`fname_lst`) and saved in `fname_out` with the following\n suffixes:\n\n * '-cv.nii.gz': coefficient of variation\n * '-iou.nii.gz': intersection over union\n * '-avgUnc.nii.gz': average voxel-wise uncertainty within the structure.\n\n Args:\n fname_lst (list of str): List of the Monte Carlo samples.\n fname_hard (str): Filename of the hard segmentation, which is used to compute the `avgUnc` by providing a mask\n of the structures.\n fname_unc_vox (str): Filename of the voxel-wise uncertainty, which is used to compute the `avgUnc`.\n fname_out (str): Output filename.\n \"\"\"\n # 18-connectivity\n bin_struct = np.array(generate_binary_structure(3, 2))\n\n # load hard segmentation\n nib_hard = nib.load(fname_hard)\n data_hard = nib_hard.get_fdata()\n # Label each object of each class\n data_hard_labeled = [label(data_hard[..., i_class], structure=bin_struct)[0] for i_class in\n range(data_hard.shape[-1])]\n\n # load all MC simulations (in mc_dict[\"mc_data\"]) and label them (in mc_dict[\"mc_labeled\"])\n mc_dict = {\"mc_data\": [], \"mc_labeled\": []}\n for fname in fname_lst:\n data = nib.load(fname).get_fdata()\n mc_dict[\"mc_data\"].append([data[..., i_class] for i_class in range(data.shape[-1])])\n\n labeled_list = [label(data[..., i_class], structure=bin_struct)[0] for i_class in range(data.shape[-1])]\n mc_dict[\"mc_labeled\"].append(labeled_list)\n\n # load uncertainty map\n data_uncVox = nib.load(fname_unc_vox).get_fdata()\n\n # Init output arrays\n data_iou, data_cv, data_avgUnc = np.zeros(data_hard.shape), np.zeros(data_hard.shape), np.zeros(data_hard.shape)\n\n # Loop across classes\n for i_class in range(data_hard.shape[-1]):\n 
# Hard segmentation of the i_class that has been labeled\n data_hard_labeled_class = data_hard_labeled[i_class]\n # Get number of objects in\n l, l_count = np.unique(data_hard_labeled_class, return_counts=True)\n\n # Get all non zero labels and exclude structure of 1 pixel\n labels = l[l_count != 1][1:]\n # Loop across objects\n for i_obj in labels:\n # select the current structure, remaining voxels are set to zero\n data_hard_labeled_class_obj = (np.array(data_hard_labeled_class) == i_obj).astype(int)\n\n # Get object coordinates\n xx_obj, yy_obj, zz_obj = np.where(data_hard_labeled_class_obj)\n\n # Loop across the MC samples and mask the structure of interest\n data_class_obj_mc = []\n for i_mc in range(len(fname_lst)):\n # Get index of the structure of interest in the MC sample labeled\n i_mc_labels, i_mc_counts = np.unique(data_hard_labeled_class_obj * mc_dict[\"mc_labeled\"][i_mc][i_class],\n return_counts=True)\n i_mc_label = i_mc_labels[np.argmax(i_mc_counts[1:]) + 1] if len(i_mc_counts) > 1 else 0\n\n data_tmp = np.zeros(mc_dict[\"mc_data\"][i_mc][i_class].shape)\n # If i_mc_label is zero, it means the structure is not present in this mc_sample\n if i_mc_label > 0:\n data_tmp[mc_dict[\"mc_labeled\"][i_mc][i_class] == i_mc_label] = 1.\n\n data_class_obj_mc.append(data_tmp.astype(np.bool))\n\n # COMPUTE IoU\n # Init intersection and union\n intersection = np.logical_and(data_class_obj_mc[0], data_class_obj_mc[1])\n union = np.logical_or(data_class_obj_mc[0], data_class_obj_mc[1])\n # Loop across remaining MC samples\n for i_mc in range(2, len(data_class_obj_mc)):\n intersection = np.logical_and(intersection, data_class_obj_mc[i_mc])\n union = np.logical_or(union, data_class_obj_mc[i_mc])\n # Compute float\n iou = np.sum(intersection) * 1. / np.sum(union)\n # assign uncertainty value to the structure\n data_iou[xx_obj, yy_obj, zz_obj, i_class] = iou\n\n # COMPUTE COEFFICIENT OF VARIATION\n # List of volumes for each MC sample\n vol_mc_lst = [np.sum(data_class_obj_mc[i_mc]) for i_mc in range(len(data_class_obj_mc))]\n # Mean volume\n mu_mc = np.mean(vol_mc_lst)\n # STD volume\n sigma_mc = np.std(vol_mc_lst)\n # Coefficient of variation\n cv = sigma_mc / mu_mc\n # assign uncertainty value to the structure\n data_cv[xx_obj, yy_obj, zz_obj, i_class] = cv\n\n # COMPUTE AVG VOXEL WISE UNC\n avgUnc = np.mean(data_uncVox[xx_obj, yy_obj, zz_obj, i_class])\n # assign uncertainty value to the structure\n data_avgUnc[xx_obj, yy_obj, zz_obj, i_class] = avgUnc\n\n # save nifti files\n fname_iou = fname_out.split('.nii.gz')[0] + '-iou.nii.gz'\n fname_cv = fname_out.split('.nii.gz')[0] + '-cv.nii.gz'\n fname_avgUnc = fname_out.split('.nii.gz')[0] + '-avgUnc.nii.gz'\n\n nib_iou = nib.Nifti1Image(\n dataobj=data_iou,\n affine=nib_hard.header.get_best_affine(),\n header=nib_hard.header.copy()\n )\n nib_cv = nib.Nifti1Image(\n dataobj=data_cv,\n affine=nib_hard.header.get_best_affine(),\n header=nib_hard.header.copy()\n )\n nib_avgUnc = nib.Nifti1Image(\n data_avgUnc,\n affine=nib_hard.header.get_best_affine(),\n header=nib_hard.header.copy()\n )\n\n nib.save(nib_iou, fname_iou)\n nib.save(nib_cv, fname_cv)\n nib.save(nib_avgUnc, fname_avgUnc)\n" ]
[ [ "numpy.logical_or", "numpy.sum", "numpy.zeros", "scipy.ndimage.label", "numpy.logical_and", "numpy.argmax", "numpy.expand_dims", "numpy.array", "numpy.std", "numpy.where", "numpy.unique", "numpy.mean", "scipy.ndimage.generate_binary_structure" ] ]
AghilasSini/AT-Annotator
[ "532c6de0fe143e2b6ace0d382cc79f1f0f2cf941" ]
[ "cnn-for-sentence-classification/train_keras.py" ]
[ "import numpy as np\nimport codecs\nimport os\nimport random\n\nfrom keras import backend as K\nfrom keras.models import Model\nfrom keras.layers.embeddings import Embedding\nfrom keras.layers import Input, Dense, Lambda, Permute, Dropout\nfrom keras.layers import Conv2D, MaxPooling1D\n\ndef load_data(fpath, label):\n data = []\n with codecs.open(fpath, 'r', 'utf-8', errors='ignore') as f:\n lines = f.readlines()\n for l in lines:\n l = l.rstrip()\n data.append((l.split(' '), label))\n return data\n\ndef vectorize(data, sentence_maxlen, w2i):\n vec_data = []\n labels = []\n for d, label in data:\n vec = [w2i[w] for w in d if w in w2i]\n pad_len = max(0, sentence_maxlen - len(vec))\n vec += [0] * pad_len\n vec_data.append(vec)\n \n labels.append(label)\n vec_data = np.array(vec_data)\n labels = np.array(labels)\n return vec_data, labels\n\ndef load_glove_weights(glove_dir, embd_dim, vocab_size, word_index):\n embeddings_index = {}\n f = open(os.path.join(glove_dir, 'glove.6B.' + str(embd_dim) + 'd.txt'))\n for line in f:\n values = line.split()\n word = values[0]\n coefs = np.asarray(values[1:], dtype='float32')\n embeddings_index[word] = coefs\n f.close()\n\n print('Found %s word vectors.' % len(embeddings_index)) \n embedding_matrix = np.zeros((vocab_size, embd_dim))\n print('embed_matrix.shape', embedding_matrix.shape)\n for word, i in word_index.items():\n embedding_vector = embeddings_index.get(word)\n if embedding_vector is not None:\n # words not found in embedding index will be all-zeros.\n embedding_matrix[i] = embedding_vector\n\n return embedding_matrix\n\npos = load_data('./dataset/rt-polaritydata/rt-polarity.pos', 1)\nneg = load_data('./dataset/rt-polaritydata/rt-polarity.neg', 0)\ndata = pos + neg\n\nsentence_maxlen = max(map(len, (d for d, _ in data)))\nprint('sentence maxlen', sentence_maxlen)\n\nvocab = []\nfor d, _ in data:\n for w in d:\n if w not in vocab: vocab.append(w)\nvocab = sorted(vocab)\nvocab_size = len(vocab)\nprint('vocab size', len(vocab))\nw2i = {w:i for i,w in enumerate(vocab)}\n\nrandom.shuffle(data)\nvecX, vecY = vectorize(data, sentence_maxlen, w2i)\nn_data = len(vecX)\nsplit_ind = (int)(n_data * 0.9)\ntrainX, trainY = vecX[:split_ind], vecY[:split_ind]\ntestX, testY = vecX[split_ind:], vecY[split_ind:]\n\nembd_dim = 300\nglove_embd_w = load_glove_weights('./dataset', embd_dim, vocab_size, w2i)\n\ndef Net(vocab_size, embd_size, sentence_maxlen, glove_embd_w):\n sentence = Input((sentence_maxlen,), name='SentenceInput')\n \n # embedding\n embd_layer = Embedding(input_dim=vocab_size, \n output_dim=embd_size, \n weights=[glove_embd_w], \n trainable=False,\n name='shared_embd')\n embd_sentence = embd_layer(sentence)\n embd_sentence = Permute((2,1))(embd_sentence)\n embd_sentence = Lambda(lambda x: K.expand_dims(x, -1))(embd_sentence)\n \n # cnn\n cnn = Conv2D(1, \n kernel_size=(3, sentence_maxlen),\n activation='relu')(embd_sentence)\n cnn = Lambda(lambda x: K.sum(x, axis=3))(cnn)\n cnn = MaxPooling1D(3)(cnn)\n cnn = Lambda(lambda x: K.sum(x, axis=2))(cnn)\n out = Dense(1, activation='sigmoid')(cnn)\n\n model = Model(inputs=sentence, outputs=out, name='sentence_claccification')\n model.compile(optimizer='adagrad', loss='binary_crossentropy', metrics=['accuracy']) \n return model\n\nmodel = Net(vocab_size, embd_dim, sentence_maxlen, glove_embd_w)\nprint(model.summary())\n\nmodel.fit(trainX, trainY,\n batch_size=32,\n epochs=10,\n validation_data=(testX, testY)\n )\n" ]
[ [ "numpy.array", "numpy.asarray", "numpy.zeros" ] ]
ostap-viniavskyi/acme
[ "8fbae90217557a35e1d773aa63ab80890e799765" ]
[ "acme/agents/tf/svg0_prior/networks.py" ]
[ "# python3\n# Copyright 2018 DeepMind Technologies Limited. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Shared helpers for different experiment flavours.\"\"\"\n\nimport functools\nfrom typing import Mapping, Sequence, Optional\n\nfrom acme import specs\nfrom acme import types\nfrom acme.agents.tf.svg0_prior import utils as svg0_utils\nfrom acme.tf import networks\nfrom acme.tf import utils as tf2_utils\n\nimport numpy as np\nimport sonnet as snt\n\n\ndef make_default_networks(\n action_spec: specs.BoundedArray,\n policy_layer_sizes: Sequence[int] = (256, 256, 256),\n critic_layer_sizes: Sequence[int] = (512, 512, 256),\n) -> Mapping[str, types.TensorTransformation]:\n \"\"\"Creates networks used by the agent.\"\"\"\n\n # Get total number of action dimensions from action spec.\n num_dimensions = np.prod(action_spec.shape, dtype=int)\n\n policy_network = snt.Sequential([\n tf2_utils.batch_concat,\n networks.LayerNormMLP(policy_layer_sizes, activate_final=True),\n networks.MultivariateNormalDiagHead(\n num_dimensions,\n tanh_mean=True,\n min_scale=0.3,\n init_scale=0.7,\n fixed_scale=False,\n use_tfd_independent=False)\n ])\n # The multiplexer concatenates the (maybe transformed) observations/actions.\n multiplexer = networks.CriticMultiplexer(\n action_network=networks.ClipToSpec(action_spec))\n critic_network = snt.Sequential([\n multiplexer,\n networks.LayerNormMLP(critic_layer_sizes, activate_final=True),\n networks.NearZeroInitializedLinear(1),\n ])\n\n return {\n \"policy\": policy_network,\n \"critic\": critic_network,\n }\n\n\ndef make_network_with_prior(\n action_spec: specs.BoundedArray,\n policy_layer_sizes: Sequence[int] = (200, 100),\n critic_layer_sizes: Sequence[int] = (400, 300),\n prior_layer_sizes: Sequence[int] = (200, 100),\n policy_keys: Optional[Sequence[str]] = None,\n prior_keys: Optional[Sequence[str]] = None,\n) -> Mapping[str, types.TensorTransformation]:\n \"\"\"Creates networks used by the agent.\"\"\"\n\n # Get total number of action dimensions from action spec.\n num_dimensions = np.prod(action_spec.shape, dtype=int)\n flatten_concat_policy = functools.partial(\n svg0_utils.batch_concat_selection, concat_keys=policy_keys)\n flatten_concat_prior = functools.partial(\n svg0_utils.batch_concat_selection, concat_keys=prior_keys)\n\n policy_network = snt.Sequential([\n flatten_concat_policy,\n networks.LayerNormMLP(policy_layer_sizes, activate_final=True),\n networks.MultivariateNormalDiagHead(\n num_dimensions,\n tanh_mean=True,\n min_scale=0.1,\n init_scale=0.7,\n fixed_scale=False,\n use_tfd_independent=False)\n ])\n # The multiplexer concatenates the (maybe transformed) observations/actions.\n multiplexer = networks.CriticMultiplexer(\n observation_network=flatten_concat_policy,\n action_network=networks.ClipToSpec(action_spec))\n critic_network = snt.Sequential([\n multiplexer,\n networks.LayerNormMLP(critic_layer_sizes, activate_final=True),\n networks.NearZeroInitializedLinear(1),\n ])\n prior_network = snt.Sequential([\n 
flatten_concat_prior,\n networks.LayerNormMLP(prior_layer_sizes, activate_final=True),\n networks.MultivariateNormalDiagHead(\n num_dimensions,\n tanh_mean=True,\n min_scale=0.1,\n init_scale=0.7,\n fixed_scale=False,\n use_tfd_independent=False)\n ])\n return {\n \"policy\": policy_network,\n \"critic\": critic_network,\n \"prior\": prior_network,\n }\n" ]
[ [ "numpy.prod" ] ]
chappers/multiagent-particle-envs
[ "5c56a0cc4241fb2e5a6f4dc62bf1735862d6e30c" ]
[ "examples/ppo_agent.py" ]
[ "\"\"\"\nExample using this environment with stable_baseline library\n\nfrom stable_baselines.common.vec_env import DummyVecEnv\nenv = DummyVecEnv([lambda: env])\n\nmodel = PPO2(MlpPolicy, env, verbose=1)\nmodel.learn(total_timesteps=25000)\nmodel.save(\"ppo2_cartpole\")\n\n# Enjoy trained agent\nmodel = PPO2.load(\"ppo2_cartpole\")\n\nobs = env.reset()\nwhile True:\n action, _states = model.predict(obs)\n obs, rewards, dones, info = env.step(action)\n env.render()\n\"\"\"\n\nimport gym\nimport numpy as np\nimport copy\n\nfrom stable_baselines.common.policies import MlpPolicy\nfrom stable_baselines.common.vec_env import DummyVecEnv\nfrom stable_baselines import PPO2\nfrom multiagent.environment import MultiAgentEnv\nimport multiagent.scenarios as scenarios\nfrom stable_baselines.common.base_class import BaseRLModel\nfrom stable_baselines.common.runners import AbstractEnvRunner\n\nfrom collections import OrderedDict\nimport numpy as np\n\nfrom stable_baselines.common.vec_env import VecEnv\nfrom stable_baselines.common.vec_env.util import (\n copy_obs_dict,\n dict_to_obs,\n obs_space_info,\n)\n\nscenario_name = \"simple_tag\"\nnum_adversaries = 5\n\n\ndef to_categorical(action, shape=5):\n if type(action) is list:\n action = action[0]\n z = np.zeros(shape)\n z[action] = 1\n return z\n\n\nclass SingleAgentEnv(gym.Env):\n def __init__(self, observation_space, action_space):\n self.observation_space = observation_space\n self.action_space = action_space\n\n def step(self):\n return self\n\n def reset(self):\n return self\n\n\ndef env_splitter(multi_env):\n \"\"\"\n Takes in multiagentenv, and spits out each env individually?\n \"\"\"\n return [\n SingleAgentEnv(obs_space, act_space)\n for obs_space, act_space in zip(\n multi_env.observation_space, multi_env.action_space\n )\n ]\n\n\nclass MultiRunner(object):\n def __init__(self, *, env, models, n_steps, gamma, lam):\n \"\"\"\n A runner to learn the policy of an environment for a model\n :param env: (Gym environment) The environment to learn from\n :param model: (list[Model]) The model to learn\n :param n_steps: (int) The number of steps to run for each environment\n :param gamma: (float) Discount factor\n :param lam: (float) Factor for trade-off of bias vs variance for Generalized Advantage Estimator\n \"\"\"\n self.lam = lam\n self.gamma = gamma\n\n # super().__init__(env=env, model=models, n_steps=n_steps)\n self.env = env\n self.model = model\n n_env = 1 # env.num_envs\n\n self.batch_ob_shape = []\n self.obs = []\n for idx, env_observation_space in enumerate(env.observation_space):\n self.batch_ob_shape.append((n_env * n_steps,) + env.observation_space.shape)\n self.obs.append(\n np.zeros(\n (n_env,) + env.observation_space.shape,\n dtype=env.observation_space.dtype.name,\n )\n )\n\n obs_reset = env.reset()\n for idx, x in enumerate(obs_reset):\n self.obs[idx][:] = x\n self.n_steps = n_steps\n self.states = [x.initial_state for x in self.model] # get states...\n self.dones = [False for _ in range(n_env)]\n\n def run(self):\n \"\"\"\n Run a learning step of the model\n :return:\n - observations: (list[np.ndarray]) the observations\n - rewards: (np.ndarray) the rewards\n - masks: (numpy bool) whether an episode is over or not\n - actions: (np.ndarray) the actions\n - values: (np.ndarray) the value function output\n - negative log probabilities: (np.ndarray)\n - states: (np.ndarray) the internal states of the recurrent policies\n - infos: (dict) the extra information of the model\n \"\"\"\n # mb stands for minibatch\n mb_obs, mb_rewards, 
mb_actions, mb_values, mb_dones, mb_neglogpacs = (\n [],\n [],\n [],\n [],\n [],\n [],\n )\n mb_states = self.states\n ep_infos = []\n for _ in range(self.n_steps):\n actions = []\n values = []\n states = []\n neglogpacs = []\n for idx, agent in enumerate(self.model):\n actions_, values_, states_, neglogpacs_ = self.model.step(\n self.obs[idx].reshape(1, -1), self.states[idx], self.dones\n )\n actions.append(actions_)\n values.append(values_)\n states.append(states_)\n neglogpacs.append(neglogpacs_)\n mb_obs.append(copy.copy(self.obs))\n mb_actions.append(actions)\n mb_values.append(values)\n mb_neglogpacs.append(neglogpacs)\n mb_dones.append(self.dones)\n clipped_actions = copy.copy(actions)\n\n # Clip the actions to avoid out of bound error - do this by agent\n # we will skip this for now...\n \"\"\"\n clipped_actions = []\n for idx, agent in enumerate(self.model):\n if isinstance(self.env.action_space, gym.spaces.Box):\n clipped_actions.append(np.clip(actions, self.env.action_space.low, self.env.action_space.high))\n \"\"\"\n\n self.obs[:], rewards, self.dones, infos = self.env.step(clipped_actions)\n for info in infos:\n maybe_ep_info = info.get(\"episode\")\n if maybe_ep_info is not None:\n ep_infos.append(maybe_ep_info)\n mb_rewards.append(rewards)\n # batch of steps to batch of rollouts\n mb_obs = np.asarray(mb_obs, dtype=self.obs.dtype)\n mb_rewards = np.asarray(mb_rewards, dtype=np.float32)\n mb_actions = np.asarray(mb_actions)\n mb_values = np.asarray(mb_values, dtype=np.float32)\n mb_neglogpacs = np.asarray(mb_neglogpacs, dtype=np.float32)\n mb_dones = np.asarray(mb_dones, dtype=np.bool)\n last_values = self.model.value(self.obs, self.states, self.dones)\n # discount/bootstrap off value fn\n mb_advs = np.zeros_like(mb_rewards)\n true_reward = np.copy(mb_rewards)\n last_gae_lam = 0\n for step in reversed(range(self.n_steps)):\n if step == self.n_steps - 1:\n nextnonterminal = 1.0 - self.dones\n nextvalues = last_values\n else:\n nextnonterminal = 1.0 - mb_dones[step + 1]\n nextvalues = mb_values[step + 1]\n delta = (\n mb_rewards[step]\n + self.gamma * nextvalues * nextnonterminal\n - mb_values[step]\n )\n mb_advs[step] = last_gae_lam = (\n delta + self.gamma * self.lam * nextnonterminal * last_gae_lam\n )\n mb_returns = mb_advs + mb_values\n\n mb_obs, mb_returns, mb_dones, mb_actions, mb_values, mb_neglogpacs, true_reward = map(\n swap_and_flatten,\n (\n mb_obs,\n mb_returns,\n mb_dones,\n mb_actions,\n mb_values,\n mb_neglogpacs,\n true_reward,\n ),\n )\n\n return (\n mb_obs,\n mb_returns,\n mb_dones,\n mb_actions,\n mb_values,\n mb_neglogpacs,\n mb_states,\n ep_infos,\n true_reward,\n )\n\n\nscenario = scenarios.load(scenario_name + \".py\").Scenario()\n# create world\nworld = scenario.make_world()\nenv = MultiAgentEnv(world, scenario.reset_world, scenario.reward, scenario.observation)\n# multi_env = DummyVecMultiEnv([lambda: env]) # TODO and implement??\nsplit_env = env_splitter(env)\nagents = [PPO2(MlpPolicy, DummyVecEnv([lambda: x]), verbose=1) for x in split_env]\n\n# based on these agents on the parent world we want to act and observe it.\nobs_reset = env.reset()\nstates = [x.initial_state for x in agents]\ndones = [False for _ in range(1)]\n\nactions = []\nfor idx in range(len(env.agents)):\n action = agents[idx].step(obs_reset[idx].reshape(1, -1), states[idx], False)\n actions.append(action[0][0])\n\n# see https://github.com/openai/multiagent-particle-envs/blob/master/multiagent/policy.py\n# for how to construct this...\nenv.step(\n [np.concatenate([to_categorical(x), 
np.zeros(env.world.dim_c)]) for x in actions]\n)\n\n\nobs_shape_n = [env.observation_space[i].shape for i in range(env.n)]\nnum_adversaries = env.n # min(env.n, arglist.num_adversaries)\nnum_adversaries = 0\n\n# get the trainers to train using PPO\n" ]
[ [ "numpy.zeros_like", "numpy.asarray", "numpy.zeros", "numpy.copy" ] ]
YerongLi2/LTVRR
[ "ec3be058da9c4f2f68d7c4dfb759209748732b93" ]
[ "lib/roi_data/fast_rcnn_rel.py" ]
[ "# Copyright (c) 2017-present, Facebook, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n##############################################################################\n\"\"\"Construct minibatches for Fast R-CNN training. Handles the minibatch blobs\nthat are specific to Fast R-CNN. Other blobs that are generic to RPN, etc.\nare handled by their respecitive roi_data modules.\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport numpy as np\nimport numpy.random as npr\nimport logging\n\nfrom core.config import cfg\nimport utils.boxes as box_utils\nimport utils.blob as blob_utils\nimport utils.fpn as fpn_utils\n\n\nlogger = logging.getLogger(__name__)\n\n\ndef add_rel_blobs(blobs, im_scales, roidb):\n \"\"\"Add blobs needed for training Fast R-CNN style models.\"\"\"\n # Sample training RoIs from each image and append them to the blob lists\n for im_i, entry in enumerate(roidb):\n frcn_blobs = _sample_pairs(entry, im_scales[im_i], im_i)\n for k, v in frcn_blobs.items():\n blobs[k].append(v)\n # Concat the training blob lists into tensors\n for k, v in blobs.items():\n if isinstance(v, list) and len(v) > 0:\n blobs[k] = np.concatenate(v)\n \n if cfg.FPN.FPN_ON and cfg.FPN.MULTILEVEL_ROIS:\n _add_rel_multilevel_rois(blobs)\n\n return True\n\n\ndef _sample_pairs(roidb, im_scale, batch_idx):\n \"\"\"Generate a random sample of RoIs comprising foreground and background\n examples.\n \"\"\"\n fg_pairs_per_image = cfg.TRAIN.FG_REL_SIZE_PER_IM\n pairs_per_image = int(cfg.TRAIN.FG_REL_SIZE_PER_IM / cfg.TRAIN.FG_REL_FRACTION) # need much more pairs since it's quadratic\n max_pair_overlaps = roidb['max_pair_overlaps']\n\n gt_pair_inds = np.where(max_pair_overlaps > 1.0 - 1e-4)[0]\n fg_pair_inds = np.where((max_pair_overlaps >= cfg.TRAIN.FG_THRESH) &\n (max_pair_overlaps <= 1.0 - 1e-4))[0]\n \n fg_pairs_per_this_image = np.minimum(fg_pairs_per_image, gt_pair_inds.size + fg_pair_inds.size)\n # Sample foreground regions without replacement\n # if rel_pos_inds.size > 0 and rel_pos_inds.size > fg_rois_per_image - rel_gt_inds.size:\n\n if fg_pair_inds.size > 0 and fg_pair_inds.size > (fg_pairs_per_this_image - gt_pair_inds.size) \\\n and fg_pairs_per_this_image > gt_pair_inds.size:\n fg_pair_inds = npr.choice(\n fg_pair_inds, size=(fg_pairs_per_this_image - gt_pair_inds.size), replace=False)\n fg_pair_inds = np.append(fg_pair_inds, gt_pair_inds)\n\n # Label is the class each RoI has max overlap with\n fg_prd_labels = roidb['max_prd_classes'][fg_pair_inds]\n blob_dict = dict(\n fg_prd_labels_int32=fg_prd_labels.astype(np.int32, copy=False))\n \n bg_pair_inds = np.where((max_pair_overlaps < cfg.TRAIN.BG_THRESH_HI))[0]\n \n # Compute number of background RoIs to take from this image (guarding\n # against there being fewer than desired)\n bg_pairs_per_this_image = pairs_per_image - fg_pairs_per_this_image\n bg_pairs_per_this_image = np.minimum(bg_pairs_per_this_image, 
bg_pair_inds.size)\n # Sample foreground regions without replacement\n if bg_pair_inds.size > 0:\n bg_pair_inds = npr.choice(\n bg_pair_inds, size=bg_pairs_per_this_image, replace=False)\n keep_pair_inds = np.append(fg_pair_inds, bg_pair_inds)\n all_prd_labels = np.zeros(keep_pair_inds.size, dtype=np.int32)\n all_prd_labels[:fg_pair_inds.size] = fg_prd_labels + 1 # class should start from 1 # size 311\n\n blob_dict['all_prd_labels_int32'] = all_prd_labels.astype(np.int32, copy=False)\n blob_dict['fg_size'] = np.array([fg_pair_inds.size], dtype=np.int32) # this is used to check if there is at least one fg to learn\n\n sampled_sbj_boxes = roidb['sbj_boxes'][keep_pair_inds]\n sampled_obj_boxes = roidb['obj_boxes'][keep_pair_inds]\n # Scale rois and format as (batch_idx, x1, y1, x2, y2)\n sampled_sbj_rois = sampled_sbj_boxes * im_scale\n sampled_obj_rois = sampled_obj_boxes * im_scale\n repeated_batch_idx = batch_idx * blob_utils.ones((keep_pair_inds.shape[0], 1))\n sampled_sbj_rois = np.hstack((repeated_batch_idx, sampled_sbj_rois))\n sampled_obj_rois = np.hstack((repeated_batch_idx, sampled_obj_rois))\n blob_dict['sbj_rois'] = sampled_sbj_rois\n blob_dict['obj_rois'] = sampled_obj_rois\n sampled_rel_rois = box_utils.rois_union(sampled_sbj_rois, sampled_obj_rois)\n blob_dict['rel_rois'] = sampled_rel_rois\n if cfg.MODEL.USE_FREQ_BIAS or cfg.MODEL.USE_SEPARATE_SO_SCORES:\n sbj_labels = roidb['max_sbj_classes'][keep_pair_inds]\n obj_labels = roidb['max_obj_classes'][keep_pair_inds]\n blob_dict['all_sbj_labels_int32'] = sbj_labels.astype(np.int32, copy=False) # 1703\n blob_dict['all_obj_labels_int32'] = obj_labels.astype(np.int32, copy=False) # 1703\n\n return blob_dict\n\n\ndef _add_rel_multilevel_rois(blobs):\n \"\"\"By default training RoIs are added for a single feature map level only.\n When using FPN, the RoIs must be distributed over different FPN levels\n according the level assignment heuristic (see: modeling.FPN.\n map_rois_to_fpn_levels).\n \"\"\"\n lvl_min = cfg.FPN.ROI_MIN_LEVEL\n lvl_max = cfg.FPN.ROI_MAX_LEVEL\n\n def _distribute_rois_over_fpn_levels(rois_blob_names):\n \"\"\"Distribute rois over the different FPN levels.\"\"\"\n # Get target level for each roi\n # Recall blob rois are in (batch_idx, x1, y1, x2, y2) format, hence take\n # the box coordinates from columns 1:5\n lowest_target_lvls = None\n for rois_blob_name in rois_blob_names:\n target_lvls = fpn_utils.map_rois_to_fpn_levels(\n blobs[rois_blob_name][:, 1:5], lvl_min, lvl_max)\n if lowest_target_lvls is None:\n lowest_target_lvls = target_lvls\n else:\n lowest_target_lvls = np.minimum(lowest_target_lvls, target_lvls)\n for rois_blob_name in rois_blob_names:\n # Add per FPN level roi blobs named like: <rois_blob_name>_fpn<lvl>\n fpn_utils.add_multilevel_roi_blobs(\n blobs, rois_blob_name, blobs[rois_blob_name], lowest_target_lvls, lvl_min,\n lvl_max)\n\n _distribute_rois_over_fpn_levels(['sbj_rois'])\n _distribute_rois_over_fpn_levels(['obj_rois'])\n _distribute_rois_over_fpn_levels(['rel_rois'])" ]
[ [ "numpy.append", "numpy.zeros", "numpy.concatenate", "numpy.random.choice", "numpy.hstack", "numpy.array", "numpy.where", "numpy.minimum" ] ]
Cryaaa/Master-Thesis-Repository
[ "a887fd9dd95c32ce9275d14ec6583bd19cd8bc15" ]
[ "Code/Generating video data/embryo serosa kmeans finsterwalde making the pictures.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Sep 28 15:24:40 2021\n\n@author: ryans\n\"\"\"\nimport tribolium_clustering as tc\nimport pyclesperanto_prototype as cle\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport umap\nimport hdbscan\nimport napari\n\n\ndef nice_screenshots_of_1_timepoint(dataset, prediction_list, timepoint, \n cmap, save_data_location, name, rotations):\n import napari\n import pyclesperanto_prototype as cle\n from qtpy.QtCore import QTimer\n \n label_image = dataset.get_labels(timepoint)\n intensity_image = dataset.get_intensity_image(timepoint)\n cum_indices = dataset.cumulative_label_lengths()\n \n prediction = prediction_list[cum_indices[timepoint]:cum_indices[timepoint+1]]\n prop = dataset.get_regionprops_timepoint(timepoint)\n\n regprop_with_predict = pd.concat([prop,pd.DataFrame(prediction, columns = ['prediction'],\n index = prop.index)], axis = 1)\n regprop_with_predict.to_csv(save_data_location + 'regprops with ' + name +' t{}.csv'.format(timepoint))\n\n cluster_image = tc.generate_parametric_cluster_image(label_image,cle.push(label_image),prediction)\n \n for i,rot in enumerate(rotations):\n with napari.gui_qt() as app:\n viewer = napari.Viewer(ndisplay=3)\n viewer.add_image(intensity_image, rotate= rot)\n viewer.add_labels(cluster_image, rotate= rot, color = cmap)\n\n viewer.screenshot(save_data_location + name +' rotation{}'.format(i) + ' t{}.tif'.format(timepoint))\n\n time_in_msec = 1000\n QTimer().singleShot(time_in_msec, app.quit)\n viewer.close()\n\n\n\nfolder = 'D:/Uni/MSTER TUD/Master Thesis/output data/Finsterwalde Gastrulation Labels (new timeframe)//'\nfinster = tc.processed_dataset(folder )\n\npred_location = 'C:/Users/ryans/OneDrive/Documents/Master Thesis/Documents/Figures/embryo serosa video files//'\nfinster_prediction_scaled = np.load(pred_location + 'finsterwalde_scaled_prediction.npy')\nfinster_prediction_unscaled = np.load(pred_location + 'finsterwalde_scaled_prediction.npy')\n\n\n\nimage_output_folder = pred_location + 'finster/'\nrotations_finster = [(0,170,0),(0,0,0)]\ncmap_napari = tc.napari_label_cmap()\n\n \nfor time in range(30,32):\n nice_screenshots_of_1_timepoint(finster,finster_prediction_unscaled,\n time,cmap_napari,image_output_folder, \n 'embryo serosa Kmeans unscaled'\n ,rotations_finster) \n" ]
[ [ "numpy.load", "pandas.DataFrame" ] ]
jmaberk/RGPUCB
[ "a14cc524fa10bc90166ba0955c611a1c46d0f779" ]
[ "RGP-UCB/prada_bayes_opt/bayesian_optimization_function.py" ]
[ "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue Mar 29 11:49:58 2016\r\n\r\n\"\"\"\r\n\r\nfrom __future__ import division\r\nimport numpy as np\r\n#from sklearn.gaussian_process import GaussianProcess\r\nfrom scipy.optimize import minimize\r\nfrom prada_bayes_opt.acquisition_functions import AcquisitionFunction, unique_rows\r\n#from visualization import Visualization\r\nfrom prada_bayes_opt.prada_gaussian_process import PradaGaussianProcess\r\n#from prada_gaussian_process import PradaMultipleGaussianProcess\r\n\r\nfrom prada_bayes_opt.acquisition_maximization import acq_max\r\nfrom prada_bayes_opt.acquisition_maximization import acq_max_thompson\r\nfrom prada_bayes_opt.acquisition_maximization import acq_max_global\r\nfrom sklearn.metrics.pairwise import euclidean_distances\r\nfrom scipy.spatial.distance import pdist\r\nfrom scipy.spatial.distance import squareform\r\nfrom scipy import optimize\r\nfrom scipy import stats\r\nfrom pyDOE import lhs\r\nimport matplotlib.pyplot as plt\r\nfrom cycler import cycler\r\nimport time\r\nimport math\r\n\r\n\r\n#@author: Julian\r\n\r\n#==============================================================================\r\n#==============================================================================\r\n#==============================================================================\r\n#==============================================================================\r\ncounter = 0\r\n\r\n###############################################################################\r\nclass PradaBayOptFn(object):\r\n\r\n def __init__(self, gp_params, func_params, acq_params, experiment_num, seed):\r\n \"\"\" \r\n Input parameters\r\n ----------\r\n \r\n gp_params: GP parameters\r\n gp_params.l: to compute the kernel\r\n gp_params.theta: paramater for DGP-UCB gamma distribution\r\n gp_params.delta: to compute the kernel\r\n \r\n func_params: function to optimize\r\n func_params.init bound: initial bounds for parameters\r\n func_params.bounds: bounds on parameters \r\n func_params.func: a function to be optimized\r\n \r\n \r\n acq_params: acquisition function, \r\n acq_params.acq_func['name']=['ei','ucb','poi','lei']\r\n ,acq['kappa'] for ucb, acq['k'] for lei\r\n acq_params.opt_toolbox: optimization toolbox 'nlopt','direct','scipy'\r\n \r\n experiment_num: the interation of the GP method. 
Used to make sure each \r\n independant stage of the experiment uses different \r\n initial conditions\r\n seed: Variable used as part of a seed to generate random initial points\r\n \r\n Returns\r\n -------\r\n dim: dimension\r\n scalebounds: bound used thoughout the BO algorithm\r\n time_opt: will record the time spent on optimization\r\n gp: Gaussian Process object\r\n \"\"\"\r\n\r\n self.experiment_num=experiment_num\r\n np.random.seed(self.experiment_num*seed)\r\n self.seed=seed\r\n \r\n # Prior distribution paramaters for the DDB method\r\n self.theta=1\r\n # Find number of parameters\r\n bounds=func_params['bounds']\r\n if 'init_bounds' not in func_params:\r\n init_bounds=bounds\r\n else:\r\n init_bounds=func_params['init_bounds']\r\n # Find input dimention\r\n self.dim = len(bounds)\r\n self.radius=np.ones([self.dim,1])\r\n\r\n # Generate bound array\r\n scalebounds=np.array([np.zeros(self.dim), np.ones(self.dim)])\r\n self.scalebounds=scalebounds.T\r\n \r\n # find function to be optimized\r\n self.f = func_params['f']\r\n\r\n # acquisition function type\r\n \r\n self.acq=acq_params['acq_func']\r\n self.delta=acq_params[\"delta\"]\r\n self.acq['max_iterations']=acq_params['max_iterations']\r\n self.acq['num_initial_points']=acq_params['num_initial_points']\r\n self.acq['iterations_num']=acq_params['iterations_num']\r\n \r\n # Other checks\r\n if 'debug' not in self.acq:\r\n self.acq['debug']=0 \r\n if 'stopping' not in acq_params:\r\n self.stopping_criteria=0\r\n else:\r\n self.stopping_criteria=acq_params['stopping']\r\n if 'optimize_gp' not in acq_params:\r\n self.optimize_gp=0\r\n else: \r\n self.optimize_gp=acq_params['optimize_gp']\r\n if 'marginalize_gp' not in acq_params:\r\n self.marginalize_gp=0\r\n else: \r\n self.marginalize_gp=acq_params['marginalize_gp']\r\n \r\n # optimization toolbox\r\n if 'opt_toolbox' not in acq_params:\r\n if self.acq['name']=='ei_reg':\r\n self.opt_toolbox='unbounded'\r\n else:\r\n self.opt_toolbox='scipy'\r\n else:\r\n self.opt_toolbox=acq_params['opt_toolbox']\r\n self.iteration_factor=acq_params['iteration_factor']\r\n # store X in original scale\r\n self.X_original= None\r\n\r\n # store X in 0-1 scale\r\n self.X = None\r\n \r\n # store y=f(x)\r\n # (y - mean)/(max-min)\r\n self.Y = None\r\n \r\n # y original scale\r\n self.Y_original = None\r\n \r\n # value of the acquisition function at the selected point\r\n self.alpha_Xt=None\r\n self.Tau_Xt=None\r\n \r\n self.time_opt=0\r\n\r\n self.k_Neighbor=2\r\n \r\n # Gaussian Process class\r\n self.gp=PradaGaussianProcess(gp_params)\r\n self.gp_params=gp_params\r\n #self.gp.theta=gp_params['theta']\r\n # acquisition function\r\n self.acq_func = None\r\n \r\n # stop condition\r\n self.stop_flag=0\r\n self.logmarginal=0\r\n \r\n # xt_suggestion, caching for Consensus\r\n self.xstars=[]\r\n self.ystars=np.zeros((2,1))\r\n \r\n # l vector for marginalization GP\r\n self.l_vector =[]\r\n \r\n def init(self,gp_params, n_init_points=3):\r\n \"\"\" \r\n Input parameters\r\n ----------\r\n gp_params: Gaussian Process structure \r\n n_init_points: # init points\r\n \"\"\"\r\n # set seed to allow for reproducible results\r\n np.random.seed(self.experiment_num*self.seed)\r\n print(self.experiment_num)\r\n #Generate initial points on grid\r\n l=np.zeros([n_init_points,self.dim])\r\n bound_length=self.scalebounds[0,1]-self.scalebounds[0,0]\r\n for d in range(0,self.dim):\r\n l[:,d]=lhs(n_init_points)[:,0]\r\n self.X=np.asarray(l)+self.scalebounds[:,0] \r\n self.X=self.X*bound_length #initial inouts\r\n 
print(\"starting points={}\".format(self.X))\r\n y_init=self.f(self.X)\r\n y_init=np.reshape(y_init,(n_init_points,1))\r\n self.Y_original = np.asarray(y_init) #initial outputs \r\n print('initial_bound={}'.format(self.scalebounds))\r\n \r\n def maximize(self,gp_params):\r\n \"\"\"\r\n Main optimization method.\r\n\r\n Input parameters\r\n ----------\r\n gp_params: parameter for Gaussian Process\r\n\r\n Returns\r\n -------\r\n x: recommented point for evaluation\r\n \"\"\"\r\n\r\n if self.stop_flag==1:\r\n return\r\n \r\n if self.acq['name']=='random':\r\n x_max = [np.random.uniform(x[0], x[1], size=1) for x in self.scalebounds]\r\n x_max=np.asarray(x_max)\r\n x_max=x_max.T\r\n self.X_original=np.vstack((self.X_original, x_max))\r\n # evaluate Y using original X\r\n \r\n self.Y_original = np.append(self.Y_original, self.f(x_max))\r\n \r\n # update Y after change Y_original\r\n self.Y=(self.Y_original-np.mean(self.Y_original))/np.std(self.Y_original)\r\n \r\n self.time_opt=np.hstack((self.time_opt,0))\r\n return \r\n\r\n # init a new Gaussian Process\r\n self.gp=PradaGaussianProcess(gp_params)\r\n if self.gp.KK_x_x_inv ==[]:\r\n self.Y=(self.Y_original-np.mean(self.Y_original))/np.std(self.Y_original)\r\n # Find unique rows of X to avoid GP from breaking\r\n ur = unique_rows(self.X)\r\n self.gp.fit(self.X[ur], self.Y[ur])\r\n\r\n \r\n acq=self.acq\r\n self.acq_func = AcquisitionFunction(self.acq,self.delta)\r\n if acq['debug']==1:\r\n logmarginal=self.gp.log_marginal_lengthscale(gp_params['l'],gp_params['noise_delta'])\r\n print(gp_params['l'])\r\n print(\"log marginal before optimizing ={:.4f}\".format(logmarginal))\r\n self.logmarginal=logmarginal\r\n \r\n if logmarginal<-999999:\r\n logmarginal=self.gp.log_marginal_lengthscale(gp_params['l'],gp_params['noise_delta'])\r\n\r\n if self.optimize_gp==1 and len(self.Y)%2*self.dim==0 and len(self.Y)>5*self.dim:\r\n\r\n print(\"Initial length scale={}\".format(gp_params['l']))\r\n newl = self.gp.optimize_lengthscale(gp_params['l'],gp_params['noise_delta'],self.scalebounds)\r\n gp_params['l']=newl\r\n print(\"New length scale={}\".format(gp_params['l']))\r\n\r\n # init a new Gaussian Process after optimizing hyper-parameter\r\n self.gp=PradaGaussianProcess(gp_params)\r\n # Find unique rows of X to avoid GP from breaking\r\n ur = unique_rows(self.X)\r\n self.gp.fit(self.X[ur], self.Y[ur])\r\n \r\n # Set acquisition function\r\n start_opt=time.time()\r\n\r\n y_max = self.Y.max() \r\n \r\n if 'xstars' not in globals():\r\n xstars=[]\r\n \r\n self.xstars=xstars\r\n\r\n self.acq['xstars']=xstars\r\n self.acq['WW']=False\r\n self.acq['WW_dim']=False\r\n self.acq_func = AcquisitionFunction(self.acq,self.delta)\r\n\r\n if acq['name']==\"thompson\":\r\n x_max = acq_max_thompson(gp=self.gp,y_max=y_max,bounds=self.scalebounds)\r\n else:\r\n x_max = acq_max(ac=self.acq_func.acq_kind,gp=self.gp,y_max=y_max,bounds=self.scalebounds,opt_toolbox=self.opt_toolbox,seeds=self.xstars)\r\n \r\n \r\n\r\n \r\n val_acq=self.acq_func.acq_kind(x_max,self.gp,y_max)\r\n #print x_max\r\n #print val_acq\r\n if self.stopping_criteria!=0 and val_acq<self.stopping_criteria:\r\n val_acq=self.acq_func.acq_kind(x_max,self.gp,y_max)\r\n\r\n self.stop_flag=1\r\n print(\"Stopping Criteria is violated. 
Stopping Criteria is {:.15f}\".format(self.stopping_criteria))\r\n \r\n \r\n self.alpha_Xt= np.append(self.alpha_Xt,val_acq)\r\n \r\n mean,var=self.gp.predict(x_max, eval_MSE=True)\r\n var.flags['WRITEABLE']=True\r\n var[var<1e-20]=0\r\n #self.Tau_Xt= np.append(self.Tau_Xt,val_acq/var)\r\n \r\n # record the optimization time\r\n finished_opt=time.time()\r\n elapse_opt=finished_opt-start_opt\r\n self.time_opt=np.hstack((self.time_opt,elapse_opt))\r\n \r\n # store X \r\n self.X = np.vstack((self.X, x_max.reshape((1, -1))))\r\n\r\n # evaluate Y using original X\r\n self.Y_original = np.append(self.Y_original, self.f(x_max))\r\n \r\n # update Y after change Y_original\r\n self.Y=(self.Y_original-np.mean(self.Y_original))/np.std(self.Y_original)\r\n\r\n self.experiment_num=self.experiment_num+1" ]
[ [ "numpy.vstack", "numpy.ones", "numpy.random.uniform", "numpy.append", "numpy.zeros", "numpy.reshape", "numpy.random.seed", "numpy.asarray", "numpy.hstack", "numpy.std", "numpy.mean" ] ]
zoeimogen/AoC2019
[ "44ffc08a38cb07273d7c4fd49200fb7912d4a1cb" ]
[ "tests/day09_test.py" ]
[ "#!/usr/bin/python3\n'''Advent of Code 2019 Day 5 tests'''\nimport unittest\nimport os\nimport sys\nimport numpy\nsys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))\nfrom aoc2019 import intcode # pylint: disable=wrong-import-position\n\nclass TestUM(unittest.TestCase):\n '''Tests from day nine, although we actually test intcode rather than day09.py'''\n def test_day09part1(self) -> None:\n '''Part one tests'''\n prg = [109, 1, 204, -1, 1001, 100, 1, 100, 1008, 100, 16, 101, 1006, 101, 0, 99]\n prg.extend((map(int, numpy.zeros(100))))\n pgm = intcode.Program('standard', prg)\n self.assertEqual(pgm.run(), prg[:16])\n\n prg = [1102, 34915192, 34915192, 7, 4, 7, 99, 0]\n prg.extend((map(int, numpy.zeros(100))))\n pgm = intcode.Program('standard', prg)\n output = pgm.run()[0]\n self.assertEqual(len(f\"{output}\"), 16)\n\n prg = [104, 1125899906842624, 99]\n pgm = intcode.Program('standard', prg)\n self.assertEqual(pgm.run()[0], prg[1])\n" ]
[ [ "numpy.zeros" ] ]
timsque/deep-histopath
[ "a91619cc5b20c5a760d72d89124e558306ef5fc3" ]
[ "resnet.py" ]
[ "\"\"\"Custom ResNet model with pre-activation residual blocks.\n\nHe K, Zhang X, Ren S, Sun J. Identity Mappings in Deep Residual\nNetworks. arXiv.org. 2016.\n\nAuthor: Mike Dusenberry\n\"\"\"\nimport tensorflow as tf\n\n\ndef res_block(xin, dbottle, dout, k, stride):\n \"\"\"A residual block.\n\n This implements the \"pre-activation\" formulation of a residual block,\n as discussed in:\n\n He K, Zhang X, Ren S, Sun J. Identity Mappings in Deep Residual\n Networks. arXiv.org. 2016.\n\n Args:\n xin: Input tensor.\n dbottle: Bottleneck depth.\n dout: Output depth.\n k: Integer kernel size.\n stride: Integer stride.\n\n Returns:\n Output tensor for the block.\n \"\"\"\n depth_axis = 3 if tf.keras.backend.image_data_format() == 'channels_last' else 1\n din = tf.shape(xin)[depth_axis] # input depth\n he_init = tf.keras.initializers.VarianceScaling(scale=2.0, mode='fan_in', distribution='normal')\n\n # TODO: ReLUs have been quite successful, but it still seems like it could be a problem due to\n # gradient stopping at ReLU zero values. Perhaps look into leaky ReLUs, ELUs, etc.\n\n # conv 1x1\n x = tf.keras.layers.BatchNormalization(axis=depth_axis, momentum=0.9, epsilon=1e-4)(xin)\n x = tf.keras.layers.Activation('relu')(x)\n x = tf.keras.layers.Conv2D(\n dbottle, (1, 1), strides=(stride, stride), kernel_initializer=he_init)(x)\n\n # conv 3x3\n x = tf.keras.layers.BatchNormalization(axis=depth_axis, momentum=0.9, epsilon=1e-4)(x)\n x = tf.keras.layers.Activation('relu')(x)\n x = tf.keras.layers.Conv2D(dbottle, (k, k), padding='same', kernel_initializer=he_init)(x)\n\n # conv 1x1\n x = tf.keras.layers.BatchNormalization(axis=depth_axis, momentum=0.9, epsilon=1e-4)(x)\n x = tf.keras.layers.Activation('relu')(x)\n x = tf.keras.layers.Conv2D(dout, (1, 1), kernel_initializer=he_init)(x)\n\n # shortcut\n if din == dout: # identity shortcut for same input/output depths\n shortcut = xin\n else: # conv shortcut to change depth (usually to increase depth)\n shortcut = tf.keras.layers.Conv2D(\n dout, (1, 1), strides=(stride, stride), kernel_initializer=he_init)(xin)\n\n x = tf.keras.layers.add([x, shortcut])\n\n return x\n\n\ndef ResNet(xin, shape): # camel case makes it feel like a class -- eventually we'll subclass Model\n \"\"\"Custom ResNet model with pre-activation residual blocks.\n\n Reference:\n\n He K, Zhang X, Ren S, Sun J. Identity Mappings in Deep Residual\n Networks. arXiv.org. 
2016.\n\n Args:\n xin: Input tensor.\n shape: Integer tuple of length 3 containing the shape of a single\n example.\n\n Returns:\n A Keras Model.\n\n Example:\n ```\n import tensorflow as tf\n import numpy as np\n import resnet\n\n shape = (64, 64, 3)\n xin = tf.placeholder(tf.float32, shape=(None, *shape))\n model = resnet.ResNet(xin, shape)\n\n model.summary()\n\n sess = tf.Session()\n sess.run(tf.global_variables_initializer())\n out = sess.run(model.output, feed_dict={xin: np.random.randn(10, *shape)})\n print(out)\n ```\n \"\"\"\n # TODO: `tf.keras.layers` -> `tf.layers`\n assert len(shape) == 3\n depth_axis = 3 if tf.keras.backend.image_data_format() == 'channels_last' else 1\n\n d = [16, 32, 64, 128] # depths (must be divisible by 4)\n db = [int(depth/4) for depth in d] # bottleneck depths\n n = 3 # num layers at each depth\n\n # input & conv\n with tf.variable_scope(\"beg\"):\n xin = tf.keras.layers.Input(tensor=xin, shape=shape) # shape (h,w,c)\n he_init = tf.keras.initializers.VarianceScaling(scale=2.0, mode='fan_in', distribution='normal')\n x = tf.keras.layers.Conv2D(\n d[0], (3, 3), strides=(2, 2),\n padding='same', kernel_initializer=he_init)(xin) # shape (h/2,w/2,d[0])\n\n # stage 1\n with tf.variable_scope(\"stage1\"):\n x = res_block(x, db[0], d[1], 3, 1) # shape (h/2,w/2,d[1]) <-- increase depth\n for i in range(n-1):\n x = res_block(x, db[1], d[1], 3, 1) # shape (h/2,w/2,d[1])\n\n # stage 2\n with tf.variable_scope(\"stage2\"):\n x = res_block(x, db[1], d[2], 3, 2) # shape (h/4,w/4,d[2]) <-- increase depth, cut spatial size\n for i in range(n-1):\n x = res_block(x, db[2], d[2], 3, 1) # shape (h/4,w/4,d[2])\n\n # stage 3\n with tf.variable_scope(\"stage3\"):\n x = res_block(x, db[2], d[3], 3, 2) # shape (h/8,w/8,d[3]) <-- increase depth, cut spatial size\n for i in range(n-1):\n x = res_block(x, db[3], d[3], 3, 1) # shape (h/8,w/8,d[3])\n\n # final functions\n with tf.variable_scope(\"end\"):\n x = tf.keras.layers.BatchNormalization(\n axis=depth_axis, momentum=0.9, epsilon=1e-4)(x) # shape (h/8,w/8,d[3])\n x = tf.keras.layers.Activation('relu')(x) # shape (h/8,w/8,d[3])\n if shape[1] == 64:\n x = tf.keras.layers.AvgPool2D((8, 8))(x) # shape (h/64,w/64,d[3])\n elif shape[1] == 128:\n x = tf.keras.layers.AvgPool2D((16, 16))(x) # shape (h/128,w/128,d[3]) NOTE: assumes 128x128\n elif shape[1] == 100:\n x = tf.keras.layers.AvgPool2D((12, 12))(x) # shape (h/100,w/100,d[3]) NOTE: assumes 100x100\n else:\n # Note for potential surgery reasons, we won't use global pooling\n #x = tf.keras.layers.GlobalAvgPool2D()(x) # shape (h/64,w/64,d[3])\n raise Exception(\"patch size unsupported\")\n init = tf.keras.initializers.VarianceScaling(scale=1.0, mode='fan_in', distribution='normal')\n # TODO: this is a binary classification problem so optimizing a loss derived from a Bernoulli\n # distribution is appropriate. however, would the dynamics of the training algorithm be more\n # stable if we treated this as a multi-class classification problem and derived a loss from a\n # Multinomial distribution with two classes (and a single trial)? 
it would be\n # over-parameterized, but then again, the deep net itself is already heavily parameterized.\n x = tf.keras.layers.Conv2D(\n 1, (1, 1), kernel_initializer=init)(x) # shape (h/64,w/64,1) <-- could use this for surgery\n #2, (1, 1), kernel_initializer=init)(x) # shape (h/64,w/64,2) <-- could use this for surgery\n x = tf.keras.layers.Flatten()(x) # shape ((h/64)*(w/64)*1) <-- normally will be a single value\n\n # create model (106 functions)\n model = tf.keras.Model(xin, x, name='resnet')\n\n return model\n\n" ]
[ [ "tensorflow.keras.layers.AvgPool2D", "tensorflow.keras.layers.Flatten", "tensorflow.shape", "tensorflow.keras.Model", "tensorflow.keras.layers.Activation", "tensorflow.variable_scope", "tensorflow.keras.initializers.VarianceScaling", "tensorflow.keras.layers.BatchNormalization", "tensorflow.keras.layers.Conv2D", "tensorflow.keras.layers.add", "tensorflow.keras.backend.image_data_format", "tensorflow.keras.layers.Input" ] ]
hqucms/onnxruntime
[ "6e4e76414639f50836a64546603c8957227857b0" ]
[ "docs/python/examples/plot_backend.py" ]
[ "# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License.\n\n\"\"\"\n\n.. _l-example-backend-api:\n\nONNX Runtime Backend for ONNX\n=============================\n\n*ONNX Runtime* extends the \n`onnx backend API <https://github.com/onnx/onnx/blob/master/docs/ImplementingAnOnnxBackend.md>`_\nto run predictions using this runtime.\nLet's use the API to compute the prediction\nof a simple logistic regression model.\n\"\"\"\nimport numpy as np\nfrom onnxruntime import datasets\nimport onnxruntime.backend as backend\nfrom onnx import load\n\nname = datasets.get_example(\"logreg_iris.onnx\")\nmodel = load(name)\n\nrep = backend.prepare(model, 'CPU')\nx = np.array([[-1.0, -2.0]], dtype=np.float32)\nlabel, proba = rep.run(x)\nprint(\"label={}\".format(label))\nprint(\"probabilities={}\".format(proba))\n\n########################################\n# The device depends on how the package was compiled,\n# GPU or CPU.\nfrom onnxruntime import get_device\nprint(get_device())\n\n########################################\n# The backend can also directly load the model\n# without using *onnx*.\n\nrep = backend.prepare(name, 'CPU')\nx = np.array([[-1.0, -2.0]], dtype=np.float32)\nlabel, proba = rep.run(x)\nprint(\"label={}\".format(label))\nprint(\"probabilities={}\".format(proba))\n\n#######################################\n# The backend API is implemented by other frameworks\n# and makes it easier to switch between multiple runtimes\n# with the same API.\n" ]
[ [ "numpy.array" ] ]
qdpham/machine-learning-engineering-for-production-public
[ "e02961fa72cd5d009bfb699c2e76594b9dbad1bf" ]
[ "course4/week3-ungraded-labs/C4_W3_Lab_4_Github_Actions/app/main.py" ]
[ "# added by QDP to run the test using the CI/CD pipeline\nimport pickle\nimport numpy as np\nfrom typing import List\nfrom fastapi import FastAPI\nfrom pydantic import BaseModel, conlist\n\n\n\napp = FastAPI(title=\"Predicting Wine Class with batching\")\n\n# Open classifier in global scope\nwith open(\"models/wine-95-fixed.pkl\", \"rb\") as file:\n clf = pickle.load(file)\n\n\nclass Wine(BaseModel):\n batches: List[conlist(item_type=float, min_items=13, max_items=13)]\n\n\[email protected](\"/predict\")\ndef predict(wine: Wine):\n batches = wine.batches\n np_batches = np.array(batches)\n pred = clf.predict(np_batches).tolist()\n return {\"Prediction\": pred}\n" ]
[ [ "numpy.array" ] ]
world2vec/coref
[ "a314773215f7f59e3dc1cb14034460a23734b291" ]
[ "coref_ops.py" ]
[ "from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nimport os\n\nimport tensorflow as tf\nfrom tensorflow.python import pywrap_tensorflow\n\ncur_dir = os.path.dirname(os.path.realpath(__file__))\n\ncoref_op_library = tf.load_op_library(os.path.join(cur_dir, \"coref_kernels.so\"))\n\nextract_spans = coref_op_library.extract_spans\ntf.NotDifferentiable(\"ExtractSpans\")\n" ]
[ [ "tensorflow.NotDifferentiable" ] ]
graphcore/poprithms
[ "9975a6a343891e3c5f8968a9507261c1185029ed" ]
[ "poprithms/tests/regression/schedule/shift/summarize.py" ]
[ "# Copyright (c) 2020 Graphcore Ltd. All rights reserved.\nimport matplotlib.pyplot as mpl\nimport matplotlib.gridspec as gridspec\nimport numpy as np\nimport sys\nimport os\n\n\ndef run(logsDir, plotsDir=\".\"):\n \"\"\"\n logsDir : \n -- where to read all log files from (.txt and .log extensions). \n This is the data which will be plotted.\n plotsDir : \n -- where to write pdf figures to.\n \"\"\"\n\n lines = []\n for fn in [\n os.path.join(logsDir, x) for x in os.listdir(logsDir)\n if \".txt\" in x or \".log\" in x\n ]:\n filly = open(fn, \"r\")\n lines += filly.readlines()\n\n print(\"In run with \", len(lines), \" lines\")\n\n records = {}\n description = \"\"\n settingsString = \"\"\n for l in lines:\n if \"description\" in l:\n if (description):\n if (description not in records.keys()):\n records[description] = {}\n if settingsString not in records[description]:\n records[description][settingsString] = []\n records[description][settingsString].append({\n \"timeInit\":\n timeInit,\n \"timeShift\":\n timeShift,\n \"nOpsBefore\":\n nOpsBefore,\n \"nOpsAfter\":\n nOpsAfter\n })\n description = l.split(\"=\")[1].strip()\n settingsString = \"\"\n\n elif \"timeInitialize\" in l:\n timeInit = float(l.split(\"=\")[1].split()[0].strip())\n elif \"timeShift\" in l:\n timeShift = float(l.split(\"=\")[1].split()[0].strip())\n elif \"nOpsBefore\" in l:\n nOpsBefore = int(l.split(\"=\")[1])\n elif \"nOpsAfter\" in l:\n nOpsAfter = int(l.split(\"=\")[1])\n else:\n #shorten the string for cleaner figure legend:\n if \"logTime=\" in l:\n l = l.split(\"logTime=\")[1].split(\"at\")[0]\n settingsString += l\n\n nPlots = len(records.keys())\n for i, k in enumerate(records.keys()):\n gs1 = gridspec.GridSpec(1, 1)\n mpl.subplot(gs1[0:1, 0:1])\n mpl.title(k)\n mpl.ylabel(\"time [s]\")\n mpl.xlabel(\"number of Ops\")\n for summary in records[k].keys():\n rs = records[k][summary]\n ax = mpl.gca()\n ax.set_xscale('log', basex=2)\n ax.set_yscale('log', basey=2)\n\n label = summary.replace('\\n', ' ').replace(\"logging=0 \",\n \"\").replace(\n \"tieBreaker=\", \"\")\n\n mpl.plot([x[\"nOpsBefore\"] for x in rs],\n [x[\"timeShift\"] + x[\"timeInit\"] for x in rs],\n linestyle=\":\",\n marker=\"o\",\n label=label)\n mpl.legend(loc=\"lower right\")\n\n plotfilename = os.path.join(plotsDir, \"%s.pdf\" % (k, ))\n print(\"Saving figure at \", plotfilename)\n mpl.savefig(plotfilename)\n\n\nif __name__ == \"__main__\":\n # expected use is something like\n # >> python3 summarize.py logs/ plots/\n if (len(sys.argv) != 3):\n raise RuntimeError(\n \"Expected 2 arguments: (0) where the log files are and (1) where to store pdf plots\"\n )\n\n logsDir = sys.argv[1]\n plotsDir = sys.argv[2]\n run(logsDir, plotsDir)\n" ]
[ [ "matplotlib.pyplot.legend", "matplotlib.pyplot.savefig", "matplotlib.pyplot.gca", "matplotlib.pyplot.title", "matplotlib.pyplot.subplot", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.plot", "matplotlib.gridspec.GridSpec", "matplotlib.pyplot.xlabel" ] ]
IanYeung/ReCp
[ "1a7ace0e1ca3c262e24a222f3f0ab0d5674e9410" ]
[ "basicsr/archs/iconvsr_arch.py" ]
[ "import torch\nfrom torch import nn as nn\nfrom torch.nn import functional as F\n\nfrom basicsr.utils.registry import ARCH_REGISTRY\nfrom .spynet_arch import SpyNet\nfrom .basicvsr_arch import ConvResBlock, PSUpsample\nfrom .edvr_arch import PredeblurModule, PCDAlignment, TSAFusion\nfrom .arch_util import ResidualBlockNoBN, flow_warp, make_layer\n\n\n@ARCH_REGISTRY.register()\nclass IconVSR(nn.Module):\n \"\"\"IconVSR network for video super-resolution.\n Args:\n num_feat (int): Channel number of intermediate features. \n Default: 64.\n num_block (int): Block number of residual blocks in each propagation branch.\n Default: 30.\n keyframe_stride (int): Number determining the keyframes. If stride=5,\n then the (0, 5, 10, 15, ...)-th frame will be the keyframes.\n Default: 5.\n temporal_padding (int): Number of frames to be padded at two ends of\n the sequence. 2 for REDS and 3 for Vimeo-90K. Default: 2\n spynet_path (str): The path of Pre-trained SPyNet model.\n Default: None.\n \"\"\"\n def __init__(self, \n num_feat=64, num_block=30, \n keyframe_stride=5, temporal_padding=2, \n spynet_path=None):\n super(IconVSR, self).__init__()\n\n self.num_feat = num_feat\n self.t_pad = temporal_padding\n self.kframe_stride = keyframe_stride\n\n self.edvr = EDVRExtractor(num_frame=temporal_padding*2 + 1,\n center_frame_idx=temporal_padding)\n \n # Flow-based Feature Alignment\n self.spynet = SpyNet(load_path=spynet_path)\n\n # Coupled Propagation and Information-refill\n self.backward_fuse = nn.Conv2d(num_feat * 2, num_feat, kernel_size=3, stride=1, padding=1, bias=True)\n self.backward_resblocks = ConvResBlock(num_feat + 3, num_feat, num_block)\n\n self.forward_fuse = nn.Conv2d(num_feat * 2, num_feat, kernel_size=3, stride=1, padding=1, bias=True)\n self.forward_resblocks = ConvResBlock(num_feat + 3, num_feat, num_block)\n\n # Pixel-Shuffle Upsampling\n self.up1 = PSUpsample(num_feat, num_feat, scale_factor=2)\n self.up2 = PSUpsample(num_feat, 64, scale_factor=2)\n\n # The channel of the tail layers is 64\n self.conv_hr = nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=1)\n self.conv_last = nn.Conv2d(64, 3, kernel_size=3, stride=1, padding=1)\n\n # Global Residual Learning\n self.img_up = nn.Upsample(scale_factor=4, mode='bilinear', align_corners=False)\n\n # Activation Function\n self.lrelu = nn.LeakyReLU(negative_slope=0.1, inplace=True)\n\n def comp_flow(self, lrs):\n \"\"\"Compute optical flow using SPyNet for feature warping.\n\n Args:\n lrs (tensor): LR frames, the shape is (n, t, c, h, w)\n\n Return:\n tuple(Tensor): Optical flow. \n forward_flow refers to the flow from current frame to the previous frame. \n backward_flow is the flow from current frame to the next frame.\n \"\"\"\n n, t, c, h, w = lrs.size()\n forward_lrs = lrs[:, 1:, :, :, :].reshape(-1, c, h, w) # 'n t c h w -> (n t) c h w'\n backward_lrs = lrs[:, :-1, :, :, :].reshape(-1, c, h, w) # 'n t c h w -> (n t) c h w')\n \n forward_flow = self.spynet(forward_lrs, backward_lrs).view(n, t-1, 2, h, w)\n backward_flow = self.spynet(backward_lrs, forward_lrs).view(n, t-1, 2, h, w)\n\n return forward_flow, backward_flow\n\n def extract_refill_features(self, lrs, keyframe_idx):\n \"\"\"Compute the features for information refill.\n\n We use EDVR-M to extract features from the selected keyframes\n and its neighbor. 
The window size in EDVR-M is 5 for REDS and\n 7 for Vimeo-90K (following the settings in EDVR).\n\n Args:\n lrs (Tensor): The input LR sequence with shape (n, t, c, h, w).\n keyframe_idx (list[int]): List of the indices of the selected\n keyframes.\n\n Returns:\n dict: The features for information-refill. The keys are the\n corresponding index.\n\n \"\"\"\n lrs_start = lrs[:, 1+self.t_pad : 1+self.t_pad*2].flip(1)\n lrs_end = lrs[:, -1-self.t_pad*2 : -1-self.t_pad].flip(1)\n lrs = torch.cat([lrs_start, lrs, lrs_end], dim=1)\n num_frame = 2 * self.t_pad + 1\n\n refill_feat = {}\n for i in keyframe_idx:\n refill_feat[i] = self.edvr(lrs[:, i:i + num_frame].contiguous())\n return refill_feat\n \n def spatial_padding(self, lrs):\n \"\"\" Apply spatial pdding.\n\n Since the PCD module in EDVR requires a resolution of a multiple of 4, \n we use reflect padding on the LR frame to match the requirements..\n\n Args:\n lrs (Tensor): Input LR sequence with shape (n, t, c, h, w).\n\n Returns:\n Tensor: Padded LR sequence with shape (n, t, c, h_pad, w_pad).\n\n \"\"\"\n n, t, c, h, w = lrs.size()\n\n pad_h = (4 - h % 4) % 4\n pad_w = (4 - w % 4) % 4\n\n # padding\n lrs = lrs.view(-1, c, h, w)\n lrs = F.pad(lrs, [0, pad_w, 0, pad_h], mode='reflect')\n\n return lrs.view(n, t, c, h + pad_h, w + pad_w)\n \n def forward(self, lrs):\n n, t, c, h_in, w_in = lrs.size()\n assert h_in >= 64 and w_in >= 64, (\n 'The height and width of input should be at least 64, '\n f'but got {h_in} and {w_in}.')\n \n # Padding\n lrs = self.spatial_padding(lrs)\n h, w = lrs.size(3), lrs.size(4)\n\n # get the keyframe for information-refill\n keyframe_idx = list(range(0, t, self.kframe_stride))\n if keyframe_idx[-1] != t-1:\n keyframe_idx.append(t-1) # the last frame is a keyframe\n \n # compute flow and refill\n forward_flow, backward_flow = self.comp_flow(lrs)\n refill_feat = self.extract_refill_features(lrs, keyframe_idx)\n\n # backward propgation\n rlt = []\n feat_prop = lrs.new_zeros(n, self.num_feat, h, w)\n for i in range(t-1, -1, -1):\n curr_lr = lrs[:, i, :, :, ]\n if i < t-1:\n flow = backward_flow[:, i, :, :, :]\n feat_prop = flow_warp(feat_prop, flow.permute(0, 2, 3, 1))\n if i in keyframe_idx:\n feat_prop = torch.cat([feat_prop, refill_feat[i]], dim=1)\n feat_prop = self.backward_fuse(feat_prop)\n feat_prop = torch.cat([feat_prop, curr_lr], dim=1)\n feat_prop = self.backward_resblocks(feat_prop)\n rlt.append(feat_prop)\n rlt = rlt[::-1]\n\n # forward propgation\n feat_prop = torch.zeros_like(feat_prop)\n for i in range(0, t):\n curr_lr = lrs[:, i, :, :, :]\n if i > 0:\n flow = forward_flow[:, i-1, :, :, :]\n feat_prop = flow_warp(feat_prop, flow.permute(0, 2, 3, 1))\n if i in keyframe_idx:\n feat_prop = torch.cat([feat_prop, refill_feat[i]], dim=1)\n feat_prop = self.forward_fuse(feat_prop)\n feat_prop = torch.cat([curr_lr, rlt[i], feat_prop], dim=1)\n feat_prop = self.forward_resblocks(feat_prop)\n\n # Upsampling\n sr_rlt = self.lrelu(self.up1(sr_rlt))\n sr_rlt = self.lrelu(self.up2(sr_rlt))\n sr_rlt = self.lrelu(self.conv_hr(sr_rlt))\n sr_rlt = self.conv_last(sr_rlt)\n\n # Global Residual Learning\n base = self.img_up(curr_lr)\n\n sr_rlt += base\n rlt[i] = sr_rlt\n return torch.stack(rlt, dim=1)[:, :, :, :4 * h_in, :4 * w_in]\n\n\nclass EDVRExtractor(nn.Module):\n \"\"\"EDVR feature extractor for information-refill in IconVSR.\n\n We use EDVR-M in IconVSR.\n\n Paper:\n EDVR: Video Restoration with Enhanced Deformable Convolutional Networks.\n\n Args:\n num_in_ch (int): Channel number of input image. 
Default: 3.\n num_out_ch (int): Channel number of output image. Default: 3.\n num_feat (int): Channel number of intermediate features. Default: 64.\n num_frame (int): Number of input frames. Default: 5.\n deformable_groups (int): Deformable groups. Defaults: 8.\n num_extract_block (int): Number of blocks for feature extraction.\n Default: 5.\n center_frame_idx (int): The index of center frame. Frame counting from\n 0. Default: Middle of input frames.\n hr_in (bool): Whether the input has high resolution. Default: False.\n with_predeblur (bool): Whether has predeblur module.\n Default: False.\n with_tsa (bool): Whether has TSA module. Default: True.\n \"\"\"\n def __init__(self, num_in_ch=3, num_out_ch=3, num_feat=64, num_frame=5,\n deformable_groups=8, num_extract_block=5,\n center_frame_idx=None, hr_in=None, \n with_predeblur=False, with_tsa=True):\n super(EDVRExtractor, self).__init__()\n\n if center_frame_idx is None:\n self.center_frame_idx = num_frame // 2\n else:\n self.center_frame_idx = center_frame_idx\n \n self.hr_in = hr_in\n self.with_predeblur = with_predeblur\n self.with_tsa = with_tsa\n\n # extract features for each frame\n if self.with_predeblur:\n self.pre_deblur = PredeblurModule(num_feat=num_feat, hr_in=self.hr_in)\n self.conv_1x1 = nn.Conv2d(num_feat, num_feat, kernel_size=1, stride=1, padding=0, bias=True)\n else:\n self.conv_first = nn.Conv2d(num_in_ch, num_feat, kernel_size=3, stride=1, padding=1)\n \n # extract pyramid features \n self.feature_extraction = make_layer(ResidualBlockNoBN, num_extract_block, num_feat=num_feat)\n self.conv_l2_1 = nn.Conv2d(num_feat, num_feat, kernel_size=3, stride=2, padding=1)\n self.conv_l2_2 = nn.Conv2d(num_feat, num_feat, kernel_size=3, stride=1, padding=1)\n self.conv_l3_1 = nn.Conv2d(num_feat, num_feat, kernel_size=3, stride=2, padding=1)\n self.conv_l3_2 = nn.Conv2d(num_feat, num_feat, kernel_size=3, stride=1, padding=1)\n\n # pcd and tsa module\n self.pcd_align = PCDAlignment(num_feat=num_feat, deformable_groups=deformable_groups)\n \n if self.with_tsa:\n self.fusion = TSAFusion(\n num_feat=num_feat,\n num_frame=num_frame,\n center_frame_idx=self.center_frame_idx)\n else:\n self.fusion = nn.Conv2d(num_frame * num_feat, num_feat, 1, 1)\n\n # activation function\n self.lrelu = nn.LeakyReLU(negative_slope=0.1, inplace=True)\n \n def forward(self, x):\n n, t, c, h, w = x.size()\n\n if self.hr_in:\n assert h % 16 == 0 and w % 16 == 0, (\n 'The height and width must be multiple of 16.')\n else:\n assert h % 4 == 0 and w % 4 == 0, (\n 'The height and width must be multiple of 4.')\n \n # extract features for each frame\n # Level 1\n if self.with_predeblur:\n feat_l1 = self.conv_1x1(self.pre_deblur(x.view(-1, c, h, w)))\n if self.hr_in:\n h, w = h // 4, w // 4\n else:\n feat_l1 = self.lrelu(self.conv_first(x.view(-1, c, h, w)))\n \n feat_l1 = self.feature_extraction(feat_l1)\n\n # Level 2\n feat_l2 = self.lrelu(self.conv_l2_1(feat_l1))\n feat_l2 = self.lrelu(self.conv_l2_2(feat_l2))\n\n # Level 3\n feat_l3 = self.lrelu(self.conv_l3_1(feat_l2))\n feat_l3 = self.lrelu(self.conv_l3_2(feat_l3))\n\n feat_l1 = feat_l1.view(n, t, -1, h, w)\n feat_l2 = feat_l2.view(n, t, -1, h // 2, w // 2)\n feat_l3 = feat_l3.view(n, t, -1, h // 4, w // 4)\n\n # PCD alignment\n ref_feat_l = [ # reference feature list\n feat_l1[:, self.center_frame_idx, :, :, :].clone(),\n feat_l2[:, self.center_frame_idx, :, :, :].clone(),\n feat_l3[:, self.center_frame_idx, :, :, :].clone()\n ]\n aligned_feat = []\n for i in range(t):\n nbr_feat_l = [ # neighboring feature 
list\n feat_l1[:, i, :, :, :].clone(), feat_l2[:, i, :, :, :].clone(),\n feat_l3[:, i, :, :, :].clone()\n ]\n aligned_feat.append(self.pcd_align(nbr_feat_l, ref_feat_l))\n aligned_feat = torch.stack(aligned_feat, dim=1) # (n, t, c, h, w)\n\n if not self.with_tsa:\n aligned_feat = aligned_feat.view(n, -1, h, w)\n feat = self.fusion(aligned_feat)\n\n return feat\n\n\nif __name__ == '__main__':\n model = IconVSR()\n lrs = torch.randn(3, 4, 3, 64, 64)\n rlt = model(lrs)\n print(rlt.size())\n \n\n \n\n\n" ]
[ [ "torch.stack", "torch.randn", "torch.nn.functional.pad", "torch.zeros_like", "torch.nn.Upsample", "torch.nn.Conv2d", "torch.cat", "torch.nn.LeakyReLU" ] ]
JulianoLagana/MT3
[ "c1270e1de5b8d68eab50a797d16061310fa95d97" ]
[ "src/modules/position_encoder.py" ]
[ "import torch\nfrom torch import nn, Tensor\nimport math\n\nclass LearnedPositionEncoder(nn.Module):\n \"\"\"\n Learned Position Encoder. Takes tensor of positional indicies and converts to learned embeddings \n \"\"\"\n\n def __init__(self, n_timesteps, d_model):\n super().__init__()\n self.embeddor = nn.Embedding(n_timesteps, d_model) # lookup table, each with vector of size d_model \n nn.init.uniform_(self.embeddor.weight)\n\n def forward(self, pos_indicies):\n pos_indicies = pos_indicies.long()\n return self.embeddor(pos_indicies)\n\n\nclass PositionEmbeddingSine(nn.Module):\n \"\"\"\n This is a more standard version of the position embedding, very similar to the one\n used by the Attention is all you need paper, generalized to work on images.\n \"\"\"\n def __init__(self,params, temperature=10000, scale=2*math.pi):\n super().__init__()\n self.params=params\n self.num_pos_feats = params.arch.d_model\n self.temperature = temperature\n self.scale = scale\n self.max_time = params.data_generation.n_timesteps\n\n def forward(self, proposals):\n proposals = proposals + 1\n dim_t = torch.arange(self.num_pos_feats, dtype=torch.float32, device=proposals.device)\n dim_t = self.temperature ** (2 * (dim_t // 2) / self.num_pos_feats)\n # N, L\n proposals = proposals / self.max_time * self.scale\n # N, L, num_pos_feats\n pos = proposals[:, :, None] / dim_t\n # N, L, 2, num_pos_feats/2, 2\n pos = torch.stack((pos[:, :, 0::2].sin(), pos[:, :, 1::2].cos()), dim=3).flatten(2)\n # N, L, num_pos_feats*2\n return pos\n\n\n" ]
[ [ "torch.arange", "torch.nn.init.uniform_", "torch.nn.Embedding" ] ]
bfabiandev/atom3d
[ "b2499ff743be2e851c286cabf64696682abffa44" ]
[ "atom3d/util/graph.py" ]
[ "import numpy as np\nimport scipy.spatial as ss\nimport torch\n\nimport atom3d.util.formats as fo\n\n# PDB atom names -- these include co-crystallized metals\nprot_atoms = ['C', 'H', 'O', 'N', 'S', 'P', 'ZN', 'NA', 'FE', 'CA', 'MN', 'NI', 'CO', 'MG', 'CU', 'CL', 'SE', 'F']\n# RDKit molecule atom names\nmol_atoms = ['C', 'N', 'O', 'S', 'F', 'Si', 'P', 'Cl', 'Br', 'Mg', 'Na',\n 'Ca', 'Fe', 'As', 'Al', 'I', 'B', 'V', 'K', 'Tl', 'Yb',\n 'Sb', 'Sn', 'Ag', 'Pd', 'Co', 'Se', 'Ti', 'Zn', 'H', # H?\n 'Li', 'Ge', 'Cu', 'Au', 'Ni', 'Cd', 'In', 'Mn', 'Zr',\n 'Cr', 'Pt', 'Hg', 'Pb']\n# Residue names\nresidues = ['ALA', 'CYS', 'ASP', 'GLU', 'PHE', 'GLY', 'HIS', 'ILE', 'LYS', 'LEU', 'MET', 'ASN', 'PRO', 'GLN', 'ARG',\n 'SER', 'THR', 'VAL', 'TRP', 'TYR']\n\n\ndef prot_df_to_graph(df, feat_col='element', allowable_feats=prot_atoms, edge_dist_cutoff=4.5):\n r\"\"\"\n Converts protein in dataframe representation to a graph compatible with Pytorch-Geometric, where each node is an atom.\n\n :param df: Protein structure in dataframe format.\n :type df: pandas.DataFrame\n :param node_col: Column of dataframe to find node feature values. For example, for atoms use ``feat_col=\"element\"`` and for residues use ``feat_col=\"resname\"``\n :type node_col: str, optional\n :param allowable_feats: List containing all possible values of node type, to be converted into 1-hot node features. \n Any elements in ``feat_col`` that are not found in ``allowable_feats`` will be added to an appended \"unknown\" bin (see :func:`atom3d.util.graph.one_of_k_encoding_unk`).\n :type allowable_feats: list, optional\n :param edge_dist_cutoff: Maximum distance cutoff (in Angstroms) to define an edge between two atoms, defaults to 4.5.\n :type edge_dist_cutoff: float, optional\n\n :return: tuple containing\n\n - node_feats (torch.FloatTensor): Features for each node, one-hot encoded by values in ``allowable_feats``.\n\n - edges (torch.LongTensor): Edges in COO format\n\n - edge_weights (torch.LongTensor): Edge weights, defined as a function of distance between atoms given by :math:`w_{i,j} = \\frac{1}{d(i,j)}`, where :math:`d(i, j)` is the Euclidean distance between node :math:`i` and node :math:`j`.\n\n - node_pos (torch.FloatTensor): x-y-z coordinates of each node\n :rtype: Tuple\n \"\"\" \n\n node_pos = torch.FloatTensor(df[['x', 'y', 'z']].to_numpy())\n\n kd_tree = ss.KDTree(node_pos)\n edge_tuples = list(kd_tree.query_pairs(edge_dist_cutoff))\n edges = torch.LongTensor(edge_tuples).t().contiguous()\n\n node_feats = torch.FloatTensor([one_of_k_encoding_unk(e, allowable_feats) for e in df[feat_col]])\n edge_weights = torch.FloatTensor(\n [1.0 / (np.linalg.norm(node_pos[i] - node_pos[j]) + 1e-5) for i, j in edge_tuples]).view(-1, 1)\n # feats = F.one_hot(elems, num_classes=len(atom_int_dict))\n \n return node_feats, edges, edge_weights.view(-1), node_pos\n\n\ndef mol_df_to_graph(mol, allowable_atoms=mol_atoms):\n \"\"\"\n Converts molecule to a graph compatible with Pytorch-Geometric\n\n TODO: Change to operate on dataframe representation instead of Mol object\n\n :param mol: Molecule structure in RDKit format\n :type mol: rdkit.Chem.rdchem.Mol\n :param allowable_atoms: List containing allowable atom types\n :type allowable_atoms: list[str], optional\n\n :return: Tuple containing \\n\n - node_feats (torch.FloatTensor): Features for each node, one-hot encoded by atom type in ``allowable_atoms``.\n - edges (torch.LongTensor): Edges from chemical bond graph in COO format.\n - edge_feats (torch.FloatTensor): Edge features given by bond 
type. Single = 1.0, Double = 2.0, Triple = 3.0, Aromatic = 1.5.\n - node_pos (torch.FloatTensor): x-y-z coordinates of each node.\n \"\"\"\n node_pos = torch.FloatTensor(fo.get_coordinates_of_conformer(mol))\n bonds = fo.get_bonds_matrix_from_mol(mol)\n edge_tuples = np.argwhere(bonds)\n edges = torch.LongTensor(edge_tuples).t().contiguous()\n\n node_feats = torch.FloatTensor([one_of_k_encoding_unk(a.GetSymbol(), mol_atoms) for a in mol.GetAtoms()])\n edge_feats = torch.FloatTensor([bonds[i, j] for i, j in edge_tuples]).view(-1, 1)\n\n return node_feats, edges, edge_feats, node_pos\n\n\ndef combine_graphs(graph1, graph2, edges_between=True, edges_between_dist=4.5):\n \"\"\"Combine two graphs into one, optionally adding edges between the two graphs using :func:`atom3d.util.graph.edges_between_graphs`. Node features are concatenated in the feature dimension, to distinguish which nodes came from which graph.\n\n :param graph1: One of the graphs to be combined, in the format returned by :func:`atom3d.util.graph.prot_df_to_graph` or :func:`atom3d.util.graph.mol_df_to_graph`.\n :type graph1: Tuple\n :param graph2: The other graph to be combined, in the format returned by :func:`atom3d.util.graph.prot_df_to_graph` or :func:`atom3d.util.graph.mol_df_to_graph`.\n :type graph2: Tuple\n :param edges_between: Indicates whether to add new edges between graphs, defaults to True.\n :type edges_between: bool, optional\n :param edges_between_dist: Distance cutoff in Angstroms for adding edges between graphs, defaults to 4.5.\n :type edges_between_dist: float, optional\n :return: Tuple containing \\n\n - node_feats (torch.FloatTensor): Features for each node in the combined graph, concatenated along the feature dimension.\\n\n - edges (torch.LongTensor): Edges of combined graph in COO format, including edges from two input graphs and edges between them, if specified.\\n\n - edge_weights (torch.FloatTensor): Concatenated edge features from two input graphs and edges between them, if specified.\\n\n - node_pos (torch.FloatTensor): x-y-z coordinates of each node in combined graph.\n :rtype: Tuple\n \"\"\" \n node_feats1, edges1, edge_feats1, pos1 = graph1\n node_feats2, edges2, edge_feats2, pos2 = graph2\n\n dummy_node_feats1 = torch.zeros(pos1.shape[0], node_feats2.shape[1])\n dummy_node_feats2 = torch.zeros(pos2.shape[0], node_feats1.shape[1])\n node_feats1 = torch.cat((node_feats1, dummy_node_feats1), dim=1)\n node_feats2 = torch.cat((dummy_node_feats2, node_feats2), dim=1)\n\n edges2 += pos1.shape[0]\n\n node_pos = torch.cat((pos1, pos2), dim=0)\n node_feats = torch.cat((node_feats1, node_feats2), dim=0)\n\n if edges_between:\n edges_between, edge_feats_between = edges_between_graphs(pos1, pos2)\n edge_feats = torch.cat((edge_feats1, edge_feats2, edge_feats_between), dim=0)\n edges = torch.cat((edges1, edges2, edges_between), dim=1)\n else:\n edge_feats = torch.cat((edge_feats1, edge_feats2), dim=0)\n edges = torch.cat((edges1, edges2), dim=1)\n\n return node_feats, edges, edge_feats, node_pos\n\n\ndef edges_between_graphs(pos1, pos2, dist=4.5):\n \"\"\"calculates edges between nodes in two separate graphs using a specified cutoff distance.\n\n :param pos1: x-y-z node coordinates from Graph 1\n :type pos1: torch.FloatTensor or numpy.ndarray\n :param pos2: x-y-z node coordinates from Graph 2\n :type pos2: torch.FloatTensor or numpy.ndarray\n :return: Tuple containing\\n\n - edges (torch.LongTensor): Edges between two graphs, in COO format.\\n\n - edge_weights (torch.FloatTensor): Edge weights between two 
graphs.\\n\n :rtype: Tuple\n \"\"\" \n tree1 = ss.KDTree(pos1)\n tree2 = ss.KDTree(pos2)\n res = tree1.query_ball_tree(tree2, r=dist)\n edges = []\n edge_weights = []\n for i, contacts in enumerate(res):\n if len(contacts) == 0:\n continue\n for j in contacts:\n edges.append((i, j + pos1.shape[0]))\n edge_weights.append(np.linalg.norm(pos1[i] - pos2[j]))\n\n edges = torch.LongTensor(edges).t().contiguous()\n edge_weights = torch.FloatTensor(edge_weights).view(-1, 1)\n return edges, edge_weights \n\n\ndef adjust_graph_indices(graph):\n \"\"\"Adjusts indices into graphs for concatenated multi-graph batches. Specifically, if each graph in the batch has a different selection index defined relative to that graph, the index is adjusted to be defined relative to the batch indexing.\n\n :param graph: Pytorch-geometric graph object representing a batch of graphs. Assumed to have a ``select_idx`` attribute set, specifying a node index for each graph\n :type graph: torch_geometric.data.Data\n :return: Same graph with selection indices adjusted\n :rtype: torch_geometric.data.Data\n \"\"\" \n batch_size = len(graph.n_nodes)\n total_n = 0\n for i in range(batch_size-1):\n n_nodes = graph.n_nodes[i].item()\n total_n += n_nodes\n graph.select_idx[i+1] += total_n\n return graph\n\n\n# below functions are adapted from DeepChem repository:\ndef one_of_k_encoding(x, allowable_set):\n \"\"\"Converts input to 1-hot encoding given a set of allowable values.\"\"\"\n if x not in allowable_set:\n raise Exception(\"input {0} not in allowable set{1}:\".format(x, allowable_set))\n return list(map(lambda s: x == s, allowable_set))\n\n\ndef one_of_k_encoding_unk(x, allowable_set):\n \"\"\"Converts input to 1-hot encoding given a set of allowable values. Additionally maps inputs not in the allowable set to the last element.\"\"\"\n if x not in allowable_set:\n x = allowable_set[-1]\n return list(map(lambda s: x == s, allowable_set))\n" ]
[ [ "torch.FloatTensor", "numpy.linalg.norm", "scipy.spatial.KDTree", "numpy.argwhere", "torch.zeros", "torch.LongTensor", "torch.cat" ] ]
Darel13712/rs_metrics
[ "bf1c2f6e02537508255bbf675c48a14f512e51de" ]
[ "tests/test_metrics.py" ]
[ "import numpy as np\nimport pandas as pd\nimport pytest\n\nfrom rs_metrics.metrics import _ndcg_score\nfrom rs_metrics import *\nfrom rs_metrics.statistics import item_pop\n\n\ndef test_dcg_score_1():\n assert _ndcg_score([1], [1], 1) == 1\n\n\ndef test_dcg_score_0():\n assert _ndcg_score([1], [0], 1) == 0\n\n\ndef test_dcg_score_half():\n idcg2 = (1 / np.log2(2) + 1 / np.log2(3))\n dcg = 1 / np.log2(3)\n assert _ndcg_score([1, 2], [0, 2], 2) == dcg / idcg2\n\n\ndef test_ndcg_test_less_than_k():\n y_true = {1: [1, 2, 3]}\n assert ndcg(y_true, y_true, 5) == ndcg(y_true, y_true, 3) == 1\n\n\ndef test_ndcg():\n y_true = {1: [1, 2], 2: [1, 2]}\n y_pred = {1: [1, 2], 2: [0, 0]}\n assert ndcg(y_true, y_pred, 2) == 0.5\n\n\ndef test_ndcg_pandas():\n y_true = pd.DataFrame([[1, 1], [1, 2]], columns=['user_idx', 'item_id'])\n y_pred = pd.DataFrame([[1, 1], [1, 0]], columns=['user_idx', 'item_id'])\n idcg2 = (1 / np.log2(2) + 1 / np.log2(3))\n dcg = 1 / np.log2(2)\n assert ndcg(y_true, y_pred, 2, user_col='user_idx') == dcg / idcg2\n\n\ndef test_a_ndcg_one_user():\n y_true = {1: [1, 2, 3]}\n y_pred = {1: [1, 2, 3]}\n sp = {1: [{1}, {2}, {3}]}\n assert a_ndcg(y_true, y_pred, sp, 3) == 1\n\n\ndef test_a_ndcg():\n y_true = {1: [1, 2, 3], 2: [1, 2, 3]}\n y_pred = {1: [1, 2, 3], 2: [0, 0, 0]}\n sp = {1: [{1, 2}, {3}], 2: [{1, 2, 3}]}\n u1_score = (1 + 0.4/np.log2(3) + 1/np.log2(4)) / (1 + 1/np.log2(3) + 0.4/np.log2(4))\n answer = (u1_score + 0) / 2\n assert a_ndcg(y_true, y_pred, sp, 3, 0.6) == answer\n\n\ndef test_hitrate():\n y_true = {1: [1, 2], 2: [1, 2]}\n y_pred = {1: [0, 1], 2: [0, 0]}\n assert hitrate(y_true, y_pred, 2) == 0.5\n\n\ndef test_precision():\n y_true = {1: [1, 0, 0, 2], 2: [1, 2]}\n y_pred = {1: [1, 2], 2: [1, 3]}\n assert precision(y_true, y_pred, 2) == 0.75\n\n\ndef test_recall():\n y_true = {1: [1, 2], 2: [1, 2]}\n y_pred = {1: [1, 3], 2: [0, 0]}\n assert recall(y_true, y_pred, 2) == 0.25\n\n\ndef test_mrr():\n y_true = {1: [1, 2], 2: [1, 2]}\n y_pred = {1: [1, 3], 2: [0, 1]}\n assert mrr(y_true, y_pred, 2) == 0.75\n\n\ndef test_map():\n y_true = {1: [1, 2], 2: [1, 2]}\n y_pred = {1: [1, 3], 2: [0, 1]}\n assert mapr(y_true, y_pred, 2) == 0.75\n\n\ndef test_mar():\n y_true = {1: [1, 2], 2: [1, 2]}\n y_pred = {1: [1, 3], 2: [0, 1]}\n assert mar(y_true, y_pred, 2) == 0.25\n\n\ndef test_coverage():\n items = [1, 2, 3, 4]\n pred = {1: [1, 2], 2: [2, 5]}\n assert coverage(items, pred) == 0.5\n\n\[email protected]\ndef log():\n return pd.DataFrame({'user_id': [1, 1, 2], 'item_id': [1, 2, 2]})\n\n\ndef test_item_pop(log):\n pops = item_pop(log)\n assert sum(pops) == 1.5\n\n\ndef test_popularity(log):\n pred = {1: [2], 2: [1]}\n assert popularity(log, pred, 2) == 0.75\n\n\ndef test_surprisal():\n df = pd.DataFrame({'user_id': [1, 2], 'item_id': [1, 2]})\n pred = {1: [2], 2: [1]}\n assert surprisal(df, pred, 2) == 1\n" ]
[ [ "numpy.log2", "pandas.DataFrame" ] ]
aytackanaci/deep-vehicle-reid
[ "9f951288a38f8b295b5c77cc6c9b26f0632ecea3" ]
[ "train_imgreid_dpfl_large_batch.py" ]
[ "from __future__ import print_function\nfrom __future__ import division\n\nimport os\nimport sys\nimport time\nimport datetime\nimport os.path as osp\nimport numpy as np\n\nimport torch\nimport torch.nn as nn\nimport torch.backends.cudnn as cudnn\nfrom torch.nn import functional as F\nfrom torch.optim import lr_scheduler\n\nfrom tensorboardX import SummaryWriter\n\nfrom args import argument_parser, image_dataset_kwargs, optimizer_kwargs\nfrom torchreid.data_manager import ImageDataManager\nfrom torchreid import models\nfrom torchreid.losses import CrossEntropyLoss, DeepSupervision\nfrom torchreid.utils.iotools import save_checkpoint, check_isfile\nfrom torchreid.utils.avgmeter import AverageMeter\nfrom torchreid.utils.loggers import Logger, RankLogger\nfrom torchreid.utils.torchtools import count_num_param, open_all_layers, open_specified_layers\nfrom torchreid.utils.reidtools import visualize_ranked_results\nfrom torchreid.utils.generaltools import set_random_seed\nfrom torchreid.eval_metrics import evaluate, accuracy\nfrom torchreid.optimizers import init_optimizer\n\ndef exp_name(cfg):\n name = [\n 'e_' + cfg.prefix,\n 'S_' + '-'.join(cfg.source_names),\n 'T_' + '-'.join(cfg.target_names),\n cfg.arch,\n 'E',\n '' if cfg.resume == '' else 'r',\n '' if cfg.fixbase_epoch is 0 else 'warmup' + str(cfg.fixbase_epoch),\n str(cfg.stepsize),\n 'm' + str(cfg.max_epoch),\n 'P',\n 'b' + str(cfg.train_batch_size),\n cfg.optim,\n 'lr' + str(cfg.lr),\n 'wd' + str(cfg.weight_decay),\n ]\n\n return '_'.join(name)\n\n# read config\nparser = argument_parser()\nargs = parser.parse_args()\nargs.fixbase_epoch = 0\nargs.arch = 'dpfl'\nargs.save_dir = exp_name(args)\n\n\ndef main():\n global args\n\n set_random_seed(args.seed)\n if not args.use_avai_gpus: os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_devices\n use_gpu = torch.cuda.is_available()\n if args.use_cpu: use_gpu = False\n log_name = 'log_test.txt' if args.evaluate else 'log_train.txt'\n sys.stdout = Logger(osp.join(args.save_dir, log_name))\n print(\"==========\\nArgs:{}\\n==========\".format(args))\n\n if use_gpu:\n print(\"Currently using GPU {}\".format(args.gpu_devices))\n cudnn.benchmark = True\n else:\n print(\"Currently using CPU, however, GPU is highly recommended\")\n\n print(\"Initializing MultiScale data manager\")\n assert args.train_batch_size % args.train_loss_batch_size == 0, \"'{}' is not divisable by {}\".format(args.train_loss_batch_size, args.train_loss_batch_size)\n dm = ImageDataManager(use_gpu, scales=[224,160], **image_dataset_kwargs(args))\n trainloader, testloader_dict = dm.return_dataloaders()\n # sys.exit(0)\n\n print(\"Initializing model: {}\".format(args.arch))\n model = models.init_model(name=args.arch, num_classes=dm.num_train_pids, input_size=args.width, loss={'xent'}, use_gpu=use_gpu)\n print(\"Model size: {:.3f} M\".format(count_num_param(model)))\n # print(model)\n\n criterion = CrossEntropyLoss(num_classes=dm.num_train_pids, use_gpu=use_gpu, label_smooth=args.label_smooth)\n optimizer = init_optimizer(model.parameters(), **optimizer_kwargs(args))\n scheduler = lr_scheduler.MultiStepLR(optimizer, milestones=args.stepsize, gamma=args.gamma)\n # # scheduler = lr_scheduler.ReduceLROnPlateau(optimizer, mode='min', factor=0.1, patience=3, verbose=True, threshold=1e-04)\n\n if args.load_weights and check_isfile(args.load_weights): # load pretrained weights but ignore layers that don't match in size\n checkpoint = torch.load(args.load_weights)\n pretrain_dict = checkpoint['state_dict']\n model_dict = model.state_dict()\n 
pretrain_dict = {k: v for k, v in pretrain_dict.items() if k in model_dict and model_dict[k].size() == v.size()}\n model_dict.update(pretrain_dict)\n model.load_state_dict(model_dict)\n print(\"Loaded pretrained weights from '{}'\".format(args.load_weights))\n\n if args.resume and check_isfile(args.resume):\n checkpoint = torch.load(args.resume)\n model.load_state_dict(checkpoint['state_dict'])\n args.start_epoch = checkpoint['epoch'] + 1\n print(\"Loaded checkpoint from '{}'\".format(args.resume))\n print(\"- start_epoch: {}\\n- rank1: {}\".format(args.start_epoch, checkpoint['rank1']))\n\n if use_gpu:\n model = nn.DataParallel(model).cuda()\n\n if args.evaluate:\n print(\"Evaluate only\")\n\n for name in args.target_names:\n print(\"Evaluating {} ...\".format(name))\n queryloader = testloader_dict[name]['query']\n galleryloader = testloader_dict[name]['gallery']\n test_set = dm.return_testdataset_by_name(name)\n rank1, mAP = test(model, test_set, name, queryloader, galleryloader, use_gpu, visualize=args.visualize_ranks)\n\n return\n\n start_time = time.time()\n ranklogger = RankLogger(args.source_names, args.target_names)\n maplogger = RankLogger(args.source_names, args.target_names)\n train_time = 0\n\n\n # Tensorboard\n writer = SummaryWriter(log_dir=osp.join('runs', args.save_dir))\n print(\"=> Start training\")\n\n\n if args.fixbase_epoch > 0:\n print(\"Train {} for {} epochs while keeping other layers frozen\".format(args.open_layers, args.fixbase_epoch))\n initial_optim_state = optimizer.state_dict()\n\n for epoch in range(args.fixbase_epoch):\n start_train_time = time.time()\n loss, prec1 = train(epoch, model, criterion, optimizer, trainloader, writer, use_gpu, fixbase=True)\n writer.add_scalar('train/loss', loss, epoch+1)\n writer.add_scalar('train/prec1', prec1, epoch+1)\n print('Epoch: [{:02d}] [Average Loss:] {:.4f}\\t [Average Prec.:] {:.2%}'.format(epoch+1, loss, prec1))\n train_time += round(time.time() - start_train_time)\n\n print(\"Done. 
All layers are open to train for {} epochs\".format(args.max_epoch))\n optimizer.load_state_dict(initial_optim_state)\n\n args.start_epoch += args.fixbase_epoch\n args.max_epoch += args.fixbase_epoch\n\n for epoch in range(args.start_epoch, args.max_epoch):\n start_train_time = time.time()\n loss, prec1 = train(epoch, model, criterion, optimizer, trainloader, writer, use_gpu)\n writer.add_scalar('train/loss', loss, epoch+1)\n writer.add_scalar('train/prec1', prec1, epoch+1)\n print('Epoch: [{:02d}] [Average Loss:] {:.4f}\\t [Average Prec.:] {:.2%}'.format(epoch+1, loss, prec1))\n train_time += round(time.time() - start_train_time)\n\n scheduler.step()\n\n if (epoch + 1) > args.start_eval and args.eval_freq > 0 and (epoch + 1) % args.eval_freq == 0 or (epoch + 1) == args.max_epoch:\n print(\"=> Test\")\n\n for name in args.target_names:\n print(\"Evaluating {} ...\".format(name))\n queryloader = testloader_dict[name]['query']\n galleryloader = testloader_dict[name]['gallery']\n\n test_set = dm.return_testdataset_by_name(name)\n\n if epoch+1 == args.max_epoch:\n rank1, mAP = test(model, test_set, name, queryloader, galleryloader, use_gpu, visualize=True)\n else:\n rank1, mAP = test(model, test_set, name, queryloader, galleryloader, use_gpu)\n\n writer.add_scalar(name + '_test/top1', rank1, epoch+1)\n writer.add_scalar(name + '_test/mAP', mAP, epoch+1)\n\n ranklogger.write(name, epoch + 1, rank1)\n maplogger.write(name, epoch + 1, mAP)\n\n if use_gpu:\n state_dict = model.module.state_dict()\n else:\n state_dict = model.state_dict()\n\n save_checkpoint({\n 'state_dict': state_dict,\n 'rank1': rank1,\n 'epoch': epoch,\n }, False, osp.join(args.save_dir, 'checkpoint_ep' + str(epoch + 1) + '.pth.tar'))\n\n\n # save last checkpoint\n save_checkpoint({\n 'state_dict': state_dict,\n 'rank1': rank1,\n 'epoch': epoch,\n }, False, osp.join(args.save_dir, 'checkpoint_ep' + str(epoch + 1) + '.pth.tar'))\n\n elapsed = round(time.time() - start_time)\n elapsed = str(datetime.timedelta(seconds=elapsed))\n train_time = str(datetime.timedelta(seconds=train_time))\n print(\"Finished. Total elapsed time (h:m:s): {}. 
Training time (h:m:s): {}.\".format(elapsed, train_time))\n ranklogger.show_summary()\n maplogger.show_summary()\n\n\ndef train(epoch, model, criterion, optimizer, trainloader, writer, use_gpu, fixbase=False):\n losses = AverageMeter()\n precisions = AverageMeter()\n batch_time = AverageMeter()\n data_time = AverageMeter()\n epoch_iterations = len(trainloader)\n\n model.train()\n\n if fixbase or args.always_fixbase:\n open_specified_layers(model, args.open_layers)\n else:\n open_all_layers(model)\n\n end = time.time()\n for batch_idx, ((img1, img2), pids, _, _) in enumerate(trainloader):\n data_time.update(time.time() - end)\n\n if use_gpu:\n img1, img2, pids = img1.cuda(), img2.cuda(), pids.cuda()\n\n y_large, y_small, y_joint = model(img1, img2)\n\n loss_batch = args.train_loss_batch_size\n how_many_mini = args.train_batch_size // loss_batch\n for mini_idx in range(how_many_mini):\n\n start_index = mini_idx * loss_batch\n end_index = start_index + loss_batch\n\n mini_y_large = y_large[start_index:end_index, :]\n mini_y_small = y_small[start_index:end_index, :]\n mini_y_joint = y_joint[start_index:end_index, :]\n mini_pids = pids[start_index:end_index]\n\n loss_large = criterion(mini_y_large, mini_pids)\n loss_small = criterion(mini_y_small, mini_pids)\n loss_joint = criterion(mini_y_joint, mini_pids)\n\n joint_prob = F.softmax(mini_y_joint, dim=1)\n loss_joint_large = criterion(mini_y_large, joint_prob, one_hot=True)\n loss_joint_small = criterion(mini_y_small, joint_prob, one_hot=True)\n\n total_loss_large = loss_large + loss_joint_large #+\n total_loss_small = loss_small + loss_joint_small #+\n total_loss_joint = loss_joint #+\n\n prec, = accuracy(mini_y_joint.data, mini_pids.data)\n prec1 = prec[0] # get top 1\n\n optimizer.zero_grad()\n\n # total_loss_large.backward(retain_graph=True)\n # total_loss_small.backward(retain_graph=True)\n # total_loss_joint.backward()\n # sum losses\n loss = total_loss_joint + total_loss_small + total_loss_large\n loss.backward(retain_graph=True)\n\n optimizer.step()\n\n loss_iter = epoch*epoch_iterations+batch_idx*how_many_mini+mini_idx\n writer.add_scalar('iter/loss_small', loss_small, loss_iter)\n writer.add_scalar('iter/loss_large', loss_large, loss_iter)\n writer.add_scalar('iter/loss_joint', loss_joint, loss_iter)\n writer.add_scalar('iter/loss_joint_small', loss_joint_small, loss_iter)\n writer.add_scalar('iter/loss_joint_large', loss_joint_large, loss_iter)\n writer.add_scalar('iter/total_loss_small', total_loss_small, loss_iter)\n writer.add_scalar('iter/total_loss_large', total_loss_large, loss_iter)\n writer.add_scalar('iter/total_loss_joint', total_loss_joint, loss_iter)\n writer.add_scalar('iter/loss', loss, loss_iter)\n\n\n losses.update(loss.item(), pids.size(0))\n precisions.update(prec1, pids.size(0))\n\n if (batch_idx*how_many_mini+mini_idx + 1) % args.print_freq == 0:\n print('Epoch: [{0:02d}][{1}/{2}]\\t'\n 'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\\t'\n 'Data {data_time.val:.4f} ({data_time.avg:.4f})\\t'\n 'Loss {loss.val:.4f} ({loss.avg:.4f})\\t'\n 'Prec {prec.val:.2%} ({prec.avg:.2%})\\t'.format(\n epoch + 1, batch_idx + 1, len(trainloader), batch_time=batch_time,\n data_time=data_time, loss=losses, prec=precisions))\n\n batch_time.update(time.time() - end)\n end = time.time()\n\n return losses.avg, precisions.avg\n\n\ndef test(model, test_set, name, queryloader, galleryloader, use_gpu, ranks=[1, 5, 10, 20], visualize=False):\n batch_time = AverageMeter()\n\n model.eval()\n\n with torch.no_grad():\n qf, q_pids, q_camids = 
[], [], []\n for batch_idx, ((img1, img2), pids, camids, _) in enumerate(queryloader):\n if use_gpu: img1, img2 = img1.cuda(), img2.cuda()\n\n end = time.time()\n features = model(img1, img2)\n batch_time.update(time.time() - end)\n\n features = features.data.cpu()\n qf.append(features)\n q_pids.extend(pids)\n q_camids.extend(camids)\n qf = torch.cat(qf, 0)\n q_pids = np.asarray(q_pids)\n q_camids = np.asarray(q_camids)\n\n print(\"Extracted features for query set, obtained {}-by-{} matrix\".format(qf.size(0), qf.size(1)))\n\n gf, g_pids, g_camids = [], [], []\n end = time.time()\n for batch_idx, ((img1, img2), pids, camids, _) in enumerate(galleryloader):\n if use_gpu: img1, img2 = img1.cuda(), img2.cuda()\n\n end = time.time()\n features = model(img1, img2)\n batch_time.update(time.time() - end)\n\n features = features.data.cpu()\n gf.append(features)\n g_pids.extend(pids)\n g_camids.extend(camids)\n gf = torch.cat(gf, 0)\n g_pids = np.asarray(g_pids)\n g_camids = np.asarray(g_camids)\n\n print(\"Extracted features for gallery set, obtained {}-by-{} matrix\".format(gf.size(0), gf.size(1)))\n\n print(\"=> BatchTime(s)/BatchSize(img): {:.3f}/{}\".format(batch_time.avg, args.test_batch_size))\n\n m, n = qf.size(0), gf.size(0)\n distmat = torch.pow(qf, 2).sum(dim=1, keepdim=True).expand(m, n) + \\\n torch.pow(gf, 2).sum(dim=1, keepdim=True).expand(n, m).t()\n distmat.addmm_(1, -2, qf, gf.t())\n distmat = distmat.numpy()\n\n print(\"Computing CMC and mAP\")\n cmc, mAP, all_AP = evaluate(distmat, q_pids, g_pids, q_camids, g_camids, use_metric_cuhk03=args.use_metric_cuhk03)\n\n if visualize:\n visualize_ranked_results(\n distmat, all_AP, test_set, name,\n save_path=args.save_dir,\n topk=100\n )\n\n print(\"Results ----------\")\n print(\"mAP: {:.1%}\".format(mAP))\n print(\"CMC curve\")\n for r in ranks:\n print(\"Rank-{:<3}: {:.1%}\".format(r, cmc[r-1]))\n print(\"------------------\")\n\n return cmc[0], mAP\n\n\nif __name__ == '__main__':\n main()\n" ]
[ [ "torch.load", "torch.nn.functional.softmax", "torch.no_grad", "numpy.asarray", "torch.cuda.is_available", "torch.optim.lr_scheduler.MultiStepLR", "torch.nn.DataParallel", "torch.cat", "torch.pow" ] ]
dhruv9vats/stingray
[ "e952762ebc098de42d8decf2d0df34f9e9b0c200" ]
[ "stingray/crossspectrum.py" ]
[ "import copy\nimport warnings\nfrom collections.abc import Iterable, Iterator\n\nimport numpy as np\nimport scipy\nimport scipy.optimize\nimport scipy.stats\n\nfrom stingray.exceptions import StingrayError\nfrom stingray.gti import bin_intervals_from_gtis, check_gtis, cross_two_gtis\nfrom stingray.largememory import createChunkedSpectra, saveData\nfrom stingray.utils import genDataPath, rebin_data, rebin_data_log, simon\n\nfrom .events import EventList\nfrom .lightcurve import Lightcurve\nfrom .utils import show_progress\n\n# location of factorial moved between scipy versions\ntry:\n from scipy.misc import factorial\nexcept ImportError:\n from scipy.special import factorial\n\ntry:\n from pyfftw.interfaces.scipy_fft import fft, fftfreq\nexcept ImportError:\n warnings.warn(\"pyfftw not installed. Using standard scipy fft\")\n from scipy.fft import fft, fftfreq\n\n__all__ = [\n \"Crossspectrum\", \"AveragedCrossspectrum\", \"coherence\", \"time_lag\",\n \"cospectra_pvalue\", \"normalize_crossspectrum\"\n]\n\n\ndef normalize_crossspectrum(unnorm_power, tseg, nbins, nphots1, nphots2, norm=\"none\", power_type=\"real\"):\n \"\"\"\n Normalize the real part of the cross spectrum to Leahy, absolute rms^2,\n fractional rms^2 normalization, or not at all.\n\n Parameters\n ----------\n unnorm_power: numpy.ndarray\n The unnormalized cross spectrum.\n\n tseg: int\n The length of the Fourier segment, in seconds.\n\n nbins : int\n Number of bins in the light curve\n\n nphots1 : int\n Number of photons in the light curve no. 1\n\n nphots2 : int\n Number of photons in the light curve no. 2\n\n Other parameters\n ----------------\n norm : str\n One of `'leahy'` (Leahy+83), `'frac'` (fractional rms), `'abs'`\n (absolute rms)\n\n power_type : str\n One of `'real'` (real part), `'all'` (all complex powers), `'abs'`\n (absolute value)\n\n Returns\n -------\n power: numpy.nd.array\n The normalized co-spectrum (real part of the cross spectrum). For\n 'none' normalization, imaginary part is returned as well.\n \"\"\"\n\n # The \"effective\" counts/bin is the geometrical mean of the counts/bin\n # of the two light curves. Same goes for counts/second in meanrate.\n\n log_nphots1 = np.log(nphots1)\n log_nphots2 = np.log(nphots2)\n\n actual_nphots = np.float64(np.sqrt(np.exp(log_nphots1 + log_nphots2)))\n\n if power_type == \"all\":\n c_num = unnorm_power\n elif power_type == \"real\":\n c_num = unnorm_power.real\n elif power_type == \"absolute\":\n c_num = np.absolute(unnorm_power)\n else:\n raise ValueError(\"`power_type` not recognized!\")\n\n if norm.lower() == 'leahy':\n power = c_num * 2. / actual_nphots\n\n elif norm.lower() == 'frac':\n meancounts1 = nphots1 / nbins\n meancounts2 = nphots2 / nbins\n\n actual_mean = np.sqrt(meancounts1 * meancounts2)\n\n assert actual_mean > 0.0, \\\n \"Mean count rate is <= 0. Something went wrong.\"\n\n c = c_num / float(nbins ** 2.)\n power = c * 2. * tseg / (actual_mean ** 2.0)\n\n elif norm.lower() == 'abs':\n meanrate = np.sqrt(nphots1 * nphots2) / tseg\n\n power = c_num * 2. 
* meanrate / actual_nphots\n\n elif norm.lower() == 'none':\n power = unnorm_power\n\n else:\n raise ValueError(\"Value for `norm` not recognized.\")\n\n return power\n\n\ndef normalize_crossspectrum_gauss(\n unnorm_power, mean_flux, var, dt, N, norm=\"none\", power_type=\"real\"):\n \"\"\"\n Normalize the real part of the cross spectrum to Leahy, absolute rms^2,\n fractional rms^2 normalization, or not at all.\n\n Parameters\n ----------\n unnorm_power: numpy.ndarray\n The unnormalized cross spectrum.\n\n mean_flux: float\n The mean flux of the light curve (if a cross spectrum, the geometrical\n mean of the flux in the two channels)\n\n var: float\n The variance of the light curve (if a cross spectrum, the geometrical\n mean of the variance in the two channels)\n\n dt: float\n The sampling time of the light curve\n\n N: int\n The number of bins in the light curve\n\n Other parameters\n ----------------\n norm : str\n One of `'leahy'` (Leahy+83), `'frac'` (fractional rms), `'abs'`\n (absolute rms)\n\n power_type : str\n One of `'real'` (real part), `'all'` (all complex powers), `'abs'`\n (absolute value)\n\n Returns\n -------\n power: numpy.nd.array\n The normalized co-spectrum (real part of the cross spectrum). For\n 'none' normalization, imaginary part is returned as well.\n\n Examples\n --------\n >>> lc_c = np.random.poisson(10000, 10000)\n >>> lc_c_var = 10000\n >>> lc = lc_c / 17.3453\n >>> lc_var = (100 / 17.3453)**2\n >>> pds_c = np.absolute(np.fft.fft(lc_c))**2\n >>> pds = np.absolute(np.fft.fft(lc))**2\n >>> norm_c = normalize_crossspectrum_gauss(pds_c, np.mean(lc_c), lc_c_var, 0.1, len(lc_c), norm='leahy')\n >>> norm = normalize_crossspectrum_gauss(pds, np.mean(lc), lc_var, 0.1, len(lc), norm='leahy')\n >>> np.allclose(norm, norm_c)\n True\n >>> np.isclose(np.mean(norm[1:]), 2, atol=0.1)\n True\n >>> norm_c = normalize_crossspectrum_gauss(pds_c, np.mean(lc_c), np.mean(lc_c), 0.1, len(lc_c), norm='frac')\n >>> norm = normalize_crossspectrum_gauss(pds, np.mean(lc), lc_var, 0.1, len(lc), norm='frac')\n >>> np.allclose(norm, norm_c)\n True\n >>> norm_c = normalize_crossspectrum_gauss(pds_c, np.mean(lc_c), np.mean(lc_c), 0.1, len(lc_c), norm='abs')\n >>> norm = normalize_crossspectrum_gauss(pds, np.mean(lc), lc_var, 0.1, len(lc), norm='abs')\n >>> np.allclose(norm / np.mean(lc)**2, norm_c / np.mean(lc_c)**2)\n True\n >>> np.isclose(np.mean(norm_c[2:]), 2 * np.mean(lc_c * 0.1), rtol=0.1)\n True\n \"\"\"\n\n # The \"effective\" counts/bin is the geometrical mean of the counts/bin\n # of the two light curves. 
Same goes for counts/second in meanrate.\n if power_type == \"all\":\n c_num = unnorm_power\n elif power_type == \"real\":\n c_num = unnorm_power.real\n elif power_type == \"absolute\":\n c_num = np.absolute(unnorm_power)\n else:\n raise ValueError(\"`power_type` not recognized!\")\n\n common_factor = 2 * dt / N\n rate_mean = mean_flux * dt\n if norm.lower() == 'leahy':\n norm = 2 / var / N\n\n elif norm.lower() == 'frac':\n norm = common_factor / rate_mean**2\n\n elif norm.lower() == 'abs':\n norm = common_factor\n\n elif norm.lower() == 'none':\n norm = 1\n\n else:\n raise ValueError(\"Value for `norm` not recognized.\")\n\n return norm * c_num\n\n\ndef _averaged_cospectra_cdf(xcoord, n):\n \"\"\"\n Function calculating the cumulative distribution function for\n averaged cospectra, Equation 19 of Huppenkothen & Bachetti (2018).\n\n Parameters\n ----------\n xcoord : float or iterable\n The cospectral power for which to calculate the CDF.\n\n n : int\n The number of averaged cospectra\n\n Returns\n -------\n cdf : float\n The value of the CDF at `xcoord` for `n` averaged cospectra\n \"\"\"\n if np.size(xcoord) == 1:\n xcoord = [xcoord]\n\n cdf = np.zeros_like(xcoord)\n\n for i, x in enumerate(xcoord):\n prefac_bottom1 = factorial(n - 1)\n for j in range(n):\n prefac_top = factorial(n - 1 + j)\n prefac_bottom2 = factorial(\n n - 1 - j) * factorial(j)\n prefac_bottom3 = 2.0 ** (n + j)\n\n prefac = prefac_top / (prefac_bottom1 * prefac_bottom2 *\n prefac_bottom3)\n\n gf = -j + n\n\n first_fac = scipy.special.gamma(gf)\n if x >= 0:\n second_fac = scipy.special.gammaincc(gf, n * x) * first_fac\n fac = 2.0 * first_fac - second_fac\n else:\n fac = scipy.special.gammaincc(gf, -n * x) * first_fac\n\n cdf[i] += (prefac * fac)\n if np.size(xcoord) == 1:\n return cdf[i]\n else:\n continue\n return cdf\n\n\ndef cospectra_pvalue(power, nspec):\n \"\"\"\n This function computes the single-trial p-value that the power was\n observed under the null hypothesis that there is no signal in\n the data.\n\n Important: the underlying assumption that make this calculation valid\n is that the powers in the power spectrum follow a Laplace distribution,\n and this requires that:\n\n 1. the co-spectrum is normalized according to [Leahy 1983]_\n 2. there is only white noise in the light curve. That is, there is no\n aperiodic variability that would change the overall shape of the power\n spectrum.\n\n Also note that the p-value is for a *single trial*, i.e. the power\n currently being tested. If more than one power or more than one power\n spectrum are being tested, the resulting p-value must be corrected for the\n number of trials (Bonferroni correction).\n\n Mathematical formulation in [Huppenkothen 2017]_.\n\n Parameters\n ----------\n power : float\n The squared Fourier amplitude of a spectrum to be evaluated\n\n nspec : int\n The number of spectra or frequency bins averaged in ``power``.\n This matters because averaging spectra or frequency bins increases\n the signal-to-noise ratio, i.e. makes the statistical distributions\n of the noise narrower, such that a smaller power might be very\n significant in averaged spectra even though it would not be in a single\n power spectrum.\n\n Returns\n -------\n pval : float\n The classical p-value of the observed power being consistent with\n the null hypothesis of white noise\n\n References\n ----------\n\n * .. [Leahy 1983] https://ui.adsabs.harvard.edu/#abs/1983ApJ...266..160L/abstract\n * .. 
[Huppenkothen 2017] http://adsabs.harvard.edu/abs/2018ApJS..236...13H\n\n \"\"\"\n if not np.all(np.isfinite(power)):\n raise ValueError(\"power must be a finite floating point number!\")\n\n # if power < 0:\n # raise ValueError(\"power must be a positive real number!\")\n\n if not np.isfinite(nspec):\n raise ValueError(\"nspec must be a finite integer number\")\n\n if not np.isclose(nspec % 1, 0):\n raise ValueError(\"nspec must be an integer number!\")\n\n if nspec < 1:\n raise ValueError(\"nspec must be larger or equal to 1\")\n\n elif nspec == 1:\n lapl = scipy.stats.laplace(0, 1)\n pval = lapl.sf(power)\n\n elif nspec > 50:\n exp_sigma = np.sqrt(2) / np.sqrt(nspec)\n gauss = scipy.stats.norm(0, exp_sigma)\n pval = gauss.sf(power)\n\n else:\n pval = 1. - _averaged_cospectra_cdf(power, nspec)\n\n return pval\n\n\ndef coherence(lc1, lc2):\n \"\"\"\n Estimate coherence function of two light curves.\n For details on the definition of the coherence, see Vaughan and Nowak,\n 1996 [#]_.\n\n Parameters\n ----------\n lc1: :class:`stingray.Lightcurve` object\n The first light curve data for the channel of interest.\n\n lc2: :class:`stingray.Lightcurve` object\n The light curve data for reference band\n\n Returns\n -------\n coh : ``np.ndarray``\n The array of coherence versus frequency\n\n References\n ----------\n .. [#] http://iopscience.iop.org/article/10.1086/310430/pdf\n \"\"\"\n\n if not isinstance(lc1, Lightcurve):\n raise TypeError(\"lc1 must be a lightcurve.Lightcurve object\")\n\n if not isinstance(lc2, Lightcurve):\n raise TypeError(\"lc2 must be a lightcurve.Lightcurve object\")\n\n cs = Crossspectrum(lc1, lc2, norm='none')\n\n return cs.coherence()\n\n\ndef time_lag(lc1, lc2):\n \"\"\"\n Estimate the time lag of two light curves.\n Calculate time lag and uncertainty.\n\n Equation from Bendat & Piersol, 2011 [bendat-2011]_.\n\n Returns\n -------\n lag : np.ndarray\n The time lag\n\n lag_err : np.ndarray\n The uncertainty in the time lag\n\n References\n ----------\n\n .. [bendat-2011] https://www.wiley.com/en-us/Random+Data%3A+Analysis+and+Measurement+Procedures%2C+4th+Edition-p-9780470248775\n\n \"\"\"\n\n if not isinstance(lc1, Lightcurve):\n raise TypeError(\"lc1 must be a lightcurve.Lightcurve object\")\n\n if not isinstance(lc2, Lightcurve):\n raise TypeError(\"lc2 must be a lightcurve.Lightcurve object\")\n\n cs = Crossspectrum(lc1, lc2, norm='none')\n lag = cs.time_lag()\n\n return lag\n\n\nclass Crossspectrum(object):\n \"\"\"\n Make a cross spectrum from a (binned) light curve.\n You can also make an empty :class:`Crossspectrum` object to populate with your\n own Fourier-transformed data (this can sometimes be useful when making\n binned power spectra). 
Stingray uses the scipy.fft standards for the sign\n of the Nyquist frequency.\n\n Parameters\n ----------\n data1: :class:`stingray.Lightcurve` or :class:`stingray.events.EventList`, optional, default ``None``\n The first light curve data for the channel/band of interest.\n\n data2: :class:`stingray.Lightcurve` or :class:`stingray.events.EventList`, optional, default ``None``\n The light curve data for the reference band.\n\n norm: {``frac``, ``abs``, ``leahy``, ``none``}, default ``none``\n The normalization of the (real part of the) cross spectrum.\n\n power_type: string, optional, default ``real``\n Parameter to choose among complete, real part and magnitude of the cross spectrum.\n\n fullspec: boolean, optional, default ``False``\n If False, keep only the positive frequencies, or if True, keep all of them .\n\n Other Parameters\n ----------------\n gti: 2-d float array\n ``[[gti0_0, gti0_1], [gti1_0, gti1_1], ...]`` -- Good Time intervals.\n This choice overrides the GTIs in the single light curves. Use with\n care!\n\n lc1: :class:`stingray.Lightcurve`object OR iterable of :class:`stingray.Lightcurve` objects\n For backwards compatibility only. Like ``data1``, but no\n :class:`stingray.events.EventList` objects allowed\n\n lc2: :class:`stingray.Lightcurve`object OR iterable of :class:`stingray.Lightcurve` objects\n For backwards compatibility only. Like ``data2``, but no\n :class:`stingray.events.EventList` objects allowed\n\n dt: float\n The time resolution of the light curve. Only needed when constructing\n light curves in the case where ``data1``, ``data2`` are\n :class:`EventList` objects\n\n\n Attributes\n ----------\n freq: numpy.ndarray\n The array of mid-bin frequencies that the Fourier transform samples\n\n power: numpy.ndarray\n The array of cross spectra (complex numbers)\n\n power_err: numpy.ndarray\n The uncertainties of ``power``.\n An approximation for each bin given by ``power_err= power/sqrt(m)``.\n Where ``m`` is the number of power averaged in each bin (by frequency\n binning, or averaging more than one spectra). Note that for a single\n realization (``m=1``) the error is equal to the power.\n\n df: float\n The frequency resolution\n\n m: int\n The number of averaged cross-spectra amplitudes in each bin.\n\n n: int\n The number of data points/time bins in one segment of the light\n curves.\n\n nphots1: float\n The total number of photons in light curve 1\n\n nphots2: float\n The total number of photons in light curve 2\n \"\"\"\n\n def __init__(self, data1=None, data2=None, norm='none', gti=None,\n lc1=None, lc2=None, power_type=\"real\", dt=None, fullspec=False):\n\n if isinstance(norm, str) is False:\n raise TypeError(\"norm must be a string\")\n\n if norm.lower() not in [\"frac\", \"abs\", \"leahy\", \"none\"]:\n raise ValueError(\"norm must be 'frac', 'abs', 'leahy', or 'none'!\")\n\n self.norm = norm.lower()\n\n # check if input data is a Lightcurve object, if not make one or\n # make an empty Crossspectrum object if lc1 == ``None`` or lc2 == ``None``\n\n if lc1 is not None or lc2 is not None:\n warnings.warn(\"The lcN keywords are now deprecated. 
Use dataN \"\n \"instead\", DeprecationWarning)\n # for backwards compatibility\n if data1 is None:\n data1 = lc1\n if data2 is None:\n data2 = lc2\n\n if data1 is None or data2 is None:\n if data1 is not None or data2 is not None:\n raise TypeError(\"You can't do a cross spectrum with just one \"\n \"light curve!\")\n else:\n self.freq = None\n self.power = None\n self.power_err = None\n self.df = None\n self.nphots1 = None\n self.nphots2 = None\n self.m = 1\n self.n = None\n return\n\n if (isinstance(data1, EventList) or isinstance(data2, EventList)) and \\\n dt is None:\n raise ValueError(\"If using event lists, please specify the bin \"\n \"time to generate lightcurves.\")\n\n if not isinstance(data1, EventList):\n lc1 = data1\n else:\n lc1 = data1.to_lc(dt)\n\n if not isinstance(data2, EventList):\n lc2 = data2\n elif isinstance(data2, EventList) and data2 is not data1:\n lc2 = data2.to_lc(dt)\n elif data2 is data1:\n lc2 = lc1\n\n self.gti = gti\n self.lc1 = lc1\n self.lc2 = lc2\n self.power_type = power_type\n self.fullspec = fullspec\n\n self._make_crossspectrum(lc1, lc2, fullspec)\n\n # These are needed to calculate coherence\n self._make_auxil_pds(lc1, lc2)\n\n def _make_auxil_pds(self, lc1, lc2):\n \"\"\"\n Helper method to create the power spectrum of both light curves\n independently.\n\n Parameters\n ----------\n lc1, lc2 : :class:`stingray.Lightcurve` objects\n Two light curves used for computing the cross spectrum.\n \"\"\"\n if lc1 is not lc2 and isinstance(lc1, Lightcurve):\n self.pds1 = Crossspectrum(lc1, lc1, norm='none')\n self.pds2 = Crossspectrum(lc2, lc2, norm='none')\n\n def _make_crossspectrum(self, lc1, lc2, fullspec=False):\n \"\"\"\n Auxiliary method computing the normalized cross spectrum from two\n light curves. This includes checking for the presence of and\n applying Good Time Intervals, computing the unnormalized Fourier\n cross-amplitude, and then renormalizing using the required\n normalization. 
Also computes an uncertainty estimate on the cross\n spectral powers.\n\n Parameters\n ----------\n lc1, lc2 : :class:`stingray.Lightcurve` objects\n Two light curves used for computing the cross spectrum.\n\n fullspec: boolean, default ``False``\n Return full frequency array (True) or just positive frequencies (False)\n\n \"\"\"\n\n # make sure the inputs work!\n if not isinstance(lc1, Lightcurve):\n raise TypeError(\"lc1 must be a lightcurve.Lightcurve object\")\n\n if not isinstance(lc2, Lightcurve):\n raise TypeError(\"lc2 must be a lightcurve.Lightcurve object\")\n\n if self.lc2.mjdref != self.lc1.mjdref:\n raise ValueError(\"MJDref is different in the two light curves\")\n\n # Then check that GTIs make sense\n if self.gti is None:\n self.gti = cross_two_gtis(lc1.gti, lc2.gti)\n\n check_gtis(self.gti)\n\n if self.gti.shape[0] != 1:\n raise TypeError(\"Non-averaged Cross Spectra need \"\n \"a single Good Time Interval\")\n\n lc1 = lc1.split_by_gti()[0]\n lc2 = lc2.split_by_gti()[0]\n\n # total number of photons is the sum of the\n # counts in the light curve\n self.meancounts1 = lc1.meancounts\n self.meancounts2 = lc2.meancounts\n self.nphots1 = np.float64(np.sum(lc1.counts))\n self.nphots2 = np.float64(np.sum(lc2.counts))\n\n self.err_dist = 'poisson'\n if lc1.err_dist == 'poisson':\n self.var1 = lc1.meancounts\n else:\n self.var1 = np.mean(lc1.counts_err) ** 2\n self.err_dist = 'gauss'\n\n if lc2.err_dist == 'poisson':\n self.var2 = lc2.meancounts\n else:\n self.var2 = np.mean(lc2.counts_err) ** 2\n self.err_dist = 'gauss'\n\n if lc1.n != lc2.n:\n raise StingrayError(\"Light curves do not have same number \"\n \"of time bins per segment.\")\n\n # If dt differs slightly, its propagated error must not be more than\n # 1/100th of the bin\n if not np.isclose(lc1.dt, lc2.dt, rtol=0.01 * lc1.dt / lc1.tseg):\n raise StingrayError(\"Light curves do not have same time binning \"\n \"dt.\")\n\n # In case a small difference exists, ignore it\n lc1.dt = lc2.dt\n\n self.dt = lc1.dt\n self.n = lc1.n\n\n # the frequency resolution\n self.df = 1.0 / lc1.tseg\n\n # the number of averaged periodograms in the final output\n # This should *always* be 1 here\n self.m = 1\n\n # make the actual Fourier transform and compute cross spectrum\n self.freq, self.unnorm_power = self._fourier_cross(lc1, lc2, fullspec)\n\n # If co-spectrum is desired, normalize here. Otherwise, get raw back\n # with the imaginary part still intact.\n self.power = self._normalize_crossspectrum(self.unnorm_power, lc1.tseg)\n\n if lc1.err_dist.lower() != lc2.err_dist.lower():\n simon(\"Your lightcurves have different statistics.\"\n \"The errors in the Crossspectrum will be incorrect.\")\n elif lc1.err_dist.lower() != \"poisson\":\n simon(\"Looks like your lightcurve statistic is not poisson.\"\n \"The errors in the Powerspectrum will be incorrect.\")\n\n if self.__class__.__name__ in ['Powerspectrum',\n 'AveragedPowerspectrum']:\n self.power_err = self.power / np.sqrt(self.m)\n elif self.__class__.__name__ in ['Crossspectrum',\n 'AveragedCrossspectrum']:\n # This is clearly a wild approximation.\n simon(\"Errorbars on cross spectra are not thoroughly tested. 
\"\n \"Please report any inconsistencies.\")\n unnorm_power_err = np.sqrt(2) / np.sqrt(self.m) # Leahy-like\n unnorm_power_err /= (2 / np.sqrt(self.nphots1 * self.nphots2))\n unnorm_power_err += np.zeros_like(self.power)\n\n self.power_err = \\\n self._normalize_crossspectrum(unnorm_power_err, lc1.tseg)\n else:\n self.power_err = np.zeros(len(self.power))\n\n def _fourier_cross(self, lc1, lc2, fullspec=False):\n \"\"\"\n Fourier transform the two light curves, then compute the cross spectrum.\n Computed as CS = lc1 x lc2* (where lc2 is the one that gets\n complex-conjugated). The user has the option to either get just the\n positive frequencies or the full spectrum.\n\n Parameters\n ----------\n lc1: :class:`stingray.Lightcurve` object\n One light curve to be Fourier transformed. Ths is the band of\n interest or channel of interest.\n\n lc2: :class:`stingray.Lightcurve` object\n Another light curve to be Fourier transformed.\n This is the reference band.\n\n fullspec: boolean. Default is False.\n If True, return the whole array of frequencies, or only positive frequencies (False).\n\n Returns\n -------\n fr: numpy.ndarray\n The squared absolute value of the Fourier amplitudes\n\n \"\"\"\n fourier_1 = fft(lc1.counts) # do Fourier transform 1\n fourier_2 = fft(lc2.counts) # do Fourier transform 2\n\n freqs = scipy.fft.fftfreq(lc1.n, lc1.dt)\n cross = np.multiply(fourier_1, np.conj(fourier_2))\n\n if fullspec is True:\n return freqs, cross\n else:\n return freqs[freqs > 0], cross[freqs > 0]\n\n def rebin(self, df=None, f=None, method=\"mean\"):\n \"\"\"\n Rebin the cross spectrum to a new frequency resolution ``df``.\n\n Parameters\n ----------\n df: float\n The new frequency resolution\n\n Other Parameters\n ----------------\n f: float\n the rebin factor. If specified, it substitutes df with ``f*self.df``\n\n Returns\n -------\n bin_cs = :class:`Crossspectrum` (or one of its subclasses) object\n The newly binned cross spectrum or power spectrum.\n Note: this object will be of the same type as the object\n that called this method. 
For example, if this method is called\n from :class:`AveragedPowerspectrum`, it will return an object of class\n :class:`AveragedPowerspectrum`, too.\n \"\"\"\n\n if f is None and df is None:\n raise ValueError('You need to specify at least one between f and '\n 'df')\n elif f is not None:\n df = f * self.df\n\n # rebin cross spectrum to new resolution\n binfreq, bincs, binerr, step_size = \\\n rebin_data(self.freq, self.power, df, self.power_err,\n method=method, dx=self.df)\n # make an empty cross spectrum object\n # note: syntax deliberate to work with subclass Powerspectrum\n bin_cs = copy.copy(self)\n\n # store the binned periodogram in the new object\n bin_cs.freq = binfreq\n bin_cs.power = bincs\n bin_cs.df = df\n bin_cs.n = self.n\n bin_cs.norm = self.norm\n bin_cs.nphots1 = self.nphots1\n bin_cs.power_err = binerr\n\n if hasattr(self, 'unnorm_power'):\n _, binpower_unnorm, _, _ = \\\n rebin_data(self.freq, self.unnorm_power, df,\n method=method, dx=self.df)\n\n bin_cs.unnorm_power = binpower_unnorm\n\n if hasattr(self, 'cs_all'):\n cs_all = []\n for c in self.cs_all:\n cs_all.append(c.rebin(df=df, f=f, method=method))\n bin_cs.cs_all = cs_all\n if hasattr(self, 'pds1'):\n bin_cs.pds1 = self.pds1.rebin(df=df, f=f, method=method)\n if hasattr(self, 'pds2'):\n bin_cs.pds2 = self.pds2.rebin(df=df, f=f, method=method)\n\n try:\n bin_cs.nphots2 = self.nphots2\n except AttributeError:\n if self.type == 'powerspectrum':\n pass\n else:\n raise AttributeError(\n 'Spectrum has no attribute named nphots2.')\n\n bin_cs.m = np.rint(step_size * self.m)\n\n return bin_cs\n\n def _normalize_crossspectrum(self, unnorm_power, tseg):\n \"\"\"\n Normalize the real part of the cross spectrum to Leahy, absolute rms^2,\n fractional rms^2 normalization, or not at all.\n\n Parameters\n ----------\n unnorm_power: numpy.ndarray\n The unnormalized cross spectrum.\n\n tseg: int\n The length of the Fourier segment, in seconds.\n\n Returns\n -------\n power: numpy.nd.array\n The normalized co-spectrum (real part of the cross spectrum). For\n 'none' normalization, imaginary part is returned as well.\n \"\"\"\n\n if self.err_dist == 'poisson':\n return normalize_crossspectrum(\n unnorm_power, tseg, self.n, self.nphots1, self.nphots2, self.norm,\n self.power_type)\n\n return normalize_crossspectrum_gauss(\n unnorm_power, np.sqrt(self.meancounts1 * self.meancounts2),\n np.sqrt(self.var1 * self.var2),\n dt=self.dt,\n N=self.n,\n norm=self.norm,\n power_type=self.power_type)\n\n def rebin_log(self, f=0.01):\n \"\"\"\n Logarithmic rebin of the periodogram.\n The new frequency depends on the previous frequency\n modified by a factor f:\n\n .. math::\n\n d\\\\nu_j = d\\\\nu_{j-1} (1+f)\n\n Parameters\n ----------\n f: float, optional, default ``0.01``\n parameter that steers the frequency resolution\n\n\n Returns\n -------\n new_spec : :class:`Crossspectrum` (or one of its subclasses) object\n The newly binned cross spectrum or power spectrum.\n Note: this object will be of the same type as the object\n that called this method. 
For example, if this method is called\n from :class:`AveragedPowerspectrum`, it will return an object of class\n \"\"\"\n\n binfreq, binpower, binpower_err, nsamples = \\\n rebin_data_log(self.freq, self.power, f,\n y_err=self.power_err, dx=self.df)\n\n # the frequency resolution\n df = np.diff(binfreq)\n\n # shift the lower bin edges to the middle of the bin and drop the\n # last right bin edge\n binfreq = binfreq[:-1] + df / 2\n\n new_spec = copy.copy(self)\n new_spec.freq = binfreq\n new_spec.power = binpower\n new_spec.power_err = binpower_err\n new_spec.m = nsamples * self.m\n\n if hasattr(self, 'unnorm_power'):\n _, binpower_unnorm, _, _ = \\\n rebin_data_log(self.freq, self.unnorm_power, f, dx=self.df)\n\n new_spec.unnorm_power = binpower_unnorm\n\n if hasattr(self, 'pds1'):\n new_spec.pds1 = self.pds1.rebin_log(f)\n if hasattr(self, 'pds2'):\n new_spec.pds2 = self.pds2.rebin_log(f)\n\n if hasattr(self, 'cs_all'):\n cs_all = []\n for c in self.cs_all:\n cs_all.append(c.rebin_log(f))\n new_spec.cs_all = cs_all\n\n return new_spec\n\n def coherence(self):\n \"\"\" Compute Coherence function of the cross spectrum.\n\n Coherence is defined in Vaughan and Nowak, 1996 [#]_.\n It is a Fourier frequency dependent measure of the linear correlation\n between time series measured simultaneously in two energy channels.\n\n Returns\n -------\n coh : numpy.ndarray\n Coherence function\n\n References\n ----------\n .. [#] http://iopscience.iop.org/article/10.1086/310430/pdf\n \"\"\"\n # this computes the averaged power spectrum, but using the\n # cross spectrum code to avoid circular imports\n\n return self.unnorm_power.real / (self.pds1.power.real *\n self.pds2.power.real)\n\n def _phase_lag(self):\n \"\"\"Return the fourier phase lag of the cross spectrum.\"\"\"\n return np.angle(self.unnorm_power)\n\n def time_lag(self):\n \"\"\"\n Calculate the fourier time lag of the cross spectrum. The time lag is\n calculate using the center of the frequency bins.\n \"\"\"\n if self.__class__ in [Crossspectrum, AveragedCrossspectrum]:\n ph_lag = self._phase_lag()\n\n return ph_lag / (2 * np.pi * self.freq)\n else:\n raise AttributeError(\"Object has no attribute named 'time_lag' !\")\n\n def plot(self, labels=None, axis=None, title=None, marker='-', save=False,\n filename=None):\n \"\"\"\n Plot the amplitude of the cross spectrum vs. the frequency using ``matplotlib``.\n\n Parameters\n ----------\n labels : iterable, default ``None``\n A list of tuple with ``xlabel`` and ``ylabel`` as strings.\n\n axis : list, tuple, string, default ``None``\n Parameter to set axis properties of the ``matplotlib`` figure. For example\n it can be a list like ``[xmin, xmax, ymin, ymax]`` or any other\n acceptable argument for the``matplotlib.pyplot.axis()`` method.\n\n title : str, default ``None``\n The title of the plot.\n\n marker : str, default '-'\n Line style and color of the plot. Line styles and colors are\n combined in a single format string, as in ``'bo'`` for blue\n circles. See ``matplotlib.pyplot.plot`` for more options.\n\n save : boolean, optional, default ``False``\n If ``True``, save the figure with specified filename.\n\n filename : str\n File name of the image to save. 
Depends on the boolean ``save``.\n \"\"\"\n try:\n import matplotlib.pyplot as plt\n except ImportError:\n raise ImportError(\"Matplotlib required for plot()\")\n\n plt.figure('crossspectrum')\n plt.plot(self.freq,\n np.abs(self.power),\n marker,\n color='b',\n label='Amplitude')\n plt.plot(self.freq,\n np.abs(self.power.real),\n marker,\n color='r',\n alpha=0.5,\n label='Real Part')\n plt.plot(self.freq,\n np.abs(self.power.imag),\n marker,\n color='g',\n alpha=0.5,\n label='Imaginary Part')\n\n if labels is not None:\n try:\n plt.xlabel(labels[0])\n plt.ylabel(labels[1])\n except TypeError:\n simon(\"``labels`` must be either a list or tuple with \"\n \"x and y labels.\")\n raise\n except IndexError:\n simon(\"``labels`` must have two labels for x and y \"\n \"axes.\")\n # Not raising here because in case of len(labels)==1, only\n # x-axis will be labelled.\n plt.legend(loc='best')\n if axis is not None:\n plt.axis(axis)\n\n if title is not None:\n plt.title(title)\n\n if save:\n if filename is None:\n plt.savefig('spec.png')\n else:\n plt.savefig(filename)\n else:\n plt.show(block=False)\n\n def classical_significances(self, threshold=1, trial_correction=False):\n \"\"\"\n Compute the classical significances for the powers in the power\n spectrum, assuming an underlying noise distribution that follows a\n chi-square distributions with 2M degrees of freedom, where M is the\n number of powers averaged in each bin.\n\n Note that this function will *only* produce correct results when the\n following underlying assumptions are fulfilled:\n\n 1. The power spectrum is Leahy-normalized\n 2. There is no source of variability in the data other than the\n periodic signal to be determined with this method. This is important!\n If there are other sources of (aperiodic) variability in the data, this\n method will *not* produce correct results, but instead produce a large\n number of spurious false positive detections!\n 3. There are no significant instrumental effects changing the\n statistical distribution of the powers (e.g. pile-up or dead time)\n\n By default, the method produces ``(index,p-values)`` for all powers in\n the power spectrum, where index is the numerical index of the power in\n question. If a ``threshold`` is set, then only powers with p-values\n *below* that threshold with their respective indices. If\n ``trial_correction`` is set to ``True``, then the threshold will be corrected\n for the number of trials (frequencies) in the power spectrum before\n being used.\n\n Parameters\n ----------\n threshold : float, optional, default ``1``\n The threshold to be used when reporting p-values of potentially\n significant powers. Must be between 0 and 1.\n Default is ``1`` (all p-values will be reported).\n\n trial_correction : bool, optional, default ``False``\n A Boolean flag that sets whether the ``threshold`` will be corrected\n by the number of frequencies before being applied. 
This decreases\n the ``threshold`` (p-values need to be lower to count as significant).\n Default is ``False`` (report all powers) though for any application\n where `threshold`` is set to something meaningful, this should also\n be applied!\n\n Returns\n -------\n pvals : iterable\n A list of ``(index, p-value)`` tuples for all powers that have p-values\n lower than the threshold specified in ``threshold``.\n\n \"\"\"\n if not self.norm == \"leahy\":\n raise ValueError(\"This method only works on \"\n \"Leahy-normalized power spectra!\")\n\n if np.size(self.m) == 1:\n # calculate p-values for all powers\n # leave out zeroth power since it just encodes the number of photons!\n pv = np.array([cospectra_pvalue(power, self.m)\n for power in self.power])\n else:\n pv = np.array([cospectra_pvalue(power, m)\n for power, m in zip(self.power, self.m)])\n\n # if trial correction is used, then correct the threshold for\n # the number of powers in the power spectrum\n if trial_correction:\n threshold /= self.power.shape[0]\n\n # need to add 1 to the indices to make up for the fact that\n # we left out the first power above!\n indices = np.where(pv < threshold)[0]\n\n pvals = np.vstack([pv[indices], indices])\n\n return pvals\n\n\nclass AveragedCrossspectrum(Crossspectrum):\n \"\"\"\n Make an averaged cross spectrum from a light curve by segmenting two\n light curves, Fourier-transforming each segment and then averaging the\n resulting cross spectra.\n\n Parameters\n ----------\n data1: :class:`stingray.Lightcurve`object OR iterable of :class:`stingray.Lightcurve` objects OR :class:`stingray.EventList` object\n A light curve from which to compute the cross spectrum. In some cases, this would\n be the light curve of the wavelength/energy/frequency band of interest.\n\n data2: :class:`stingray.Lightcurve`object OR iterable of :class:`stingray.Lightcurve` objects OR :class:`stingray.EventList` object\n A second light curve to use in the cross spectrum. In some cases, this would be\n the wavelength/energy/frequency reference band to compare the band of interest with.\n\n segment_size: float\n The size of each segment to average. Note that if the total\n duration of each :class:`Lightcurve` object in ``lc1`` or ``lc2`` is not an\n integer multiple of the ``segment_size``, then any fraction left-over\n at the end of the time series will be lost. Otherwise you introduce\n artifacts.\n\n norm: {``frac``, ``abs``, ``leahy``, ``none``}, default ``none``\n The normalization of the (real part of the) cross spectrum.\n\n Other Parameters\n ----------------\n gti: 2-d float array\n ``[[gti0_0, gti0_1], [gti1_0, gti1_1], ...]`` -- Good Time intervals.\n This choice overrides the GTIs in the single light curves. Use with\n care!\n\n dt : float\n The time resolution of the light curve. Only needed when constructing\n light curves in the case where data1 or data2 are of :class:EventList\n\n power_type: string, optional, default ``real``\n Parameter to choose among complete, real part and magnitude of\n the cross spectrum.\n\n silent : bool, default False\n Do not show a progress bar when generating an averaged cross spectrum.\n Useful for the batch execution of many spectra\n\n lc1: :class:`stingray.Lightcurve`object OR iterable of :class:`stingray.Lightcurve` objects\n For backwards compatibility only. Like ``data1``, but no\n :class:`stingray.events.EventList` objects allowed\n\n lc2: :class:`stingray.Lightcurve`object OR iterable of :class:`stingray.Lightcurve` objects\n For backwards compatibility only. 
Like ``data2``, but no\n :class:`stingray.events.EventList` objects allowed\n\n fullspec: boolean, optional, default ``False``\n If True, return the full array of frequencies, otherwise return just the\n positive frequencies.\n\n large_data : bool, default False\n Use only for data larger than 10**7 data points!! Uses zarr and dask for computation.\n\n save_all : bool, default False\n Save all intermediate PDSs used for the final average. Use with care.\n This is likely to fill up your RAM on medium-sized datasets, and to\n slow down the computation when rebinning.\n\n Attributes\n ----------\n freq: numpy.ndarray\n The array of mid-bin frequencies that the Fourier transform samples\n\n power: numpy.ndarray\n The array of cross spectra\n\n power_err: numpy.ndarray\n The uncertainties of ``power``.\n An approximation for each bin given by ``power_err= power/sqrt(m)``.\n Where ``m`` is the number of power averaged in each bin (by frequency\n binning, or averaging powerspectrum). Note that for a single\n realization (``m=1``) the error is equal to the power.\n\n df: float\n The frequency resolution\n\n m: int\n The number of averaged cross spectra\n\n n: int\n The number of time bins per segment of light curve\n\n nphots1: float\n The total number of photons in the first (interest) light curve\n\n nphots2: float\n The total number of photons in the second (reference) light curve\n\n gti: 2-d float array\n ``[[gti0_0, gti0_1], [gti1_0, gti1_1], ...]`` -- Good Time intervals.\n They are calculated by taking the common GTI between the\n two light curves\n \"\"\"\n\n def __init__(self, data1=None, data2=None, segment_size=None, norm='none',\n gti=None, power_type=\"real\", silent=False, lc1=None, lc2=None,\n dt=None, fullspec=False, large_data=False, save_all=False):\n\n\n if lc1 is not None or lc2 is not None:\n warnings.warn(\"The lcN keywords are now deprecated. 
Use dataN \"\n \"instead\", DeprecationWarning)\n # for backwards compatibility\n if data1 is None:\n data1 = lc1\n if data2 is None:\n data2 = lc2\n\n if segment_size is None and data1 is not None:\n raise ValueError(\"segment_size must be specified\")\n if segment_size is not None and not np.isfinite(segment_size):\n raise ValueError(\"segment_size must be finite!\")\n\n if large_data and data1 is not None and data2 is not None:\n if isinstance(data1, EventList):\n input_data = 'EventList'\n elif isinstance(data1, Lightcurve):\n input_data = 'Lightcurve'\n chunks = int(np.rint(segment_size // data1.dt))\n segment_size = chunks * data1.dt\n else:\n raise ValueError(\n f'Invalid input data type: {type(data1).__name__}')\n\n dir_path1 = saveData(data1, persist=False, chunks=chunks)\n dir_path2 = saveData(data2, persist=False, chunks=chunks)\n\n data_path1 = genDataPath(dir_path1)\n data_path2 = genDataPath(dir_path2)\n\n spec = createChunkedSpectra(input_data,\n 'AveragedCrossspectrum',\n data_path=list(data_path1 +\n data_path2),\n segment_size=segment_size,\n norm=norm,\n gti=gti,\n power_type=power_type,\n silent=silent,\n dt=dt)\n\n for key, val in spec.__dict__.items():\n setattr(self, key, val)\n\n return\n\n self.type = \"crossspectrum\"\n\n\n self.segment_size = segment_size\n self.power_type = power_type\n self.fullspec = fullspec\n\n self.show_progress = not silent\n self.dt = dt\n self.save_all = save_all\n\n if isinstance(data1, EventList):\n lengths = data1.gti[:, 1] - data1.gti[:, 0]\n good = lengths >= segment_size\n data1.gti = data1.gti[good]\n data1 = list(data1.to_lc_list(dt))\n\n if isinstance(data2, EventList):\n lengths = data2.gti[:, 1] - data2.gti[:, 0]\n good = lengths >= segment_size\n data2.gti = data2.gti[good]\n data2 = list(data2.to_lc_list(dt))\n\n Crossspectrum.__init__(self, data1, data2, norm, gti=gti,\n power_type=power_type, dt=dt, fullspec=fullspec)\n\n return\n\n def _make_auxil_pds(self, lc1, lc2):\n \"\"\"\n Helper method to create the power spectrum of both light curves\n independently.\n\n Parameters\n ----------\n lc1, lc2 : :class:`stingray.Lightcurve` objects\n Two light curves used for computing the cross spectrum.\n \"\"\"\n is_event = isinstance(lc1, EventList)\n is_lc = isinstance(lc1, Lightcurve)\n is_lc_iter = isinstance(lc1, Iterator)\n is_lc_list = isinstance(lc1, Iterable) and not is_lc_iter\n # A way to say that this is actually not a power spectrum\n if self.type != \"powerspectrum\" and \\\n (lc1 is not lc2) and (is_event or is_lc or is_lc_list):\n self.pds1 = AveragedCrossspectrum(lc1, lc1,\n segment_size=self.segment_size,\n norm='none', gti=self.gti,\n power_type=self.power_type,\n dt=self.dt, fullspec=self.fullspec,\n save_all=self.save_all)\n\n self.pds2 = AveragedCrossspectrum(lc2, lc2,\n segment_size=self.segment_size,\n norm='none', gti=self.gti,\n power_type=self.power_type,\n dt=self.dt, fullspec=self.fullspec,\n save_all=self.save_all)\n\n def _make_segment_spectrum(self, lc1, lc2, segment_size, silent=False):\n \"\"\"\n Split the light curves into segments of size ``segment_size``, and calculate a cross spectrum for\n each.\n\n Parameters\n ----------\n lc1, lc2 : :class:`stingray.Lightcurve` objects\n Two light curves used for computing the cross spectrum.\n\n segment_size : ``numpy.float``\n Size of each light curve segment to use for averaging.\n\n Other parameters\n ----------------\n silent : bool, default False\n Suppress progress bars\n\n Returns\n -------\n cs_all : list of :class:`Crossspectrum`` objects\n A list 
of cross spectra calculated independently from each light curve segment\n\n nphots1_all, nphots2_all : ``numpy.ndarray` for each of ``lc1`` and ``lc2``\n Two lists containing the number of photons for all segments calculated from ``lc1`` and ``lc2``.\n\n \"\"\"\n\n assert isinstance(lc1, Lightcurve)\n assert isinstance(lc2, Lightcurve)\n\n if lc1.tseg != lc2.tseg:\n simon(\"Lightcurves do not have same tseg. This means that the data\"\n \"from the two channels are not completely in sync. This \"\n \"might or might not be an issue. Keep an eye on it.\")\n\n # If dt differs slightly, its propagated error must not be more than\n # 1/100th of the bin\n if not np.isclose(lc1.dt, lc2.dt, rtol=0.01 * lc1.dt / lc1.tseg):\n raise ValueError(\"Light curves do not have same time binning dt.\")\n\n # In case a small difference exists, ignore it\n lc1.dt = lc2.dt\n\n current_gtis = cross_two_gtis(lc1.gti, lc2.gti)\n lc1.gti = lc2.gti = current_gtis\n lc1.apply_gtis()\n lc2.apply_gtis()\n\n if self.gti is None:\n self.gti = current_gtis\n else:\n if not np.allclose(self.gti, current_gtis):\n self.gti = np.vstack([self.gti, current_gtis])\n\n check_gtis(current_gtis)\n\n cs_all = []\n nphots1_all = []\n nphots2_all = []\n\n start_inds, end_inds = \\\n bin_intervals_from_gtis(current_gtis, segment_size, lc1.time,\n dt=lc1.dt)\n simon(\"Errorbars on cross spectra are not thoroughly tested. \"\n \"Please report any inconsistencies.\")\n\n local_show_progress = show_progress\n if not self.show_progress or silent:\n local_show_progress = lambda a: a\n\n for start_ind, end_ind in \\\n local_show_progress(zip(start_inds, end_inds)):\n time_1 = copy.deepcopy(lc1.time[start_ind:end_ind])\n counts_1 = copy.deepcopy(lc1.counts[start_ind:end_ind])\n counts_1_err = copy.deepcopy(lc1.counts_err[start_ind:end_ind])\n time_2 = copy.deepcopy(lc2.time[start_ind:end_ind])\n counts_2 = copy.deepcopy(lc2.counts[start_ind:end_ind])\n counts_2_err = copy.deepcopy(lc2.counts_err[start_ind:end_ind])\n if np.sum(counts_1) == 0 or np.sum(counts_2) == 0:\n warnings.warn(\n \"No counts in interval {}--{}s\".format(time_1[0],\n time_1[-1]))\n continue\n\n gti1 = np.array([[time_1[0] - lc1.dt / 2,\n time_1[-1] + lc1.dt / 2]])\n gti2 = np.array([[time_2[0] - lc2.dt / 2,\n time_2[-1] + lc2.dt / 2]])\n lc1_seg = Lightcurve(time_1, counts_1, err=counts_1_err,\n err_dist=lc1.err_dist,\n gti=gti1,\n dt=lc1.dt, skip_checks=True)\n lc2_seg = Lightcurve(time_2, counts_2, err=counts_2_err,\n err_dist=lc2.err_dist,\n gti=gti2,\n dt=lc2.dt, skip_checks=True)\n with warnings.catch_warnings(record=True) as w:\n cs_seg = Crossspectrum(lc1_seg, lc2_seg, norm=self.norm,\n power_type=self.power_type, fullspec=self.fullspec)\n\n cs_all.append(cs_seg)\n nphots1_all.append(np.sum(lc1_seg.counts))\n nphots2_all.append(np.sum(lc2_seg.counts))\n\n return cs_all, nphots1_all, nphots2_all\n\n def _make_crossspectrum(self, lc1, lc2, fullspec=False):\n \"\"\"\n Auxiliary method computing the normalized cross spectrum from two light curves.\n This includes checking for the presence of and applying Good Time Intervals, computing the\n unnormalized Fourier cross-amplitude, and then renormalizing using the required normalization.\n Also computes an uncertainty estimate on the cross spectral powers. 
Stingray uses the\n scipy.fft standards for the sign of the Nyquist frequency.\n\n Parameters\n ----------\n lc1, lc2 : :class:`stingray.Lightcurve` objects\n Two light curves used for computing the cross spectrum.\n\n fullspec: boolean, default ``False``,\n If True, return all frequencies otherwise return only positive frequencies\n \"\"\"\n local_show_progress = show_progress\n if not self.show_progress:\n local_show_progress = lambda a: a\n\n # chop light curves into segments\n if isinstance(lc1, Lightcurve) and \\\n isinstance(lc2, Lightcurve):\n\n if self.type == \"crossspectrum\":\n cs_all, nphots1_all, nphots2_all = \\\n self._make_segment_spectrum(lc1, lc2, self.segment_size)\n\n elif self.type == \"powerspectrum\":\n cs_all, nphots1_all = \\\n self._make_segment_spectrum(lc1, self.segment_size)\n\n else:\n raise ValueError(\"Type of spectrum not recognized!\")\n\n else:\n cs_all, nphots1_all, nphots2_all = [], [], []\n\n for lc1_seg, lc2_seg in local_show_progress(zip(lc1, lc2)):\n if self.type == \"crossspectrum\":\n cs_sep, nphots1_sep, nphots2_sep = \\\n self._make_segment_spectrum(lc1_seg, lc2_seg,\n self.segment_size,\n silent=True)\n nphots2_all.append(nphots2_sep)\n elif self.type == \"powerspectrum\":\n cs_sep, nphots1_sep = \\\n self._make_segment_spectrum(lc1_seg, self.segment_size,\n silent=True)\n\n else:\n raise ValueError(\"Type of spectrum not recognized!\")\n cs_all.append(cs_sep)\n nphots1_all.append(nphots1_sep)\n\n cs_all = np.hstack(cs_all)\n nphots1_all = np.hstack(nphots1_all)\n\n if self.type == \"crossspectrum\":\n nphots2_all = np.hstack(nphots2_all)\n\n m = len(cs_all)\n nphots1 = np.mean(nphots1_all)\n\n power_avg = np.zeros_like(cs_all[0].power)\n power_err_avg = np.zeros_like(cs_all[0].power_err)\n unnorm_power_avg = np.zeros_like(cs_all[0].unnorm_power)\n for cs in cs_all:\n power_avg += cs.power\n unnorm_power_avg += cs.unnorm_power\n power_err_avg += (cs.power_err) ** 2\n\n power_avg /= float(m)\n power_err_avg = np.sqrt(power_err_avg) / m\n unnorm_power_avg /= float(m)\n\n self.freq = cs_all[0].freq\n self.power = power_avg\n self.unnorm_power = unnorm_power_avg\n self.m = m\n self.power_err = power_err_avg\n self.df = cs_all[0].df\n self.n = cs_all[0].n\n self.nphots1 = nphots1\n if self.save_all:\n self.cs_all = cs_all\n\n if self.type == \"crossspectrum\":\n self.nphots1 = nphots1\n nphots2 = np.mean(nphots2_all)\n\n self.nphots2 = nphots2\n\n def coherence(self):\n \"\"\"Averaged Coherence function.\n\n\n Coherence is defined in Vaughan and Nowak, 1996 [#]_.\n It is a Fourier frequency dependent measure of the linear correlation\n between time series measured simultaneously in two energy channels.\n\n Compute an averaged Coherence function of cross spectrum by computing\n coherence function of each segment and averaging them. The return type\n is a tuple with first element as the coherence function and the second\n element as the corresponding uncertainty associated with it.\n\n Note : The uncertainty in coherence function is strictly valid for Gaussian \\\n statistics only.\n\n Returns\n -------\n (coh, uncertainty) : tuple of np.ndarray\n Tuple comprising the coherence function and uncertainty.\n\n References\n ----------\n .. [#] http://iopscience.iop.org/article/10.1086/310430/pdf\n \"\"\"\n if np.any(self.m < 50):\n simon(\"Number of segments used in averaging is \"\n \"significantly low. 
The result might not follow the \"\n \"expected statistical distributions.\")\n\n # Calculate average coherence\n unnorm_power_avg = self.unnorm_power\n\n num = np.absolute(unnorm_power_avg) ** 2\n\n # The normalization was 'none'!\n unnorm_powers_avg_1 = self.pds1.power.real\n unnorm_powers_avg_2 = self.pds2.power.real\n\n coh = num / (unnorm_powers_avg_1 * unnorm_powers_avg_2)\n coh[~np.isfinite(coh)] = 0.0\n\n # Calculate uncertainty\n uncertainty = \\\n (2 ** 0.5 * coh * (1 - coh)) / (np.sqrt(coh) * self.m ** 0.5)\n\n uncertainty[coh == 0] = 0.0\n\n return (coh, uncertainty)\n\n def time_lag(self):\n \"\"\"Calculate time lag and uncertainty.\n\n Equation from Bendat & Piersol, 2011 [bendat-2011]__.\n\n Returns\n -------\n lag : np.ndarray\n The time lag\n\n lag_err : np.ndarray\n The uncertainty in the time lag\n \"\"\"\n lag = super(AveragedCrossspectrum, self).time_lag()\n coh, uncert = self.coherence()\n\n dum = (1. - coh) / (2. * coh)\n\n dum[coh == 0] = 0.0\n\n lag_err = np.sqrt(dum / self.m) / (2 * np.pi * self.freq)\n\n return lag, lag_err\n" ]
[ [ "numpy.sum", "scipy.special.factorial", "numpy.diff", "numpy.any", "numpy.isclose", "numpy.size", "scipy.special.gamma", "numpy.log", "matplotlib.pyplot.ylabel", "numpy.isfinite", "numpy.vstack", "numpy.allclose", "matplotlib.pyplot.figure", "matplotlib.pyplot.savefig", "numpy.abs", "matplotlib.pyplot.title", "scipy.fft.fftfreq", "numpy.absolute", "numpy.where", "numpy.mean", "scipy.fft.fft", "matplotlib.pyplot.axis", "numpy.hstack", "numpy.array", "scipy.special.gammaincc", "numpy.rint", "numpy.zeros_like", "matplotlib.pyplot.legend", "numpy.conj", "numpy.exp", "scipy.stats.laplace", "matplotlib.pyplot.show", "scipy.stats.norm", "numpy.angle", "numpy.sqrt", "matplotlib.pyplot.xlabel" ] ]
SanjibSarkarU/EDRC
[ "c2408fad8b007b4709ee91caf173f98612afadb1" ]
[ "iver.py" ]
[ "import datetime\nimport re\nimport threading\nimport time\nimport tkinter as tk\nfrom collections import deque\nfrom queue import Queue\nfrom time import monotonic\n\nimport pandas as pd\nimport rasterio\nimport serial\nfrom geographiclib.geodesic import Geodesic\nfrom matplotlib import pyplot as plt, animation\nfrom matplotlib.backends.backend_tkagg import FigureCanvasTkAgg\nfrom rasterio.plot import show\n\nimport functions\n# just checking git\nrf, ac = 'COM11', 'COM13'\n# rf, ac = 'COM5', 'COM7'\n\nser_rf = serial.Serial(rf, baudrate=9600, bytesize=8, parity='N', stopbits=1, timeout=1, xonxoff=0)\nser_ac = serial.Serial(ac, baudrate=9600, bytesize=8, parity='N', stopbits=1, timeout=1, xonxoff=0)\n\n\nclass App(tk.Frame):\n def __init__(self, master=None, **kwargs):\n tk.Frame.__init__(self, master, **kwargs)\n self.event_plot = threading.Event()\n self.q_plot = Queue()\n self.current_position_iver = {}\n self.disnc_remaining = 0\n self.wp_nxt = '1'\n self.auv = '3089'\n self.omw_clear = False\n self.q_wp_omw = Queue()\n self.send_through_rf = True\n self.send_through_ac = False\n\n self.running = False\n self.ani = None\n btns = tk.Frame(self)\n btns.pack()\n\n lbl = tk.Label(btns, text=\"update interval (ms)\")\n lbl.pack(side=tk.LEFT)\n\n self.interval = tk.Entry(btns, width=5)\n self.intervl = 20\n self.interval.insert(0, str(self.intervl))\n self.interval.pack(side=tk.LEFT)\n\n self.btn = tk.Button(btns, text='Start', command=self.on_click)\n self.btn.pack(side=tk.LEFT)\n\n self.btn_rf = tk.Button(btns, text='RF', command=self.rf)\n self.btn_rf.pack(side=tk.LEFT)\n\n self.btn_ac = tk.Button(btns, text='AC', command=self.ac)\n self.btn_ac.pack(side=tk.LEFT)\n\n self.btn_exit = tk.Button(btns, text='Exit', command=quit)\n self.btn_exit.pack(side=tk.LEFT)\n\n self.fig = plt.Figure()\n self.ax1 = self.fig.add_subplot(111)\n self.line_iver, = self.ax1.plot([], [], 'r-', linewidth=1.5)\n self.canvas = FigureCanvasTkAgg(self.fig, master=master)\n img = rasterio.open('Stennis_QW.tif') # 'Cat_Island_Low.tif' , 'Stennis_QW.tif'\n show(img, ax=self.ax1)\n self.canvas.get_tk_widget().pack(expand=True)\n self.canvas.figure.tight_layout()\n self.geod = Geodesic(6378388, 1 / 297.0)\n # self.waypoints_iver = [[30.35099, -89.63138, 3], [30.35125, -89.63079, 3.5]]\n # self.waypoints_iver = [[30.3603, -89.0942, 10.5], [30.3546, -89.0734, 14.5],\n # [30.3151, -89.0589, 5.5], [30.2833, -89.0693, 3.0]]\n self.waypoints_iver = [[30.35099, -89.63138, 3], [30.35125, -89.63079, 3.5],\n [30.35173, -89.63064, 3], [30.35203, -89.62992, 3],\n [30.35247, -89.62979, 4], [30.35270, -89.62917, 4],\n [30.35322, -89.62920, 3.5], [30.35345, -89.62827, 4],\n [30.35099, -89.63138, 3.5]]\n # self.waypoints_iver = [[30.3612, -89.1002, 9], [30.3569, -89.1003, 9.5],\n # [30.3666, -89.1004, 5]]\n self.total_WPs = len(self.waypoints_iver)\n df = pd.DataFrame(self.waypoints_iver, columns=['lat', 'lon', 'speed'])\n self.ax1.scatter(df['lon'], df['lat'], color='red', marker='.', s=250,\n linewidths=0.05) # facecolors='none', edgecolors='r',\n for i in range(len(df)):\n self.ax1.scatter(df.lon[i], df.lat[i], marker=\"$\" + str(i + 1) + \"$\", color='black', linewidths=.09)\n\n HISTORY_LEN = 2000000\n self.xdata = deque([], maxlen=HISTORY_LEN)\n self.ydata = deque([], maxlen=HISTORY_LEN)\n\n def on_click(self):\n if self.ani is None:\n return self.start()\n if self.running:\n self.ani.event_source.stop()\n self.btn.config(text='Un-Pause')\n else:\n self.ani.event_source.start()\n self.btn.config(text='Pause')\n self.running = 
not self.running\n\n def start(self):\n threading.Thread(target=self.iver, daemon=True).start()\n threading.Thread(target=self.read_comports, daemon=True).start()\n self.ani = animation.FuncAnimation(\n self.fig,\n self.update_graph,\n # frames=self.lat_w.size - 1,\n interval=int(self.interval.get()),\n repeat=False,\n blit=True)\n self.running = True\n self.btn.config(text='Pause')\n self.ani._start()\n\n def iver_status(self):\n # print (nxt_wp)\n # 1 m/s = 1.94384 Knot\n iver_sta = '$OSI,8080808080,S,' + self.wp_nxt + ',' + \\\n str(self.current_position_iver['Latitude']) + ',' + str(self.current_position_iver['Longitude']) \\\n + ',' + str(self.current_position_iver['speed'] * 1.94384) + ',' + str(self.disnc_remaining) \\\n + ',N,0.000,P0,-1.4743,,0,292.5,0.0,94.3,False,IVER3-3089,2.5,True,False ' + '*'\n return '$AC;IVER3-' + self.auv + ';' + iver_sta + functions.check_sum(iver_sta) + '\\r\\n'\n\n def osd_ACK(self):\n return '$AC;IVER3-' + self.auv + ';$ACK,8,0,0*5D' + '\\r\\n'\n\n def omw_Ack(self):\n ack = '$ACK,16,0,0*'\n return '$AC;IVER3-' + self.auv + ';' + ack + functions.check_sum(ack) + '\\r\\n'\n\n def rf(self):\n if self.send_through_rf:\n self.send_through_rf = False\n self.send_through_ac = True\n self.btn_rf.config(text='RF-stop')\n self.btn_ac.config(text='AC-on')\n else:\n self.send_through_rf = True\n self.send_through_ac = False\n self.btn_rf.config(text='RF-on')\n self.btn_ac.config(text='AC-stop')\n\n def ac(self):\n if self.send_through_ac:\n self.send_through_ac = False\n self.send_through_rf = True\n self.btn_ac.config(text='AC-stop')\n self.btn_ac.config(text='AC-on')\n else:\n self.send_through_ac = True\n self.send_through_rf = False\n self.btn_ac.config(text='AC-on')\n self.btn_rf.config(text='RF-stop')\n\n def iver(self):\n print(datetime.datetime.now(), ': started')\n lat_i_past, lng_i_past, _ = self.waypoints_iver[0]\n while self.waypoints_iver:\n t_start = monotonic()\n lat_i_nxt, lng_i_nxt, speed_i = self.waypoints_iver[0]\n # speed_i *= 0.51 # * 1 knot = 0.514 m/s\n l = self.geod.InverseLine(lat_i_past, lng_i_past, lat_i_nxt, lng_i_nxt)\n nxt_wp_disnc = l.s13\n distance_travelled = 0\n while distance_travelled <= nxt_wp_disnc:\n g = l.Position(distance_travelled, Geodesic.STANDARD | Geodesic.LONG_UNROLL)\n lat_i, lng_i = g['lat2'], g['lon2']\n self.current_position_iver = {'Latitude': lat_i, 'Longitude': lng_i, 'speed': speed_i}\n # self.q_plot.put(self.current_position_iver)\n self.event_plot.set()\n # t_elapsed = monotonic() - t_start\n # distance_travelled = speed_i * t_elapsed\n # self.disnc_remaining = nxt_wp_disnc - distance_travelled\n # time.sleep(self.intervl * 0.009)\n while not self.q_wp_omw.empty():\n wp_omw = self.q_wp_omw.get()\n lat_i_r, lng_i_r, speed_i_r = wp_omw['lat'], wp_omw['lon'], wp_omw['speed']\n # speed_i_r *= 0.51 # 1 knot = 0.514 m/s\n self.wp_nxt = 'WP1'\n l_i_r = self.geod.InverseLine(self.current_position_iver['Latitude'],\n self.current_position_iver['Longitude'],\n lat_i_r, lng_i_r)\n omw_distance = l_i_r.s13\n omw_dstnce_travld = 0\n t_start_r = monotonic()\n while omw_dstnce_travld < omw_distance:\n if self.omw_clear:\n self.omw_clear = False\n print('OMW_CLEAR')\n break\n g_i_r = l_i_r.Position(omw_dstnce_travld, Geodesic.STANDARD | Geodesic.LONG_UNROLL)\n lat_i_r, lng_i_r = g_i_r['lat2'], g_i_r['lon2']\n self.current_position_iver = {'Latitude': lat_i_r, 'Longitude': lng_i_r, 'speed': speed_i_r}\n # self.q_plot.put(self.current_position_iver)\n t_elapsed_r = monotonic() - t_start_r\n omw_dstnce_travld = speed_i_r * 
t_elapsed_r\n omw_distance_remaining = omw_distance - omw_dstnce_travld\n self.disnc_remaining = omw_distance_remaining\n time.sleep(self.intervl * 0.009)\n if self.q_wp_omw.qsize() == 0:\n self.waypoints_iver.insert(0, self.waypoints_iver[0])\n t_elapsed = monotonic() - t_start\n distance_travelled = speed_i * t_elapsed\n self.disnc_remaining = nxt_wp_disnc - distance_travelled\n time.sleep(self.intervl * 0.009)\n lat_i_past, lng_i_past = self.current_position_iver['Latitude'], self.current_position_iver['Longitude']\n self.waypoints_iver.pop(0)\n remaining_WPs = self.total_WPs - len(self.waypoints_iver)\n print(datetime.datetime.now(),\n ': Total WPs: {}, remaining WPs: {}/{}'.format(self.total_WPs, len(self.waypoints_iver),\n remaining_WPs))\n self.wp_nxt = str(remaining_WPs)\n print(datetime.datetime.now(), ': nxt_WP: ', self.wp_nxt)\n\n def read_comports(self):\n while True:\n # print('Status: RF: {}, AC {}'.format(self.send_through_rf, self.send_through_ac))\n try:\n if (self.send_through_rf and ser_rf.inWaiting() > 0) or (\n self.send_through_ac and ser_ac.inWaiting() > 0):\n received_data_through = 'RF' if ser_rf.inWaiting() > 0 else 'AC'\n read_com = ser_rf.readline().decode().strip() if received_data_through == 'RF' else ser_ac.readline().decode().strip()\n print(datetime.datetime.now(), ':received through: ', received_data_through, read_com)\n # print('Status: RF: {}, AC {}', self.send_through_rf, self.send_through_ac)\n if functions.received_stream(read_com) == 'osd' and functions.osd_req_recvd(read_com) == 0:\n print(datetime.datetime.now(), \": Sending current Status through : \", received_data_through)\n ser_rf.write(self.iver_status().encode()) if received_data_through == 'RF' else ser_ac.write(\n self.iver_status().encode())\n ser_rf.write(self.osd_ACK().encode()) if received_data_through == 'RF' else ser_ac.write(\n self.osd_ACK().encode())\n # print(\"Time write:{} sec\".format(time.perf_counter() - toc_CS))\n elif functions.received_stream(read_com) == 'omw' and functions.omw_req_recvd(read_com) == 0:\n omw_rec = read_com.split(\";\")[2].split(',')\n ser_rf.write(self.omw_Ack().encode()) if received_data_through == 'RF' else ser_ac.write(\n self.omw_Ack().encode())\n print(datetime.datetime.now(), ': Sending OMW acknowledgement through :', received_data_through,\n self.omw_Ack())\n if re.search('CLEAR', read_com):\n self.q_wp_omw.queue.clear()\n self.omw_clear = True\n self.q_wp_omw.put({'lat': float(omw_rec[2]), 'lon': float(omw_rec[3]),\n 'speed': float(omw_rec[7])})\n else:\n self.q_wp_omw.put({'lat': float(omw_rec[2]), 'lon': float(omw_rec[3]),\n 'speed': float(omw_rec[7])})\n else:\n time.sleep(0.5)\n except Exception as e:\n print(\" Exception raised\", e)\n continue\n\n def update_graph(self, i):\n self.event_plot.wait()\n self.xdata.append(self.current_position_iver['Longitude'])\n self.ydata.append(self.current_position_iver['Latitude'])\n # plot_inbox = self.q_plot.get()\n # self.xdata.append(plot_inbox['Longitude'])\n # self.ydata.append(plot_inbox['Latitude'])\n # ax.plot([lng_i_p, current_position_iver['Longitude']], [lat_i_p, current_position_iver['Latitude']], 'r') if \\\n # lat_i_p != 0.0 else ax.plot(plot_inbox['Longitude'], plot_inbox['Latitude'], 'r')\n # lat_i_p, lng_i_p = current_position_iver['Latitude'], current_position_iver['Longitude']\n\n self.line_iver.set_data(self.xdata, self.ydata)\n return self.line_iver,\n\n\ndef main():\n root = tk.Tk()\n root.title('Iver_v2')\n root.iconbitmap('usm.ico')\n app = App(root)\n app.pack()\n 
root.mainloop()\n\n\nif __name__ == '__main__':\n main()\n" ]
[ [ "matplotlib.backends.backend_tkagg.FigureCanvasTkAgg", "pandas.DataFrame", "matplotlib.pyplot.Figure" ] ]
jiye-ML/CoCosNet
[ "c4b3f44393462c8353c6c6952d7b05496298df1c" ]
[ "models/networks/base_network.py" ]
[ "\"\"\"\nCopyright (C) 2019 NVIDIA Corporation. All rights reserved.\nLicensed under the CC BY-NC-SA 4.0 license (https://creativecommons.org/licenses/by-nc-sa/4.0/legalcode).\n\"\"\"\n\nimport torch.nn as nn\nfrom torch.nn import init\n\n\nclass BaseNetwork(nn.Module):\n def __init__(self):\n super(BaseNetwork, self).__init__()\n\n @staticmethod\n def modify_commandline_options(parser, is_train):\n return parser\n\n def print_network(self):\n if isinstance(self, list):\n self = self[0]\n num_params = 0\n for param in self.parameters():\n num_params += param.numel()\n print('Network [%s] was created. Total number of parameters: %.1f million. '\n 'To see the architecture, do print(network).'\n % (type(self).__name__, num_params / 1000000))\n\n def init_weights(self, init_type='normal', gain=0.02):\n def init_func(m):\n classname = m.__class__.__name__\n if classname.find('BatchNorm2d') != -1:\n if hasattr(m, 'weight') and m.weight is not None:\n init.normal_(m.weight.data, 1.0, gain)\n if hasattr(m, 'bias') and m.bias is not None:\n init.constant_(m.bias.data, 0.0)\n elif hasattr(m, 'weight') and (classname.find('Conv') != -1 or classname.find('Linear') != -1):\n if init_type == 'normal':\n init.normal_(m.weight.data, 0.0, gain)\n elif init_type == 'xavier':\n init.xavier_normal_(m.weight.data, gain=gain)\n elif init_type == 'xavier_uniform':\n init.xavier_uniform_(m.weight.data, gain=1.0)\n elif init_type == 'kaiming':\n init.kaiming_normal_(m.weight.data, a=0, mode='fan_in')\n elif init_type == 'orthogonal':\n init.orthogonal_(m.weight.data, gain=gain)\n elif init_type == 'none': # uses pytorch's default init method\n m.reset_parameters()\n else:\n raise NotImplementedError('initialization method [%s] is not implemented' % init_type)\n if hasattr(m, 'bias') and m.bias is not None:\n init.constant_(m.bias.data, 0.0)\n\n self.apply(init_func)\n\n # propagate to children\n for m in self.children():\n if hasattr(m, 'init_weights'):\n m.init_weights(init_type, gain)\n" ]
[ [ "torch.nn.init.kaiming_normal_", "torch.nn.init.xavier_uniform_", "torch.nn.init.constant_", "torch.nn.init.xavier_normal_", "torch.nn.init.normal_", "torch.nn.init.orthogonal_" ] ]
am-ivanov/dace
[ "4d65e0951c112160fe783766404a806b6043b521" ]
[ "tests/state_transition_array_test.py" ]
[ "import dace as dp\nimport numpy as np\n\nsdfg = dp.SDFG('sta_test')\ns0 = sdfg.add_state()\ns1 = sdfg.add_state()\ns2 = sdfg.add_state()\n\n# Arrays\ninp = s0.add_array('inp', [1], dp.float32)\nA = s0.add_array('A', [1], dp.float32)\nt = s0.add_tasklet('seta', {'a'}, {'b'}, 'b = a')\ns0.add_edge(inp, None, t, 'a', dp.Memlet.from_array(inp.data, inp.desc(sdfg)))\ns0.add_edge(t, 'b', A, None, dp.Memlet.from_array(A.data, A.desc(sdfg)))\n\nA = s1.add_array('A', [1], dp.float32)\nt = s1.add_tasklet('geta', {'a'}, {}, 'printf(\"ok %f\\\\n\", a + 1)')\ns1.add_edge(A, None, t, 'a', dp.Memlet.from_array(A.data, A.desc(sdfg)))\n\nA = s2.add_array('A', [1], dp.float32)\nt = s2.add_tasklet('geta', {'a'}, {}, 'printf(\"BAD %f\\\\n\", a - 1)')\ns2.add_edge(A, None, t, 'a', dp.Memlet.from_array(A.data, A.desc(sdfg)))\n\nsdfg.add_edge(s0, s1, dp.InterstateEdge('A[0] > 3'))\nsdfg.add_edge(s0, s2, dp.InterstateEdge('A[0] <= 3'))\n\nif __name__ == '__main__':\n print('Toplevel array usage in interstate edge')\n input = np.ndarray([1], np.float32)\n input[0] = 10\n output = np.ndarray([1], np.float32)\n output[0] = 10\n\n sdfg(inp=input, A=output)\n\n exit(0)\n" ]
[ [ "numpy.ndarray" ] ]
sixin-zh/kymatio_wph
[ "237c0d2009766cf83b2145420a14d3c6e90dc983" ]
[ "kymatio/phaseexp1d/pyscatwave/scatwave/scattering1d/examples/real_signal.py" ]
[ "import torch\nfrom torch.autograd import Variable\nfrom scatwave import Scattering1D\nfrom scatwave import fetch_fsdd\nimport matplotlib.pyplot as plt\nfrom scipy.io import wavfile\nimport numpy as np\nimport os\n\n\ndef loadfile(path_file):\n sr, x = wavfile.read(path_file)\n x = np.asarray(x, dtype='float')\n # make it mono\n if x.ndim > 1:\n smallest_axis = np.argmin(x.shape)\n x = x.mean(axis=smallest_axis)\n x = np.asarray(x, dtype='float')\n x /= np.max(np.abs(x))\n return sr, x\n\n\ndef show_signal(x, s, order0, order1, order2):\n fig, axarr = plt.subplots(4, 1, figsize=(8, 16))\n axarr[0].plot(x.data[0, 0])\n axarr[0].set_title('Original signal')\n axarr[1].plot(s[order0][0])\n axarr[1].set_title('Scattering Order 0')\n axarr[2].imshow(s[order1], aspect='auto')\n axarr[2].set_title('Scattering Order 1')\n axarr[3].imshow(s[order2], aspect='auto')\n axarr[3].set_title('Scattering Order 2')\n plt.show()\n\n\nif __name__ == '__main__':\n # fetch the dataset and get the signal\n info_dataset = fetch_fsdd(base_dir='fsdd', verbose=True)\n filepath = os.path.join(info_dataset['path_dataset'],\n sorted(info_dataset['files'])[0])\n\n # Load the signal\n sr, x = loadfile(filepath)\n x_th = Variable(torch.from_numpy(x).float().unsqueeze(0).unsqueeze(0))\n\n # Prepare the scattering\n T = x_th.shape[-1]\n J = 6\n Q = 16\n scattering = Scattering1D(T, J, Q)\n\n # Get the metadata\n coords = Scattering1D.compute_meta_scattering(J, Q, order2=True)\n order0 = torch.LongTensor([0])\n order1 = torch.LongTensor(\n sorted([cc for cc in coords.keys() if coords[cc]['order'] == '1']))\n order2 = torch.LongTensor(\n sorted([cc for cc in coords.keys() if coords[cc]['order'] == '2']))\n\n # Compute the scattering\n s = scattering.forward(x_th).data.numpy()[0]\n\n # show it\n show_signal(x_th, s, order0, order1, order2)\n" ]
[ [ "numpy.argmin", "numpy.abs", "numpy.asarray", "matplotlib.pyplot.subplots", "matplotlib.pyplot.show", "torch.from_numpy", "scipy.io.wavfile.read", "torch.LongTensor" ] ]
Fdl1989/TimingofOneShotInterventions
[ "cfd7a5238c06baf77ee465b22392367197969a27" ]
[ "Fig4.py" ]
[ "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Feb 29 15:19:36 2020\n@author: Francesco Di Lauro\n@mail: [email protected]\nCopyright 2020 Francesco Di Lauro. All Rights Reserved.\nSee LICENSE file for details\n\"\"\"\nfrom Eulerclasssir import SIR_model\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib import cm\nfrom mpl_toolkits.mplot3d import Axes3D # noqa: F401 unused import\n\nfrom matplotlib import rc\n## for Palatino and other serif fonts use:\n#rc('font',**{'family':'serif','serif':['Palatino']})\nrc('text', usetex=True)\n\n\n\nngroups = 9\ngamma = [1.0]*9\ntauf = 35\nbetaij = np.loadtxt('mixing_baseline.txt', delimiter=',')\n\nc =[0.5]*9\n\ninterventiontime = [1.1]*9\ninterventionduration = [4]*9\n\nSIR = SIR_model(ngroups, gamma, tauf,betaij, betain=0, betaoff=0, seed=1)\n\ny=SIR.sir_intervention( c, [1], interventiontime, interventionduration, nt = 3000, epsilon=0.01, intervention='subgroup_threshold')\n\n#y[:ngroups] is the S_1(t)... S_n(t) susceptible populations evolution,\n#y[ngroups:2*ngroups] \"I(t)\"\n#y[2*ngroups:] \"R(t)\"\n\nt = np.linspace(0,tauf,3000)\nplt.close()\nfig,ax = plt.subplots(3,3, figsize=(5.5,5.5), sharex=True,sharey = True)\nax = ax.ravel()\nplt.subplots_adjust(left=0.1, bottom=0.1, right=0.96, top=0.96, wspace=0.2, hspace=0.2)\n\n#plot I(t)\nfor i,sub in enumerate(ax):\n #S(t)\n #sub.plot(t, y[:,i], color='b')\n #I(t)\n sub.plot(t, y[:,i+ngroups], color='r')\n #R(t)\n #sub.plot(t, y[:,i+2*ngroups], color='g')\n #intervention\n #sub.vlines(SIR.intervention_time[i], 0,np.max(y[:,i+ngroups]))\n sub.set_title(\"sub-population %d\" %(i+1))\n\nfinalsize = np.sum(y[-1:,2*ngroups:])\nax[7].set_xlabel(r\"$t$\", size=11)\nax[3].set_ylabel(r\"$I(t)$\",size=11,labelpad=-2)\nplt.savefig(\"fig4.tiff\",dpi=600)\nplt.savefig(\"fig4.eps\")\n" ]
[ [ "numpy.sum", "matplotlib.pyplot.savefig", "matplotlib.pyplot.subplots", "matplotlib.rc", "matplotlib.pyplot.subplots_adjust", "matplotlib.pyplot.close", "numpy.linspace", "numpy.loadtxt" ] ]
shelleyHLX/bilm_EMLo
[ "7e3f94c80716665a16bfbc2efc2b8f2f32aad553" ]
[ "usage_token.py" ]
[ "'''\nELMo usage example with pre-computed and cached context independent\ntoken representations\n\nBelow, we show usage for SQuAD where each input example consists of both\na question and a paragraph of context.\n'''\n\nimport tensorflow as tf\nimport os\nfrom bilm_model import TokenBatcher, BidirectionalLanguageModel, weight_layers, \\\n dump_token_embeddings\n\n# Our small dataset.\nraw_context = [\n '同日 , 被告人 陈某 被 传唤 归案',\n '被告人 赵某 于 2013 年 4 月 28 日 事发 后 , 向 其 所在单位 投案'\n]\ntokenized_context = [sentence.split() for sentence in raw_context]\ntokenized_question = [\n ['案件', '审理', '过程', '中', ',', '双方', '已', '就', '民事', '赔偿', '部分', '达成', '了', '调解', '协议'],\n ['打', '了', '一', '、', '二', '分钟', ',', '吉某', '指挥', '被', '纠集', '人员', '逃离现场'],\n]\n\n# Create the vocabulary file with all unique tokens and\n# the special <S>, </S> tokens (case sensitive).\nall_tokens = set(['<S>', '</S>'] + tokenized_question[0])\nfor context_sentence in tokenized_context:\n for token in context_sentence:\n all_tokens.add(token)\n\n# vocab_file = './corpus_me/vocab_elmo.txt/'\nvocab_file = '/home/lxp3/PycharmProjects/bilm-tf-master/corpus_me/vocab_elmo.txt'\n\n# with open(vocab_file, 'w') as fout:\n# fout.write('\\n'.join(all_tokens))\n\n# Location of pretrained LM. Here we use the test fixtures.\n\noptions_file = '/home/lxp3/PycharmProjects/bilm-tf-master/try4/options.json' # try/options.json\nweight_file = '/home/lxp3/PycharmProjects/bilm-tf-master/try4/weights.hdf5'\n\n# Dump the token embeddings to a file. Run this once for your dataset.\ntoken_embedding_file = '/home/lxp3/PycharmProjects/bilm-tf-master/bin/8000_vocab_embedding.hdf5'\n# dump_token_embeddings(\n# vocab_file, options_file, weight_file, token_embedding_file\n# )\ntf.reset_default_graph()\n\n# Now we can do inference.\n# Create a TokenBatcher to map text to token ids.\nbatcher = TokenBatcher(vocab_file)\n\n# Input placeholders to the biLM.\ncontext_token_ids = tf.placeholder('int32', shape=(None, None))\nquestion_token_ids = tf.placeholder('int32', shape=(None, None))\n\n# Build the biLM graph.\nbilm = BidirectionalLanguageModel(options_file, weight_file, use_character_inputs=False,\n embedding_weight_file=token_embedding_file)\n\n# Get ops to compute the LM embeddings.\ncontext_embeddings_op = bilm(context_token_ids)\nquestion_embeddings_op = bilm(question_token_ids)\n\n# Get an op to compute ELMo (weighted average of the internal biLM layers)\n# Our SQuAD model includes ELMo at both the input and output layers\n# of the task GRU, so we need 4x ELMo representations for the question\n# and context at each of the input and output.\n# We use the same ELMo weights for both the question and context\n# at each of the input and output.\nelmo_context_input = weight_layers('input', context_embeddings_op, l2_coef=0.0)\nwith tf.variable_scope('', reuse=True):\n # the reuse=True scope reuses weights from the context for the question\n elmo_question_input = weight_layers(\n 'input', question_embeddings_op, l2_coef=0.0\n )\n\n# elmo_context_output = weight_layers(\n# 'output', context_embeddings_op, l2_coef=0.0\n# )\n# with tf.variable_scope('', reuse=True):\n# # the reuse=True scope reuses weights from the context for the question\n# elmo_question_output = weight_layers(\n# 'output', question_embeddings_op, l2_coef=0.0\n# )\n\n\nwith tf.Session() as sess:\n # It is necessary to initialize variables once before running inference.\n sess.run(tf.global_variables_initializer())\n\n # Create batches of data.\n context_ids = batcher.batch_sentences(tokenized_context)\n 
question_ids = batcher.batch_sentences(tokenized_question)\n print(context_ids)\n print(question_ids)\n\n # Compute ELMo representations (here for the input only, for simplicity).\n elmo_context_input_, elmo_question_input_ = sess.run(\n [elmo_context_input['weighted_op'], elmo_question_input['weighted_op']],\n feed_dict={ context_token_ids: context_ids,\n question_token_ids: question_ids}\n )\n\nprint('*'*20, type(elmo_context_input_))\nprint(elmo_context_input_.shape) # (2, 16, 1024)\nprint(elmo_context_input_)\nprint('*'*20, type(elmo_question_input_)) # <class 'numpy.ndarray'>\nprint(elmo_question_input_.shape) # (2, 15, 1024)\nprint(elmo_question_input_)\n" ]
[ [ "tensorflow.placeholder", "tensorflow.global_variables_initializer", "tensorflow.variable_scope", "tensorflow.Session", "tensorflow.reset_default_graph" ] ]
laurent90git/scipy
[ "bc111c2f56e854b1bf95b208078da525d267ceff" ]
[ "scipy/integrate/_ivp/tests/test_ivp.py" ]
[ "from itertools import product\nfrom numpy.testing import (assert_, assert_allclose,\n assert_equal, assert_no_warnings, suppress_warnings)\nimport pytest\nfrom pytest import raises as assert_raises\nimport numpy as np\nfrom scipy.optimize._numdiff import group_columns\nfrom scipy.integrate import solve_ivp, RK23, RK45, DOP853, Radau, BDF, LSODA\nfrom scipy.integrate import OdeSolution\nfrom scipy.integrate._ivp.common import num_jac\nfrom scipy.integrate._ivp.base import ConstantDenseOutput\nfrom scipy.sparse import coo_matrix, csc_matrix, diags\n\n\ndef fun_zero(t, y):\n return np.zeros_like(y)\n\n\ndef fun_linear(t, y):\n return np.array([-y[0] - 5 * y[1], y[0] + y[1]])\n\n\ndef jac_linear():\n return np.array([[-1, -5], [1, 1]])\n\n\ndef sol_linear(t):\n return np.vstack((-5 * np.sin(2 * t),\n 2 * np.cos(2 * t) + np.sin(2 * t)))\n\n\ndef fun_rational(t, y):\n return np.array([y[1] / t,\n y[1] * (y[0] + 2 * y[1] - 1) / (t * (y[0] - 1))])\n\n\ndef fun_rational_vectorized(t, y):\n return np.vstack((y[1] / t,\n y[1] * (y[0] + 2 * y[1] - 1) / (t * (y[0] - 1))))\n\n\ndef jac_rational(t, y):\n return np.array([\n [0, 1 / t],\n [-2 * y[1] ** 2 / (t * (y[0] - 1) ** 2),\n (y[0] + 4 * y[1] - 1) / (t * (y[0] - 1))]\n ])\n\n\ndef jac_rational_sparse(t, y):\n return csc_matrix([\n [0, 1 / t],\n [-2 * y[1] ** 2 / (t * (y[0] - 1) ** 2),\n (y[0] + 4 * y[1] - 1) / (t * (y[0] - 1))]\n ])\n\n\ndef sol_rational(t):\n return np.asarray((t / (t + 10), 10 * t / (t + 10) ** 2))\n\n\ndef fun_medazko(t, y):\n n = y.shape[0] // 2\n k = 100\n c = 4\n\n phi = 2 if t <= 5 else 0\n y = np.hstack((phi, 0, y, y[-2]))\n\n d = 1 / n\n j = np.arange(n) + 1\n alpha = 2 * (j * d - 1) ** 3 / c ** 2\n beta = (j * d - 1) ** 4 / c ** 2\n\n j_2_p1 = 2 * j + 2\n j_2_m3 = 2 * j - 2\n j_2_m1 = 2 * j\n j_2 = 2 * j + 1\n\n f = np.empty(2 * n)\n f[::2] = (alpha * (y[j_2_p1] - y[j_2_m3]) / (2 * d) +\n beta * (y[j_2_m3] - 2 * y[j_2_m1] + y[j_2_p1]) / d ** 2 -\n k * y[j_2_m1] * y[j_2])\n f[1::2] = -k * y[j_2] * y[j_2_m1]\n\n return f\n\n\ndef medazko_sparsity(n):\n cols = []\n rows = []\n\n i = np.arange(n) * 2\n\n cols.append(i[1:])\n rows.append(i[1:] - 2)\n\n cols.append(i)\n rows.append(i)\n\n cols.append(i)\n rows.append(i + 1)\n\n cols.append(i[:-1])\n rows.append(i[:-1] + 2)\n\n i = np.arange(n) * 2 + 1\n\n cols.append(i)\n rows.append(i)\n\n cols.append(i)\n rows.append(i - 1)\n\n cols = np.hstack(cols)\n rows = np.hstack(rows)\n\n return coo_matrix((np.ones_like(cols), (cols, rows)))\n\n\ndef fun_complex(t, y):\n return -y\n\n\ndef jac_complex(t, y):\n return -np.eye(y.shape[0])\n\n\ndef jac_complex_sparse(t, y):\n return csc_matrix(jac_complex(t, y))\n\n\ndef sol_complex(t):\n y = (0.5 + 1j) * np.exp(-t)\n return y.reshape((1, -1))\n\n\ndef compute_error(y, y_true, rtol, atol):\n e = (y - y_true) / (atol + rtol * np.abs(y_true))\n return np.linalg.norm(e, axis=0) / np.sqrt(e.shape[0])\n\n\ndef test_integration():\n rtol = 1e-3\n atol = 1e-6\n y0 = [1/3, 2/9]\n\n for vectorized, method, t_span, jac in product(\n [False, True],\n ['RK23', 'RK45', 'DOP853', 'Radau', 'BDF', 'LSODA'],\n [[5, 9], [5, 1]],\n [None, jac_rational, jac_rational_sparse]):\n\n if vectorized:\n fun = fun_rational_vectorized\n else:\n fun = fun_rational\n\n with suppress_warnings() as sup:\n sup.filter(UserWarning,\n \"The following arguments have no effect for a chosen \"\n \"solver: `jac`\")\n res = solve_ivp(fun, t_span, y0, rtol=rtol,\n atol=atol, method=method, dense_output=True,\n jac=jac, vectorized=vectorized)\n assert_equal(res.t[0], t_span[0])\n 
assert_(res.t_events is None)\n assert_(res.y_events is None)\n assert_(res.success)\n assert_equal(res.status, 0)\n\n if method == 'DOP853':\n # DOP853 spends more functions evaluation because it doesn't\n # have enough time to develop big enough step size.\n assert_(res.nfev < 50)\n else:\n assert_(res.nfev < 40)\n\n if method in ['RK23', 'RK45', 'DOP853', 'LSODA']:\n assert_equal(res.njev, 0)\n assert_equal(res.nlu, 0)\n else:\n assert_(0 < res.njev < 3)\n assert_(0 < res.nlu < 10)\n\n y_true = sol_rational(res.t)\n e = compute_error(res.y, y_true, rtol, atol)\n assert_(np.all(e < 5))\n\n tc = np.linspace(*t_span)\n yc_true = sol_rational(tc)\n yc = res.sol(tc)\n\n e = compute_error(yc, yc_true, rtol, atol)\n assert_(np.all(e < 5))\n\n tc = (t_span[0] + t_span[-1]) / 2\n yc_true = sol_rational(tc)\n yc = res.sol(tc)\n\n e = compute_error(yc, yc_true, rtol, atol)\n assert_(np.all(e < 5))\n\n # LSODA for some reasons doesn't pass the polynomial through the\n # previous points exactly after the order change. It might be some\n # bug in LSOSA implementation or maybe we missing something.\n if method != 'LSODA':\n assert_allclose(res.sol(res.t), res.y, rtol=1e-15, atol=1e-15)\n\n\ndef test_integration_complex():\n rtol = 1e-3\n atol = 1e-6\n y0 = [0.5 + 1j]\n t_span = [0, 1]\n tc = np.linspace(t_span[0], t_span[1])\n for method, jac in product(['RK23', 'RK45', 'DOP853', 'BDF'],\n [None, jac_complex, jac_complex_sparse]):\n with suppress_warnings() as sup:\n sup.filter(UserWarning,\n \"The following arguments have no effect for a chosen \"\n \"solver: `jac`\")\n res = solve_ivp(fun_complex, t_span, y0, method=method,\n dense_output=True, rtol=rtol, atol=atol, jac=jac)\n\n assert_equal(res.t[0], t_span[0])\n assert_(res.t_events is None)\n assert_(res.y_events is None)\n assert_(res.success)\n assert_equal(res.status, 0)\n\n if method == 'DOP853':\n assert res.nfev < 35\n else:\n assert res.nfev < 25\n\n if method == 'BDF':\n assert_equal(res.njev, 1)\n assert res.nlu < 6\n else:\n assert res.njev == 0\n assert res.nlu == 0\n\n y_true = sol_complex(res.t)\n e = compute_error(res.y, y_true, rtol, atol)\n assert np.all(e < 5)\n\n yc_true = sol_complex(tc)\n yc = res.sol(tc)\n e = compute_error(yc, yc_true, rtol, atol)\n\n assert np.all(e < 5)\n\n\ndef test_integration_sparse_difference():\n n = 200\n t_span = [0, 20]\n y0 = np.zeros(2 * n)\n y0[1::2] = 1\n sparsity = medazko_sparsity(n)\n\n for method in ['BDF', 'Radau']:\n res = solve_ivp(fun_medazko, t_span, y0, method=method,\n jac_sparsity=sparsity)\n\n assert_equal(res.t[0], t_span[0])\n assert_(res.t_events is None)\n assert_(res.y_events is None)\n assert_(res.success)\n assert_equal(res.status, 0)\n\n assert_allclose(res.y[78, -1], 0.233994e-3, rtol=1e-2)\n assert_allclose(res.y[79, -1], 0, atol=1e-3)\n assert_allclose(res.y[148, -1], 0.359561e-3, rtol=1e-2)\n assert_allclose(res.y[149, -1], 0, atol=1e-3)\n assert_allclose(res.y[198, -1], 0.117374129e-3, rtol=1e-2)\n assert_allclose(res.y[199, -1], 0.6190807e-5, atol=1e-3)\n assert_allclose(res.y[238, -1], 0, atol=1e-3)\n assert_allclose(res.y[239, -1], 0.9999997, rtol=1e-2)\n\n\ndef test_integration_const_jac():\n rtol = 1e-3\n atol = 1e-6\n y0 = [0, 2]\n t_span = [0, 2]\n J = jac_linear()\n J_sparse = csc_matrix(J)\n\n for method, jac in product(['Radau', 'BDF'], [J, J_sparse]):\n res = solve_ivp(fun_linear, t_span, y0, rtol=rtol, atol=atol,\n method=method, dense_output=True, jac=jac)\n assert_equal(res.t[0], t_span[0])\n assert_(res.t_events is None)\n assert_(res.y_events is None)\n 
assert_(res.success)\n assert_equal(res.status, 0)\n\n assert_(res.nfev < 100)\n assert_equal(res.njev, 0)\n assert_(0 < res.nlu < 15)\n\n y_true = sol_linear(res.t)\n e = compute_error(res.y, y_true, rtol, atol)\n assert_(np.all(e < 10))\n\n tc = np.linspace(*t_span)\n yc_true = sol_linear(tc)\n yc = res.sol(tc)\n\n e = compute_error(yc, yc_true, rtol, atol)\n assert_(np.all(e < 15))\n\n assert_allclose(res.sol(res.t), res.y, rtol=1e-14, atol=1e-14)\n\n\[email protected]\[email protected]('method', ['Radau', 'BDF', 'LSODA'])\ndef test_integration_stiff(method):\n rtol = 1e-6\n atol = 1e-6\n y0 = [1e4, 0, 0]\n tspan = [0, 1e8]\n\n def fun_robertson(t, state):\n x, y, z = state\n return [\n -0.04 * x + 1e4 * y * z,\n 0.04 * x - 1e4 * y * z - 3e7 * y * y,\n 3e7 * y * y,\n ]\n\n res = solve_ivp(fun_robertson, tspan, y0, rtol=rtol,\n atol=atol, method=method)\n\n # If the stiff mode is not activated correctly, these numbers will be much bigger\n assert res.nfev < 5000\n assert res.njev < 200\n\n\ndef test_events():\n def event_rational_1(t, y):\n return y[0] - y[1] ** 0.7\n\n def event_rational_2(t, y):\n return y[1] ** 0.6 - y[0]\n\n def event_rational_3(t, y):\n return t - 7.4\n\n event_rational_3.terminal = True\n\n for method in ['RK23', 'RK45', 'DOP853', 'Radau', 'BDF', 'LSODA']:\n res = solve_ivp(fun_rational, [5, 8], [1/3, 2/9], method=method,\n events=(event_rational_1, event_rational_2))\n assert_equal(res.status, 0)\n assert_equal(res.t_events[0].size, 1)\n assert_equal(res.t_events[1].size, 1)\n assert_(5.3 < res.t_events[0][0] < 5.7)\n assert_(7.3 < res.t_events[1][0] < 7.7)\n\n assert_equal(res.y_events[0].shape, (1, 2))\n assert_equal(res.y_events[1].shape, (1, 2))\n assert np.isclose(\n event_rational_1(res.t_events[0][0], res.y_events[0][0]), 0)\n assert np.isclose(\n event_rational_2(res.t_events[1][0], res.y_events[1][0]), 0)\n\n event_rational_1.direction = 1\n event_rational_2.direction = 1\n res = solve_ivp(fun_rational, [5, 8], [1 / 3, 2 / 9], method=method,\n events=(event_rational_1, event_rational_2))\n assert_equal(res.status, 0)\n assert_equal(res.t_events[0].size, 1)\n assert_equal(res.t_events[1].size, 0)\n assert_(5.3 < res.t_events[0][0] < 5.7)\n assert_equal(res.y_events[0].shape, (1, 2))\n assert_equal(res.y_events[1].shape, (0,))\n assert np.isclose(\n event_rational_1(res.t_events[0][0], res.y_events[0][0]), 0)\n\n event_rational_1.direction = -1\n event_rational_2.direction = -1\n res = solve_ivp(fun_rational, [5, 8], [1 / 3, 2 / 9], method=method,\n events=(event_rational_1, event_rational_2))\n assert_equal(res.status, 0)\n assert_equal(res.t_events[0].size, 0)\n assert_equal(res.t_events[1].size, 1)\n assert_(7.3 < res.t_events[1][0] < 7.7)\n assert_equal(res.y_events[0].shape, (0,))\n assert_equal(res.y_events[1].shape, (1, 2))\n assert np.isclose(\n event_rational_2(res.t_events[1][0], res.y_events[1][0]), 0)\n\n event_rational_1.direction = 0\n event_rational_2.direction = 0\n\n res = solve_ivp(fun_rational, [5, 8], [1 / 3, 2 / 9], method=method,\n events=(event_rational_1, event_rational_2,\n event_rational_3), dense_output=True)\n assert_equal(res.status, 1)\n assert_equal(res.t_events[0].size, 1)\n assert_equal(res.t_events[1].size, 0)\n assert_equal(res.t_events[2].size, 1)\n assert_(5.3 < res.t_events[0][0] < 5.7)\n assert_(7.3 < res.t_events[2][0] < 7.5)\n assert_equal(res.y_events[0].shape, (1, 2))\n assert_equal(res.y_events[1].shape, (0,))\n assert_equal(res.y_events[2].shape, (1, 2))\n assert np.isclose(\n event_rational_1(res.t_events[0][0], 
res.y_events[0][0]), 0)\n assert np.isclose(\n event_rational_3(res.t_events[2][0], res.y_events[2][0]), 0)\n\n res = solve_ivp(fun_rational, [5, 8], [1 / 3, 2 / 9], method=method,\n events=event_rational_1, dense_output=True)\n assert_equal(res.status, 0)\n assert_equal(res.t_events[0].size, 1)\n assert_(5.3 < res.t_events[0][0] < 5.7)\n\n assert_equal(res.y_events[0].shape, (1, 2))\n assert np.isclose(\n event_rational_1(res.t_events[0][0], res.y_events[0][0]), 0)\n\n # Also test that termination by event doesn't break interpolants.\n tc = np.linspace(res.t[0], res.t[-1])\n yc_true = sol_rational(tc)\n yc = res.sol(tc)\n e = compute_error(yc, yc_true, 1e-3, 1e-6)\n assert_(np.all(e < 5))\n\n # Test that the y_event matches solution\n assert np.allclose(sol_rational(res.t_events[0][0]), res.y_events[0][0], rtol=1e-3, atol=1e-6)\n\n # Test in backward direction.\n event_rational_1.direction = 0\n event_rational_2.direction = 0\n for method in ['RK23', 'RK45', 'DOP853', 'Radau', 'BDF', 'LSODA']:\n res = solve_ivp(fun_rational, [8, 5], [4/9, 20/81], method=method,\n events=(event_rational_1, event_rational_2))\n assert_equal(res.status, 0)\n assert_equal(res.t_events[0].size, 1)\n assert_equal(res.t_events[1].size, 1)\n assert_(5.3 < res.t_events[0][0] < 5.7)\n assert_(7.3 < res.t_events[1][0] < 7.7)\n\n assert_equal(res.y_events[0].shape, (1, 2))\n assert_equal(res.y_events[1].shape, (1, 2))\n assert np.isclose(\n event_rational_1(res.t_events[0][0], res.y_events[0][0]), 0)\n assert np.isclose(\n event_rational_2(res.t_events[1][0], res.y_events[1][0]), 0)\n\n event_rational_1.direction = -1\n event_rational_2.direction = -1\n res = solve_ivp(fun_rational, [8, 5], [4/9, 20/81], method=method,\n events=(event_rational_1, event_rational_2))\n assert_equal(res.status, 0)\n assert_equal(res.t_events[0].size, 1)\n assert_equal(res.t_events[1].size, 0)\n assert_(5.3 < res.t_events[0][0] < 5.7)\n\n assert_equal(res.y_events[0].shape, (1, 2))\n assert_equal(res.y_events[1].shape, (0,))\n assert np.isclose(\n event_rational_1(res.t_events[0][0], res.y_events[0][0]), 0)\n\n event_rational_1.direction = 1\n event_rational_2.direction = 1\n res = solve_ivp(fun_rational, [8, 5], [4/9, 20/81], method=method,\n events=(event_rational_1, event_rational_2))\n assert_equal(res.status, 0)\n assert_equal(res.t_events[0].size, 0)\n assert_equal(res.t_events[1].size, 1)\n assert_(7.3 < res.t_events[1][0] < 7.7)\n\n assert_equal(res.y_events[0].shape, (0,))\n assert_equal(res.y_events[1].shape, (1, 2))\n assert np.isclose(\n event_rational_2(res.t_events[1][0], res.y_events[1][0]), 0)\n\n event_rational_1.direction = 0\n event_rational_2.direction = 0\n\n res = solve_ivp(fun_rational, [8, 5], [4/9, 20/81], method=method,\n events=(event_rational_1, event_rational_2,\n event_rational_3), dense_output=True)\n assert_equal(res.status, 1)\n assert_equal(res.t_events[0].size, 0)\n assert_equal(res.t_events[1].size, 1)\n assert_equal(res.t_events[2].size, 1)\n assert_(7.3 < res.t_events[1][0] < 7.7)\n assert_(7.3 < res.t_events[2][0] < 7.5)\n\n assert_equal(res.y_events[0].shape, (0,))\n assert_equal(res.y_events[1].shape, (1, 2))\n assert_equal(res.y_events[2].shape, (1, 2))\n assert np.isclose(\n event_rational_2(res.t_events[1][0], res.y_events[1][0]), 0)\n assert np.isclose(\n event_rational_3(res.t_events[2][0], res.y_events[2][0]), 0)\n\n # Also test that termination by event doesn't break interpolants.\n tc = np.linspace(res.t[-1], res.t[0])\n yc_true = sol_rational(tc)\n yc = res.sol(tc)\n e = compute_error(yc, 
yc_true, 1e-3, 1e-6)\n assert_(np.all(e < 5))\n\n assert np.allclose(sol_rational(res.t_events[1][0]), res.y_events[1][0], rtol=1e-3, atol=1e-6)\n assert np.allclose(sol_rational(res.t_events[2][0]), res.y_events[2][0], rtol=1e-3, atol=1e-6)\n\n\ndef test_max_step():\n rtol = 1e-3\n atol = 1e-6\n y0 = [1/3, 2/9]\n for method in [RK23, RK45, DOP853, Radau, BDF, LSODA]:\n for t_span in ([5, 9], [5, 1]):\n res = solve_ivp(fun_rational, t_span, y0, rtol=rtol,\n max_step=0.5, atol=atol, method=method,\n dense_output=True)\n assert_equal(res.t[0], t_span[0])\n assert_equal(res.t[-1], t_span[-1])\n assert_(np.all(np.abs(np.diff(res.t)) <= 0.5 + 1e-15))\n assert_(res.t_events is None)\n assert_(res.success)\n assert_equal(res.status, 0)\n\n y_true = sol_rational(res.t)\n e = compute_error(res.y, y_true, rtol, atol)\n assert_(np.all(e < 5))\n\n tc = np.linspace(*t_span)\n yc_true = sol_rational(tc)\n yc = res.sol(tc)\n\n e = compute_error(yc, yc_true, rtol, atol)\n assert_(np.all(e < 5))\n\n # See comment in test_integration.\n if method is not LSODA:\n assert_allclose(res.sol(res.t), res.y, rtol=1e-15, atol=1e-15)\n\n assert_raises(ValueError, method, fun_rational, t_span[0], y0,\n t_span[1], max_step=-1)\n\n if method is not LSODA:\n solver = method(fun_rational, t_span[0], y0, t_span[1],\n rtol=rtol, atol=atol, max_step=1e-20)\n message = solver.step()\n\n assert_equal(solver.status, 'failed')\n assert_(\"step size is less\" in message)\n assert_raises(RuntimeError, solver.step)\n\n\ndef test_first_step():\n rtol = 1e-3\n atol = 1e-6\n y0 = [1/3, 2/9]\n first_step = 0.1\n for method in [RK23, RK45, DOP853, Radau, BDF, LSODA]:\n for t_span in ([5, 9], [5, 1]):\n res = solve_ivp(fun_rational, t_span, y0, rtol=rtol,\n max_step=0.5, atol=atol, method=method,\n dense_output=True, first_step=first_step)\n\n assert_equal(res.t[0], t_span[0])\n assert_equal(res.t[-1], t_span[-1])\n assert_allclose(first_step, np.abs(res.t[1] - 5))\n assert_(res.t_events is None)\n assert_(res.success)\n assert_equal(res.status, 0)\n\n y_true = sol_rational(res.t)\n e = compute_error(res.y, y_true, rtol, atol)\n assert_(np.all(e < 5))\n\n tc = np.linspace(*t_span)\n yc_true = sol_rational(tc)\n yc = res.sol(tc)\n\n e = compute_error(yc, yc_true, rtol, atol)\n assert_(np.all(e < 5))\n\n # See comment in test_integration.\n if method is not LSODA:\n assert_allclose(res.sol(res.t), res.y, rtol=1e-15, atol=1e-15)\n\n assert_raises(ValueError, method, fun_rational, t_span[0], y0,\n t_span[1], first_step=-1)\n assert_raises(ValueError, method, fun_rational, t_span[0], y0,\n t_span[1], first_step=5)\n\n\ndef test_t_eval():\n rtol = 1e-3\n atol = 1e-6\n y0 = [1/3, 2/9]\n for t_span in ([5, 9], [5, 1]):\n t_eval = np.linspace(t_span[0], t_span[1], 10)\n res = solve_ivp(fun_rational, t_span, y0, rtol=rtol, atol=atol,\n t_eval=t_eval)\n assert_equal(res.t, t_eval)\n assert_(res.t_events is None)\n assert_(res.success)\n assert_equal(res.status, 0)\n\n y_true = sol_rational(res.t)\n e = compute_error(res.y, y_true, rtol, atol)\n assert_(np.all(e < 5))\n\n t_eval = [5, 5.01, 7, 8, 8.01, 9]\n res = solve_ivp(fun_rational, [5, 9], y0, rtol=rtol, atol=atol,\n t_eval=t_eval)\n assert_equal(res.t, t_eval)\n assert_(res.t_events is None)\n assert_(res.success)\n assert_equal(res.status, 0)\n\n y_true = sol_rational(res.t)\n e = compute_error(res.y, y_true, rtol, atol)\n assert_(np.all(e < 5))\n\n t_eval = [5, 4.99, 3, 1.5, 1.1, 1.01, 1]\n res = solve_ivp(fun_rational, [5, 1], y0, rtol=rtol, atol=atol,\n t_eval=t_eval)\n assert_equal(res.t, 
t_eval)\n assert_(res.t_events is None)\n assert_(res.success)\n assert_equal(res.status, 0)\n\n t_eval = [5.01, 7, 8, 8.01]\n res = solve_ivp(fun_rational, [5, 9], y0, rtol=rtol, atol=atol,\n t_eval=t_eval)\n assert_equal(res.t, t_eval)\n assert_(res.t_events is None)\n assert_(res.success)\n assert_equal(res.status, 0)\n\n y_true = sol_rational(res.t)\n e = compute_error(res.y, y_true, rtol, atol)\n assert_(np.all(e < 5))\n\n t_eval = [4.99, 3, 1.5, 1.1, 1.01]\n res = solve_ivp(fun_rational, [5, 1], y0, rtol=rtol, atol=atol,\n t_eval=t_eval)\n assert_equal(res.t, t_eval)\n assert_(res.t_events is None)\n assert_(res.success)\n assert_equal(res.status, 0)\n\n t_eval = [4, 6]\n assert_raises(ValueError, solve_ivp, fun_rational, [5, 9], y0,\n rtol=rtol, atol=atol, t_eval=t_eval)\n\n\ndef test_t_eval_dense_output():\n rtol = 1e-3\n atol = 1e-6\n y0 = [1/3, 2/9]\n t_span = [5, 9]\n t_eval = np.linspace(t_span[0], t_span[1], 10)\n res = solve_ivp(fun_rational, t_span, y0, rtol=rtol, atol=atol,\n t_eval=t_eval)\n res_d = solve_ivp(fun_rational, t_span, y0, rtol=rtol, atol=atol,\n t_eval=t_eval, dense_output=True)\n assert_equal(res.t, t_eval)\n assert_(res.t_events is None)\n assert_(res.success)\n assert_equal(res.status, 0)\n\n assert_equal(res.t, res_d.t)\n assert_equal(res.y, res_d.y)\n assert_(res_d.t_events is None)\n assert_(res_d.success)\n assert_equal(res_d.status, 0)\n\n # if t and y are equal only test values for one case\n y_true = sol_rational(res.t)\n e = compute_error(res.y, y_true, rtol, atol)\n assert_(np.all(e < 5))\n\n\ndef test_no_integration():\n for method in ['RK23', 'RK45', 'DOP853', 'Radau', 'BDF', 'LSODA']:\n sol = solve_ivp(lambda t, y: -y, [4, 4], [2, 3],\n method=method, dense_output=True)\n assert_equal(sol.sol(4), [2, 3])\n assert_equal(sol.sol([4, 5, 6]), [[2, 2, 2], [3, 3, 3]])\n\n\ndef test_no_integration_class():\n for method in [RK23, RK45, DOP853, Radau, BDF, LSODA]:\n solver = method(lambda t, y: -y, 0.0, [10.0, 0.0], 0.0)\n solver.step()\n assert_equal(solver.status, 'finished')\n sol = solver.dense_output()\n assert_equal(sol(0.0), [10.0, 0.0])\n assert_equal(sol([0, 1, 2]), [[10, 10, 10], [0, 0, 0]])\n\n solver = method(lambda t, y: -y, 0.0, [], np.inf)\n solver.step()\n assert_equal(solver.status, 'finished')\n sol = solver.dense_output()\n assert_equal(sol(100.0), [])\n assert_equal(sol([0, 1, 2]), np.empty((0, 3)))\n\n\ndef test_empty():\n def fun(t, y):\n return np.zeros((0,))\n\n y0 = np.zeros((0,))\n\n for method in ['RK23', 'RK45', 'DOP853', 'Radau', 'BDF', 'LSODA']:\n sol = assert_no_warnings(solve_ivp, fun, [0, 10], y0,\n method=method, dense_output=True)\n assert_equal(sol.sol(10), np.zeros((0,)))\n assert_equal(sol.sol([1, 2, 3]), np.zeros((0, 3)))\n\n for method in ['RK23', 'RK45', 'DOP853', 'Radau', 'BDF', 'LSODA']:\n sol = assert_no_warnings(solve_ivp, fun, [0, np.inf], y0,\n method=method, dense_output=True)\n assert_equal(sol.sol(10), np.zeros((0,)))\n assert_equal(sol.sol([1, 2, 3]), np.zeros((0, 3)))\n\n\ndef test_ConstantDenseOutput():\n sol = ConstantDenseOutput(0, 1, np.array([1, 2]))\n assert_allclose(sol(1.5), [1, 2])\n assert_allclose(sol([1, 1.5, 2]), [[1, 1, 1], [2, 2, 2]])\n\n sol = ConstantDenseOutput(0, 1, np.array([]))\n assert_allclose(sol(1.5), np.empty(0))\n assert_allclose(sol([1, 1.5, 2]), np.empty((0, 3)))\n\n\ndef test_classes():\n y0 = [1 / 3, 2 / 9]\n for cls in [RK23, RK45, DOP853, Radau, BDF, LSODA]:\n solver = cls(fun_rational, 5, y0, np.inf)\n assert_equal(solver.n, 2)\n assert_equal(solver.status, 'running')\n 
assert_equal(solver.t_bound, np.inf)\n assert_equal(solver.direction, 1)\n assert_equal(solver.t, 5)\n assert_equal(solver.y, y0)\n assert_(solver.step_size is None)\n if cls is not LSODA:\n assert_(solver.nfev > 0)\n assert_(solver.njev >= 0)\n assert_equal(solver.nlu, 0)\n else:\n assert_equal(solver.nfev, 0)\n assert_equal(solver.njev, 0)\n assert_equal(solver.nlu, 0)\n\n assert_raises(RuntimeError, solver.dense_output)\n\n message = solver.step()\n assert_equal(solver.status, 'running')\n assert_equal(message, None)\n assert_equal(solver.n, 2)\n assert_equal(solver.t_bound, np.inf)\n assert_equal(solver.direction, 1)\n assert_(solver.t > 5)\n assert_(not np.all(np.equal(solver.y, y0)))\n assert_(solver.step_size > 0)\n assert_(solver.nfev > 0)\n assert_(solver.njev >= 0)\n assert_(solver.nlu >= 0)\n sol = solver.dense_output()\n assert_allclose(sol(5), y0, rtol=1e-15, atol=0)\n\n\ndef test_OdeSolution():\n ts = np.array([0, 2, 5], dtype=float)\n s1 = ConstantDenseOutput(ts[0], ts[1], np.array([-1]))\n s2 = ConstantDenseOutput(ts[1], ts[2], np.array([1]))\n\n sol = OdeSolution(ts, [s1, s2])\n\n assert_equal(sol(-1), [-1])\n assert_equal(sol(1), [-1])\n assert_equal(sol(2), [-1])\n assert_equal(sol(3), [1])\n assert_equal(sol(5), [1])\n assert_equal(sol(6), [1])\n\n assert_equal(sol([0, 6, -2, 1.5, 4.5, 2.5, 5, 5.5, 2]),\n np.array([[-1, 1, -1, -1, 1, 1, 1, 1, -1]]))\n\n ts = np.array([10, 4, -3])\n s1 = ConstantDenseOutput(ts[0], ts[1], np.array([-1]))\n s2 = ConstantDenseOutput(ts[1], ts[2], np.array([1]))\n\n sol = OdeSolution(ts, [s1, s2])\n assert_equal(sol(11), [-1])\n assert_equal(sol(10), [-1])\n assert_equal(sol(5), [-1])\n assert_equal(sol(4), [-1])\n assert_equal(sol(0), [1])\n assert_equal(sol(-3), [1])\n assert_equal(sol(-4), [1])\n\n assert_equal(sol([12, -5, 10, -3, 6, 1, 4]),\n np.array([[-1, 1, -1, 1, -1, 1, -1]]))\n\n ts = np.array([1, 1])\n s = ConstantDenseOutput(1, 1, np.array([10]))\n sol = OdeSolution(ts, [s])\n assert_equal(sol(0), [10])\n assert_equal(sol(1), [10])\n assert_equal(sol(2), [10])\n\n assert_equal(sol([2, 1, 0]), np.array([[10, 10, 10]]))\n\n\ndef test_num_jac():\n def fun(t, y):\n return np.vstack([\n -0.04 * y[0] + 1e4 * y[1] * y[2],\n 0.04 * y[0] - 1e4 * y[1] * y[2] - 3e7 * y[1] ** 2,\n 3e7 * y[1] ** 2\n ])\n\n def jac(t, y):\n return np.array([\n [-0.04, 1e4 * y[2], 1e4 * y[1]],\n [0.04, -1e4 * y[2] - 6e7 * y[1], -1e4 * y[1]],\n [0, 6e7 * y[1], 0]\n ])\n\n t = 1\n y = np.array([1, 0, 0])\n J_true = jac(t, y)\n threshold = 1e-5\n f = fun(t, y).ravel()\n\n J_num, factor = num_jac(fun, t, y, f, threshold, None)\n assert_allclose(J_num, J_true, rtol=1e-5, atol=1e-5)\n\n J_num, factor = num_jac(fun, t, y, f, threshold, factor)\n assert_allclose(J_num, J_true, rtol=1e-5, atol=1e-5)\n\n\ndef test_num_jac_sparse():\n def fun(t, y):\n e = y[1:]**3 - y[:-1]**2\n z = np.zeros(y.shape[1])\n return np.vstack((z, 3 * e)) + np.vstack((2 * e, z))\n\n def structure(n):\n A = np.zeros((n, n), dtype=int)\n A[0, 0] = 1\n A[0, 1] = 1\n for i in range(1, n - 1):\n A[i, i - 1: i + 2] = 1\n A[-1, -1] = 1\n A[-1, -2] = 1\n\n return A\n\n np.random.seed(0)\n n = 20\n y = np.random.randn(n)\n A = structure(n)\n groups = group_columns(A)\n\n f = fun(0, y[:, None]).ravel()\n\n # Compare dense and sparse results, assuming that dense implementation\n # is correct (as it is straightforward).\n J_num_sparse, factor_sparse = num_jac(fun, 0, y.ravel(), f, 1e-8, None,\n sparsity=(A, groups))\n J_num_dense, factor_dense = num_jac(fun, 0, y.ravel(), f, 1e-8, None)\n 
assert_allclose(J_num_dense, J_num_sparse.toarray(),\n rtol=1e-12, atol=1e-14)\n assert_allclose(factor_dense, factor_sparse, rtol=1e-12, atol=1e-14)\n\n # Take small factors to trigger their recomputing inside.\n factor = np.random.uniform(0, 1e-12, size=n)\n J_num_sparse, factor_sparse = num_jac(fun, 0, y.ravel(), f, 1e-8, factor,\n sparsity=(A, groups))\n J_num_dense, factor_dense = num_jac(fun, 0, y.ravel(), f, 1e-8, factor)\n\n assert_allclose(J_num_dense, J_num_sparse.toarray(),\n rtol=1e-12, atol=1e-14)\n assert_allclose(factor_dense, factor_sparse, rtol=1e-12, atol=1e-14)\n\n\ndef test_args():\n\n # sys3 is actually two decoupled systems. (x, y) form a\n # linear oscillator, while z is a nonlinear first order\n # system with equilibria at z=0 and z=1. If k > 0, z=1\n # is stable and z=0 is unstable.\n\n def sys3(t, w, omega, k, zfinal):\n x, y, z = w\n return [-omega*y, omega*x, k*z*(1 - z)]\n\n def sys3_jac(t, w, omega, k, zfinal):\n x, y, z = w\n J = np.array([[0, -omega, 0],\n [omega, 0, 0],\n [0, 0, k*(1 - 2*z)]])\n return J\n\n def sys3_x0decreasing(t, w, omega, k, zfinal):\n x, y, z = w\n return x\n\n def sys3_y0increasing(t, w, omega, k, zfinal):\n x, y, z = w\n return y\n\n def sys3_zfinal(t, w, omega, k, zfinal):\n x, y, z = w\n return z - zfinal\n\n # Set the event flags for the event functions.\n sys3_x0decreasing.direction = -1\n sys3_y0increasing.direction = 1\n sys3_zfinal.terminal = True\n\n omega = 2\n k = 4\n\n tfinal = 5\n zfinal = 0.99\n # Find z0 such that when z(0) = z0, z(tfinal) = zfinal.\n # The condition z(tfinal) = zfinal is the terminal event.\n z0 = np.exp(-k*tfinal)/((1 - zfinal)/zfinal + np.exp(-k*tfinal))\n\n w0 = [0, -1, z0]\n\n # Provide the jac argument and use the Radau method to ensure that the use\n # of the Jacobian function is exercised.\n # If event handling is working, the solution will stop at tfinal, not tend.\n tend = 2*tfinal\n sol = solve_ivp(sys3, [0, tend], w0,\n events=[sys3_x0decreasing, sys3_y0increasing, sys3_zfinal],\n dense_output=True, args=(omega, k, zfinal),\n method='Radau', jac=sys3_jac,\n rtol=1e-10, atol=1e-13)\n\n # Check that we got the expected events at the expected times.\n x0events_t = sol.t_events[0]\n y0events_t = sol.t_events[1]\n zfinalevents_t = sol.t_events[2]\n assert_allclose(x0events_t, [0.5*np.pi, 1.5*np.pi])\n assert_allclose(y0events_t, [0.25*np.pi, 1.25*np.pi])\n assert_allclose(zfinalevents_t, [tfinal])\n\n # Check that the solution agrees with the known exact solution.\n t = np.linspace(0, zfinalevents_t[0], 250)\n w = sol.sol(t)\n assert_allclose(w[0], np.sin(omega*t), rtol=1e-9, atol=1e-12)\n assert_allclose(w[1], -np.cos(omega*t), rtol=1e-9, atol=1e-12)\n assert_allclose(w[2], 1/(((1 - z0)/z0)*np.exp(-k*t) + 1),\n rtol=1e-9, atol=1e-12)\n\n # Check that the state variables have the expected values at the events.\n x0events = sol.sol(x0events_t)\n y0events = sol.sol(y0events_t)\n zfinalevents = sol.sol(zfinalevents_t)\n assert_allclose(x0events[0], np.zeros_like(x0events[0]), atol=5e-14)\n assert_allclose(x0events[1], np.ones_like(x0events[1]))\n assert_allclose(y0events[0], np.ones_like(y0events[0]))\n assert_allclose(y0events[1], np.zeros_like(y0events[1]), atol=5e-14)\n assert_allclose(zfinalevents[2], [zfinal])\n\n\[email protected]('method', ['RK23', 'RK45', 'DOP853', 'Radau', 'BDF', 'LSODA'])\ndef test_integration_zero_rhs(method):\n result = solve_ivp(fun_zero, [0, 10], np.ones(3), method=method)\n assert_(result.success)\n assert_equal(result.status, 0)\n assert_allclose(result.y, 1.0, 
rtol=1e-15)\n\n\[email protected]('method', ['Radau'])\ndef test_mass_matrix_ODE(method):\n \"\"\" The idea is to test the \"mass\" option with a simple vector ODE\n on the array y=(y0,y1,y2,...,yn), which reads:\n d(y[k])/dt = eigvals[k]*y, with eigvals[k] the k-th system eigenvalue\n <=> dy/dt = diag(eigvals)*y\n This simple ODE (no mass matrix) can also be reformulated as a:\n M*dy/dt = y, with M=diag(1/eigvals).\n This allows to verify that the mass matrix implementation is coherent.\"\"\"\n\n rtol = atol = 1e-10\n n = 7 # number of variables\n eigvals = np.array([float(i)*(-1)**(i) for i in range(1, n+1)])\n mass = diags(diagonals=(1/eigvals,), offsets=(0,), format='csc')\n\n # solve both ODEs\n tf = 1.0 # final time\n y0 = np.ones((n,))\n true_solution = y0*np.exp(eigvals*tf)\n sol_with_mass = solve_ivp(fun=lambda t, x: x, t_span=(0., tf),\n y0=y0, max_step=np.inf, rtol=rtol, atol=atol,\n jac=None, jac_sparsity=None, method=method,\n vectorized=False, first_step=None, mass=mass)\n sol_without_mass = solve_ivp(fun=lambda t, x: eigvals*x, t_span=(0., tf),\n y0=y0, max_step=np.inf, rtol=rtol, atol=atol,\n jac=None, jac_sparsity=None, method=method,\n vectorized=False, first_step=None, mass=None)\n\n assert_(sol_without_mass.success,\n msg=f'solver {method} failed without mass matrix')\n assert_(sol_with_mass.success,\n msg=f'solver {method} failed with mass matrix')\n assert_allclose(sol_without_mass.y[:, -1], true_solution,\n rtol=10*max((atol, rtol)),\n err_msg='result without option \"mass\" is wrong')\n assert_allclose(sol_with_mass.y[:, -1], true_solution,\n rtol=10*max((atol, rtol)),\n err_msg='result with option \"mass\" is wrong')\n assert_allclose(sol_without_mass.t.size, sol_with_mass.t.size, rtol=0.05,\n err_msg='option \"mass\" affect number of steps too much')\n\n\[email protected]('method', ['Radau'])\ndef test_DAE_pendulum(method):\n \"\"\" Test the pendulum system, formulated as a DAE of index 0 to 3.\n COmpare with the true solution.\n \"\"\"\n import scipy.optimize._numdiff\n # 1 - setup the model\n m, r0, g = 1., 1., 9.81 # rod length, mass, gravity\n theta_0 = np.pi/4 # initial angle\n theta_dot0 = 0. 
# initial angular speed\n rtol = atol = 1e-6\n tf = 0.5 # final time\n\n # 2 - compute true solution (ODE on the angle in polar coordinates)\n def fun_ode(t, X):\n theta, theta_dot = X[0], X[1]\n return np.array([theta_dot,\n -g / r0 * np.sin(theta)])\n\n Xini_ode = np.array([theta_0, theta_dot0])\n sol_ode = solve_ivp(fun=fun_ode, t_span=(0., tf), y0=Xini_ode,\n rtol=1e-12, atol=1e-12, method='DOP853')\n theta_ode = sol_ode.y[0, :]\n theta_dot = sol_ode.y[1, :]\n x_ode = r0 * np.sin(theta_ode)\n y_ode = -r0 * np.cos(theta_ode)\n vx_ode = r0 * theta_dot * np.cos(theta_ode)\n vy_ode = r0 * theta_dot * np.sin(theta_ode)\n lbda_ode = m * r0 * theta_dot ** 2 + m * g * np.cos(theta_ode)\n yfinal_ode = np.array([x_ode[-1], y_ode[-1], vx_ode[-1],\n vy_ode[-1], lbda_ode[-1]])\n\n assert_(sol_ode.success,\n msg='the pendulum ODE solution failed')\n # 3 - compute DAE solutions\n x0 = r0 * np.sin(theta_0)\n y0 = -r0 * np.cos(theta_0)\n vx0 = r0 * theta_dot0 * np.cos(theta_0)\n vy0 = r0 * theta_dot0 * np.sin(theta_0)\n lbda_0 = (m * r0 * theta_dot0 ** 2 + m * g * np.cos(theta_0)) / r0\n Xini = np.array([x0, y0, vx0, vy0, lbda_0])\n for chosen_index in range(4):\n def dae_fun(t, X):\n x, y, vx, vy, lbda = X[0], X[1], X[2], X[3], X[4]\n if chosen_index == 3:\n constraint = x ** 2 + y ** 2 - r0 ** 2\n elif chosen_index == 2:\n constraint = x * vx + y * vy\n elif chosen_index == 1:\n constraint = lbda * (x ** 2 + y ** 2) / m \\\n + g * y - (vx ** 2 + vy ** 2)\n elif chosen_index == 0:\n rsq = x ** 2 + y ** 2\n dvx = -lbda * x / m\n dvy = -lbda * y / m - g\n constraint = (1 / m) * (- g * vy / rsq\n + 2 * (vx * dvx + vy * dvy) / rsq\n + (vx ** 2 + vy ** 2 - g * y) *\n (2 * x * vx + 2 * y * vy) / (rsq ** 2))\n return np.array([vx,\n vy,\n -x*lbda/m,\n -g-(y*lbda)/m,\n constraint])\n mass = np.eye(5) # mass matrix M\n if chosen_index > 0:\n mass[-1, -1] = 0\n if chosen_index == 3:\n # test sparse mass matrix\n mass = csc_matrix(mass)\n\n # the jacobian is computed via finite-differences\n def jac_dae(t, x):\n return scipy.optimize._numdiff.approx_derivative(\n fun=lambda x: dae_fun(t, x),\n x0=x, method='cs',\n rel_step=1e-50)\n\n sol = solve_ivp(fun=dae_fun, t_span=(0., tf), y0=Xini, max_step=tf/10,\n rtol=rtol, atol=atol, jac=jac_dae, jac_sparsity=None,\n method=method, vectorized=False, first_step=1e-3,\n dense_output=False, mass=mass)\n assert_(sol.success,\n msg=f'solver {method} failed for the index-{chosen_index} DAE')\n assert_(np.linalg.norm((yfinal_ode[:-1] - sol.y[:-1, -1]) /\n yfinal_ode[:-1]) < 100 * max((rtol, atol)),\n msg='The index-{} DAE does not yield correct results'.format(\n chosen_index))\n" ]
[ [ "numpy.ones", "numpy.diff", "numpy.testing.assert_equal", "numpy.random.seed", "numpy.asarray", "numpy.ones_like", "numpy.testing.assert_no_warnings", "scipy.integrate._ivp.common.num_jac", "scipy.integrate.OdeSolution", "numpy.vstack", "scipy.sparse.csc_matrix", "numpy.abs", "numpy.cos", "numpy.linspace", "numpy.random.uniform", "numpy.eye", "numpy.sqrt", "numpy.zeros", "numpy.equal", "scipy.sparse.diags", "numpy.arange", "numpy.hstack", "numpy.all", "numpy.linalg.norm", "numpy.zeros_like", "numpy.empty", "scipy.integrate.solve_ivp", "scipy.optimize._numdiff.group_columns", "numpy.random.randn", "numpy.exp", "numpy.testing.assert_allclose", "numpy.array", "numpy.sin", "numpy.testing.suppress_warnings", "numpy.testing.assert_" ] ]
mseeger/autogluon-1
[ "e8d82363ce07fd8e3087bcdd2d71c6f6bd8fd7a0" ]
[ "core/tests/unittests/bayesopt/gpmxnet/test_warping.py" ]
[ "import numpy as np\nimport mxnet as mx\n\nfrom autogluon.core.searcher import OneDimensionalWarping, \\\n Warping, WarpedKernel\nfrom autogluon.core.searcher.bayesopt.gpmxnet.constants import DATA_TYPE, \\\n NUMERICAL_JITTER\nfrom autogluon.core.searcher import Matern52\nfrom autogluon.core.searcher import \\\n GaussianProcessRegression\nfrom autogluon.core.searcher import \\\n LogarithmScalarEncoding, PositiveScalarEncoding\n\n\ndef test_warping_encoding():\n input_range = (0., 2.)\n warping = OneDimensionalWarping(input_range)\n assert isinstance(warping.encoding, LogarithmScalarEncoding)\n assert warping.encoding.dimension == 2\n warping = OneDimensionalWarping(input_range, encoding_type=\"positive\")\n assert isinstance(warping.encoding, PositiveScalarEncoding)\n\n\ndef test_warping_default_parameters():\n x = mx.nd.array([0., 1., 2.], dtype=DATA_TYPE)\n input_range = (0., 2.)\n warping = OneDimensionalWarping(input_range)\n warping.collect_params().initialize()\n\n warping_parameters = warping.encoding.get(mx.nd, warping.warping_internal.data())\n\n np.testing.assert_almost_equal(warping_parameters.asnumpy(), np.ones(2))\n np.testing.assert_almost_equal(warping(x).asnumpy(), np.array([NUMERICAL_JITTER, 0.5, 1.-NUMERICAL_JITTER]))\n\n\ndef test_warping_with_arbitrary_parameters():\n x = mx.nd.array([0., 1., 2.], dtype=DATA_TYPE)\n input_range = (0., 2.)\n\n warping = OneDimensionalWarping(input_range)\n warping.collect_params().initialize()\n\n warping.encoding.set(warping.warping_internal, [2., 0.5])\n warping_parameters = warping.encoding.get(mx.nd, warping.warping_internal.data())\n\n np.testing.assert_almost_equal(warping_parameters.asnumpy(), [2., 0.5])\n\n # In that case (with parameters [2., 0.5]), the warping is given by x => 1. - sqrt(1. - x^2)\n def expected_warping(x):\n return 1. - np.sqrt(1. - x*x)\n\n np.testing.assert_almost_equal(warping(x).asnumpy(), expected_warping(np.array([NUMERICAL_JITTER, 0.5, 1.-NUMERICAL_JITTER])))\n\n\ndef test_warping_with_multidimension_and_arbitrary_parameters():\n X = mx.nd.array([[0., 1., 0.], [1.,2.,1.], [2., 0., 2.]], dtype=DATA_TYPE)\n\n dimension=3\n\n # We transform only the columns {0,2} of the 3-dimensional data X\n input_range = (0., 2.)\n warping = Warping(index_to_range={0:input_range, 2:input_range}, dimension=dimension)\n\n assert len(warping.transformations) == dimension\n\n warping.collect_params().initialize()\n\n # We change the warping parameters of the first dimension only\n w0 = warping.transformations[0]\n w0.encoding.set(w0.warping_internal, [2., 0.5])\n\n w2 = warping.transformations[2]\n w2_parameters = w2.encoding.get(mx.nd, w2.warping_internal.data())\n\n # The parameters of w2 should be the default ones (as there was no set operations)\n np.testing.assert_almost_equal(w2_parameters.asnumpy(), np.ones(2))\n\n # print(warping(X).asnumpy())\n # for name, p in warping.collect_params().items():\n # print(name, p.data().asnumpy())\n\n # With parameters [2., 0.5], the warping is given by x => 1. - sqrt(1. - x^2)\n def expected_warping(x):\n return 1. - np.sqrt(1. 
- x*x)\n\n expected_column0 = expected_warping(np.array([NUMERICAL_JITTER, 0.5, 1.-NUMERICAL_JITTER])).reshape((-1,1))\n expected_column1 = np.array([1., 2., 0.]).reshape((-1,1))\n expected_column2 = np.array([NUMERICAL_JITTER, 0.5, 1.-NUMERICAL_JITTER]).reshape((-1,1))\n\n np.testing.assert_almost_equal(warping(X).asnumpy(), np.hstack([expected_column0, expected_column1, expected_column2]))\n\n\ndef test_gp_regression_with_warping():\n\n def f(x):\n return np.sin(3*np.log(x))\n\n np.random.seed(7)\n\n L, U = -5., 12.\n input_range = (2.**L, 2.**U)\n\n x_train = np.sort(2.**np.random.uniform(L, U, 250))\n x_test = np.sort(2.**np.random.uniform(L, U, 500))\n y_train = f(x_train)\n y_test = f(x_test)\n\n # to mx.nd\n y_train_mx_nd = mx.nd.array(y_train)\n x_train_mx_nd = mx.nd.array(x_train)\n x_test_mx_nd = mx.nd.array(x_test)\n\n kernels = [\n Matern52(dimension=1),\n WarpedKernel(\n kernel=Matern52(dimension=1),\n warping=Warping(dimension=1, index_to_range={0: input_range})\n )\n ]\n\n models = [GaussianProcessRegression(kernel=k, random_seed=0) for k in kernels]\n train_errors, test_errors = [], []\n\n for model in models:\n\n model.fit(x_train_mx_nd, y_train_mx_nd)\n\n mu_train, var_train = model.predict(x_train_mx_nd)[0]\n mu_test, var_test = model.predict(x_test_mx_nd)[0]\n\n # back to np.array\n mu_train = mu_train.asnumpy()\n mu_test = mu_test.asnumpy()\n # var_train = var_train.asnumpy()\n # var_test = var_test.asnumpy()\n\n train_errors.append(np.mean(np.abs((mu_train - y_train))))\n test_errors.append(np.mean(np.abs((mu_test - y_test))))\n\n # The two models have similar performance on training points\n np.testing.assert_almost_equal(train_errors[0], train_errors[1], decimal=4)\n\n # As expected, the model with warping largely outperforms the model without\n assert test_errors[1] < 0.1 * test_errors[0]\n\n # If we wish to plot things\n # import matplotlib.pyplot as plt\n # plt.plot(x_train, y_train, \"r-\")\n # plt.plot(x_train, mu_train, \"b--\")\n #\n # plt.plot(x_test, y_test, \"y-\")\n # plt.plot(x_test, mu_test, \"m--\")\n\n # plt.fill_between(x_train,\n # mu_train - np.sqrt(var_train),\n # mu_train + np.sqrt(var_train),\n # alpha=0.5, edgecolor='#3F7F4C', facecolor='#7EFF99', linewidth=0)\n #\n # plt.fill_between(x_test,\n # mu_test - np.sqrt(var_test),\n # mu_test + np.sqrt(var_test),\n # alpha=0.5, edgecolor='#3F7F4C', facecolor='#7EFF99', linewidth=0)\n #\n # plt.show()\n" ]
[ [ "numpy.testing.assert_almost_equal", "numpy.ones", "numpy.sqrt", "numpy.random.uniform", "numpy.random.seed", "numpy.abs", "numpy.hstack", "numpy.log", "numpy.array" ] ]
labscript-suite-bitbucket-archive/shjohnst-runviewer--forked-from--labscript_suite-runviewer
[ "78d7be530bbfd005744b3a6b1cd3f1beb5fd7fe9" ]
[ "__main__.py" ]
[ "#####################################################################\n# #\n# /main.pyw #\n# #\n# Copyright 2014, Monash University #\n# #\n# This file is part of the program runviewer, in the labscript #\n# suite (see http://labscriptsuite.org), and is licensed under the #\n# Simplified BSD License. See the license.txt file in the root of #\n# the project for the full license. #\n# #\n#####################################################################\nfrom __future__ import division, unicode_literals, print_function, absolute_import\nfrom labscript_utils import PY2\n\nimport os\nimport sys\nimport time\nimport threading\nimport logging\nimport ctypes\nimport socket\nif PY2:\n str = unicode\n from Queue import Queue\nelse:\n from queue import Queue\nimport ast\nimport pprint\n\nimport signal\n# Quit on ctrl-c\nsignal.signal(signal.SIGINT, signal.SIG_DFL)\n\nimport labscript_utils.excepthook\n\n# Set working directory to runviewer folder, resolving symlinks\nrunviewer_dir = os.path.dirname(os.path.realpath(__file__))\nos.chdir(runviewer_dir)\n\ntry:\n from labscript_utils import check_version\nexcept ImportError:\n raise ImportError('Require labscript_utils > 2.1.0')\n\ncheck_version('labscript_utils', '2.6.1', '3')\ncheck_version('qtutils', '2.0.0', '3.0.0')\ncheck_version('zprocess', '1.1.2', '3')\n\nfrom labscript_utils.setup_logging import setup_logging\nlogger = setup_logging('runviewer')\nlabscript_utils.excepthook.set_logger(logger)\n\nfrom zprocess import zmq_get, ZMQServer\nimport zprocess.locking\nimport labscript_utils.h5_lock\nimport h5py\nzprocess.locking.set_client_process_name('runviewer')\n\n# This must be bumped until after the h5_lock import\n# This is because the check imports pyqtgraph, which imports h5py\n# h5py must be imported after h5_lock, thus we do the check here\ncheck_version('pyqtgraph', '0.9.10', '1')\n\nfrom qtutils.qt.QtCore import *\nfrom qtutils.qt.QtGui import *\nfrom qtutils.qt.QtWidgets import *\nfrom qtutils.qt.QtCore import pyqtSignal as Signal\n\nimport numpy\nfrom scipy import interpolate\n\n# must be imported after PySide/PyQt4\nimport pyqtgraph as pg\npg.setConfigOption('background', 'w')\npg.setConfigOption('foreground', 'k')\n\nfrom qtutils import *\nimport qtutils.icons\nfrom labscript_utils.connections import ConnectionTable\nimport labscript_devices\n\nfrom labscript_utils.labconfig import LabConfig, config_prefix\n\nfrom runviewer.resample import resample as _resample\n\n\ndef set_win_appusermodel(window_id):\n from labscript_utils.winshell import set_appusermodel, appids, app_descriptions\n icon_path = os.path.abspath('runviewer.ico')\n executable = sys.executable.lower()\n if not executable.endswith('w.exe'):\n executable = executable.replace('.exe', 'w.exe')\n relaunch_command = executable + ' ' + os.path.abspath(__file__.replace('.pyc', '.py'))\n relaunch_display_name = app_descriptions['runviewer']\n set_appusermodel(window_id, appids['runviewer'], icon_path, relaunch_command, relaunch_display_name)\n\n\nSHOT_MODEL__COLOUR_INDEX = 0\nSHOT_MODEL__SHUTTER_INDEX = 1\nSHOT_MODEL__CHECKBOX_INDEX = 2\nSHOT_MODEL__PATH_INDEX = 1\nCHANNEL_MODEL__CHECKBOX_INDEX = 0\nCHANNEL_MODEL__CHANNEL_INDEX = 0\n\n\ndef format_time(input_sec):\n # inout is the time in sec\n if input_sec >= 1:\n return \"{:.3g}s\".format(input_sec)\n elif input_sec >= 1e-3:\n return \"{:.3g}ms\".format(input_sec * 1e3)\n elif input_sec >= 1e-6:\n return \"{:.3g}us\".format(input_sec * 1e6)\n elif input_sec >= 1e-9:\n return \"{:.3g}ns\".format(input_sec * 1e9)\n elif 
input_sec >= 1e-12:\n return \"{:.3g}ps\".format(input_sec * 1e12)\n elif input_sec >= 1e-15:\n return \"{:.3g}fs\".format(input_sec * 1e15)\n elif input_sec >= 1e-18:\n return \"{:.3g}as\".format(input_sec * 1e18)\n else:\n return str(input_sec) + \"s\"\n\n\ndef int_to_enum(enum_list, value):\n \"\"\"stupid hack to work around the fact that PySide screws with the type of a variable when it goes into a model. Enums are converted to ints, which then\n can't be interpreted by QColor correctly (for example)\n unfortunately Qt doesn't provide a python list structure of enums, so you have to build the list yourself.\n \"\"\"\n\n for item in enum_list:\n if item == value:\n return item\n return value\n\n\nclass ScaleHandler():\n\n def __init__(self, input_times, stop_time):\n # input_times is a list (may be unsorted) of times which should be scaled evenly with target_length\n # an input list of [1,2,4,6] and target_length of 1.0 will result in:\n # get_scaled_time(1) -> 1\n # get_scaled_time(1.5) -> 1.5\n # get_scaled_time(3) -> 2.5\n # get_scaled_time(4) -> 3\n # get_scaled_time(5) -> 3.5 ...\n self.org_stop_time = float(stop_time)\n\n if 0 not in input_times:\n input_times.append(0)\n\n if self.org_stop_time not in input_times:\n input_times.append(self.org_stop_time)\n\n if not all((x >= 0) and (x <= self.org_stop_time) for x in input_times):\n raise Exception('shot contains at least one marker before t=0 and/or after the stop time. Non-linear time currently does not support this.')\n\n unscaled_times = sorted(input_times)\n target_length = self.org_stop_time / float(len(unscaled_times)-1)\n scaled_times = [target_length*i for i in range(len(input_times))]\n\n # append values for linear scaling before t=0 and after stop time\n unscaled_times = [-1e-9] + unscaled_times + [self.org_stop_time + 1e-9]\n scaled_times = [-1e-9] + scaled_times + [self.org_stop_time + 1e-9]\n\n self.get_scaled_time = interpolate.interp1d(unscaled_times, scaled_times, assume_sorted=False, bounds_error=False, fill_value='extrapolate')\n self.get_unscaled_time = interpolate.interp1d(scaled_times, unscaled_times, assume_sorted=False, bounds_error=False, fill_value='extrapolate')\n\n self.scaled_stop_time = self.get_scaled_time(self.org_stop_time)\n\n\nclass ColourDelegate(QItemDelegate):\n\n def __init__(self, view, *args, **kwargs):\n QItemDelegate.__init__(self, *args, **kwargs)\n self._view = view\n self._colours = [Qt.black, Qt.red, Qt.green, Qt.blue, Qt.cyan, Qt.magenta, Qt.yellow, Qt.gray, Qt.darkRed, Qt.darkGreen, Qt.darkBlue, Qt.darkCyan, Qt.darkMagenta, Qt.darkYellow, Qt.darkGray, Qt.lightGray]\n\n self._current_colour_index = 0\n\n def get_next_colour(self):\n colour = self._colours[self._current_colour_index]\n self._current_colour_index += 1\n if self._current_colour_index >= len(self._colours):\n self._current_colour_index = 0\n return colour\n\n def createEditor(self, parent, option, index):\n editor = QComboBox(parent)\n #colours = QColor.colorNames()\n for colour in self._colours:\n pixmap = QPixmap(20, 20)\n pixmap.fill(colour)\n editor.addItem(QIcon(pixmap), '', colour)\n\n editor.activated.connect(lambda index, editor=editor: self._view.commitData(editor))\n editor.activated.connect(lambda index, editor=editor: self._view.closeEditor(editor, QAbstractItemDelegate.NoHint))\n QTimer.singleShot(10, editor.showPopup)\n\n return editor\n\n def setEditorData(self, editor, index):\n value = index.model().data(index, Qt.UserRole)\n for i in range(editor.count()):\n if editor.itemData(i) == value():\n 
editor.setCurrentIndex(i)\n break\n\n def setModelData(self, editor, model, index):\n icon = editor.itemIcon(editor.currentIndex())\n colour = editor.itemData(editor.currentIndex())\n\n # Note, all data being written to the model must be read out of the editor PRIOR to calling model.setData()\n # This is because a call to model.setData() triggers setEditorData(), which messes up subsequent\n # calls to the editor to determine the currently selected item/data\n model.setData(index, icon, Qt.DecorationRole)\n model.setData(index, lambda clist=self._colours, colour=colour: int_to_enum(clist, colour), Qt.UserRole)\n\n def updateEditorGeometry(self, editor, option, index):\n editor.setGeometry(option.rect)\n\n\nclass RunviewerMainWindow(QMainWindow):\n # A signal for when the window manager has created a new window for this widget:\n newWindow = Signal(int)\n\n def event(self, event):\n result = QMainWindow.event(self, event)\n if event.type() == QEvent.WinIdChange:\n self.newWindow.emit(self.effectiveWinId())\n return result\n\n\nclass RunViewer(object):\n def __init__(self, exp_config):\n self.ui = UiLoader().load(os.path.join(os.path.dirname(os.path.realpath(__file__)), 'main.ui'), RunviewerMainWindow())\n\n # setup shot treeview model\n self.shot_model = QStandardItemModel()\n self.shot_model.setHorizontalHeaderLabels(['colour', 'shutters', 'path'])\n self.ui.shot_treeview.setModel(self.shot_model)\n self.ui.shot_treeview.resizeColumnToContents(1)\n self.shot_model.itemChanged.connect(self.on_shot_selection_changed)\n self.shot_colour_delegate = ColourDelegate(self.ui.shot_treeview)\n self.ui.shot_treeview.setItemDelegateForColumn(0, self.shot_colour_delegate)\n\n # setup channel treeview model\n self.channel_model = QStandardItemModel()\n self.channel_model.setHorizontalHeaderLabels(['channel'])\n self.ui.channel_treeview.setModel(self.channel_model)\n self.channel_model.itemChanged.connect(self.update_plots)\n\n # create a hidden plot widget that all plots can link their x-axis too\n hidden_plot = pg.PlotWidget(name='runviewer - time axis link')\n\n hidden_plot.setMinimumHeight(1)\n hidden_plot.setMaximumHeight(1)\n hidden_plot.setLabel('bottom', 'Time', units='s')\n hidden_plot.setLabel('left', \" \")\n hidden_plot.showAxis('right', True)\n hidden_plot_item = hidden_plot.plot([0, 1], [0, 0])\n self._hidden_plot = (hidden_plot, hidden_plot_item)\n self.ui.hidden_plot_layout.addWidget(hidden_plot)\n\n time_axis_plot = pg.PlotWidget()\n time_axis_plot.setMinimumHeight(120)\n time_axis_plot.setMaximumHeight(120)\n time_axis_plot.setLabel('bottom', 'Time', units='s')\n time_axis_plot.showAxis('right', True)\n time_axis_plot.setXLink('runviewer - time axis link')\n time_axis_plot.setMouseEnabled(y=False)\n time_axis_plot.getAxis('left').setTicks([]) # hide y ticks in the left & right side. 
only show time axis\n time_axis_plot.getAxis('right').setTicks([])\n time_axis_plot.setLabel('left', 'Slots')\n time_axis_plot.scene().sigMouseMoved.connect(lambda pos: self.mouseMovedEvent(pos, time_axis_plot, \"Slots\"))\n time_axis_plot_item = time_axis_plot.plot([0, 1], [0, 0], pen=(255, 255, 255))\n self._time_axis_plot = (time_axis_plot, time_axis_plot_item)\n\n self.all_markers = {}\n self.all_marker_items = {}\n markers_plot = pg.PlotWidget(name='runviewer - markers')\n markers_plot.setMinimumHeight(120)\n markers_plot.setMaximumHeight(120)\n markers_plot.showAxis('top', False)\n markers_plot.showAxis('bottom', False)\n markers_plot.showAxis('left', True)\n markers_plot.showAxis('right', True)\n markers_plot.getAxis('left').setTicks([])\n markers_plot.getAxis('right').setTicks([])\n markers_plot.setLabel('left', 'Markers')\n markers_plot.setXLink('runviewer - time axis link')\n markers_plot.setMouseEnabled(y=False)\n markers_plot.scene().sigMouseMoved.connect(lambda pos: self.mouseMovedEvent(pos, markers_plot, \"Markers\"))\n markers_plot_item = markers_plot.plot([])\n self._markers_plot = (markers_plot, markers_plot_item)\n\n self.ui.verticalLayout_9.insertWidget(1,markers_plot)\n self.ui.plot_layout.addWidget(time_axis_plot)\n\n # add some icons\n self.ui.add_shot.setIcon(QIcon(':/qtutils/fugue/plus'))\n self.ui.remove_shots.setIcon(QIcon(':/qtutils/fugue/minus'))\n self.ui.enable_selected_shots.setIcon(QIcon(':/qtutils/fugue/ui-check-box'))\n self.ui.disable_selected_shots.setIcon(QIcon(':/qtutils/fugue/ui-check-box-uncheck'))\n self.ui.group_channel.setIcon(QIcon(':/qtutils/fugue/layers-group'))\n self.ui.delete_group.setIcon(QIcon(':/qtutils/fugue/layers-ungroup'))\n self.ui.channel_move_to_top.setIcon(QIcon(':/qtutils/fugue/arrow-stop-090'))\n self.ui.channel_move_up.setIcon(QIcon(':/qtutils/fugue/arrow-090'))\n self.ui.channel_move_down.setIcon(QIcon(':/qtutils/fugue/arrow-270'))\n self.ui.channel_move_to_bottom.setIcon(QIcon(':/qtutils/fugue/arrow-stop-270'))\n self.ui.reset_x_axis.setIcon(QIcon(':/qtutils/fugue/clock-history'))\n self.ui.reset_y_axis.setIcon(QIcon(':/qtutils/fugue/magnifier-history'))\n self.ui.toggle_tooltip.setIcon(QIcon(':/qtutils/fugue/ui-tooltip-balloon'))\n self.ui.non_linear_time.setIcon(QIcon(':/qtutils/fugue/ui-ruler'))\n\n self.ui.actionOpen_Shot.setIcon(QIcon(':/qtutils/fugue/plus'))\n self.ui.actionQuit.setIcon(QIcon(':/qtutils/fugue/cross-button'))\n self.ui.actionLoad_channel_config.setIcon(QIcon(':/qtutils/fugue/folder-open'))\n self.ui.actionSave_channel_config.setIcon(QIcon(':/qtutils/fugue/disk'))\n\n # disable buttons that are not yet implemented to help avoid confusion!\n self.ui.group_channel.setEnabled(False)\n self.ui.delete_group.setEnabled(False)\n\n # connect signals\n self.ui.reset_x_axis.clicked.connect(self.on_x_axis_reset)\n self.ui.reset_y_axis.clicked.connect(self.on_y_axes_reset)\n self.ui.channel_move_up.clicked.connect(self._move_up)\n self.ui.channel_move_down.clicked.connect(self._move_down)\n self.ui.channel_move_to_top.clicked.connect(self._move_top)\n self.ui.channel_move_to_bottom.clicked.connect(self._move_bottom)\n self.ui.enable_selected_shots.clicked.connect(self._enable_selected_shots)\n self.ui.disable_selected_shots.clicked.connect(self._disable_selected_shots)\n self.ui.add_shot.clicked.connect(self.on_add_shot)\n self.ui.markers_comboBox.currentIndexChanged.connect(self._update_markers)\n self.ui.non_linear_time.toggled.connect(self._toggle_non_linear_time)\n 
self.ui.remove_shots.clicked.connect(self.on_remove_shots)\n\n self.ui.actionOpen_Shot.triggered.connect(self.on_add_shot)\n self.ui.actionQuit.triggered.connect(self.ui.close)\n self.ui.actionLoad_channel_config.triggered.connect(self.on_load_channel_config)\n self.ui.actionSave_channel_config.triggered.connect(self.on_save_channel_config)\n\n if os.name == 'nt':\n self.ui.newWindow.connect(set_win_appusermodel)\n\n self.ui.show()\n\n # internal variables\n #self._channels_list = {}\n self.plot_widgets = {}\n self.plot_items = {}\n self.shutter_lines = {}\n\n try:\n self.default_config_path = os.path.join(exp_config.get('DEFAULT', 'app_saved_configs'), 'runviewer')\n except LabConfig.NoOptionError:\n exp_config.set('DEFAULT', 'app_saved_configs', os.path.join('%(labscript_suite)s', 'userlib', 'app_saved_configs', '%(experiment_name)s'))\n self.default_config_path = os.path.join(exp_config.get('DEFAULT', 'app_saved_configs'), 'runviewer')\n if not os.path.exists(self.default_config_path):\n os.makedirs(self.default_config_path)\n\n self.last_opened_shots_folder = exp_config.get('paths', 'experiment_shot_storage')\n\n # start resample thread\n self._resample = False\n self._thread = threading.Thread(target=self._resample_thread)\n self._thread.daemon = True\n self._thread.start()\n\n # start shots_to_process_queue monitoring thread\n self._shots_to_process_thread = threading.Thread(target=self._process_shots)\n self._shots_to_process_thread.daemon = True\n self._shots_to_process_thread.start()\n\n self.scale_time = False\n self.scalehandler = None\n\n def _update_markers(self, index):\n for line, plot in self.all_marker_items.items():\n plot.removeItem(line)\n self.all_marker_items = {}\n\n marker_index = self.ui.markers_comboBox.currentIndex()\n shot = self.ui.markers_comboBox.itemData(marker_index)\n self.all_markers = shot.markers if index > 0 else {}\n\n self._update_non_linear_time(changed_shot=True)\n\n times = sorted(list(self.all_markers.keys()))\n for i, (t, m) in enumerate(sorted(self.all_markers.items())):\n if i < len(times)-1:\n delta_t = times[i+1] - t\n else:\n delta_t = shot.stop_time - t\n\n if self.scale_time:\n t = self.scalehandler.get_scaled_time(t)\n\n color = m['color']\n color = QColor(color[0], color[1], color[2])\n label = m['label'].decode() if isinstance( m['label'], bytes) else str(m['label'])\n\n line = self._markers_plot[0].addLine(x=t, pen=pg.mkPen(color=color, width=1.5, style=Qt.DashLine), label=label, labelOpts= {\"color\": color, \"fill\": QColor(255, 255, 255, 255), \"rotateAxis\":(1, 0), \"anchors\": [(0.5, 0),(0.5, 0)]} )\n self.all_marker_items[line] = self._markers_plot[0]\n\n line = self._time_axis_plot[0].addLine(x=t, pen=pg.mkPen(color=color, width=1.5, style=Qt.DashLine), label=format_time(delta_t), labelOpts= {\"color\": color, \"fill\": QColor(255, 255, 255, 255), \"rotateAxis\":(1, 0), \"anchors\": [(0.5, 0),(0.5, 0)]} )\n self.all_marker_items[line] = self._time_axis_plot[0]\n\n self.update_plots()\n\n def mouseMovedEvent(self, position, ui, name):\n if self.ui.toggle_tooltip.isChecked():\n v = ui.scene().views()[0]\n viewP = v.mapFromScene(position)\n glob_pos = ui.mapToGlobal(viewP) # convert to Screen x\n glob_zero = ui.mapToGlobal(QPoint(0, 0))\n self._global_start_x = glob_zero.x()\n self._global_start_y = glob_zero.y()\n self._global_width = ui.width()\n self._global_height = ui.height()\n\n coord_pos = ui.plotItem.vb.mapSceneToView(position)\n\n if len(self.get_selected_shots_and_colours()) > 0:\n if self.scale_time and 
self.scalehandler is not None:\n unscaled_t = float(self.scalehandler.get_unscaled_time(coord_pos.x()))\n else:\n unscaled_t = float(coord_pos.x())\n if unscaled_t is not None:\n pos = QPoint(glob_pos.x(), glob_pos.y())\n plot_data = ui.plotItem.listDataItems()[0].getData()\n if plot_data[0] is not None and unscaled_t is not None:\n nearest_index = numpy.abs(plot_data[0] - unscaled_t).argmin() - 1\n y_val = \"{:.2f}\".format(plot_data[1][nearest_index])\n else:\n y_val = '-'\n text = \"Plot: {} \\nTime: {:.9f}s\\nValue: {}\".format(name, unscaled_t, y_val)\n QToolTip.showText(pos, text)\n\n def _toggle_non_linear_time(self, state):\n self.scale_time = state\n self._update_non_linear_time()\n\n def _update_non_linear_time(self, changed_shot=False):\n old_scalerhandler = self.scalehandler\n marker_index = self.ui.markers_comboBox.currentIndex()\n shot = self.ui.markers_comboBox.itemData(marker_index)\n if shot is not None and self.scale_time:\n self.scalehandler = shot.scalehandler\n else:\n self.scalehandler = None\n\n # combine markers and shutter lines\n markers = list(self.all_marker_items.keys())\n for channel in self.shutter_lines:\n for shot in self.shutter_lines[channel]:\n for line in self.shutter_lines[channel][shot][0]:\n markers.append(line)\n for line in self.shutter_lines[channel][shot][1]:\n markers.append(line)\n\n # Move all Markes/Shutter Lines to new position\n for marker in markers:\n pos = marker.pos()\n\n if old_scalerhandler is None:\n unscaled_x = pos.x()\n else:\n unscaled_x = old_scalerhandler.get_unscaled_time(pos.x())\n\n if self.scale_time and self.scalehandler is not None:\n new_x = self.scalehandler.get_scaled_time(unscaled_x)\n else:\n new_x = unscaled_x\n\n pos.setX(new_x)\n marker.setPos(pos)\n\n if shot is not None and self.scale_time:\n self._time_axis_plot[0].getAxis(\"bottom\").setTicks([[[0, 0], [shot.stop_time, shot.stop_time]]])\n for plot in self.plot_widgets.values():\n plot.getAxis(\"bottom\").setTicks([[[0, 0], [shot.stop_time, shot.stop_time]]])\n else:\n self._time_axis_plot[0].getAxis(\"bottom\").setTicks(None)\n for plot in self.plot_widgets.values():\n plot.getAxis(\"bottom\").setTicks(None)\n\n for plot in self.plot_widgets.values():\n for item in plot.getPlotItem().items:\n if isinstance(item, pg.PlotDataItem):\n if old_scalerhandler is not None:\n unscaled_t = old_scalerhandler.get_unscaled_time(item.xData)\n else:\n unscaled_t = item.xData\n\n if self.scalehandler is not None:\n item.setData(self.scalehandler.get_scaled_time(unscaled_t), item.yData)\n else:\n item.setData(unscaled_t, item.yData)\n\n self._resample = True\n\n def _process_shots(self):\n while True:\n filepath = shots_to_process_queue.get()\n inmain_later(self.load_shot, filepath)\n\n def on_load_channel_config(self):\n config_file = QFileDialog.getOpenFileName(self.ui, \"Select file to load\", self.default_config_path, \"Config files (*.ini)\")\n if isinstance(config_file, tuple):\n config_file, _ = config_file\n if config_file:\n runviewer_config = LabConfig(config_file)\n try:\n channels = ast.literal_eval(runviewer_config.get('runviewer_state', 'Channels'))\n except (LabConfig.NoOptionError, LabConfig.NoSectionError):\n channels = {}\n\n for row, (channel, checked) in enumerate(channels):\n check_items = self.channel_model.findItems(channel)\n if len(check_items) == 0:\n items = []\n check_item = QStandardItem(channel)\n check_item.setEditable(False)\n check_item.setCheckable(True)\n items.append(check_item)\n check_item.setCheckState(Qt.Checked if checked else 
Qt.Unchecked)\n check_item.setEnabled(False)\n self.channel_model.insertRow(row, items)\n else:\n check_item = check_items[0]\n check_item.setCheckState(Qt.Checked if checked else Qt.Unchecked)\n self.channel_model.takeRow(check_item.row())\n self.channel_model.insertRow(row, check_item)\n\n def on_save_channel_config(self):\n save_file = QFileDialog.getSaveFileName(self.ui, 'Select file to save current channel configuration', self.default_config_path, \"config files (*.ini)\")\n if type(save_file) is tuple:\n save_file, _ = save_file\n\n if save_file:\n runviewer_config = LabConfig(save_file)\n\n channels = []\n for row in range(self.channel_model.rowCount()):\n item = self.channel_model.item(row)\n channels.append((item.text(), item.checkState() == Qt.Checked))\n\n runviewer_config.set('runviewer_state', 'Channels', pprint.pformat(channels))\n\n def on_toggle_shutter(self, checked, current_shot):\n for channel in self.shutter_lines:\n for shot in self.shutter_lines[channel]:\n if shot == current_shot:\n for line in self.shutter_lines[channel][shot][0]:\n if checked:\n line.show()\n else:\n line.hide()\n for line in self.shutter_lines[channel][shot][1]:\n if checked:\n line.show()\n else:\n line.hide()\n\n def on_add_shot(self):\n selected_files = QFileDialog.getOpenFileNames(self.ui, \"Select file to load\", self.last_opened_shots_folder, \"HDF5 files (*.h5 *.hdf5)\")\n popup_warning = False\n if isinstance(selected_files, tuple):\n selected_files, _ = selected_files\n # Convert to standard platform specific path, otherwise Qt likes forward slashes:\n selected_files = [os.path.abspath(str(shot_file)) for shot_file in selected_files]\n if len(selected_files) > 0:\n self.last_opened_shots_folder = os.path.dirname(selected_files[0])\n\n for file in selected_files:\n try:\n filepath = str(file)\n # Qt has this weird behaviour where if you type in the name of a file that exists\n # but does not have the extension you have limited the dialog to, the OK button is greyed out\n # but you can hit enter and the file will be selected.\n # So we must check the extension of each file here!\n if filepath.endswith('.h5') or filepath.endswith('.hdf5'):\n self.load_shot(filepath)\n else:\n popup_warning = True\n except:\n popup_warning = True\n raise\n if popup_warning:\n message = QMessageBox()\n message.setText(\"Warning: Some shots were not loaded because they were not valid hdf5 files\")\n message.setIcon(QMessageBox.Warning)\n message.setWindowTitle(\"Runviewer\")\n message.setStandardButtons(QMessageBox.Ok)\n message.exec_()\n\n def on_remove_shots(self):\n # Get the selection model from the treeview\n selection_model = self.ui.shot_treeview.selectionModel()\n # Create a list of select row indices\n selected_row_list = [index.row() for index in selection_model.selectedRows()]\n # sort in descending order to prevent index changes of rows to be deleted\n selected_row_list.sort(reverse=True)\n\n reply = QMessageBox.question(self.ui, 'Runviewer', 'Remove {} shots?'.format(len(selected_row_list)),\n QMessageBox.Yes | QMessageBox.No)\n if reply == QMessageBox.No:\n return\n\n for row in selected_row_list:\n item = self.shot_model.item(row, SHOT_MODEL__CHECKBOX_INDEX)\n colour_item = self.shot_model.item(row, SHOT_MODEL__COLOUR_INDEX)\n shutter_item = self.shot_model.item(row, SHOT_MODEL__SHUTTER_INDEX)\n shot = item.data()\n # unselect shot\n item.setCheckState(Qt.Unchecked)\n shutter_item.setCheckState(Qt.Unchecked)\n # remove row\n self.shot_model.removeRow(row)\n del shot\n\n def 
on_shot_selection_changed(self, item):\n if self.shot_model.indexFromItem(item).column() == SHOT_MODEL__CHECKBOX_INDEX:\n\n # add or remove a colour for this shot\n checked = item.checkState()\n row = self.shot_model.indexFromItem(item).row()\n colour_item = self.shot_model.item(row, SHOT_MODEL__COLOUR_INDEX)\n check_shutter = self.shot_model.item(row, SHOT_MODEL__SHUTTER_INDEX)\n\n if checked:\n colour = colour_item.data(Qt.UserRole)\n if colour is not None:\n colour = colour()\n else:\n colour = self.shot_colour_delegate.get_next_colour()\n\n colour_item.setEditable(True)\n pixmap = QPixmap(20, 20)\n pixmap.fill(colour)\n icon = QIcon(pixmap)\n colour_item.setData(lambda clist=self.shot_colour_delegate._colours, colour=colour: int_to_enum(clist, colour), Qt.UserRole)\n colour_item.setData(icon, Qt.DecorationRole)\n shot_combobox_index = self.ui.markers_comboBox.findText(os.path.basename(item.data().path))\n self.ui.markers_comboBox.model().item(shot_combobox_index).setEnabled(True)\n if self.ui.markers_comboBox.currentIndex() == 0:\n self.ui.markers_comboBox.setCurrentIndex(shot_combobox_index)\n if item.data().shutter_times != {}:\n check_shutter.setEnabled(True)\n else:\n check_shutter.setEnabled(False)\n check_shutter.setToolTip(\"This shot doesn't contain shutter markers\")\n else:\n # colour = None\n # icon = None\n shot_combobox_index = self.ui.markers_comboBox.findText(os.path.basename(item.data().path))\n self.ui.markers_comboBox.model().item(shot_combobox_index).setEnabled(False)\n if shot_combobox_index == self.ui.markers_comboBox.currentIndex():\n self.ui.markers_comboBox.setCurrentIndex(0)\n colour_item.setEditable(False)\n check_shutter.setEnabled(False)\n\n # model.setData(index, editor.itemIcon(editor.currentIndex()),\n # model.setData(index, editor.itemData(editor.currentIndex()), Qt.UserRole)\n\n self.update_channels_treeview()\n elif self.shot_model.indexFromItem(item).column() == SHOT_MODEL__COLOUR_INDEX:\n # update the plot colours\n\n # get reference to the changed shot\n current_shot = self.shot_model.item(self.shot_model.indexFromItem(item).row(), SHOT_MODEL__CHECKBOX_INDEX).data()\n\n # find and update the pen of the plot items\n for channel in self.plot_items.keys():\n for shot in self.plot_items[channel]:\n if shot == current_shot:\n colour = item.data(Qt.UserRole)\n self.plot_items[channel][shot].setPen(pg.mkPen(QColor(colour()), width=2))\n elif self.shot_model.indexFromItem(item).column() == SHOT_MODEL__SHUTTER_INDEX:\n current_shot = self.shot_model.item(self.shot_model.indexFromItem(item).row(), SHOT_MODEL__CHECKBOX_INDEX).data()\n self.on_toggle_shutter(item.checkState(), current_shot)\n\n def load_shot(self, filepath):\n shot = Shot(filepath)\n\n # add shot to shot list\n # Create Items\n items = []\n colour_item = QStandardItem('')\n colour_item.setEditable(False)\n colour_item.setToolTip('Double-click to change colour')\n items.append(colour_item)\n\n check_shutter = QStandardItem()\n check_shutter.setCheckable(True)\n check_shutter.setCheckState(Qt.Unchecked) # options are Qt.Checked OR Qt.Unchecked\n check_shutter.setEnabled(False)\n check_shutter.setToolTip(\"Toggle shutter markers\")\n items.append(check_shutter)\n\n check_item = QStandardItem(shot.path)\n check_item.setEditable(False)\n check_item.setCheckable(True)\n check_item.setCheckState(Qt.Unchecked) # options are Qt.Checked OR Qt.Unchecked\n check_item.setData(shot)\n check_item.setToolTip(filepath)\n items.append(check_item)\n # script name\n # path_item = QStandardItem(shot.path)\n # 
path_item.setEditable(False)\n # items.append(path_item)\n self.shot_model.appendRow(items)\n self.ui.markers_comboBox.addItem(os.path.basename(shot.path), shot)\n shot_combobox_index = self.ui.markers_comboBox.findText(os.path.basename(shot.path))\n self.ui.markers_comboBox.model().item(shot_combobox_index).setEnabled(False)\n\n # only do this if we are checking the shot we are adding\n # self.update_channels_treeview()\n\n def get_selected_shots_and_colours(self):\n # get the ticked shots\n ticked_shots = {}\n for i in range(self.shot_model.rowCount()):\n item = self.shot_model.item(i, SHOT_MODEL__CHECKBOX_INDEX)\n colour_item = self.shot_model.item(i, SHOT_MODEL__COLOUR_INDEX)\n shutter_item = self.shot_model.item(i, SHOT_MODEL__SHUTTER_INDEX)\n if item.checkState() == Qt.Checked:\n shot = item.data()\n colour_item_data = colour_item.data(Qt.UserRole)\n ticked_shots[shot] = (colour_item_data(), shutter_item.checkState())\n return ticked_shots\n\n def update_channels_treeview(self):\n ticked_shots = self.get_selected_shots_and_colours()\n\n # get set of channels\n channels = {}\n for shot in ticked_shots.keys():\n channels[shot] = set(shot.channels)\n channels_set = frozenset().union(*channels.values())\n\n # now find channels in channels_set which are not in the treeview, and add them\n # now find channels in channels set which are already in the treeview, but deactivated, and activate them\n treeview_channels_dict = {}\n deactivated_treeview_channels_dict = {}\n for i in range(self.channel_model.rowCount()):\n item = self.channel_model.item(i, CHANNEL_MODEL__CHECKBOX_INDEX)\n # Sanity check\n if str(item.text()) in treeview_channels_dict:\n raise RuntimeError(\"A duplicate channel name was detected in the treeview due to an internal error. Please lodge a bugreport detailing how the channels with the same name appeared in the channel treeview. 
Please restart the application\")\n\n treeview_channels_dict[str(item.text())] = i\n if not item.isEnabled():\n deactivated_treeview_channels_dict[str(item.text())] = i\n treeview_channels = set(treeview_channels_dict.keys())\n deactivated_treeview_channels = set(deactivated_treeview_channels_dict.keys())\n\n # speed up working with self.channel_model by blocking signals and later reenabeling them\n self.channel_model.blockSignals(True)\n\n # find list of channels to work with\n channels_to_add = channels_set.difference(treeview_channels)\n for channel in sorted(channels_to_add):\n items = []\n check_item = QStandardItem(channel)\n check_item.setEditable(False)\n check_item.setCheckable(True)\n check_item.setCheckState(Qt.Unchecked)\n items.append(check_item)\n # channel_name_item = QStandardItem(channel)\n # channel_name_item.setEditable(False)\n # items.append(channel_name_item)\n self.channel_model.appendRow(items)\n\n channels_to_reactivate = deactivated_treeview_channels.intersection(channels_set)\n for channel in channels_to_reactivate:\n for i in range(self.channel_model.columnCount()):\n item = self.channel_model.item(deactivated_treeview_channels_dict[channel], i)\n item.setEnabled(True)\n item.setSelectable(True)\n\n # now find channels in the treeview which are not in the channels_set and deactivate them\n channels_to_deactivate = treeview_channels.difference(channels_set)\n for channel in channels_to_deactivate:\n for i in range(self.channel_model.columnCount()):\n item = self.channel_model.item(treeview_channels_dict[channel], i)\n item.setEnabled(False)\n item.setSelectable(False)\n\n self.channel_model.blockSignals(False)\n self.channel_model.layoutChanged.emit()\n\n # TODO: Also update entries in groups\n\n self.update_plots()\n\n def update_plots(self):\n # get list of selected shots\n ticked_shots = self.get_selected_shots_and_colours()\n\n # SHould we rescale the x-axis?\n # if self._hidden_plot[0].getViewBox.getState()['autoRange'][0]:\n # self._hidden_plot[0].enableAutoRange(axis=pg.ViewBox.XAxis)\n # else:\n # self._hidden_plot[0].enableAutoRange(axis=pg.ViewBox.XAxis, enable=False)\n\n # find stop time of longest ticked shot\n\n largest_stop_time = 0\n stop_time_set = False\n for shot in ticked_shots.keys():\n if shot.stop_time > largest_stop_time:\n largest_stop_time = shot.stop_time\n stop_time_set = True\n if not stop_time_set:\n largest_stop_time = 1.0\n\n # Update the range of the link plot\n self._hidden_plot[1].setData([0, largest_stop_time], [0, 1e-9])\n\n # Update plots\n for i in range(self.channel_model.rowCount()):\n check_item = self.channel_model.item(i, CHANNEL_MODEL__CHECKBOX_INDEX)\n channel = str(check_item.text())\n if check_item.checkState() == Qt.Checked and check_item.isEnabled():\n # we want to show this plot\n # does a plot already exist? 
If yes, show it\n if channel in self.plot_widgets:\n self.plot_widgets[channel].show()\n # update the plot\n # are there are plot items for this channel which are shown that should not be?\n to_delete = []\n for shot in self.plot_items[channel]:\n if shot not in ticked_shots.keys():\n self.plot_widgets[channel].removeItem(self.plot_items[channel][shot])\n # Remove Shutter Markers of unticked Shots\n if shot in self.shutter_lines[channel]:\n for line in self.shutter_lines[channel][shot][0]:\n self.plot_widgets[channel].removeItem(line)\n for line in self.shutter_lines[channel][shot][1]:\n self.plot_widgets[channel].removeItem(line)\n self.shutter_lines[channel].pop(shot)\n to_delete.append(shot)\n for shot in to_delete:\n del self.plot_items[channel][shot]\n\n # do we need to add any plot items for shots that were not previously selected?\n for shot, (colour, shutters_checked) in ticked_shots.items():\n if shot not in self.plot_items[channel]:\n # plot_item = self.plot_widgets[channel].plot(shot.traces[channel][0], shot.traces[channel][1], pen=pg.mkPen(QColor(colour), width=2))\n # Add empty plot as it the custom resampling we do will happen quicker if we don't attempt to first plot all of the data\n plot_item = self.plot_widgets[channel].plot([0, 0], [0], pen=pg.mkPen(QColor(colour), width=2), stepMode=True)\n self.plot_items[channel][shot] = plot_item\n\n # Add Shutter Markers of newly ticked Shots\n self.add_shutter_markers(shot, channel, shutters_checked)\n\n for t, m in self.all_markers.items():\n color = m['color']\n color = QColor(color[0], color[1], color[2])\n if self.scale_time and self.scalehandler is not None:\n t = self.scalehandler.get_scaled_time(t)\n line = self.plot_widgets[channel].addLine(x=t, pen=pg.mkPen(color=color, width=1.5, style=Qt.DashLine))\n self.all_marker_items[line] = self.plot_widgets[channel]\n\n # If no, create one\n else:\n self.create_plot(channel, ticked_shots)\n\n else:\n if channel not in self.plot_widgets:\n self.create_plot(channel, ticked_shots)\n self.plot_widgets[channel].hide()\n\n self._resample = True\n\n def create_plot(self, channel, ticked_shots):\n self.plot_widgets[channel] = pg.PlotWidget() # name=channel)\n self.plot_widgets[channel].setMinimumHeight(200)\n self.plot_widgets[channel].setMaximumHeight(200)\n self.plot_widgets[channel].setLabel('bottom', 'Time', units='s')\n self.plot_widgets[channel].showAxis('right', True)\n self.plot_widgets[channel].showAxis('bottom', True)\n self.plot_widgets[channel].setXLink('runviewer - time axis link')\n self.plot_widgets[channel].sigXRangeChanged.connect(self.on_x_range_changed)\n self.plot_widgets[channel].scene().sigMouseMoved.connect(lambda pos: self.mouseMovedEvent(pos, self.plot_widgets[channel], channel))\n self.ui.plot_layout.insertWidget(self.ui.plot_layout.count() - 1, self.plot_widgets[channel])\n self.shutter_lines[channel] = {} # initialize Storage for shutter lines\n self.plot_items.setdefault(channel, {})\n\n has_units = False\n units = ''\n for shot, (colour, shutters_checked) in ticked_shots.items():\n if channel in shot.traces:\n # plot_item = self.plot_widgets[channel].plot(shot.traces[channel][0], shot.traces[channel][1], pen=pg.mkPen(QColor(colour), width=2))\n # Add empty plot as it the custom resampling we do will happen quicker if we don't attempt to first plot all of the data\n plot_item = self.plot_widgets[channel].plot([0, 0], [0], pen=pg.mkPen(QColor(colour), width=2), stepMode=True)\n self.plot_items[channel][shot] = plot_item\n\n if len(shot.traces[channel]) == 3:\n 
has_units = True\n units = shot.traces[channel][2]\n\n # Add Shutter Markers of ticked Shots\n self.add_shutter_markers(shot, channel, shutters_checked)\n\n if has_units:\n self.plot_widgets[channel].setLabel('left', channel, units=units)\n else:\n self.plot_widgets[channel].setLabel('left', channel)\n\n def add_shutter_markers(self, shot, channel, shutters_checked):\n if shot not in self.shutter_lines[channel] and channel in shot.shutter_times:\n self.shutter_lines[channel][shot] = [[], []]\n\n open_color = QColor(0, 255, 0)\n close_color = QColor(255, 0, 0)\n\n for t, val in shot.shutter_times[channel].items():\n scaled_t = t\n if val: # val != 0, shutter open\n line = self.plot_widgets[channel].addLine(x=scaled_t, pen=pg.mkPen(color=open_color, width=4., style=Qt.DotLine))\n self.shutter_lines[channel][shot][1].append(line)\n if not shutters_checked:\n line.hide()\n else: # else shutter close\n line = self.plot_widgets[channel].addLine(x=scaled_t, pen=pg.mkPen(color=close_color, width=4., style=Qt.DotLine))\n self.shutter_lines[channel][shot][0].append(line)\n if not shutters_checked:\n line.hide()\n\n def on_x_range_changed(self, *args):\n # print 'x range changed'\n self._resample = True\n\n @inmain_decorator(wait_for_return=True)\n def _get_resample_params(self, channel, shot):\n rect = self.plot_items[channel][shot].getViewBox().viewRect()\n xmin, xmax = rect.left(), rect.width() + rect.left()\n dx = xmax - xmin\n view_range = self.plot_widgets[channel].viewRange()\n return view_range[0][0], view_range[0][1], dx\n\n def resample(self, data_x, data_y, xmin, xmax, stop_time, num_pixels):\n \"\"\"This is a function for downsampling the data before plotting\n it. Unlike using nearest neighbour interpolation, this method\n preserves the features of the plot. It chooses what value to\n use based on what values within a region are most different\n from the values it's already chosen. This way, spikes of a short\n duration won't just be skipped over as they would with any sort\n of interpolation.\"\"\"\n # TODO: Only finely sample the currently visible region. Coarsely sample the rest\n # x_out = numpy.float32(numpy.linspace(data_x[0], data_x[-1], 4000*(data_x[-1]-data_x[0])/(xmax-xmin)))\n x_out = numpy.float64(numpy.linspace(xmin, xmax, 3 * 2000 + 2))\n y_out = numpy.empty(len(x_out) - 1, dtype=numpy.float64)\n data_x = numpy.float64(data_x)\n data_y = numpy.float64(data_y)\n\n # TODO: investigate only resampling when necessary.\n # Currently pyqtgraph sometimes has trouble rendering things\n # if you don't resample. 
If a point is far off the graph,\n # and this point is the first that should be drawn for stepMode,\n # because there is a long gap before the next point (which is\n # visible) then there is a problem.\n # Also need to explicitly handle cases where none of the data\n # is visible (which resampling does by setting NaNs)\n #\n # x_data_slice = data_x[(data_x>=xmin)&(data_x<=xmax)]\n # print len(data_x)\n # if len(x_data_slice) < 3*2000+2:\n # x_out = x_data_slice\n # y_out = data_y[(data_x>=xmin)&(data_x<=xmax)][:-1]\n # logger.info('skipping resampling')\n # else:\n resampling = True\n\n if resampling:\n _resample(data_x, data_y, x_out, y_out, numpy.float64(stop_time))\n # self.__resample4(data_x, data_y, x_out, y_out, numpy.float32(stop_time))\n else:\n x_out, y_out = data_x, data_y\n\n return x_out, y_out\n\n def __resample4(self, x_in, y_in, x_out, y_out, stop_time):\n # we want x-out to have three times the number of points as there are pixels\n # Plus one at the end\n # y_out = numpy.empty(len(x_out)-1, dtype=numpy.float64)\n # print 'len x_out: %d'%len(x_out)\n\n # A couple of special cases that I don't want to have to put extra checks in for:\n if x_out[-1] < x_in[0] or x_out[0] > stop_time:\n # We're all the way to the left of the data or all the way to the right. Fill with NaNs:\n y_out.fill('NaN')\n elif x_out[0] > x_in[-1]:\n # We're after the final clock tick, but before stop_time\n i = 0\n while i < len(x_out) - 1:\n if x_out[i] < stop_time:\n y_out[i] = y_in[-1]\n else:\n y_out[i] = numpy.float('NaN')\n i += 1\n else:\n i = 0\n j = 1\n # Until we get to the data, fill the output array with NaNs (which\n # get ignored when plotted)\n while x_out[i] < x_in[0]:\n y_out[i] = numpy.float('NaN')\n y_out[i + 1] = numpy.float('NaN')\n y_out[i + 2] = numpy.float('NaN')\n i += 3\n # If we're some way into the data, we need to skip ahead to where\n # we want to get the first datapoint from:\n while x_in[j] < x_out[i]:\n j += 1\n\n # Get the first datapoint:\n # y_out[i] = y_in[j-1]\n # i += 1\n\n # Get values until we get to the end of the data:\n while j < len(x_in) and i < len(x_out) - 2: # Leave one spare for the final data point and one because stepMode=True requires len(y)=len(x)-1\n # This is 'nearest neighbour on the left' interpolation. 
It's\n # what we want if none of the source values checked in the\n # upcoming loop are used:\n y_out[i] = y_in[j - 1]\n i += 2\n positive_jump_value = 0\n positive_jump_index = j - 1\n negative_jump_value = 0\n negative_jump_index = j - 1\n # now find the max and min values between this x_out time point and the next x_out timepoint\n # print i\n while j < len(x_in) and x_in[j] < x_out[i]:\n jump = y_in[j] - y_out[i - 2]\n # would using this source value cause a bigger positive jump?\n if jump > 0 and jump > positive_jump_value:\n positive_jump_value = jump\n positive_jump_index = j\n # would using this source value cause a bigger negative jump?\n elif jump < 0 and jump < negative_jump_value:\n negative_jump_value = jump\n negative_jump_index = j\n\n j += 1\n\n if positive_jump_index < negative_jump_index:\n y_out[i - 1] = y_in[positive_jump_index]\n y_out[i] = y_in[negative_jump_index]\n # TODO: We could override the x_out values with x_in[jump_index]\n else:\n y_out[i - 1] = y_in[negative_jump_index]\n y_out[i] = y_in[positive_jump_index]\n\n i += 1\n\n # Get the last datapoint:\n if j < len(x_in):\n # If the sample rate of the raw data is low, then the current\n # j point could be outside the current plot view range\n # If so, decrease j so that we take a value that is within the\n # plot view range.\n if x_in[j] > x_out[-1] and j > 0:\n j -= 1\n\n y_out[i] = y_in[j]\n i += 1\n # if i < len(x_out):\n # y_out[i] = y_in[-1]\n # i += 1\n # Fill the remainder of the array with the last datapoint,\n # if t < stop_time, and then NaNs after that:\n while i < len(x_out) - 1:\n if x_out[i] < stop_time:\n y_out[i] = y_in[-1]\n else:\n y_out[i] = numpy.float('NaN')\n i += 1\n # return y_out # method changed to modify y_out array in place\n\n def __resample3(self, x_in, y_in, x_out, stop_time):\n \"\"\"This is a Python implementation of the C extension. For\n debugging and developing the C extension.\"\"\"\n y_out = numpy.empty(len(x_out))\n i = 0\n j = 1\n # A couple of special cases that I don't want to have to put extra checks in for:\n if x_out[-1] < x_in[0] or x_out[0] > stop_time:\n # We're all the way to the left of the data or all the way to the right. Fill with NaNs:\n while i < len(x_out):\n y_out[i] = numpy.float('NaN')\n i += 1\n elif x_out[0] > x_in[-1]:\n # We're after the final clock tick, but before stop_time\n while i < len(x_out):\n if x_out[i] < stop_time:\n y_out[i] = y_in[-1]\n else:\n y_out[i] = numpy.float('NaN')\n i += 1\n else:\n # Until we get to the data, fill the output array with NaNs (which\n # get ignored when plotted)\n while x_out[i] < x_in[0]:\n y_out[i] = numpy.float('NaN')\n i += 1\n # If we're some way into the data, we need to skip ahead to where\n # we want to get the first datapoint from:\n while x_in[j] < x_out[i]:\n j += 1\n # Get the first datapoint:\n y_out[i] = y_in[j - 1]\n i += 1\n # Get values until we get to the end of the data:\n while j < len(x_in) and i < len(x_out):\n # This is 'nearest neighbour on the left' interpolation. 
It's\n # what we want if none of the source values checked in the\n # upcoming loop are used:\n y_out[i] = y_in[j - 1]\n while j < len(x_in) and x_in[j] < x_out[i]:\n # Would using this source value cause the interpolated values\n # to make a bigger jump?\n if numpy.abs(y_in[j] - y_out[i - 1]) > numpy.abs(y_out[i] - y_out[i - 1]):\n # If so, use this source value:\n y_out[i] = y_in[j]\n j += 1\n i += 1\n # Get the last datapoint:\n if i < len(x_out):\n y_out[i] = y_in[-1]\n i += 1\n # Fill the remainder of the array with the last datapoint,\n # if t < stop_time, and then NaNs after that:\n while i < len(x_out):\n if x_out[i] < stop_time:\n y_out[i] = y_in[-1]\n else:\n y_out[i] = numpy.float('NaN')\n i += 1\n return y_out\n\n def _resample_thread(self):\n logger = logging.getLogger('runviewer.resample_thread')\n while True:\n if self._resample:\n self._resample = False\n # print 'resampling'\n ticked_shots = inmain(self.get_selected_shots_and_colours)\n for shot, (colour, shutters_checked) in ticked_shots.items():\n for channel in shot.traces:\n if self.channel_checked_and_enabled(channel):\n try:\n xmin, xmax, dx = self._get_resample_params(channel, shot)\n\n # We go a bit outside the visible range so that scrolling\n # doesn't immediately go off the edge of the data, and the\n # next resampling might have time to fill in more data before\n # the user sees any empty space.\n if self.scale_time:\n xnew, ynew = self.resample(shot.scaled_times(channel), shot.traces[channel][1], xmin, xmax, shot.stop_time, dx)\n else:\n xnew, ynew = self.resample(shot.traces[channel][0], shot.traces[channel][1], xmin, xmax, shot.stop_time, dx)\n inmain(self.plot_items[channel][shot].setData, xnew, ynew, pen=pg.mkPen(QColor(colour), width=2), stepMode=True)\n except Exception:\n #self._resample = True\n pass\n else:\n logger.info('ignoring channel %s' % channel)\n time.sleep(0.5)\n\n @inmain_decorator(wait_for_return=True)\n def channel_checked_and_enabled(self, channel):\n logger.info('is channel %s enabled' % channel)\n index = self.channel_model.index(0, CHANNEL_MODEL__CHANNEL_INDEX)\n indexes = self.channel_model.match(index, Qt.DisplayRole, channel, 1, Qt.MatchExactly)\n logger.info('number of matches %d' % len(indexes))\n if len(indexes) == 1:\n check_item = self.channel_model.itemFromIndex(indexes[0])\n if check_item.checkState() == Qt.Checked and check_item.isEnabled():\n return True\n return False\n\n def on_x_axis_reset(self):\n self._hidden_plot[0].enableAutoRange(axis=pg.ViewBox.XAxis)\n\n def on_y_axes_reset(self):\n for plot_widget in self.plot_widgets.values():\n plot_widget.enableAutoRange(axis=pg.ViewBox.YAxis)\n\n def _enable_selected_shots(self):\n self.update_ticks_of_selected_shots(Qt.Checked)\n\n def _disable_selected_shots(self):\n self.update_ticks_of_selected_shots(Qt.Unchecked)\n\n def update_ticks_of_selected_shots(self, state):\n # Get the selection model from the treeview\n selection_model = self.ui.shot_treeview.selectionModel()\n # Create a list of select row indices\n selected_row_list = [index.row() for index in sorted(selection_model.selectedRows())]\n # for each row selected\n for row in selected_row_list:\n check_item = self.shot_model.item(row, SHOT_MODEL__CHECKBOX_INDEX)\n check_item.setCheckState(state)\n\n def _move_up(self):\n # Get the selection model from the treeview\n selection_model = self.ui.channel_treeview.selectionModel()\n # Create a list of select row indices\n selected_row_list = [index.row() for index in sorted(selection_model.selectedRows())]\n # For each 
row selected\n for i, row in enumerate(selected_row_list):\n # only move the row if it is not element 0, and the row above it is not selected\n # (note that while a row above may have been initially selected, it should by now, be one row higher\n # since we start moving elements of the list upwards starting from the lowest index)\n if row > 0 and (row - 1) not in selected_row_list:\n # Remove the selected row\n items = self.channel_model.takeRow(row)\n # Add the selected row into a position one above\n self.channel_model.insertRow(row - 1, items)\n # Since it is now a newly inserted row, select it again\n selection_model.select(self.channel_model.indexFromItem(items[0]), QItemSelectionModel.SelectCurrent)\n # reupdate the list of selected indices to reflect this change\n selected_row_list[i] -= 1\n self.update_plot_positions()\n\n def _move_down(self):\n # Get the selection model from the treeview\n selection_model = self.ui.channel_treeview.selectionModel()\n # Create a list of select row indices\n selected_row_list = [index.row() for index in reversed(sorted(selection_model.selectedRows()))]\n # For each row selected\n for i, row in enumerate(selected_row_list):\n # only move the row if it is not the last element, and the row above it is not selected\n # (note that while a row below may have been initially selected, it should by now, be one row lower\n # since we start moving elements of the list upwards starting from the highest index)\n if row < self.channel_model.rowCount() - 1 and (row + 1) not in selected_row_list:\n # Remove the selected row\n items = self.channel_model.takeRow(row)\n # Add the selected row into a position one above\n self.channel_model.insertRow(row + 1, items)\n # Since it is now a newly inserted row, select it again\n selection_model.select(self.channel_model.indexFromItem(items[0]), QItemSelectionModel.SelectCurrent)\n # reupdate the list of selected indices to reflect this change\n selected_row_list[i] += 1\n self.update_plot_positions()\n\n def _move_top(self):\n # Get the selection model from the treeview\n selection_model = self.ui.channel_treeview.selectionModel()\n # Create a list of select row indices\n selected_row_list = [index.row() for index in sorted(selection_model.selectedRows())]\n # For each row selected\n for i, row in enumerate(selected_row_list):\n # only move the row while it is not element 0, and the row above it is not selected\n # (note that while a row above may have been initially selected, it should by now, be one row higher\n # since we start moving elements of the list upwards starting from the lowest index)\n while row > 0 and (row - 1) not in selected_row_list:\n # Remove the selected row\n items = self.channel_model.takeRow(row)\n # Add the selected row into a position one above\n self.channel_model.insertRow(row - 1, items)\n # Since it is now a newly inserted row, select it again\n selection_model.select(self.channel_model.indexFromItem(items[0]), QItemSelectionModel.SelectCurrent)\n # reupdate the list of selected indices to reflect this change\n selected_row_list[i] -= 1\n row -= 1\n self.update_plot_positions()\n\n def _move_bottom(self):\n selection_model = self.ui.channel_treeview.selectionModel()\n # Create a list of select row indices\n selected_row_list = [index.row() for index in reversed(sorted(selection_model.selectedRows()))]\n # For each row selected\n for i, row in enumerate(selected_row_list):\n # only move the row while it is not the last element, and the row above it is not selected\n # (note that while a row 
below may have been initially selected, it should by now, be one row lower\n # since we start moving elements of the list upwards starting from the highest index)\n while row < self.channel_model.rowCount() - 1 and (row + 1) not in selected_row_list:\n # Remove the selected row\n items = self.channel_model.takeRow(row)\n # Add the selected row into a position one above\n self.channel_model.insertRow(row + 1, items)\n # Since it is now a newly inserted row, select it again\n selection_model.select(self.channel_model.indexFromItem(items[0]), QItemSelectionModel.SelectCurrent)\n # reupdate the list of selected indices to reflect this change\n selected_row_list[i] += 1\n row += 1\n self.update_plot_positions()\n\n def update_plot_positions(self):\n # remove all widgets\n layout_items = {}\n for i in range(self.ui.plot_layout.count()):\n if i == 0:\n continue\n item = self.ui.plot_layout.takeAt(i)\n\n # add all widgets\n for i in range(self.channel_model.rowCount()):\n check_item = self.channel_model.item(i, CHANNEL_MODEL__CHECKBOX_INDEX)\n channel = str(check_item.text())\n if channel in self.plot_widgets:\n self.ui.plot_layout.addWidget(self.plot_widgets[channel])\n if check_item.checkState() == Qt.Checked and check_item.isEnabled():\n self.plot_widgets[channel].show()\n else:\n self.plot_widgets[channel].hide()\n self.ui.plot_layout.addWidget(self._time_axis_plot[0])\n\n\nclass Shot(object):\n def __init__(self, path):\n self.path = path\n\n # Store list of traces\n self._traces = None\n # store list of channels\n self._channels = None\n # store list of markers\n self._markers = None\n self.cached_scaler = None\n self._scalehandler = None\n self._scaled_x = {}\n\n # store list of shutter changes and callibrations\n self._shutter_times = None\n self._shutter_calibrations = {}\n\n # TODO: Get this dynamically\n device_list = ['PulseBlaster', 'NI_PCIe_6363', 'NI_PCI_6733']\n\n # Load connection table\n self.connection_table = ConnectionTable(path)\n\n # open h5 file\n with h5py.File(path, 'r') as file:\n # Get master pseudoclock\n self.master_pseudoclock_name = file['connection table'].attrs['master_pseudoclock']\n if isinstance(self.master_pseudoclock_name, bytes):\n self.master_pseudoclock_name = self.master_pseudoclock_name.decode('utf8')\n else:\n self.master_pseudoclock_name = str(self.master_pseudoclock_name)\n\n # get stop time\n self.stop_time = file['devices'][self.master_pseudoclock_name].attrs['stop_time']\n\n self.device_names = list(file['devices'].keys())\n\n # Get Shutter Calibrations\n if 'calibrations' in file and 'Shutter' in file['calibrations']:\n for name, open_delay, close_delay in numpy.array(file['calibrations']['Shutter']):\n self._shutter_calibrations[name] = [open_delay, close_delay]\n\n def delete_cache(self):\n self._channels = None\n self._traces = None\n\n def _load(self):\n if self._channels is None:\n self._channels = {}\n if self._traces is None:\n self._traces = {}\n if self._markers is None:\n self._markers = {}\n if self._shutter_times is None:\n self._shutter_times = {}\n\n self._load_markers()\n # Let's walk the connection table, starting with the master pseudoclock\n master_pseudoclock_device = self.connection_table.find_by_name(self.master_pseudoclock_name)\n\n self._load_device(master_pseudoclock_device)\n\n self._scalehandler = ScaleHandler(self._markers.keys(), self.stop_time)\n\n def _load_markers(self):\n with h5py.File(self.path, 'r') as file:\n if \"time_markers\" in file:\n for row in file[\"time_markers\"]:\n self._markers[row['time']] = 
{'color': row['color'].tolist()[0], 'label': row['label']}\n elif \"runviewer\" in file:\n for time, val in file[\"runviewer\"][\"markers\"].attrs.items():\n props = val.strip('{}}').rsplit(\",\", 1)\n color = list(map(int, props[0].split(\":\")[1].strip(\" ()\").split(\",\")))\n label = props[1].split(\":\")[1]\n self._markers[float(time)] = {'color': color, 'label': label}\n\n def add_trace(self, name, trace, parent_device_name, connection):\n name = str(name)\n self._channels[name] = {'device_name': parent_device_name, 'port': connection}\n self._traces[name] = trace\n\n # add shutter times\n try:\n con = self.connection_table.find_by_name(name)\n if con.device_class == \"Shutter\":\n self.add_shutter_times([(name, con.properties['open_state'])])\n except KeyError:\n pass\n\n\n # Temporary solution to physical shutter times\n def add_shutter_times(self, shutters):\n for name, open_state in shutters:\n x_values, y_values = self._traces[name]\n if len(x_values) > 0:\n change_indices = numpy.where(y_values[:-1] != y_values[1:])[0]\n change_indices += 1 # use the index of the value that is changed to\n change_values = zip(x_values[change_indices], y_values[change_indices])\n change_values.insert(0, (x_values[0], y_values[0])) # insert first value\n self._shutter_times[name] = {x_value + (self._shutter_calibrations[name][0] if y_value == open_state else self._shutter_calibrations[name][1]): 1 if y_value == open_state else 0 for x_value, y_value in change_values}\n\n def _load_device(self, device, clock=None):\n try:\n print('loading %s' % device.name)\n module = device.device_class\n # Load the master pseudoclock class\n # labscript_devices.import_device(module)\n device_class = labscript_devices.get_runviewer_parser(module)\n device_instance = device_class(self.path, device)\n clocklines_and_triggers = device_instance.get_traces(self.add_trace, clock)\n\n for name, trace in clocklines_and_triggers.items():\n child_device = self.connection_table.find_by_name(name)\n for grandchild_device_name, grandchild_device in child_device.child_list.items():\n self._load_device(grandchild_device, trace)\n\n except Exception:\n # TODO: print/log exception traceback\n # if device.name == 'ni_card_0' or device.name == 'pulseblaster_0' or device.name == 'pineblaster_0' or device.name == 'ni_card_1' or device.name == 'novatechdds9m_0':\n # raise\n # raise\n if hasattr(device, 'name'):\n print('Failed to load device %s' % device.name)\n else:\n print('Failed to load device (unknown name, device object does not have attribute name)')\n\n # backwards compat\n with h5py.File(self.path, 'r') as file:\n if \"runviewer\" in file:\n if \"shutter_times\" in file[\"runviewer\"]:\n for name, val in file[\"runviewer\"][\"shutter_times\"].attrs.items():\n self._shutter_times[name] = {float(key_value.split(\":\")[0]): int(key_value.split(\":\")[1]) for key_value in val.strip('{}}').split(\",\")}\n\n def scaled_times(self, channel):\n if self.cached_scaler != app.scalehandler:\n self.cached_scaler = app.scalehandler\n self._scaled_x = {}\n if channel not in self._scaled_x:\n self._scaled_x[channel] = self.cached_scaler.get_scaled_time(self._traces[channel][0])\n\n return self._scaled_x[channel]\n\n @property\n def channels(self):\n if self._channels is None:\n self._load()\n\n return self._channels.keys()\n\n def clear_cache(self):\n # clear cache variables to cut down on memory usage\n pass\n\n @property\n def markers(self):\n if self._markers is None:\n self._load()\n return self._markers\n\n @property\n def 
traces(self):\n # if traces cached:\n # return cached traces and waits\n if self._traces is None:\n self._load()\n return self._traces\n\n @property\n def shutter_times(self):\n if self._shutter_times is None:\n self._load()\n return self._shutter_times\n\n @property\n def scalehandler(self):\n if self._scalehandler is None:\n self._load()\n return self._scalehandler\n\n\nclass TempShot(Shot):\n def __init__(self, i):\n Shot.__init__(self, 'shot %d' % i)\n self._channels = ['Bx', 'By', 'Bz', 'Bq']\n\n self.stop_time = i + 1\n\n self.traces = {}\n no_x_points = 10000\n for channel in self.channels:\n # self.traces[channel] = (numpy.linspace(0,10,no_x_points), numpy.random.rand(no_x_points))\n x_points = numpy.linspace(0, self.stop_time, no_x_points)\n self.traces[channel] = (x_points, (i + 1) * numpy.sin(x_points * numpy.pi + i / 11.0 * 2 * numpy.pi))\n\n @property\n def channels(self):\n return self._channels\n\n def get_traces(self):\n return self.traces\n\n\nclass RunviewerServer(ZMQServer):\n def __init__(self, *args, **kwargs):\n ZMQServer.__init__(self, *args, **kwargs)\n self.logger = logging.getLogger('runviewer.server')\n\n def handler(self, h5_filepath):\n if h5_filepath == 'hello':\n return 'hello'\n\n self.logger.info('Received hdf5 file: %s' % h5_filepath)\n # Convert path to local slashes and shared drive prefix:\n h5_filepath = labscript_utils.shared_drive.path_to_local(h5_filepath)\n logger.info('local filepath: %s' % h5_filepath)\n # we add the shot to a queue so that we don't have to wait for the app to come up before\n # responding to runmanager\n shots_to_process_queue.put(h5_filepath)\n return 'ok'\n\n\nif __name__ == \"__main__\":\n qapplication = QApplication(sys.argv)\n\n shots_to_process_queue = Queue()\n\n exp_config = LabConfig(required_params = {\"DEFAULT\": [\"experiment_name\"], \"paths\": [\"shared_drive\", \"experiment_shot_storage\"], 'ports': ['runviewer']})\n\n port = int(exp_config.get('ports', 'runviewer'))\n myappid = 'monashbec.runviewer' # arbitrary string\n try:\n ctypes.windll.shell32.SetCurrentProcessExplicitAppUserModelID(myappid)\n except:\n logger.info('Not on a windows machine')\n # Start experiment server\n experiment_server = RunviewerServer(port)\n\n app = RunViewer(exp_config)\n\n def execute_program():\n qapplication.exec_()\n\n sys.exit(execute_program())\n" ]
[ [ "scipy.interpolate.interp1d", "numpy.abs", "numpy.float", "numpy.array", "numpy.sin", "numpy.where", "numpy.linspace", "numpy.float64" ] ]
maniospas/pygrank
[ "a92f6bb6d13553dd960f2e6bda4c041a8027a9d1" ]
[ "pygrank/measures/unsupervised.py" ]
[ "import warnings\nimport numpy as np\nfrom pygrank.measures.utils import Measure\nfrom pygrank.core.signals import to_signal\nfrom pygrank.core import backend, GraphSignalGraph, GraphSignalData, BackendPrimitive\n\n\nclass Unsupervised(Measure):\n pass\n\n\nclass Conductance(Unsupervised):\n \"\"\" Graph conductance (information flow) of scores.\n\n Assumes a fuzzy set of subgraphs whose nodes are included with probability proportional to their scores,\n as per the formulation of [krasanakis2019linkauc] and calculates E[outgoing edges] / E[internal edges] of\n the fuzzy rank subgraph.\n If scores assume binary values, E[.] becomes set size and this calculates the induced subgraph Conductance.\n \"\"\"\n\n def __init__(self, graph: GraphSignalGraph = None, max_rank: float = 1):\n \"\"\" Initializes the Conductance measure.\n\n Args:\n graph: Optional. The graph on which to calculate the measure. If None (default) it is automatically\n extracted from graph signals passed for evaluation.\n max_rank: Optional. The maximum value scores can assume. To maintain a probabilistic formulation of\n conductance, this can be greater but not less than the maximum rank during evaluation. Default is 1.\n\n Example:\n >>> import pygrank as pg\n >>> graph, seed_nodes, algorithm = ...\n >>> algorithm = pg.Normalize(algorithm)\n >>> scores = algorithm.rank(graph, seed_nodes)\n >>> conductance = pg.Conductance().evaluate(scores)\n \"\"\"\n self.graph = graph\n self.max_rank = max_rank\n\n def evaluate(self, scores: GraphSignalData) -> BackendPrimitive:\n scores = to_signal(self.graph, scores)\n graph = scores.graph\n if backend.max(scores.np) > self.max_rank:\n raise Exception(\"Normalize scores to be <= \" + str(self.max_rank) + \" for non-negative conductance\")\n external_edges = sum(scores.get(i, 0)*(self.max_rank-scores.get(j, 0)) for i, j in graph.edges())\n internal_edges = sum(scores.get(i, 0)*scores.get(j, 0) for i, j in graph.edges())\n if internal_edges > graph.number_of_edges()/2:\n internal_edges = graph.number_of_edges()-internal_edges # user the smallest partition as reference\n if not graph.is_directed():\n external_edges += sum(scores.get(j, 0) * (self.max_rank - scores.get(i, 0)) for i, j in graph.edges())\n internal_edges *= 2\n return external_edges / internal_edges if internal_edges != 0 else float('inf')\n\n\nclass Density(Unsupervised):\n \"\"\" Extension of graph density that accounts for node scores.\n\n Assumes a fuzzy set of subgraphs whose nodes are included with probability proportional to their scores,\n as per the formulation of [krasanakis2019linkauc] and calculates E[internal edges] / E[possible edges] of\n the fuzzy rank subgraph.\n If scores assume binary values, E[.] becomes set size and this calculates the induced subgraph Density.\n \"\"\"\n\n def __init__(self, graph: GraphSignalGraph = None):\n \"\"\" Initializes the Density measure.\n\n Args:\n graph: Optional. The graph on which to calculate the measure. 
If None (default) it is automatically\n extracted from graph signals passed for evaluation.\n\n Example:\n >>> import pygrank as pg\n >>> graph, seed_nodes, algorithm = ...\n >>> scores = algorithm.rank(graph, seed_nodes)\n >>> conductance = pg.Density().evaluate(scores)\n \"\"\"\n self.graph = graph\n\n def evaluate(self, scores: GraphSignalData) -> BackendPrimitive:\n scores = to_signal(self.graph, scores)\n graph = scores.graph\n internal_edges = sum(scores.get(i, 0) * scores.get(j, 0) for i,j in graph.edges())\n expected_edges = backend.sum(scores.np) ** 2 - backend.sum(scores.np ** 2) # without self-loops\n if internal_edges == 0:\n return 0\n return internal_edges / expected_edges\n\n\nclass Modularity(Unsupervised):\n \"\"\"\n Extension of modularity that accounts for node scores.\n \"\"\"\n \n def __init__(self,\n graph: GraphSignalGraph = None,\n max_rank: float = 1,\n max_positive_samples: int = 2000,\n seed: int = 0):\n \"\"\" Initializes the Modularity measure with a sampling strategy that speeds up normal computations.\n\n Args:\n graph: Optional. The graph on which to calculate the measure. If None (default) it is automatically\n extracted from graph signals passed for evaluation.\n max_rank: Optional. Default is 1.\n max_positive_samples: Optional. The number of nodes with which to compute modularity. These are\n sampled uniformly from all graph nodes. If this is greater than the number of graph nodes,\n all nodes are used and the measure is deterministic. However,\n calculation time is O(max_positive_samples<sup>2</sup>) and thus a trade-off needs to be determined of time\n vs approximation quality. Effectively, the value should be high enough for max_positive_samples<sup>2</sup>\n to be comparable to the number of graph edges. Default is 2000.\n seed: Optional. Makes the evaluation seeded, for example to use in tuning. Default is 0.\n\n Example:\n >>> import pygrank as pg\n >>> graph, seed_nodes, algorithm = ...\n >>> scores = algorithm.rank(graph, seed_nodes)\n >>> modularity = pg.Modularity(max_positive_samples=int(graph.number_of_edges()**0.5)).evaluate(scores)\n \"\"\"\n self.graph = graph\n self.max_positive_samples = max_positive_samples\n self.max_rank = max_rank\n self.seed = seed\n\n def evaluate(self, scores: GraphSignalData) -> BackendPrimitive:\n scores = to_signal(self.graph, scores)\n graph = scores.graph\n positive_candidates = list(graph)\n if len(positive_candidates) > self.max_positive_samples:\n np.random.seed(self.seed)\n positive_candidates = np.random.choice(positive_candidates, self.max_positive_samples)\n m = graph.number_of_edges()\n if m == 0:\n return 0\n Q = 0\n for v in positive_candidates:\n for u in positive_candidates:\n Avu = 1 if graph.has_edge(v,u) else 0\n Avu -= graph.degree[v]*graph.degree[u]/2/m\n Q += Avu*(scores[v]/self.max_rank)*(scores[u]/self.max_rank)\n return Q/2/m\n" ]
[ [ "numpy.random.seed", "numpy.random.choice" ] ]
SummaLabs/DLS
[ "2adba47430b456ad0f324e4c8883a896a23b3fbf" ]
[ "data-test/test_data_serialization/step2_h5py_test_perf/run02_s1_write_image_blob.py" ]
[ "#!/usr/bin/python\n# -*- coding: utf-8 -*-\n__author__ = 'ar'\n\nimport skimage.io as skio\nimport numpy as np\nimport h5py\nimport time\n\nfrom run00_common import ImageDirParser\n\n############################\ndef buidImageDataset(imageDirParser=None, datasetName='test-dataset.h5', numberOfSamples=1000, isRawBlob = False):\n if imgDirParser is None:\n raise Exception('Invalid imageDirParser')\n pathH5File = datasetName\n f = h5py.File(pathH5File, 'w')\n f.create_dataset('scheme', data=np.array(imgDirParser.scheme))\n grpData = f.create_group('data')\n #\n rndIndex = np.random.randint(0, imgDirParser.getNumSamples(), (numberOfSamples))\n for ii in range(len(rndIndex)):\n ridx = rndIndex[ii]\n dataRow = imgDirParser.listPathAndIdx[ridx]\n grpName = 'row_%08d' % ii\n grp = grpData.create_group(grpName)\n for vvi, vv in enumerate(dataRow):\n ttype = imgDirParser.scheme[vvi]\n tkey = 'col_%02d' % vvi\n if ttype == 'path-img2d':\n if isRawBlob:\n timgData = np.void(open(vv, 'r').read())\n dset = grp.create_dataset(tkey, data=timgData)\n else:\n timg = skio.imread(vv)\n dset = grp.create_dataset(tkey, data=timg)\n elif ttype == 'category-idx':\n dset = grp.create_dataset(tkey, data=np.array(vv))\n elif ttype == 'array-float':\n dset = grp.create_dataset(tkey, data=vv)\n elif ttype == 'category-name':\n dset = grp.create_dataset(tkey, data=np.array(vv))\n else:\n raise Exception('Unknown feature type [%s]' % ttype)\n f.close()\n\n\n############################\nif __name__ == '__main__':\n wdir = '../../dataset-image2d/simple4c_test'\n imgDirParser = ImageDirParser(wdir=wdir)\n print (imgDirParser)\n #\n numberOfSamples = 10000\n dataSetNameRaw = 'test-dataset-rawimg.h5'\n dataSetNameArr = 'test-dataset-numpy.h5'\n # (1) Raw\n t1 = time.time()\n buidImageDataset(imageDirParser=imgDirParser,\n datasetName=dataSetNameRaw,\n numberOfSamples=numberOfSamples, isRawBlob=True)\n dt = time.time() - t1\n tspeed = float(numberOfSamples) / dt\n dT1k = 1000. / tspeed\n print ('WRITE [%s] : T=%0.2fs, #Samples=%d, Speed: %0.3f (Samples/Sec), dt(#1000) = %0.3fs'\n % (dataSetNameRaw, dt, numberOfSamples, tspeed, dT1k))\n # (2) Numpy\n t1 = time.time()\n buidImageDataset(imageDirParser=imgDirParser,\n datasetName=dataSetNameArr,\n numberOfSamples=numberOfSamples, isRawBlob=False)\n dt = time.time() - t1\n tspeed = float(numberOfSamples) / dt\n dT1k = 1000. / tspeed\n print ('WRITE [%s] : T=%0.2fs, #Samples=%d, Speed: %0.3f (Samples/Sec), dt(#1000) = %0.3fs'\n % (dataSetNameArr, dt, numberOfSamples, tspeed, dT1k))\n" ]
[ [ "numpy.array" ] ]
Heng-Z/mwr
[ "28e42a3a64f46dc627333b2c6ae4b317803648ba" ]
[ "training/data_sequence.py" ]
[ "from tensorflow.keras.utils import Sequence\nimport numpy as np\nimport mrcfile\nimport os\n# Here, `x_set` is list of path to the images\n# and `y_set` are the associated classes.\n\nclass dataSequence(Sequence):\n\n def __init__(self, x_set, y_set, batch_size):\n self.x, self.y = x_set, y_set\n self.batch_size = batch_size\n self.perm = np.random.permutation(len(self.x))\n\n def __len__(self):\n return int(np.ceil(len(self.x) / float(self.batch_size)))\n\n def on_epoch_end(self):\n self.perm = np.random.permutation(len(self.x))\n\n def __getitem__(self, i):\n idx = slice(i*self.batch_size,(i+1)*self.batch_size)\n idx = self.perm[idx]\n # print('*******',self.x[-1],mrcfile.open(self.x[0]).data[:,:,:,np.newaxis].shape)\n rx = np.array([mrcfile.open(self.x[j]).data[:,:,:,np.newaxis] for j in idx])\n ry = np.array([mrcfile.open(self.y[j]).data[:,:,:,np.newaxis] for j in idx])\n # for j in idx:\n # print(mrcfile.open(self.x[j]).data.shape,mrcfile.open(self.y[j]).data.shape)\n return rx,ry\n\n\ndef prepare_dataseq(data_folder, batch_size):\n\n dirs_tomake = ['train_x','train_y', 'test_x', 'test_y']\n path_all = []\n for d in dirs_tomake:\n p = '{}/{}/'.format(data_folder, d)\n path_all.append(sorted([p+f for f in os.listdir(p)]))\n # train_data = dataSequence(path_all[0], path_all[1], batch_size)\n # test_data = dataSequence(path_all[2], path_all[3], batch_size)\n train_data = get_gen(path_all[0], path_all[1], batch_size)\n test_data = get_gen(path_all[2], path_all[3], batch_size)\n # print(path_all[2],path_all[3])\n return train_data, test_data\n\ndef get_gen(x_set,y_set,batch_size,shuffle=True):\n def gen():\n while True:\n all_idx = np.arange(len(x_set))\n if shuffle:\n np.random.shuffle(all_idx)\n for i in range(len(x_set)//batch_size):\n idx = slice(i * batch_size,(i+1) * batch_size)\n idx = all_idx[idx]\n rx = np.array([mrcfile.open(x_set[j]).data[:,:,:,np.newaxis] for j in idx])\n ry = np.array([mrcfile.open(y_set[j]).data[:,:,:,np.newaxis] for j in idx])\n\n yield rx,ry\n return gen\n\ndef get_gen_single(x_set,batch_size,shuffle=True):\n def gen():\n while True:\n all_idx = np.arange(len(x_set))\n if shuffle:\n np.random.shuffle(all_idx)\n for i in range(len(x_set)//batch_size):\n idx = slice(i * batch_size,(i+1) * batch_size)\n idx = all_idx[idx]\n rx = np.array([mrcfile.open(x_set[j]).data[:,:,:,np.newaxis] for j in idx])\n yield rx\n return gen" ]
[ [ "numpy.random.shuffle" ] ]
dwkim565/laygo2
[ "fea1263638fa5641ad27f2000d7562cdf910c67f" ]
[ "examples/demo/7_thshin_test.py" ]
[ "#!/usr/bin/python\n########################################################################################################################\n#\n# Copyright (c) 2014, Regents of the University of California\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without modification, are permitted provided that the\n# following conditions are met:\n#\n# 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following\n# disclaimer.\n# 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the\n# following disclaimer in the documentation and/or other materials provided with the distribution.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES,\n# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\n# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,\n# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n#\n########################################################################################################################\n\nimport numpy as np\nimport pprint\nimport laygo2\nimport laygo2.interface\nimport laygo2_tech as tech\n\n# Parameter definitions ##############\n# Templates\ntpmos_name = 'pmos'\ntnmos_name = 'nmos'\n# Grids\npg_name = 'placement_basic'\nr12_name = 'routing_12_cmos'\nr23_name = 'routing_23_cmos'\n# Design hierarchy\nlibname = 'thshin_65_to_40'\ncellname = 'thshin_test'\n# Design parameters\nnf_a = 2\nnf_b = 4\n# End of parameter definitions #######\n\n# Generation start ###################\n# 1. Load templates and grids.\nprint(\"Load templates\")\ntemplates = tech.load_templates()\ntpmos, tnmos = templates[tpmos_name], templates[tnmos_name]\nprint(templates[tpmos_name], templates[tnmos_name], sep=\"\\n\")\n\nprint(\"Load grids\")\ngrids = tech.load_grids(templates=templates)\npg, r12, r23 = grids[pg_name], grids[r12_name], grids[r23_name]\nprint(grids[pg_name], grids[r12_name], grids[r23_name], sep=\"\\n\")\n\n# 2. Create a design hierarchy.\nlib = laygo2.object.database.Library(name=libname)\ndsn = laygo2.object.database.Design(name=cellname, libname=libname)\nlib.append(dsn)\n\n# 3. Create instances.\nprint(\"Create instances\")\nin0 = tnmos.generate(name='MN0', params={'nf': nf_b, 'trackswap': False, 'tie': 'D', 'gbndl': True})\nin1 = tnmos.generate(name='MN1', params={'nf': nf_b, 'trackswap': True, 'tie': 'D', 'gbndl': True})\nin2 = tnmos.generate(name='MN2', params={'nf': nf_b, 'trackswap': True, 'tie': 'D', 'gbndl': True})\nin3 = tnmos.generate(name='MN3', params={'nf': nf_b, 'trackswap': True, 'gbndl': True})\n#in1 = tnmos.generate(name='MN1', params={'nf': nf_a, 'gbndr': True})\n#ip0 = tpmos.generate(name='MP0', transform='MX', params={'nf': nf_b, 'tie': 'S',11gbndl': True})\n#ip1 = tpmos.generate(name='MP1', transform='MX', params={'nf': nf_a, 'trackswap': True, 'tie': 'D', 'gbndr': True})\n\n# 4. 
Place instances.\ndsn.place(grid=pg, inst=in0, mn=pg.mn[0, 0])\ndsn.place(grid=pg, inst=in1, mn=pg.mn.bottom_right(in0)+np.array([2, 0])) # same with pg == in0.bottom_right\ndsn.place(grid=pg, inst=in2, mn=pg.mn.top_left(in1)+np.array([0, 4])) # same with pg == in0.bottom_right\ndsn.place(grid=pg, inst=in3, mn=pg.mn.bottom_right(in2)+np.array([2, 0])) # same with pg == in0.bottom_right\n\n#dsn.place(grid=pg, inst=ip0, mn=pg.mn.top_left(in0) + pg.mn.height_vec(ip0)) # +height_vec due to MX transform\n#dsn.place(grid=pg, inst=ip1, mn=pg.mn.top_right(ip0))\n\n# 5. Create and place wires.\nprint(\"Create wires\")\n# A\n#_mn = [r23.mn(in1.pins['G'])[0], r23.mn(ip1.pins['G'])[0]]\n#va0, ra0, va1 = dsn.route(grid=r23, mn=_mn, via_tag=[True, True])\n# B\n#_mn = [r23.mn(in0.pins['G'])[0], r23.mn(ip0.pins['G'])[0]]\n#vb0, rb0, vb1 = dsn.route(grid=r23, mn=_mn, via_tag=[True, True])\n# Internal\n#_mn = [r12.mn(in0.pins['S'])[0], r12.mn(in1.pins['D'])[0]]\n#ri0 = dsn.route(grid=r23, mn=_mn)\n#_mn = [r12.mn(ip0.pins['D'])[0], r12.mn(ip1.pins['S'])[0]]\n#ri1 = dsn.route(grid=r23, mn=_mn)\n# Output\n#_mn = [r23.mn(in1.pins['S'])[1], r23.mn(ip1.pins['S'])[1]]\n#_track = [r23.mn(ip1.pins['S'])[1, 0], None]\n#_, vo0, ro0, vo1, _= dsn.route_via_track(grid=r23, mn=_mn, track=_track)\n# VSS\n#rvss0 = dsn.route(grid=r12, mn=[r12.mn(in0.pins['RAIL'])[0], r12.mn(in1.pins['RAIL'])[1]])\n# VDD\n#rvdd0 = dsn.route(grid=r12, mn=[r12.mn(ip0.pins['RAIL'])[0], r12.mn(ip1.pins['RAIL'])[1]])\n\n# 6. Create pins.\n#pa0 = dsn.pin(name='A', grid=r23, mn=r23.mn.bbox(ra0))\n#pb0 = dsn.pin(name='B', grid=r23, mn=r23.mn.bbox(rb0))\n#po0 = dsn.pin(name='O', grid=r23, mn=r23.mn.bbox(ro0))\n#pvss0 = dsn.pin(name='VSS', grid=r12, mn=r12.mn.bbox(rvss0))\n#pvdd0 = dsn.pin(name='VDD', grid=r12, mn=r12.mn.bbox(rvdd0))\n\n# 7. Export to physical database.\nprint(\"Export design\")\nprint(dsn)\n# Uncomment for GDS export\n\"\"\"\n#abstract = False # export abstract\n#laygo2.interface.gds.export(lib, filename=libname+'_'+cellname+'.gds', cellname=None, scale=1e-9,\n# layermapfile=\"../technology_example/technology_example.layermap\", physical_unit=1e-9, logical_unit=0.001,\n# pin_label_height=0.1, pin_annotate_layer=['text', 'drawing'], text_height=0.1,\n# abstract_instances=abstract)\n\"\"\"\n\n# Uncomment for SKILL export\n\"\"\"\n#skill_str = laygo2.interface.skill.export(lib, filename=libname+'_'+cellname+'.il', cellname=None, scale=1e-3)\n#print(skill_str)\n\"\"\"\n\n# Uncomment for BAG export\nlaygo2.interface.bag.export(lib, filename=libname+'_'+cellname+'.il', cellname=None, scale=1e-3, reset_library=False, tech_library=tech.name)\n\n# 7-a. Import the GDS file back and display\n#with open('nand_generate.gds', 'rb') as stream:\n# pprint.pprint(laygo2.interface.gds.readout(stream, scale=1e-9))\n\n# 8. Export to a template database file.\nnat_temp = dsn.export_to_template()\nlaygo2.interface.yaml.export_template(nat_temp, filename=libname+'_templates.yaml', mode='append')\n\n" ]
[ [ "numpy.array" ] ]
jihyunbak/spyglass
[ "780fe2c101db60d42a1b73ad8fd729db42620ba6" ]
[ "src/nwb_datajoint/spikesorting/spikesorting_artifact.py" ]
[ "import warnings\nfrom functools import reduce\n\nimport datajoint as dj\nimport numpy as np\nimport scipy.stats as stats\nimport spikeinterface as si\nfrom spikeinterface.core.segmentutils import AppendSegmentRecording\n\nfrom ..common.common_interval import IntervalList\nfrom ..common.nwb_helper_fn import get_valid_intervals\nfrom .spikesorting_recording import SpikeSortingRecording\n\nschema = dj.schema('spikesorting_artifact')\n\n@schema\nclass ArtifactDetectionParameters(dj.Manual):\n definition = \"\"\"\n # Parameters for detecting artifact times within a sort group.\n artifact_params_name: varchar(200)\n ---\n artifact_params: blob # dictionary of parameters\n \"\"\"\n\n def insert_default(self):\n \"\"\"Insert the default artifact parameters with an appropriate parameter dict.\n \"\"\"\n artifact_params = {}\n artifact_params['zscore_thresh'] = None # must be None or >= 0\n artifact_params['amplitude_thresh'] = 3000 # must be None or >= 0\n # all electrodes of sort group\n artifact_params['proportion_above_thresh'] = 1.0\n artifact_params['removal_window_ms'] = 1.0 # in milliseconds\n self.insert1(['default', artifact_params], skip_duplicates=True)\n\n artifact_params_none = {}\n artifact_params_none['zscore_thresh'] = None\n artifact_params_none['amplitude_thresh'] = None\n self.insert1(['none', artifact_params_none], skip_duplicates=True)\n\n\n@schema\nclass ArtifactDetectionSelection(dj.Manual):\n definition = \"\"\"\n # Specifies artifact detection parameters to apply to a sort group's recording.\n -> SpikeSortingRecording\n -> ArtifactDetectionParameters\n ---\n \"\"\"\n\n\n@schema\nclass ArtifactDetection(dj.Computed):\n definition = \"\"\"\n # Stores artifact times and valid no-artifact times as intervals.\n -> ArtifactDetectionSelection\n ---\n artifact_times: longblob # np array of artifact intervals\n artifact_removed_valid_times: longblob # np array of valid no-artifact intervals\n artifact_removed_interval_list_name: varchar(200) # name of the array of no-artifact valid time intervals\n \"\"\"\n\n def make(self, key):\n # get the dict of artifact params associated with this artifact_params_name\n artifact_params = (ArtifactDetectionParameters &\n key).fetch1(\"artifact_params\")\n\n recording_path = (SpikeSortingRecording & key).fetch1('recording_path')\n recording_name = SpikeSortingRecording._get_recording_name(key)\n recording = si.load_extractor(recording_path)\n\n artifact_removed_valid_times, artifact_times = _get_artifact_times(\n recording, **artifact_params)\n\n # NOTE: decided not to do this but to just create a single long segment; keep for now\n # get artifact times by segment\n # if AppendSegmentRecording, get artifact times for each segment\n # if isinstance(recording, AppendSegmentRecording):\n # artifact_removed_valid_times = []\n # artifact_times = []\n # for rec in recording.recording_list:\n # rec_valid_times, rec_artifact_times = _get_artifact_times(rec, **artifact_params)\n # for valid_times in rec_valid_times:\n # artifact_removed_valid_times.append(valid_times)\n # for artifact_times in rec_artifact_times:\n # artifact_times.append(artifact_times)\n # artifact_removed_valid_times = np.asarray(artifact_removed_valid_times)\n # artifact_times = np.asarray(artifact_times)\n # else:\n # artifact_removed_valid_times, artifact_times = _get_artifact_times(recording, **artifact_params)\n\n key['artifact_times'] = artifact_times\n key['artifact_removed_valid_times'] = artifact_removed_valid_times\n\n # set up a name for no-artifact times using recording 
id\n key['artifact_removed_interval_list_name'] = recording_name + \\\n '_' + key['artifact_params_name'] + '_artifact_removed_valid_times'\n\n ArtifactRemovedIntervalList.insert1(key, replace=True)\n\n # # insert artifact times and valid times into ArtifactRemovedIntervalList with an appropriate name\n # tmp_key = (ArtifactDetectionSelection & key).proj().fetch1()\n # tmp_key['artifact_removed_interval_list_name'] = key['artifact_removed_interval_list_name']\n # tmp_key['artifact_removed_valid_times'] = key['artifact_removed_valid_times']\n # tmp_key['artifact_times'] = key['artifact_times']\n # ArtifactRemovedIntervalList.insert1(tmp_key, skip_duplicates = True)\n\n # also insert into IntervalList\n tmp_key = {}\n tmp_key['nwb_file_name'] = key['nwb_file_name']\n tmp_key['interval_list_name'] = key['artifact_removed_interval_list_name']\n tmp_key['valid_times'] = key['artifact_removed_valid_times']\n IntervalList.insert1(tmp_key, replace=True)\n\n # insert into computed table\n self.insert1(key)\n\n\n@schema\nclass ArtifactRemovedIntervalList(dj.Manual):\n definition = \"\"\"\n # Stores intervals without detected artifacts.\n # Note that entries can come from either ArtifactDetection() or alternative artifact removal analyses.\n artifact_removed_interval_list_name: varchar(200)\n ---\n -> ArtifactDetectionSelection\n artifact_removed_valid_times: longblob\n artifact_times: longblob # np array of artifact intervals\n \"\"\"\n\n\ndef _get_artifact_times(recording, zscore_thresh=None, amplitude_thresh=None,\n proportion_above_thresh=1.0, removal_window_ms=1.0):\n \"\"\"Detects times during which artifacts do and do not occur.\n Artifacts are defined as periods where the absolute value of the recording signal exceeds one\n OR both specified amplitude or zscore thresholds on the proportion of channels specified,\n with the period extended by the removal_window_ms/2 on each side. 
Z-score and amplitude\n threshold values of None are ignored.\n\n Parameters\n ----------\n recording : si.Recording\n zscore_thresh : float, optional\n Stdev threshold for exclusion, should be >=0, defaults to None\n amplitude_thresh : float, optional\n Amplitude threshold for exclusion, should be >=0, defaults to None\n proportion_above_thresh : float, optional, should be>0 and <=1\n Proportion of electrodes that need to have threshold crossings, defaults to 1\n removal_window_ms : float, optional\n Width of the window in milliseconds to mask out per artifact (window/2 removed on each side of threshold crossing), defaults to 1 ms\n\n Returns\n ------_\n artifact_intervals : np.ndarray\n Intervals in which artifacts are detected (including removal windows), unit: seconds\n artifact_removed_valid_times : np.ndarray\n Intervals of valid times where artifacts were not detected, unit: seconds\n \"\"\"\n\n valid_timestamps = SpikeSortingRecording._get_recording_timestamps(\n recording)\n if recording.get_num_segments() > 1:\n recording = si.concatenate_recordings(recording.recording_list)\n\n # if both thresholds are None, we essentially skip artifract detection and\n # return an array with the times of the first and last samples of the recording\n if (amplitude_thresh is None) and (zscore_thresh is None):\n recording_interval = np.asarray(\n [valid_timestamps[0], valid_timestamps[-1]])\n artifact_times_empty = np.asarray([])\n print(\"Amplitude and zscore thresholds are both None, skipping artifact detection\")\n return recording_interval, artifact_times_empty\n\n # verify threshold parameters\n amplitude_thresh, zscore_thresh, proportion_above_thresh = _check_artifact_thresholds(\n amplitude_thresh, zscore_thresh, proportion_above_thresh)\n\n # turn ms to remove total into s to remove from either side of each detected artifact\n half_removal_window_s = removal_window_ms * (1 / 1000) * (1 / 2)\n\n # TODO: load by chunk to avoid memory problems\n data = recording.get_traces()\n\n # compute the number of electrodes that have to be above threshold\n nelect_above = np.ceil(proportion_above_thresh *\n len(recording.get_channel_ids()))\n\n # find the artifact occurrences using one or both thresholds, across channels\n if ((amplitude_thresh is not None) and (zscore_thresh is None)):\n above_a = np.abs(data) > amplitude_thresh\n above_thresh = np.ravel(np.argwhere(\n np.sum(above_a, axis=0) >= nelect_above))\n elif ((amplitude_thresh is None) and (zscore_thresh is not None)):\n dataz = np.abs(stats.zscore(data, axis=1))\n above_z = dataz > zscore_thresh\n above_thresh = np.ravel(np.argwhere(\n np.sum(above_z, axis=0) >= nelect_above))\n else:\n above_a = np.abs(data) > amplitude_thresh\n dataz = np.abs(stats.zscore(data, axis=1))\n above_z = dataz > zscore_thresh\n above_thresh = np.ravel(np.argwhere(\n np.sum(np.logical_or(above_z, above_a), axis=0) >= nelect_above))\n\n if len(above_thresh) == 0:\n recording_interval = np.asarray(\n [[valid_timestamps[0], valid_timestamps[-1]]])\n artifact_times_empty = np.asarray([])\n print(\"No artifacts detected.\")\n return recording_interval, artifact_times_empty\n\n # find timestamps of initial artifact threshold crossings\n above_thresh_times = valid_timestamps[above_thresh]\n\n # keep track of all the artifact timestamps within each artifact removal window and the indices of those timestamps\n artifact_times = []\n artifact_indices = []\n for a in above_thresh_times:\n a_times = np.copy(valid_timestamps[(valid_timestamps > (\n a - half_removal_window_s)) & 
(valid_timestamps <= (a + half_removal_window_s))])\n a_indices = np.argwhere((valid_timestamps > (\n a - half_removal_window_s)) & (valid_timestamps <= (a + half_removal_window_s)))\n artifact_times.append(a_times)\n artifact_indices.append(a_indices)\n all_artifact_times = reduce(np.union1d, artifact_times)\n all_artifact_indices = reduce(np.union1d, artifact_indices)\n # turn artifact detected times into intervals\n # should be faster than diffing and comparing to zero\n if not np.all(all_artifact_times[:-1] <= all_artifact_times[1:]):\n warnings.warn(\n \"Warning: sorting artifact timestamps; all_artifact_times was not strictly increasing\")\n all_artifact_times = np.sort(all_artifact_times)\n artifact_intervals = get_valid_intervals(\n all_artifact_times, recording.get_sampling_frequency(), 1.5, .000001)\n\n artifact_percent_of_times = 100 * \\\n len(all_artifact_times) / len(valid_timestamps)\n print(f\"{len(artifact_intervals)} artifact intervals detected;\\\n {artifact_percent_of_times} % of the recording's valid_timestamps removed as artifact\")\n\n # turn all artifact detected times into -1 to easily find non-artifact intervals\n valid_timestamps[all_artifact_indices] = -1\n artifact_removed_valid_times = get_valid_intervals(valid_timestamps[valid_timestamps != -1],\n recording.get_sampling_frequency(), 1.5, 0.000001)\n\n return artifact_removed_valid_times, artifact_intervals\n\n\ndef _check_artifact_thresholds(amplitude_thresh, zscore_thresh, proportion_above_thresh):\n \"\"\"Alerts user to likely unintended parameters. Not an exhaustive verification.\n\n Parameters\n ----------\n zscore_thresh: float\n amplitude_thresh: float\n proportion_above_thresh: float\n\n Return\n ------\n zscore_thresh: float\n amplitude_thresh: float\n proportion_above_thresh: float\n\n Raise\n ------\n ValueError: if signal thresholds are negative\n \"\"\"\n # amplitude or zscore thresholds should be negative, as they are applied to an absolute signal\n signal_thresholds = [t for t in [\n amplitude_thresh, zscore_thresh] if t is not None]\n for t in signal_thresholds:\n if t < 0:\n raise ValueError(\n \"Amplitude and Z-Score thresholds must be >= 0, or None\")\n\n # proportion_above_threshold should be in [0:1] inclusive\n if proportion_above_thresh < 0:\n warnings.warn(\n \"Warning: proportion_above_thresh must be a proportion >0 and <=1. Using proportion_above_thresh = 0.01 instead of \" + str(proportion_above_thresh))\n proportion_above_thresh = 0.01\n elif proportion_above_thresh > 1:\n warnings.warn(\n \"Warning: proportion_above_thresh must be a proportion >0 and <=1. Using proportion_above_thresh = 1 instead of \" + str(proportion_above_thresh))\n proportion_above_thresh = 1\n return amplitude_thresh, zscore_thresh, proportion_above_thresh\n" ]
[ [ "scipy.stats.zscore", "numpy.sum", "numpy.logical_or", "numpy.argwhere", "numpy.abs", "numpy.asarray", "numpy.copy", "numpy.all", "numpy.sort" ] ]
taylorguo/model-optimization
[ "ddd9a67c7599214a4061ae04b28387171d29d96a" ]
[ "CPD/CPDtorch/quant/quant_function.py" ]
[ "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport numpy as np\nfrom torch.utils.cpp_extension import load\nimport os\ncurrent_path = os.path.dirname(os.path.realpath(__file__))\n\nif torch.cuda.is_available():\n quant_cuda = load(\n name='quant_cuda',\n sources=[\n os.path.join(current_path, \"quant_cuda/quant_cuda.cpp\"),\n os.path.join(current_path, \"quant_cuda/float_kernel.cu\"),\n os.path.join(current_path, \"quant_cuda/quant.cu\"),\n ],\n )\nelse:\n quant_cuda = None\n\n__all__ = ['float_quantize', \"quantizer\", \"quant_gemm\"]\n\n\ndef get_module(x):\n if x.is_cuda:\n quant_module = quant_cuda\n else:\n raise NotImplementedError(\n \"Currently, we do not support customized precision for CPU\")\n return quant_module\n\n\ndef quantizer(forward_exp=8, forward_man=23, backward_exp=8, backward_man=23):\n\n class Rounding(torch.autograd.Function):\n @staticmethod\n def forward(self, x):\n if forward_exp == 8 and forward_man == 23:\n return x\n quant_module = get_module(x)\n out = quant_module.float_quantize_nearest(\n x.contiguous(), forward_man, forward_exp)\n return out\n\n @staticmethod\n def backward(self, grad_output):\n if self.needs_input_grad[0]:\n if backward_exp == 8 and backward_man == 23:\n return grad_output\n quant_module = get_module(grad_output)\n grad_input = quant_module.float_quantize_nearest(\n grad_output.contiguous(), backward_man, backward_exp)\n else:\n grad_input = None\n return grad_input\n\n return Rounding.apply\n\n\ndef float_quantize(x, exp, man):\n \"\"\"\n Quantize a single precision Floating Point into low-precision Floating Point\n\n Args:\n - :attr: `x` (torch.Tensor) : the single precision number(torch.Tensor) to be quantized\n - :attr: `exp` (int) : number of bits allocated for exponent\n - :attr: `man` (int) : number of bits allocated for mantissa, not counting the virtual bit\n\n Returns:\n - a quantized low-precision floating point number (torch.Tensor)\n \"\"\"\n assert isinstance(\n x, torch.Tensor), \"x is not a single precision Floating Point Tensor\"\n quant_module = get_module(x)\n return quant_module.float_quantize_nearest(x.contiguous(), man, exp)\n\n\ndef quant_gemm(a, b, man=23, exp=8):\n \"\"\"\n Quantize GEMM with customized precision as accumulator\n\n Args:\n - :attr: `a` (torch.Tensor) : the input of GEMM, with shape:(M, K)\n - :attr: `b` (torch.Tensor) : the input of GEMM, with shape:(K, N)\n - :attr: `exp` (int) : number of bits allocated for exponent\n - :attr: `man` (int) : number of bits allocated for mantissa, not counting the virtual bit\n\n Returns:\n - the result of GEMM (torch.Tensor)\n \"\"\"\n assert len(a.shape) == 2\n assert len(b.shape) == 2\n assert a.shape[1] == b.shape[0]\n quant_module = get_module(a)\n c = torch.zeros(a.shape[0], b.shape[1]).cuda()\n quant_module.float_quantize_gemm(a.contiguous(), b.contiguous(), c.contiguous(),\n a.shape[0], b.shape[1], a.shape[1], man, exp)\n return c\n" ]
[ [ "torch.zeros", "torch.cuda.is_available" ] ]
JiachenMao/TransfornerPrune
[ "084956e00807af5ce3f363d964f327405862e51b" ]
[ "tensor2tensor/models/resnet.py" ]
[ "# coding=utf-8\n# Copyright 2019 The Tensor2Tensor Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Resnets.\"\"\"\n# Copied from cloud_tpu/models/resnet/resnet_model.py and modified\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom tensor2tensor.layers import common_hparams\nfrom tensor2tensor.layers import common_layers\nfrom tensor2tensor.utils import registry\nfrom tensor2tensor.utils import t2t_model\nfrom tensor2tensor.utils.hparam import HParams\n\nimport tensorflow as tf\n\n\nBATCH_NORM_DECAY = 0.9\nBATCH_NORM_EPSILON = 1e-5\n\n\n# TODO(lukaszkaiser): remove or simplify after V2 work is done.\ndef layers():\n return common_layers.layers()\n\n\ndef batch_norm_relu(inputs,\n is_training,\n relu=True,\n init_zero=False,\n data_format=\"channels_first\"):\n \"\"\"Performs a batch normalization followed by a ReLU.\n\n Args:\n inputs: `Tensor` of shape `[batch, channels, ...]`.\n is_training: `bool` for whether the model is training.\n relu: `bool` if False, omits the ReLU operation.\n init_zero: `bool` if True, initializes scale parameter of batch\n normalization with 0 instead of 1 (default).\n data_format: `str` either \"channels_first\" for `[batch, channels, height,\n width]` or \"channels_last for `[batch, height, width, channels]`.\n\n Returns:\n A normalized `Tensor` with the same `data_format`.\n \"\"\"\n if init_zero:\n gamma_initializer = tf.zeros_initializer()\n else:\n gamma_initializer = tf.ones_initializer()\n\n if data_format == \"channels_first\":\n axis = 1\n else:\n axis = 3\n\n inputs = layers().BatchNormalization(\n axis=axis,\n momentum=BATCH_NORM_DECAY,\n epsilon=BATCH_NORM_EPSILON,\n center=True,\n scale=True,\n fused=True,\n gamma_initializer=gamma_initializer)(inputs, training=is_training)\n\n if relu:\n inputs = tf.nn.relu(inputs)\n return inputs\n\n\ndef fixed_padding(inputs, kernel_size, data_format=\"channels_first\"):\n \"\"\"Pads the input along the spatial dimensions independently of input size.\n\n Args:\n inputs: `Tensor` of size `[batch, channels, height, width]` or\n `[batch, height, width, channels]` depending on `data_format`.\n kernel_size: `int` kernel size to be used for `conv2d` or max_pool2d`\n operations. 
Should be a positive integer.\n data_format: `str` either \"channels_first\" for `[batch, channels, height,\n width]` or \"channels_last for `[batch, height, width, channels]`.\n\n Returns:\n A padded `Tensor` of the same `data_format` with size either intact\n (if `kernel_size == 1`) or padded (if `kernel_size > 1`).\n \"\"\"\n pad_total = kernel_size - 1\n pad_beg = pad_total // 2\n pad_end = pad_total - pad_beg\n if data_format == \"channels_first\":\n padded_inputs = tf.pad(\n inputs, [[0, 0], [0, 0], [pad_beg, pad_end], [pad_beg, pad_end]])\n else:\n padded_inputs = tf.pad(\n inputs, [[0, 0], [pad_beg, pad_end], [pad_beg, pad_end], [0, 0]])\n\n return padded_inputs\n\n\ndef conv2d_fixed_padding(inputs,\n filters,\n kernel_size,\n strides,\n data_format=\"channels_first\",\n use_td=False,\n targeting_rate=None,\n keep_prob=None,\n is_training=None):\n \"\"\"Strided 2-D convolution with explicit padding.\n\n The padding is consistent and is based only on `kernel_size`, not on the\n dimensions of `inputs` (as opposed to using `tf.layers.conv2d` alone).\n\n Args:\n inputs: `Tensor` of size `[batch, channels, height_in, width_in]`.\n filters: `int` number of filters in the convolution.\n kernel_size: `int` size of the kernel to be used in the convolution.\n strides: `int` strides of the convolution.\n data_format: `str` either \"channels_first\" for `[batch, channels, height,\n width]` or \"channels_last for `[batch, height, width, channels]`.\n use_td: `str` one of \"weight\" or \"unit\". Set to False or \"\" to disable\n targeted dropout.\n targeting_rate: `float` proportion of weights to target with targeted\n dropout.\n keep_prob: `float` keep probability for targeted dropout.\n is_training: `bool` for whether the model is in training.\n\n Returns:\n A `Tensor` of shape `[batch, filters, height_out, width_out]`.\n\n Raises:\n Exception: if use_td is not valid.\n \"\"\"\n if strides > 1:\n inputs = fixed_padding(inputs, kernel_size, data_format=data_format)\n\n if use_td:\n inputs_shape = common_layers.shape_list(inputs)\n if use_td == \"weight\":\n if data_format == \"channels_last\":\n size = kernel_size * kernel_size * inputs_shape[-1]\n else:\n size = kernel_size * kernel_size * inputs_shape[1]\n targeting_count = targeting_rate * tf.to_float(size)\n targeting_fn = common_layers.weight_targeting\n elif use_td == \"unit\":\n targeting_count = targeting_rate * filters\n targeting_fn = common_layers.unit_targeting\n else:\n raise Exception(\"Unrecognized targeted dropout type: %s\" % use_td)\n\n y = common_layers.td_conv(\n inputs,\n filters,\n kernel_size,\n targeting_count,\n targeting_fn,\n keep_prob,\n is_training,\n do_prune=True,\n strides=strides,\n padding=(\"SAME\" if strides == 1 else \"VALID\"),\n data_format=data_format,\n use_bias=False,\n kernel_initializer=tf.variance_scaling_initializer())\n else:\n y = layers().Conv2D(\n filters=filters,\n kernel_size=kernel_size,\n strides=strides,\n padding=(\"SAME\" if strides == 1 else \"VALID\"),\n use_bias=False,\n kernel_initializer=tf.variance_scaling_initializer(),\n data_format=data_format)(inputs)\n \n # added by mjc: to see the activation distribution\n tf.summary.histogram('activations', y)\n\n return y\n\n\ndef residual_block(inputs,\n filters,\n is_training,\n projection_shortcut,\n strides,\n final_block,\n data_format=\"channels_first\",\n use_td=False,\n targeting_rate=None,\n keep_prob=None):\n \"\"\"Standard building block for residual networks with BN before convolutions.\n\n Args:\n inputs: `Tensor` of size 
`[batch, channels, height, width]`.\n filters: `int` number of filters for the first two convolutions. Note that\n the third and final convolution will use 4 times as many filters.\n is_training: `bool` for whether the model is in training.\n projection_shortcut: `function` to use for projection shortcuts (typically\n a 1x1 convolution to match the filter dimensions). If None, no\n projection is used and the input is passed as unchanged through the\n shortcut connection.\n strides: `int` block stride. If greater than 1, this block will ultimately\n downsample the input.\n final_block: unused parameter to keep the same function signature as\n `bottleneck_block`.\n data_format: `str` either \"channels_first\" for `[batch, channels, height,\n width]` or \"channels_last for `[batch, height, width, channels]`.\n use_td: `str` one of \"weight\" or \"unit\". Set to False or \"\" to disable\n targeted dropout.\n targeting_rate: `float` proportion of weights to target with targeted\n dropout.\n keep_prob: `float` keep probability for targeted dropout.\n\n Returns:\n The output `Tensor` of the block.\n \"\"\"\n del final_block\n shortcut = inputs\n inputs = batch_norm_relu(inputs, is_training, data_format=data_format)\n\n if projection_shortcut is not None:\n shortcut = projection_shortcut(inputs)\n\n inputs = conv2d_fixed_padding(\n inputs=inputs,\n filters=filters,\n kernel_size=3,\n strides=strides,\n data_format=data_format,\n use_td=use_td,\n targeting_rate=targeting_rate,\n keep_prob=keep_prob,\n is_training=is_training)\n\n inputs = batch_norm_relu(inputs, is_training, data_format=data_format)\n # added by mjc: to see the activation distribution after relu\n tf.summary.histogram('activations_after_relu', inputs)\n # added by mjc: add activation sparsity\n # inputs = common_layers.activation_sparsity(inputs, sparsity=0.8)\n inputs = conv2d_fixed_padding(\n inputs=inputs,\n filters=filters,\n kernel_size=3,\n strides=1,\n data_format=data_format,\n use_td=use_td,\n targeting_rate=targeting_rate,\n keep_prob=keep_prob,\n is_training=is_training)\n\n return inputs + shortcut\n\n\ndef bottleneck_block(inputs,\n filters,\n is_training,\n projection_shortcut,\n strides,\n final_block,\n data_format=\"channels_first\",\n use_td=False,\n targeting_rate=None,\n keep_prob=None):\n \"\"\"Bottleneck block variant for residual networks with BN after convolutions.\n\n Args:\n inputs: `Tensor` of size `[batch, channels, height, width]`.\n filters: `int` number of filters for the first two convolutions. Note that\n the third and final convolution will use 4 times as many filters.\n is_training: `bool` for whether the model is in training.\n projection_shortcut: `function` to use for projection shortcuts (typically\n a 1x1 convolution to match the filter dimensions). If None, no\n projection is used and the input is passed as unchanged through the\n shortcut connection.\n strides: `int` block stride. If greater than 1, this block will ultimately\n downsample the input.\n final_block: `bool` set to True if it is this the final block in the group.\n This is changes the behavior of batch normalization initialization for\n the final batch norm in a block.\n data_format: `str` either \"channels_first\" for `[batch, channels, height,\n width]` or \"channels_last for `[batch, height, width, channels]`.\n use_td: `str` one of \"weight\" or \"unit\". 
Set to False or \"\" to disable\n targeted dropout.\n targeting_rate: `float` proportion of weights to target with targeted\n dropout.\n keep_prob: `float` keep probability for targeted dropout.\n\n Returns:\n The output `Tensor` of the block.\n \"\"\"\n # TODO(chrisying): this block is technically the post-activation resnet-v1\n # bottleneck unit. Test with v2 (pre-activation) and replace if there is no\n # difference for consistency.\n shortcut = inputs\n if projection_shortcut is not None:\n shortcut = projection_shortcut(inputs)\n\n inputs = conv2d_fixed_padding(\n inputs=inputs,\n filters=filters,\n kernel_size=1,\n strides=1,\n data_format=data_format,\n use_td=use_td,\n targeting_rate=targeting_rate,\n keep_prob=keep_prob,\n is_training=is_training)\n\n inputs = batch_norm_relu(inputs, is_training, data_format=data_format)\n inputs = conv2d_fixed_padding(\n inputs=inputs,\n filters=filters,\n kernel_size=3,\n strides=strides,\n data_format=data_format,\n use_td=use_td,\n targeting_rate=targeting_rate,\n keep_prob=keep_prob,\n is_training=is_training)\n\n inputs = batch_norm_relu(inputs, is_training, data_format=data_format)\n inputs = conv2d_fixed_padding(\n inputs=inputs,\n filters=4 * filters,\n kernel_size=1,\n strides=1,\n data_format=data_format,\n use_td=use_td,\n targeting_rate=targeting_rate,\n keep_prob=keep_prob,\n is_training=is_training)\n inputs = batch_norm_relu(\n inputs,\n is_training,\n relu=False,\n init_zero=final_block,\n data_format=data_format)\n\n return tf.nn.relu(inputs + shortcut)\n\n\ndef block_layer(inputs,\n filters,\n block_fn,\n blocks,\n strides,\n is_training,\n name,\n data_format=\"channels_first\",\n use_td=False,\n targeting_rate=None,\n keep_prob=None):\n \"\"\"Creates one layer of blocks for the ResNet model.\n\n Args:\n inputs: `Tensor` of size `[batch, channels, height, width]`.\n filters: `int` number of filters for the first convolution of the layer.\n block_fn: `function` for the block to use within the model\n blocks: `int` number of blocks contained in the layer.\n strides: `int` stride to use for the first convolution of the layer. If\n greater than 1, this layer will downsample the input.\n is_training: `bool` for whether the model is training.\n name: `str`name for the Tensor output of the block layer.\n data_format: `str` either \"channels_first\" for `[batch, channels, height,\n width]` or \"channels_last for `[batch, height, width, channels]`.\n use_td: `str` one of \"weight\" or \"unit\". 
Set to False or \"\" to disable\n targeted dropout.\n targeting_rate: `float` proportion of weights to target with targeted\n dropout.\n keep_prob: `float` keep probability for targeted dropout.\n\n Returns:\n The output `Tensor` of the block layer.\n \"\"\"\n # Bottleneck blocks end with 4x the number of filters as they start with\n filters_out = 4 * filters if block_fn is bottleneck_block else filters\n\n def projection_shortcut(inputs):\n \"\"\"Project identity branch.\"\"\"\n inputs = conv2d_fixed_padding(\n inputs=inputs,\n filters=filters_out,\n kernel_size=1,\n strides=strides,\n data_format=data_format,\n use_td=use_td,\n targeting_rate=targeting_rate,\n keep_prob=keep_prob,\n is_training=is_training)\n return batch_norm_relu(\n inputs, is_training, relu=False, data_format=data_format)\n\n # Only the first block per block_layer uses projection_shortcut and strides\n inputs = block_fn(\n inputs,\n filters,\n is_training,\n projection_shortcut,\n strides,\n False,\n data_format,\n use_td=use_td,\n targeting_rate=targeting_rate,\n keep_prob=keep_prob)\n\n for i in range(1, blocks):\n inputs = block_fn(\n inputs,\n filters,\n is_training,\n None,\n 1, (i + 1 == blocks),\n data_format,\n use_td=use_td,\n targeting_rate=targeting_rate,\n keep_prob=keep_prob)\n\n return tf.identity(inputs, name)\n\n\ndef resnet_v2(inputs,\n block_fn,\n layer_blocks,\n filters,\n data_format=\"channels_first\",\n is_training=False,\n is_cifar=False,\n use_td=False,\n targeting_rate=None,\n keep_prob=None):\n \"\"\"Resnet model.\n\n Args:\n inputs: `Tensor` images.\n block_fn: `function` for the block to use within the model. Either\n `residual_block` or `bottleneck_block`.\n layer_blocks: list of 3 or 4 `int`s denoting the number of blocks to include\n in each of the 3 or 4 block groups. Each group consists of blocks that\n take inputs of the same resolution.\n filters: list of 4 or 5 `int`s denoting the number of filter to include in\n block.\n data_format: `str`, \"channels_first\" `[batch, channels, height,\n width]` or \"channels_last\" `[batch, height, width, channels]`.\n is_training: bool, build in training mode or not.\n is_cifar: bool, whether the data is CIFAR or not.\n use_td: `str` one of \"weight\" or \"unit\". 
Set to False or \"\" to disable\n targeted dropout.\n targeting_rate: `float` proportion of weights to target with targeted\n dropout.\n keep_prob: `float` keep probability for targeted dropout.\n\n Returns:\n Pre-logit activations.\n \"\"\"\n inputs = block_layer(\n inputs=inputs,\n filters=filters[1],\n block_fn=block_fn,\n blocks=layer_blocks[0],\n strides=1,\n is_training=is_training,\n name=\"block_layer1\",\n data_format=data_format,\n use_td=use_td,\n targeting_rate=targeting_rate,\n keep_prob=keep_prob)\n inputs = block_layer(\n inputs=inputs,\n filters=filters[2],\n block_fn=block_fn,\n blocks=layer_blocks[1],\n strides=2,\n is_training=is_training,\n name=\"block_layer2\",\n data_format=data_format,\n use_td=use_td,\n targeting_rate=targeting_rate,\n keep_prob=keep_prob)\n inputs = block_layer(\n inputs=inputs,\n filters=filters[3],\n block_fn=block_fn,\n blocks=layer_blocks[2],\n strides=2,\n is_training=is_training,\n name=\"block_layer3\",\n data_format=data_format,\n use_td=use_td,\n targeting_rate=targeting_rate,\n keep_prob=keep_prob)\n if not is_cifar:\n inputs = block_layer(\n inputs=inputs,\n filters=filters[4],\n block_fn=block_fn,\n blocks=layer_blocks[3],\n strides=2,\n is_training=is_training,\n name=\"block_layer4\",\n data_format=data_format,\n use_td=use_td,\n targeting_rate=targeting_rate,\n keep_prob=keep_prob)\n\n return inputs\n\n\[email protected]_model\nclass Resnet(t2t_model.T2TModel):\n \"\"\"Residual Network.\"\"\"\n\n def body(self, features):\n hp = self.hparams\n block_fns = {\n \"residual\": residual_block,\n \"bottleneck\": bottleneck_block,\n }\n assert hp.block_fn in block_fns\n is_training = hp.mode == tf.estimator.ModeKeys.TRAIN\n if is_training:\n targets = features[\"targets_raw\"]\n\n inputs = features[\"inputs\"]\n\n data_format = \"channels_last\"\n if hp.use_nchw:\n # Convert from channels_last (NHWC) to channels_first (NCHW). 
This\n # provides a large performance boost on GPU.\n inputs = tf.transpose(inputs, [0, 3, 1, 2])\n data_format = \"channels_first\"\n\n inputs = conv2d_fixed_padding(\n inputs=inputs,\n filters=hp.filter_sizes[0],\n kernel_size=7,\n strides=1 if hp.is_cifar else 2,\n data_format=data_format)\n inputs = tf.identity(inputs, \"initial_conv\")\n inputs = batch_norm_relu(inputs, is_training, data_format=data_format)\n\n if not hp.is_cifar:\n inputs = layers().MaxPooling2D(\n pool_size=3,\n strides=2,\n padding=\"SAME\",\n data_format=data_format)(inputs)\n inputs = tf.identity(inputs, \"initial_max_pool\")\n\n out = resnet_v2(\n inputs,\n block_fns[hp.block_fn],\n hp.layer_sizes,\n hp.filter_sizes,\n data_format,\n is_training=is_training,\n is_cifar=hp.is_cifar,\n use_td=hp.use_td,\n targeting_rate=hp.targeting_rate,\n keep_prob=hp.keep_prob)\n\n if hp.use_nchw:\n out = tf.transpose(out, [0, 2, 3, 1])\n\n if not hp.is_cifar:\n return out\n\n out = tf.reduce_mean(out, [1, 2])\n num_classes = self._problem_hparams.vocab_size[\"targets\"]\n if hasattr(self._hparams, \"vocab_divisor\"):\n num_classes += (-num_classes) % self._hparams.vocab_divisor\n logits = layers().Dense(num_classes, name=\"logits\")(out)\n\n losses = {\"training\": 0.0}\n if is_training:\n loss = tf.losses.sparse_softmax_cross_entropy(\n labels=tf.squeeze(targets), logits=logits)\n loss = tf.reduce_mean(loss)\n\n losses = {\"training\": loss}\n\n logits = tf.reshape(logits, [-1, 1, 1, 1, logits.shape[1]])\n\n return logits, losses\n\n def infer(self,\n features=None,\n decode_length=50,\n beam_size=1,\n top_beams=1,\n alpha=0.0,\n use_tpu=False):\n \"\"\"Predict.\"\"\"\n del decode_length, beam_size, top_beams, alpha, use_tpu\n assert features is not None\n logits, _ = self(features) # pylint: disable=not-callable\n assert len(logits.get_shape()) == 5\n logits = tf.squeeze(logits, [1, 2, 3])\n log_probs = common_layers.log_prob_from_logits(logits)\n predictions, scores = common_layers.argmax_with_score(log_probs)\n return {\n \"outputs\": predictions,\n \"scores\": scores,\n }\n\n\ndef resnet_base():\n \"\"\"Set of hyperparameters.\"\"\"\n # For imagenet on TPU:\n # Set train_steps=120000\n # Set eval_steps=48\n\n # Base\n hparams = common_hparams.basic_params1()\n\n # Model-specific parameters\n hparams.add_hparam(\"layer_sizes\", [3, 4, 6, 3])\n hparams.add_hparam(\"filter_sizes\", [64, 64, 128, 256, 512])\n hparams.add_hparam(\"block_fn\", \"bottleneck\")\n hparams.add_hparam(\"use_nchw\", True)\n hparams.add_hparam(\"is_cifar\", False)\n\n # Targeted dropout\n hparams.add_hparam(\"use_td\", False)\n hparams.add_hparam(\"targeting_rate\", None)\n hparams.add_hparam(\"keep_prob\", None)\n\n # Variable init\n hparams.initializer = \"normal_unit_scaling\"\n hparams.initializer_gain = 2.\n\n # Optimization\n hparams.optimizer = \"Momentum\"\n hparams.optimizer_momentum_momentum = 0.9\n hparams.optimizer_momentum_nesterov = True\n hparams.weight_decay = 1e-4\n hparams.clip_grad_norm = 0.0\n # (base_lr=0.1) * (batch_size=128*8 (on TPU, or 8 GPUs)=1024) / (256.)\n hparams.learning_rate = 0.4\n hparams.learning_rate_decay_scheme = \"cosine\"\n # For image_imagenet224, 120k training steps, which effectively makes this a\n # cosine decay (i.e. 
no cycles).\n hparams.learning_rate_cosine_cycle_steps = 120000\n\n hparams.batch_size = 128\n return hparams\n\n\n@registry.register_hparams\ndef resnet_50():\n hp = resnet_base()\n return hp\n\n\n@registry.register_hparams\ndef resnet_18():\n hp = resnet_base()\n hp.block_fn = \"residual\"\n hp.layer_sizes = [2, 2, 2, 2]\n return hp\n\n\n@registry.register_hparams\ndef resnet_imagenet_34():\n \"\"\"Set of hyperparameters.\"\"\"\n hp = resnet_base()\n hp.block_fn = \"residual\"\n hp.layer_sizes = [2, 4, 8, 2]\n\n return hp\n\n\n@registry.register_hparams\ndef resnet_imagenet_34_td_weight_05_05():\n \"\"\"Set of hyperparameters.\"\"\"\n hp = resnet_imagenet_34()\n hp.use_td = \"weight\"\n hp.targeting_rate = 0.5\n hp.keep_prob = 0.5\n\n return hp\n\n\n@registry.register_hparams\ndef resnet_imagenet_34_td_unit_05_05():\n \"\"\"Set of hyperparameters.\"\"\"\n hp = resnet_imagenet_34()\n hp.use_td = \"unit\"\n hp.targeting_rate = 0.5\n hp.keep_prob = 0.5\n\n return hp\n\n\n@registry.register_hparams\ndef resnet_imagenet_34_td_unit_no_drop():\n \"\"\"Set of hyperparameters.\"\"\"\n hp = resnet_imagenet_34()\n hp.use_td = \"unit\"\n hp.targeting_rate = 0.0\n hp.keep_prob = 1.0\n\n return hp\n\n\n@registry.register_hparams\ndef resnet_imagenet_102():\n hp = resnet_imagenet_34()\n hp.layer_sizes = [3, 8, 36, 3]\n return hp\n\n\n@registry.register_hparams\ndef resnet_cifar_15():\n \"\"\"Set of hyperparameters.\"\"\"\n hp = resnet_base()\n hp.block_fn = \"residual\"\n hp.is_cifar = True\n hp.layer_sizes = [2, 2, 2]\n hp.filter_sizes = [16, 32, 64, 128]\n\n return hp\n\n\n@registry.register_hparams\ndef resnet_cifar_32():\n hp = resnet_cifar_15()\n hp.layer_sizes = [5, 5, 5]\n return hp\n\n\n@registry.register_hparams\ndef resnet_cifar_32_td_weight_05_05():\n hp = resnet_cifar_32()\n hp.use_td = \"weight\"\n hp.targeting_rate = 0.5\n hp.keep_prob = 0.5\n return hp\n\n\n@registry.register_hparams\ndef resnet_cifar_32_td_unit_05_05():\n hp = resnet_cifar_32()\n hp.use_td = \"unit\"\n hp.targeting_rate = 0.5\n hp.keep_prob = 0.5\n return hp\n\n\n@registry.register_hparams\ndef resnet_cifar_32_td_unit_no_drop():\n hp = resnet_cifar_32()\n hp.use_td = \"unit\"\n hp.targeting_rate = 0.0\n hp.keep_prob = 1.0\n return hp\n\n\n@registry.register_hparams\ndef resnet_34():\n hp = resnet_base()\n hp.block_fn = \"residual\"\n return hp\n\n\n@registry.register_hparams\ndef resnet_101():\n hp = resnet_base()\n hp.layer_sizes = [3, 4, 23, 3]\n return hp\n\n\n@registry.register_hparams\ndef resnet_152():\n hp = resnet_base()\n hp.layer_sizes = [3, 8, 36, 3]\n return hp\n\n\n@registry.register_hparams\ndef resnet_200():\n hp = resnet_base()\n hp.layer_sizes = [3, 24, 36, 3]\n return hp\n\n\n# Pruning parameters\n@registry.register_pruning_params\ndef resnet_weight():\n hp = HParams()\n hp.add_hparam(\"strategy\", \"weight\")\n hp.add_hparam(\"black_list\", [\"logits\", \"bias\"])\n hp.add_hparam(\"white_list\", [\"td_conv\"])\n hp.add_hparam(\"sparsities\", [0.1 * i for i in range(10)])\n return hp\n\n\n@registry.register_pruning_params\ndef resnet_unit():\n hp = resnet_weight()\n hp.strategy = \"unit\"\n return hp\n\n\n# Adversarial attack parameters\n@registry.register_attack_params\ndef resnet_fgsm():\n aparams = HParams()\n aparams.attack = \"fgsm\"\n aparams.epsilon_name = \"eps\"\n aparams.attack_epsilons = [i * 0.8 for i in range(20)]\n aparams.add_hparam(\"clip_min\", 0.0)\n aparams.add_hparam(\"clip_max\", 255.0)\n return aparams\n\n\n@registry.register_attack_params\ndef resnet_madry():\n aparams = resnet_fgsm()\n aparams.attack = \"madry\"\n 
aparams.add_hparam(\"nb_iter\", 40)\n aparams.add_hparam(\"eps_iter\", 1.0)\n return aparams\n\n\[email protected]_attack_params\ndef resnet_random():\n aparams = resnet_fgsm()\n aparams.attack = \"random\"\n aparams.epsilon_name = \"eps\"\n aparams.add_hparam(\"num_samples\", 10)\n aparams.add_hparam(\"num_batches\", 100)\n return aparams\n" ]
[ [ "tensorflow.pad", "tensorflow.summary.histogram", "tensorflow.variance_scaling_initializer", "tensorflow.reshape", "tensorflow.zeros_initializer", "tensorflow.to_float", "tensorflow.reduce_mean", "tensorflow.ones_initializer", "tensorflow.squeeze", "tensorflow.identity", "tensorflow.nn.relu", "tensorflow.transpose" ] ]
IvanaEscobar/oceanAcouPy
[ "9ea11792dfe1cc0ba7004d23521fd222b4e949eb" ]
[ "oceanAcouPy/soundSpeed.py" ]
[ "# EE348N: Ocean Acoustics \n# Sound speed profiles used in class\n\nfrom numpy import exp, sin\n\n### FUNCTIONS ###\ndef cSTD (t,s,z, lat=0, eqn='mackenzie81'):\n# Inputs:\n# t : tempurature [degC]\n# s : salinity [ppt]\n# z : column depth [m]\n# lat : latitude [deg]\n# eqn : mackenzie81, leroy08\n# Returns:\n# c : seawater sound speed [m/s] \n if (eqn=='mackenzie81'):\n return 1448.96 + 4.59*t - 0.05304*t**2 + 2.374e-4*t**3 + \\\n (1.340 - 0.01025*t)*(s - 35) + 0.01630*z + \\\n 1.675e-7*z**2 - 7.139e-13*t*z**3\n elif (eqn=='leroy08'):\n return 1402.5 +5*t - 5.44e-2*t**2 + 2.1e-4*t**3 + 1.33*s - \\\n 1.23e-2*t*s + 8.7e-5*t**2*s + 1.56e-2*z + \\\n 2.55e-7*z**2 - 7.3e-12*z**3 + 1.2e-6*z*(lat-45) - \\\n 9.5e-13*t*z**3 + 3e-7*t**2*z + 1.43e-5*s*z\n\n#-------------------------------------------------------------------------------\ndef cMunk (z):\n# Inputs:\n# z : depth [m]\n# Returns:\n# Munk sound speed profile [m/s]\n c0 = 1500.\n eps = 0.00737\n zt=2*(z-1300)/1300\n return c0*( 1 + eps*(zt - 1 + exp(-zt)) )\n\n#-------------------------------------------------------------------------------\ndef cIsoVel (z):\n# Inputs:\n# z : depth [m]\n# Returns:\n# Isovelocity downward refracting sound speed profile [m/s]\n c0 = 1520.\n return c0 - 0.5*z\n\n#-------------------------------------------------------------------------------\ndef cDD (z): \n# Inputs:\n# z : depth [m]\n# Returns:\n# double-ducted sound speed profile: \n return 10*sin(f1(z)*(5000.-z)) +\\\n 70*sin(f2(z)*(5000.-z)) +\\\n 0.014*z + 1480.\n\n#-------------------------------------------------------------------------------\nf1 = lambda z : -3e-18*(5000-z)**4\nf2 = lambda z : 3e-19*(5000-z)**4\n" ]
[ [ "numpy.exp" ] ]
omyllymaki/shifting-peaks
[ "0dd67662daac29ce1c2db257553c3355202a46f1" ]
[ "solvers/tests/test_grid_solver.py" ]
[ "import itertools\n\nimport numpy as np\n\nfrom solvers.grid_solver import GridSolver\nfrom solvers.math import interpolate_signal, ls_fit\nfrom solvers.tests.base_test_case import BaseTestCase\nfrom solvers.tests.correction_models import linear_correction\n\n\nclass TestGridSolver(BaseTestCase):\n offset_candidates = np.arange(-3, 3, 0.1)\n slope_candidates = np.arange(-0.03, 0.03, 0.001)\n candidates = np.array(list(itertools.product(slope_candidates, offset_candidates)))\n\n def setUp(self):\n super().setUp()\n self.solver = GridSolver(x=self.x,\n pure_components=self.pure_components,\n candidates=self.candidates,\n correction_model=linear_correction,\n fit_function=ls_fit)\n\n def test_no_x_axis_errors_should_pass(self) -> None:\n self.run_test(self.mixture_signal)\n\n def test_offset_error_should_pass(self) -> None:\n x_distorted = self.x + 2\n signal = interpolate_signal(self.mixture_signal, self.x, x_distorted, 0, 0)\n self.run_test(signal)\n\n def test_slope_error_should_pass(self) -> None:\n x_distorted = 1.01 * self.x\n signal = interpolate_signal(self.mixture_signal, self.x, x_distorted, 0, 0)\n self.run_test(signal)\n\n def test_slope_and_offset_error_should_pass(self) -> None:\n x_distorted = 1.01 * self.x - 2\n signal = interpolate_signal(self.mixture_signal, self.x, x_distorted, 0, 0)\n self.run_test(signal)\n" ]
[ [ "numpy.arange" ] ]
imaginary-person/pipeline_experiments
[ "32d20f1b9a4192e75ed6ba709c9acd2e0cf23e06" ]
[ "BERT/bert_local_pipeline.py" ]
[ "import argparse\nimport math\nimport sys\nimport time\nimport os\nimport socket\nimport statistics\n\nimport torch\nimport torch.nn as nn\nfrom torch.utils.data import DataLoader\n\nfrom model import MLMTask, MLMTask2, MLMTaskEmbedding, MLMTaskEncoder, MLMTaskHead\nfrom cuda_local_pipeline import LocalSequential, sync_all_device\n\n\nIS_SLURM = os.getenv('SLURM_LOCALID')\nUSE_TQDM = os.getenv('USE_TQDM', True if not IS_SLURM else False)\n\n\ndef collate_batch(batch_data, args, mask_id, cls_id):\n batch_data = torch.tensor(batch_data).long().view(args.batch_size, -1).t().contiguous()\n # Generate masks with args.mask_frac\n data_len = batch_data.size(0)\n ones_num = int(data_len * args.mask_frac)\n zeros_num = data_len - ones_num\n lm_mask = torch.cat([torch.zeros(zeros_num), torch.ones(ones_num)])\n lm_mask = lm_mask[torch.randperm(data_len)]\n batch_data = torch.cat((torch.tensor([[cls_id] * batch_data.size(1)]).long(), batch_data))\n lm_mask = torch.cat((torch.tensor([0.0]), lm_mask))\n\n targets = torch.stack([batch_data[i] for i in range(lm_mask.size(0)) if lm_mask[i]]).view(-1)\n batch_data = batch_data.masked_fill(lm_mask.bool().unsqueeze(1), mask_id)\n return batch_data, lm_mask, targets\n\n\ndef process_raw_data(raw_data, args):\n _num = raw_data.size(0) // (args.batch_size * args.bptt)\n raw_data = raw_data[:(_num * args.batch_size * args.bptt)]\n return raw_data\n\n\ndef train(model, vocab, train_loss_log, train_data,\n optimizer, criterion, ntokens, epoch, args):\n model.train()\n total_loss = 0.\n start_time = time.time()\n mask_id = vocab.stoi['<MASK>']\n cls_id = vocab.stoi['<cls>']\n train_loss_log.append(0.0)\n dataloader = DataLoader(train_data, batch_size=args.batch_size * args.bptt,\n shuffle=False, collate_fn=lambda b: collate_batch(b, args, mask_id, cls_id))\n\n forward_pyth_elapsed = []\n forward_cuda_elapsed = []\n forward_comm_elapsed = []\n forward_comp_elapsed = []\n backward_pyth_elapsed = []\n backward_cuda_elapsed = []\n\n for batch, (data, lm_mask, targets) in enumerate(dataloader):\n optimizer.zero_grad()\n data = data.to(0)\n targets = targets.to(args.gpus - 1)\n data = data.transpose(0, 1)\n\n fwd_tik = torch.cuda.Event(enable_timing=True)\n fwd_tok = torch.cuda.Event(enable_timing=True)\n\n sync_all_device(args.gpus)\n forward_start_time = time.time()\n\n fwd_tik.record()\n\n output = model(data)\n output = torch.stack([output[i] for i in range(lm_mask.size(0)) if lm_mask[i]])\n loss = criterion(output.view(-1, ntokens), targets)\n total_loss += loss.item()\n\n fwd_tok.record()\n fwd_tok.synchronize()\n fwd_delay = fwd_tik.elapsed_time(fwd_tok)\n\n forward_cuda_elapsed.append(fwd_delay)\n forward_comp_elapsed.append(model.get_fwd_compute_delay())\n forward_comm_elapsed.append(model.get_fwd_communication_delay()) # forward_comm_elapsed.append(fwd_delay - model.get_fwd_compute_delay())\n\n sync_all_device(args.gpus)\n forward_pyth_elapsed.append((time.time() - forward_start_time) * 1000)\n\n bwd_tik = torch.cuda.Event(enable_timing=True)\n bwd_tok = torch.cuda.Event(enable_timing=True)\n\n backward_start_time = time.time()\n\n bwd_tik.record()\n\n loss.backward()\n\n bwd_tok.record()\n bwd_tok.synchronize()\n bwd_delay = bwd_tik.elapsed_time(bwd_tok)\n\n backward_cuda_elapsed.append(bwd_delay)\n\n sync_all_device(args.gpus)\n backward_pyth_elapsed.append((time.time() - backward_start_time) * 1000)\n\n optimizer.step()\n\n if (batch + 1) % args.log_interval == 0:\n cur_loss = total_loss / args.log_interval\n elapsed = time.time() - start_time\n 
train_loss_log[-1] = cur_loss\n\n num_of_batches = len(train_data) // (args.bptt * args.batch_size)\n\n last = 10 # len(forward_comm_elapsed) // 2\n\n f_comm_last = forward_comm_elapsed[-last:]\n f_comm_last_mean = statistics.mean(f_comm_last)\n f_comm_last_std = statistics.stdev(f_comm_last) if len(f_comm_last) > 1 else 0.0\n\n f_comp_last = forward_comp_elapsed[-last:]\n f_comp_last_mean = statistics.mean(f_comp_last)\n f_comp_last_std = statistics.stdev(f_comp_last) if len(f_comp_last) > 1 else 0.0\n\n f_last = forward_cuda_elapsed[-last:]\n f_last_mean = statistics.mean(f_last)\n f_last_std = statistics.stdev(f_last) if len(f_last) > 1 else 0.0\n\n b_last = backward_cuda_elapsed[-last:]\n b_last_mean = statistics.mean(b_last)\n b_last_std = statistics.stdev(b_last) if len(b_last) > 1 else 0.0\n\n print(\n f\"EPOCH:{epoch:2}|\"\n f\"BATCH:{(batch + 1):3}/{num_of_batches:3}|\"\n f\"LOSS:{cur_loss:5.2f}|\"\n \"\\t\"\n f\"TIME:{(elapsed * 1000 / args.log_interval):10.2f} = {forward_pyth_elapsed[-1]:10.2f} + {backward_pyth_elapsed[-1]:10.2f}|\"\n \"\\t\"\n f\"FORWARD:{forward_cuda_elapsed[-1]:10.2f}({f_last_mean:10.2f} ±{f_last_std:8.2f})=({f_comp_last_mean:10.2f} ±{f_comp_last_std:8.2f})+({f_comm_last_mean:10.2f} ±{f_comm_last_std:8.2f}) |\"\n \"\\t\"\n f\"BACKWARD:{backward_cuda_elapsed[-1]:10.2f}({b_last_mean:10.2f} ±{b_last_std:8.2f})|\"\n )\n\n total_loss = 0\n start_time = time.time()\n\n\ndef run_main(args):\n torch.manual_seed(args.seed)\n import torchtext\n if args.dataset == 'WikiText103':\n from torchtext.experimental.datasets import WikiText103 as WLMDataset\n elif args.dataset == 'WikiText2':\n from torchtext.experimental.datasets import WikiText2 as WLMDataset\n elif args.dataset == 'WMTNewsCrawl':\n from torchtext.experimental.datasets import WMTNewsCrawl as WLMDataset\n elif args.dataset == 'EnWik9':\n from torchtext.datasets import EnWik9\n elif args.dataset == 'BookCorpus':\n from data import BookCorpus\n else:\n print(\"dataset for MLM task is not supported\")\n\n try:\n vocab = torch.load(args.save_vocab)\n except:\n print(f\"WLMDataset = {WLMDataset}\")\n train_dataset, valid_dataset, test_dataset = WLMDataset()\n old_vocab = train_dataset.vocab\n print(f\"len(old_vocab) = {len(old_vocab)}\")\n vocab = torchtext.vocab.Vocab(counter=old_vocab.freqs,\n specials=['<unk>', '<pad>', '<MASK>'])\n with open(args.save_vocab, 'wb') as f:\n torch.save(vocab, f)\n\n if args.dataset == 'WikiText103' or args.dataset == 'WikiText2':\n train_dataset, valid_dataset, test_dataset = WLMDataset(vocab=vocab)\n train_dataset.data = torch.cat(tuple(filter(lambda t: t.numel() > 0, train_dataset)))\n valid_dataset.data = torch.cat(tuple(filter(lambda t: t.numel() > 0, valid_dataset)))\n test_dataset.data = torch.cat(tuple(filter(lambda t: t.numel() > 0, test_dataset)))\n elif args.dataset == 'WMTNewsCrawl':\n from torchtext.experimental.datasets import WikiText2\n test_dataset, valid_dataset = WikiText2(vocab=vocab, split=('test', 'valid'))\n valid_dataset.data = torch.cat(tuple(filter(lambda t: t.numel() > 0, valid_dataset)))\n test_dataset.data = torch.cat(tuple(filter(lambda t: t.numel() > 0, test_dataset)))\n train_dataset = WLMDataset(vocab=vocab, split='train')\n train_dataset.data = torch.cat(tuple(filter(lambda t: t.numel() > 0, train_dataset)))\n elif args.dataset == 'EnWik9':\n enwik9 = EnWik9()\n idx1, idx2 = int(len(enwik9) * 0.8), int(len(enwik9) * 0.9)\n train_data = torch.tensor([vocab.stoi[_id]\n for _id in enwik9[0:idx1]]).long()\n val_data = torch.tensor([vocab.stoi[_id]\n 
for _id in enwik9[idx1:idx2]]).long()\n test_data = torch.tensor([vocab.stoi[_id]\n for _id in enwik9[idx2:]]).long()\n from torchtext.experimental.datasets import LanguageModelingDataset\n train_dataset = LanguageModelingDataset(train_data, vocab, lambda x: x)\n valid_dataset = LanguageModelingDataset(val_data, vocab, lambda x: x)\n test_dataset = LanguageModelingDataset(test_data, vocab, lambda x: x)\n elif args.dataset == 'BookCorpus':\n train_dataset, valid_dataset, test_dataset = BookCorpus(vocab)\n\n train_data = process_raw_data(train_dataset.data, args)\n val_data = process_raw_data(valid_dataset.data, args)\n test_data = process_raw_data(test_dataset.data, args)\n\n ntokens = len(train_dataset.get_vocab())\n print(f\"Vocabulary size = {ntokens}\")\n\n if args.gpus == 1:\n model = LocalSequential(\n nn.Sequential(\n MLMTask(ntokens, args.emsize, args.nhead, args.nhid, args.nlayers, args.dropout).to(0)\n )\n )\n elif args.gpus == 2:\n assert(args.nlayers % 2 == 0)\n model = LocalSequential(\n nn.Sequential(\n MLMTaskEmbedding(ntokens, args.emsize).to(0),\n MLMTaskEncoder(args.emsize, args.nhead, args.nhid, args.nlayers // 2, args.dropout).to(0),\n ),\n nn.Sequential(\n MLMTaskEncoder(args.emsize, args.nhead, args.nhid, args.nlayers // 2, args.dropout).to(1),\n MLMTaskHead(ntokens, args.emsize).to(1),\n ),\n )\n else:\n assert(args.nlayers % (args.gpus - 2) == 0)\n model = LocalSequential(\n MLMTaskEmbedding(ntokens, args.emsize).to(0),\n *(MLMTaskEncoder(args.emsize, args.nhead, args.nhid, args.nlayers // (args.gpus - 2), args.dropout).to(i) for i in range(1, args.gpus - 1)),\n MLMTaskHead(ntokens, args.emsize).to(args.gpus - 1),\n )\n\n params = sum(p.numel() for p in model.parameters() if p.requires_grad)\n print(f'Total parameters = {params // 10**6}M')\n\n criterion = nn.CrossEntropyLoss()\n optimizer = torch.optim.SGD(model.parameters(), lr=args.lr)\n best_val_loss = None\n train_loss_log, val_loss_log = [], []\n\n for epoch in range(1, args.epochs + 1):\n epoch_start_time = time.time()\n train(model, train_dataset.vocab, train_loss_log, train_data,\n optimizer, criterion, ntokens, epoch, args)\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description='Pipeline experiments')\n parser.add_argument('--emsize', type=int, default=768,\n help='size of word embeddings')\n parser.add_argument('--nhid', type=int, default=3072,\n help='number of hidden units per layer')\n parser.add_argument('--nlayers', type=int, default=12,\n help='number of layers')\n parser.add_argument('--nhead', type=int, default=12,\n help='the number of heads in the encoder/decoder of the transformer model')\n parser.add_argument('--lr', type=float, default=0.1,\n help='initial learning rate')\n parser.add_argument('--clip', type=float, default=0.1,\n help='gradient clipping')\n parser.add_argument('--epochs', type=int, default=8,\n help='upper epoch limit')\n parser.add_argument('--batch_size', type=int, default=32, metavar='N',\n help='batch size')\n parser.add_argument('--bptt', type=int, default=128,\n help='sequence length')\n parser.add_argument('--dropout', type=float, default=0.2,\n help='dropout applied to layers (0 = no dropout)')\n parser.add_argument('--seed', type=int, default=5431916812,\n help='random seed')\n parser.add_argument('--log-interval', type=int, default=10, metavar='N',\n help='report interval')\n parser.add_argument('--save-vocab', type=str, default='torchtext_bert_vocab.pt',\n help='path to save the vocab')\n parser.add_argument('--mask_frac', type=float, 
default=0.15,\n help='the fraction of masked tokens')\n parser.add_argument('--dataset', type=str, default='WikiText2',\n help='dataset used for MLM task')\n parser.add_argument('--gpus', type=int, default=8,\n help='number of GPUs per worker node to use')\n\n args = parser.parse_args()\n run_main(args)\n" ]
[ [ "torch.ones", "torch.load", "torch.manual_seed", "torch.save", "torch.tensor", "torch.nn.CrossEntropyLoss", "torch.cuda.Event", "torch.randperm", "torch.zeros" ] ]
dchenam/AnimeGAN
[ "15707a99dde000a6d7f283f4f82d5176b8313e0a" ]
[ "logger.py" ]
[ "# Code referenced from https://gist.github.com/gyglim/1f8dfb1b5c82627ae3efcfbbadb9f514\nimport tensorflow as tf\nimport numpy as np\nimport scipy.misc\nimport logging\n\ntry:\n from StringIO import StringIO # Python 2.7\nexcept ImportError:\n from io import BytesIO # Python 3.5+\n\n\nclass Logger(object):\n\n def __init__(self, config):\n \"\"\"Create a summary writer logging to log_dir.\"\"\"\n self.config = config\n self.writer = tf.summary.FileWriter(config.summary_dir)\n\n def scalar_summary(self, tag, value, step):\n \"\"\"Log a scalar variable.\"\"\"\n summary = tf.Summary(value=[tf.Summary.Value(tag=tag, simple_value=value)])\n self.writer.add_summary(summary, step)\n\n def image_summary(self, tag, images, step):\n \"\"\"Log a list of images.\"\"\"\n\n img_summaries = []\n for i, img in enumerate(images):\n # Write the image to a string\n try:\n s = StringIO()\n except:\n s = BytesIO()\n scipy.misc.toimage(img).save(s, format=\"png\")\n\n # Create an Image object\n img_sum = tf.Summary.Image(encoded_image_string=s.getvalue(),\n height=img.shape[0],\n width=img.shape[1])\n # Create a Summary value\n img_summaries.append(tf.Summary.Value(tag='%s/%d' % (tag, i), image=img_sum))\n\n # Create and write Summary\n summary = tf.Summary(value=img_summaries)\n self.writer.add_summary(summary, step)\n\n def histo_summary(self, tag, values, step, bins=1000):\n \"\"\"Log a histogram of the tensor of values.\"\"\"\n\n # Create a histogram using numpy\n counts, bin_edges = np.histogram(values, bins=bins)\n\n # Fill the fields of the histogram proto\n hist = tf.HistogramProto()\n hist.min = float(np.min(values))\n hist.max = float(np.max(values))\n hist.num = int(np.prod(values.shape))\n hist.sum = float(np.sum(values))\n hist.sum_squares = float(np.sum(values ** 2))\n\n # Drop the start of the first bin\n bin_edges = bin_edges[1:]\n\n # Add bin edges and counts\n for edge in bin_edges:\n hist.bucket_limit.append(edge)\n for c in counts:\n hist.bucket.append(c)\n\n # Create and write Summary\n summary = tf.Summary(value=[tf.Summary.Value(tag=tag, histo=hist)])\n self.writer.add_summary(summary, step)\n self.writer.flush()\n\n def set_logger(self, log_path):\n \"\"\"Sets the logger to log info in terminal and file `log_path`.\n In general, it is useful to have a logger so that every output to the terminal is saved\n in a permanent file. Here we save it to `model_dir/train.log`.\n Example:\n ```\n logging.info(\"Starting training...\")\n ```\n Args:\n log_path: (string) where to log\n \"\"\"\n logger = logging.getLogger()\n logger.setLevel(logging.INFO)\n\n if not logger.handlers:\n # Logging to a file\n file_handler = logging.FileHandler(log_path)\n file_handler.setFormatter(logging.Formatter('%(asctime)s:%(levelname)s: %(message)s'))\n logger.addHandler(file_handler)\n\n # Logging to console\n stream_handler = logging.StreamHandler()\n stream_handler.setFormatter(logging.Formatter('%(message)s'))\n logger.addHandler(stream_handler)\n" ]
[ [ "numpy.sum", "tensorflow.Summary.Value", "numpy.histogram", "numpy.max", "tensorflow.HistogramProto", "numpy.min", "numpy.prod", "tensorflow.summary.FileWriter", "tensorflow.Summary" ] ]
Hardly-Human/Instance-Segmentation-of-Images
[ "45b048a2eb7fa31d5007f3fcd70b03fcb57abad4" ]
[ "app.py" ]
[ "import streamlit as st\nfrom PIL import Image\nimport matplotlib.pyplot as plt\nfrom gluoncv import model_zoo, data, utils\n\n\[email protected]\ndef load_image(image_file):\n img = Image.open(image_file)\n return img\n\[email protected](allow_output_mutation=True)\ndef load_model(model_name):\n\tmodel = model_zoo.get_model(model_name, pretrained = True)\n\treturn model\n\ndef plot_image(model, x, orig_img):\n\tst.warning(\"Inferencing from Model..\")\n\tids, scores, bboxes, masks = [xx[0].asnumpy() for xx in model(x)]\n\n\twidth, height = orig_img.shape[1], orig_img.shape[0]\n\tmasks, _ = utils.viz.expand_mask(masks, bboxes, (width, height), scores)\n\torig_img = utils.viz.plot_mask(orig_img, masks)\n\n\tfig = plt.figure(figsize=(10, 10))\n\tax = fig.add_subplot(1, 1, 1)\n\tax = utils.viz.plot_bbox(orig_img, bboxes, scores, ids,\n\t class_names=model.classes, ax=ax)\n\tst.set_option('deprecation.showPyplotGlobalUse', False)\n\tst.success(\"Instance Segmentation Successful!! Plotting Image..\")\n\tst.pyplot(plt.show())\n\ndef footer():\n\tst.markdown(\"\"\"\n\t* * *\n\tBuilt with ❤️ by [Rehan uddin](https://hardly-human.github.io/)\n\t\"\"\")\n\tst.success(\"Rehan uddin (Hardly-Human)👋😉\")\n\n\n################################################################################\n# main()\n################################################################################\n\ndef main():\n \n\tst.title(\"Instance Segmentation App\")\n\tst.text(\"Built with gluoncv and Streamlit\")\n\tst.markdown(\"### [Instance Segmentation](https://missinglink.ai/guides/neural-network-concepts/instance-segmentation-deep-learning/)\\\n ` `[Mask RCNN Networks](https://alittlepain833.medium.com/simple-understanding-of-mask-rcnn-134b5b330e95) \\\n\t\t [[Paper]](https://arxiv.org/abs/1703.06870)\\\n ` `[[View Source]](https://github.com/Hardly-Human/Instance-Segmentation-of-Images)\")\n\n\timage_file = st.file_uploader(\"Upload Image\", type = ['jpg','png','jpeg'])\n\n\tif image_file is None:\n\t\tst.warning(\"Upload Image and Run Model\")\n\n\tif image_file is not None:\n\t\timage1 = Image.open(image_file)\n\t\trgb_im = image1.convert('RGB') \n\t\timage = rgb_im.save(\"saved_image.jpg\")\n\t\timage_path = \"saved_image.jpg\"\n\t\tst.image(image1)\n\t\t\n\tif st.button(\"Run Model\"):\n\t\tst.warning(\"Loading Model..🤞\")\n\t\tmodel = load_model('mask_rcnn_resnet50_v1b_coco')\n\t\tst.success(\"Loaded Model Succesfully!!🤩👍\")\n\n\t\tx, orig_img = data.transforms.presets.rcnn.load_test(image_path)\n\t\tplot_image(model,x,orig_img)\n\n\n\nif __name__== \"__main__\":\n\tmain()\n\tfooter()" ]
[ [ "matplotlib.pyplot.figure", "matplotlib.pyplot.show" ] ]
luh0907/nn_breaking_detection
[ "6e810a5296bea3c6ef975b4e62caa2d94e992b81" ]
[ "density_estimation.py" ]
[ "# Copyright (C) 2017, Nicholas Carlini <[email protected]>\n# All rights reserved.\n\nimport sys\nimport time\nimport tensorflow as tf\nimport numpy as np\nimport random\n\nfrom setup_cifar import CIFARModel, CIFAR\nfrom setup_mnist import MNISTModel, MNIST\n\nsys.path.append(\"../..\")\nfrom nn_robust_attacks.l2_attack import CarliniL2\nfrom fast_gradient_sign import FGS\n\nimport keras\nfrom keras import backend as K\n\n#import matplotlib\n#import matplotlib.pyplot as plt\n#import matplotlib.patches as mpatches\n#from matplotlib.backends.backend_pdf import PdfPages\nfrom scipy.stats import gaussian_kde\n\nBINARY_SEARCH_STEPS = 9 # number of times to adjust the constant with binary search\nMAX_ITERATIONS = 10000 # number of iterations to perform gradient descent\nABORT_EARLY = True # if we stop improving, abort gradient descent early\nLEARNING_RATE = 1e-2 # larger values converge faster to less accurate results\nTARGETED = True # should we target one specific class? or just be wrong?\nCONFIDENCE = 0 # how strong the adversarial example should be\nINITIAL_CONST = 1e-3 # the initial constant c to pick as a first guess\n\nclass CarliniL2New:\n def __init__(self, sess, model, batch_size=1, confidence = CONFIDENCE,\n targeted = TARGETED, learning_rate = LEARNING_RATE,\n binary_search_steps = BINARY_SEARCH_STEPS, max_iterations = MAX_ITERATIONS,\n abort_early = ABORT_EARLY, \n initial_const = INITIAL_CONST, extra_loss=None):\n \"\"\"\n The L_2 optimized attack. \n\n This attack is the most efficient and should be used as the primary \n attack to evaluate potential defenses.\n\n Returns adversarial examples for the supplied model.\n\n confidence: Confidence of adversarial examples: higher produces examples\n that are farther away, but more strongly classified as adversarial.\n batch_size: Number of attacks to run simultaneously.\n targeted: True if we should perform a targetted attack, False otherwise.\n learning_rate: The learning rate for the attack algorithm. Smaller values\n produce better results but are slower to converge.\n binary_search_steps: The number of times we perform binary search to\n find the optimal tradeoff-constant between distance and confidence. \n max_iterations: The maximum number of iterations. Larger values are more\n accurate; setting too small will require a large learning rate and will\n produce poor results.\n abort_early: If true, allows early aborts if gradient descent gets stuck.\n initial_const: The initial tradeoff-constant to use to tune the relative\n importance of distance and confidence. 
If binary_search_steps is large,\n the initial constant is not important.\n \"\"\"\n\n image_size, num_channels, num_labels = model.image_size, model.num_channels, model.num_labels\n self.sess = sess\n self.TARGETED = targeted\n self.LEARNING_RATE = learning_rate\n self.MAX_ITERATIONS = max_iterations\n self.BINARY_SEARCH_STEPS = binary_search_steps\n self.ABORT_EARLY = abort_early\n self.CONFIDENCE = confidence\n self.initial_const = initial_const\n self.batch_size = batch_size\n\n self.repeat = binary_search_steps >= 10\n\n shape = (batch_size,image_size,image_size,num_channels)\n \n # the variable we're going to optimize over\n modifier = tf.Variable(np.zeros(shape,dtype=np.float32))\n\n # these are variables to be more efficient in sending data to tf\n self.origs = tf.Variable(np.zeros(shape), dtype=tf.float32)\n self.timg = tf.Variable(np.zeros(shape), dtype=tf.float32)\n self.tlab = tf.Variable(np.zeros((batch_size,num_labels)), dtype=tf.float32)\n self.const = tf.Variable(np.zeros(batch_size), dtype=tf.float32)\n self.const2 = tf.Variable(np.zeros(batch_size), dtype=tf.float32)\n\n # and here's what we use to assign them\n self.assign_origs = tf.placeholder(tf.float32, shape)\n self.assign_timg = tf.placeholder(tf.float32, shape)\n self.assign_tlab = tf.placeholder(tf.float32, (batch_size,num_labels))\n self.assign_const = tf.placeholder(tf.float32, [batch_size])\n self.assign_const2 = tf.placeholder(tf.float32, [batch_size])\n \n # the resulting image, tanh'd to keep bounded from -0.5 to 0.5\n self.newimg = tf.tanh(modifier + self.timg)/2\n \n # prediction BEFORE-SOFTMAX of the model\n self.output = model.predict(self.newimg)\n \n # distance to the input data\n self.l2dist = tf.reduce_sum(tf.square(self.newimg-tf.tanh(self.origs)/2),[1,2,3])\n \n # compute the probability of the label class versus the maximum other\n self.real = real = tf.reduce_sum((self.tlab)*self.output,1)\n self.other = other = tf.reduce_max((1-self.tlab)*self.output - (self.tlab*10000),1)\n\n if self.TARGETED:\n # if targetted, optimize for making the other class most likely\n loss1 = tf.maximum(0.0, other-real+self.CONFIDENCE)\n else:\n # if untargeted, optimize for making this class least likely.\n loss1 = tf.maximum(0.0, real-other+self.CONFIDENCE)\n\n # sum up the losses\n self.loss2 = tf.reduce_sum(self.l2dist)\n self.loss1 = tf.reduce_sum(self.const*loss1)\n if extra_loss != None:\n self.extra_loss = extra_loss(self.newimg, self.output)\n else:\n self.extra_loss = 0\n self.loss = self.loss1+self.loss2+self.const*tf.reduce_sum(self.extra_loss)\n \n # Setup the adam optimizer and keep track of variables we're creating\n start_vars = set(x.name for x in tf.global_variables())\n optimizer = tf.train.AdamOptimizer(self.LEARNING_RATE)\n self.train = optimizer.minimize(self.loss, var_list=[modifier])\n end_vars = tf.global_variables()\n new_vars = [x for x in end_vars if x.name not in start_vars]\n\n # these are the variables to initialize when we run\n self.setup = []\n self.setup.append(self.origs.assign(self.assign_origs))\n self.setup.append(self.timg.assign(self.assign_timg))\n self.setup.append(self.tlab.assign(self.assign_tlab))\n self.setup.append(self.const.assign(self.assign_const))\n self.setup.append(self.const2.assign(self.assign_const2))\n \n self.init = tf.variables_initializer(var_list=[modifier]+new_vars)\n\n def attack(self, origs, imgs, targets):\n \"\"\"\n Perform the L_2 attack on the given images for the given targets.\n\n If self.targeted is true, then the targets represents the target 
labels.\n If self.targeted is false, then targets are the original class labels.\n \"\"\"\n r = []\n print('go up to',len(imgs))\n for i in range(0,len(imgs),self.batch_size):\n print('tick',i)\n r.extend(self.attack_batch(origs[i:i+self.batch_size], \n imgs[i:i+self.batch_size], \n targets[i:i+self.batch_size]))\n return np.array(r)\n\n def attack_batch(self, origs, imgs, labs):\n \"\"\"\n Run the attack on a batch of images and labels.\n \"\"\"\n def compare(x,y):\n if not isinstance(x, (float, int, np.int64)):\n x = np.copy(x)\n x[y] -= self.CONFIDENCE\n x = np.argmax(x)\n if self.TARGETED:\n return x == y\n else:\n return x != y\n\n batch_size = self.batch_size\n\n # convert to tanh-space\n imgs = np.arctanh(imgs*1.999999)\n origs = np.arctanh(origs*1.999999)\n\n # set the lower and upper bounds accordingly\n lower_bound = np.zeros(batch_size)\n CONST = np.ones(batch_size)*self.initial_const\n upper_bound = np.ones(batch_size)*1e10\n\n CONST2 = np.ones(batch_size)*self.initial_const\n\n # the best l2, score, and image attack\n o_bestl2 = [1e10]*batch_size\n o_bestscore = [-1]*batch_size\n o_bestattack = [np.zeros(imgs[0].shape)]*batch_size\n \n for outer_step in range(self.BINARY_SEARCH_STEPS):\n # completely reset adam's internal state.\n self.sess.run(self.init)\n batch = imgs[:batch_size]\n batchlab = labs[:batch_size]\n \n bestl2 = [1e10]*batch_size\n bestscore = [-1]*batch_size\n\n # The last iteration (if we run many steps) repeat the search once.\n if self.repeat == True and outer_step == self.BINARY_SEARCH_STEPS-1:\n CONST = upper_bound\n\n # set the variables so that we don't have to send them over again\n self.sess.run(self.setup, {self.assign_timg: batch,\n self.assign_origs: origs,\n self.assign_tlab: batchlab,\n self.assign_const: CONST,\n self.assign_const2: CONST2})\n \n print('set new const',CONST)\n prev = 1e20\n for iteration in range(self.MAX_ITERATIONS):\n # perform the attack \n _, l, l2s, scores, nimg, extra = self.sess.run([self.train, self.loss, \n self.l2dist, self.output, \n self.newimg, self.extra_loss])\n #print(np.argmax(scores))\n # print out the losses every 10%\n if iteration%(self.MAX_ITERATIONS//10) == 0:\n print(iteration,*self.sess.run((self.loss,self.loss1,self.loss2,self.extra_loss)))\n\n # check if we should abort search if we're getting nowhere.\n if self.ABORT_EARLY and iteration%(self.MAX_ITERATIONS//10) == 0:\n if l > prev*.9999:\n break\n prev = l\n\n # adjust the best result found so far\n for e,(l2,sc,ii) in enumerate(zip(l2s,scores,nimg)):\n if l2 < bestl2[e] and compare(sc, np.argmax(batchlab[e])) and extra[e] <= 0:\n bestl2[e] = l2\n bestscore[e] = np.argmax(sc)\n #print(l2,o_bestl2[e],np.argmax(sc),np.argmax(batchlab[e]),\n # extra[e])\n if l2 < o_bestl2[e] and compare(sc, np.argmax(batchlab[e])) and extra[e] <= 0:\n #print('set')\n o_bestl2[e] = l2\n o_bestscore[e] = np.argmax(sc)\n o_bestattack[e] = ii\n\n # adjust the constant as needed\n for e in range(batch_size):\n if compare(bestscore[e], np.argmax(batchlab[e])) and bestscore[e] != -1:\n # success, divide const by two\n upper_bound[e] = min(upper_bound[e],CONST[e])\n if upper_bound[e] < 1e9:\n CONST[e] = (lower_bound[e] + upper_bound[e])/2\n else:\n # failure, either multiply by 10 if no solution found yet\n # or do binary search with the known upper bound\n lower_bound[e] = max(lower_bound[e],CONST[e])\n if upper_bound[e] < 1e9:\n CONST[e] = (lower_bound[e] + upper_bound[e])/2\n else:\n CONST[e] *= 10\n\n # return the best solution found\n o_bestl2 = np.array(o_bestl2)\n return 
o_bestattack\n\ndef pop(model):\n '''Removes a layer instance on top of the layer stack.\n This code is thanks to @joelthchao https://github.com/fchollet/keras/issues/2371#issuecomment-211734276\n '''\n if not model.outputs:\n raise Exception('Sequential model cannot be popped: model is empty.')\n else:\n model.layers.pop()\n if not model.layers:\n model.outputs = []\n model.inbound_nodes = []\n model.outbound_nodes = []\n else:\n model.layers[-1].outbound_nodes = []\n model.outputs = [model.layers[-1].output]\n model.built = False\n\n return model\n\nclass DensityEstimate:\n\n def __init__(self, sess, hidden, centers, image_size, num_channels, sigma=20):\n self.sess = sess\n\n centers = hidden.predict(centers).reshape((centers.shape[0],1,-1))\n print(centers.shape)\n self.centers = centers\n\n self.sigma = sigma\n\n self.gaussian_means = tf.constant(centers)\n\n self.X = tf.placeholder(tf.float32, (None, image_size, image_size, num_channels))\n\n self.dist = tf.reduce_sum(tf.square(self.gaussian_means - hidden(self.X)[tf.newaxis,:,:]),axis=2)\n\n self.Y = tf.reduce_mean(tf.exp(-self.dist/self.sigma),axis=0)\n self.hidden = hidden\n\n def make(self, X):\n dist = tf.reduce_sum(tf.square(self.gaussian_means - self.hidden(X)[tf.newaxis,:,:]),axis=2)\n \n return tf.reduce_mean(tf.exp(-dist/self.sigma),axis=0)\n \n \n def slow(self, x):\n x = x.flatten()\n dist = np.sum((self.centers.reshape((self.centers.shape[0],-1))-x)**2,axis=(1))\n dist = np.sort(dist)\n\n print(dist)\n \n #plt.plot(np.cumsum(np.exp(-dist/self.sigma)))\n #plt.show()\n return np.mean(np.exp(-dist/self.sigma))\n\n def predict(self, xs):\n return self.sess.run(self.Y, {self.X: xs})\n\n\ndef estimate_density(model, de, data):\n labels = model.model.predict(data)\n\n res = []\n\n for i in range(10):\n r = []\n this_class = data[np.argmax(labels,axis=1)==i]\n for j in range(0,len(this_class),10):\n probs = de[i].predict(this_class[j:j+10])\n r.extend(probs)\n res.append((r))\n return res\n\ndef estimate_density_full(model, de, data):\n labels = model.model.predict(data)\n\n res = []\n for j in range(0,len(data),1):\n i = np.argmax(labels[j])\n probs = de[i].predict(data[j:j+1])\n res.extend(probs)\n return np.array(res)\n\nclass RobustWrap:\n image_size = 28\n num_channels = 1\n num_labels = 11\n\n def __init__(self, model, de):\n self.model = model\n self.de = de\n\n def predict(self, xs):\n de = self.de.make(xs)\n \n padded = tf.pad(self.model.predict(xs), [[0, 0], [0, 1]], \"CONSTANT\")\n\n maximum = tf.reshape(tf.reduce_max(padded,axis=1),(-1,1))\n\n de = -8-tf.log(de) #TODO\n\n dee = tf.pad(tf.reshape(de, (-1,1)), [[0, 0], [0, self.num_labels-1]], \"CONSTANT\")\n\n padded = padded + 1*maximum*dee\n\n return padded, de\n\ndef extra_loss(de, target_lab):\n def fn(img, out):\n return tf.nn.relu(-tf.log(de[target_lab].make(img))-DECONST)\n return fn\n\ndef compute_optimal_sigma(sess, model, hidden_layer, data):\n sigma = tf.Variable(np.ones(1)*100,dtype=tf.float32)\n de = [DensityEstimate(sess, hidden_layer, data.train_data[np.argmax(data.train_labels,axis=1)==i], model.image_size, model.num_channels, sigma) for i in range(10)]\n #print(de[0].centers)\n #print(estimate_density(model, de, data.test_data))\n xs = []\n for const in np.arange(0,5,.1):\n sess.run(sigma.assign(np.ones(1)*(10**const)))\n r = []\n for labA in range(10):\n print(labA)\n for labB in range(10):\n subset = data.validation_data[np.argmax(data.validation_labels,axis=1)==labB,:,:,:]\n r.append(np.mean(np.log(1e-30+de[labA].predict(subset))))\n r = 
np.array(r).reshape((10,10))\n diag = np.mean(r[np.arange(10),np.arange(10)])\n r[np.arange(10),np.arange(10)] = 0\n rest = np.mean(r)\n value = diag-rest\n xs.append(value)\n print(xs)\n plt.plot(np.arange(0,5,.1),xs)\n plt.xlabel('sigma')\n plt.ylabel('Log liklihood difference')\n \n plt.show()\n \n exit(0)\n \n \n \ndef run_kde(Data, Model, path):\n global DECONST\n sess = K.get_session()\n K.set_learning_phase(False)\n data, model = Data(), Model(path)\n\n model2 = Model(path)\n\n hidden_layer = pop(model2.model) # once to remove dense(10)\n hidden_layer = pop(hidden_layer) # once to remove ReLU\n\n #compute_optimal_sigma(sess, model, hidden_layer, data)\n #MNIST SIGMA: 20\n \n de = [DensityEstimate(sess, hidden_layer, data.train_data[np.argmax(data.train_labels,axis=1)==i], model.image_size, model.num_channels, sigma=20) for i in range(10)]\n de2 = [DensityEstimate(sess, hidden_layer, data.train_data[np.argmax(data.train_labels,axis=1)==i][:100], model.image_size, model.num_channels, sigma=20) for i in range(10)]\n\n p = tf.placeholder(tf.float32, (None, model.image_size, model.image_size, model.num_channels))\n\n #print(np.log(de[0].predict(data.test_data[:10])))\n #print(sess.run(rmodel.predict(p)[1], {p: data.test_data[:10]}))\n #exit(0)\n\n N = 1\n print(model.model.predict(data.train_data[:N]))\n print(hidden_layer.predict(data.train_data[:N]))\n\n for i in range(10):\n print(de[i].predict(data.train_data[:N]))\n \n start_density = estimate_density_full(model, de, data.test_data[M:M+N])+1e-30\n print(\"starting density\", np.log(start_density))\n\n DECONST = -np.log(start_density)\n\n l = np.zeros((N,10))\n #l[np.arange(N),np.random.random_integers(0,9,N)] = 1\n for i in range(N):\n r = np.random.random_integers(0,9)\n while r == np.argmax(data.test_labels[i]):\n r = np.random.random_integers(0,9)\n l[i,r] = 1\n\n attack1 = CarliniL2(sess, model, batch_size=1, max_iterations=3000,\n binary_search_steps=3, initial_const=1.0, learning_rate=1e-1,\n targeted=True)\n attack2 = CarliniL2New(sess, model, batch_size=1, max_iterations=10000,\n binary_search_steps=5, initial_const=1.0, learning_rate=1e-2,\n targeted=True, extra_loss=extra_loss(de2, np.argmax(l)))\n #l = data.test_labels[:N]\n #l = np.zeros((N,10))\n #l[np.arange(N),1] = 1\n print(\"RUN PHASE 1\")\n adv = attack1.attack(data.test_data[M:M+N], l)\n print('mean distortion',np.mean(np.sum((adv-data.test_data[M:M+N])**2,axis=(1,2,3))**.5))\n\n print(\"RUN PHASE 2\")\n adv = attack2.attack(data.test_data[M:M+N], adv, l)\n\n np.save(\"/tmp/q\"+str(M),adv)\n #adv = np.load(\"/tmp/qq.npy\")\n\n print('labels',np.mean(np.argmax(sess.run(model.predict(p), {p: adv}),axis=1)==l))\n\n print('mean distortion',np.mean(np.sum((adv-data.test_data[M:M+N])**2,axis=(1,2,3))**.5))\n \n a = estimate_density_full(model, de, data.test_data[M:M+N])+1e-30\n b = estimate_density_full(model, de, adv)+1e-30\n\n show(adv)\n\n print('de of test', np.mean(np.log(a)))\n print('de of adv', np.mean(np.log(b)))\n\n print('better ratio', np.mean(np.array(a)>np.array(b)))\n exit(0)\n\n #density = gaussian_kde(np.array(np.log(a))-np.array(np.log(b)))\n #density_a = gaussian_kde(np.log(a))\n #density_b = gaussian_kde(np.log(b))\n\n xs = np.linspace(-25,25,200)\n \n fig = plt.figure(figsize=(4,3))\n fig.subplots_adjust(bottom=0.17,left=.15, right=.85)\n \n plt.xlabel('log(KDE(valid))-log(KDE(adversarial))')\n plt.ylabel('Occurrances')\n \n #plt.hist(np.log(a),100)\n #plt.hist(np.log(b),100)\n plt.hist(np.log(a)-np.log(b),100)\n 
#plt.hist(np.array(np.log(a))-np.array(np.log(b)),100)\n #a = plt.plot(xs,density_a(xs), 'r--',color='blue', label='Valid')\n #b = plt.plot(xs,density_b(xs), color='red', label='Adversarial')\n #plt.plot(xs,density(xs))\n \n #plt.legend(handles=[a[0], b[0]])\n \n pp = PdfPages('/tmp/a.pdf')\n plt.savefig(pp, format='pdf')\n pp.close()\n plt.show()\n\ndef show(img):\n remap = \" .*#\"+\"#\"*100\n img = (img.flatten()+.5)*3\n print(\"START\")\n for i in range(28):\n print(\"\".join([remap[int(round(x))] for x in img[i*28:i*28+28]]))\n\n#M = int(sys.argv[1])\nM = 0\nrun_kde(MNIST, MNISTModel, \"models/mnist\")\n#run_kde(CIFAR, CIFARModel, \"models/cifar\")\n" ]
[ [ "numpy.ones", "numpy.sum", "tensorflow.reduce_max", "tensorflow.reshape", "numpy.arctanh", "numpy.copy", "numpy.log", "tensorflow.reduce_sum", "tensorflow.constant", "numpy.linspace", "numpy.mean", "tensorflow.variables_initializer", "numpy.zeros", "tensorflow.tanh", "numpy.argmax", "tensorflow.global_variables", "numpy.arange", "numpy.sort", "tensorflow.placeholder", "tensorflow.train.AdamOptimizer", "numpy.exp", "tensorflow.exp", "numpy.array", "tensorflow.log", "tensorflow.maximum", "numpy.random.random_integers" ] ]
coldfix/probnum
[ "9b93d822c8d6501f9a12a783da84867ea54e6f6c" ]
[ "src/probnum/prob/distributions/dirac.py" ]
[ "\"\"\"\nDirac delta distribution.\n\"\"\"\nimport operator\n\nimport numpy as np\n\nfrom probnum.prob.distributions.distribution import Distribution\n\n\nclass Dirac(Distribution):\n \"\"\"\n The Dirac delta distribution.\n\n This distribution models a point mass and can be useful to represent\n numbers as random variables with Dirac measure. It has the useful\n property that arithmetic operations between a :class:`Dirac` random\n variable and an arbitrary :class:`RandomVariable` acts in the same\n way as the arithmetic operation with a constant.\n\n Note, that a Dirac measure does not admit a probability density\n function but can be viewed as a distribution (generalized function).\n\n Parameters\n ----------\n support : scalar or array-like or LinearOperator\n The support of the dirac delta function.\n\n See Also\n --------\n Distribution : Class representing general probability distribution.\n\n Examples\n --------\n >>> from probnum.prob import RandomVariable, Dirac\n >>> dist1 = Dirac(support=0.)\n >>> dist2 = Dirac(support=1.)\n >>> rv = RandomVariable(distribution=dist1 + dist2)\n >>> rv.sample(size=5)\n array([1., 1., 1., 1., 1.])\n \"\"\"\n\n def __init__(self, support, random_state=None):\n if np.isscalar(support):\n _dtype = np.dtype(type(support))\n else:\n _dtype = support.dtype\n super().__init__(\n parameters={\"support\": support}, dtype=_dtype, random_state=random_state\n )\n\n def cdf(self, x):\n if np.any(x < self.parameters[\"support\"]):\n return 0.0\n else:\n return 1.0\n\n def median(self):\n return self.parameters[\"support\"]\n\n def mode(self):\n return self.parameters[\"support\"]\n\n def mean(self):\n return self.parameters[\"support\"]\n\n def var(self):\n return 0.0\n\n def cov(self):\n if np.isscalar(self.parameters[\"support\"]):\n return self.var()\n else:\n return np.zeros(\n (len(self.parameters[\"support\"]), len(self.parameters[\"support\"]))\n )\n\n def sample(self, size=(), seed=None):\n ndims = len(self.shape)\n if size == 1 or size == ():\n return self.parameters[\"support\"]\n elif isinstance(size, int) and ndims == 0:\n return np.tile(A=self.parameters[\"support\"], reps=size)\n elif isinstance(size, int):\n return np.tile(\n A=self.parameters[\"support\"], reps=[size, *np.repeat(1, ndims)]\n )\n else:\n return np.tile(\n A=self.parameters[\"support\"], reps=tuple([*size, *np.repeat(1, ndims)])\n )\n\n def reshape(self, newshape):\n try:\n # Reshape support\n self._parameters[\"support\"].reshape(newshape=newshape)\n except ValueError:\n raise ValueError(\n \"Cannot reshape this Dirac distribution to the given shape: {}\".format(\n str(newshape)\n )\n )\n\n # Binary arithmetic operations\n def __add__(self, other):\n if isinstance(other, Dirac):\n support_ = self.parameters[\"support\"] + other.parameters[\"support\"]\n return Dirac(support=support_, random_state=self.random_state)\n else:\n return other.__add__(other=self)\n\n def __sub__(self, other):\n if isinstance(other, Dirac):\n support_ = self.parameters[\"support\"] - other.parameters[\"support\"]\n return Dirac(support=support_, random_state=self.random_state)\n else:\n return other.__rsub__(other=self)\n\n def __mul__(self, other):\n if isinstance(other, Dirac):\n support_ = self.parameters[\"support\"] * other.parameters[\"support\"]\n return Dirac(support=support_, random_state=self.random_state)\n else:\n return other.__mul__(other=self)\n\n def __matmul__(self, other):\n if isinstance(other, Dirac):\n support_ = self.parameters[\"support\"] @ other.parameters[\"support\"]\n 
return Dirac(support=support_, random_state=self.random_state)\n else:\n return other.__rmatmul__(other=self)\n\n def __truediv__(self, other):\n if isinstance(other, Dirac):\n support_ = operator.truediv(\n self.parameters[\"support\"], other.parameters[\"support\"]\n )\n return Dirac(support=support_, random_state=self.random_state)\n else:\n return other.__rtruediv__(other=self)\n\n def __pow__(self, power, modulo=None):\n if isinstance(power, Dirac):\n support_ = pow(\n self.parameters[\"support\"], power.parameters[\"support\"], modulo\n )\n return Dirac(support=support_, random_state=self.random_state)\n else:\n return power.__rpow__(power=self, modulo=modulo)\n\n # Binary arithmetic operations with reflected (swapped) operands\n\n def __radd__(self, other):\n return other.__add__(other=self)\n\n def __rsub__(self, other):\n return other.__sub__(other=self)\n\n def __rmul__(self, other):\n return other.__mul__(other=self)\n\n def __rmatmul__(self, other):\n return other.__matmul__(other=self)\n\n def __rtruediv__(self, other):\n return other.__truediv__(other=self)\n\n def __rpow__(self, power, modulo=None):\n return power.__pow__(power=self)\n\n # Augmented arithmetic assignments (+=, -=, *=, ...)\n # attempting to do the operation in place\n\n def __iadd__(self, other):\n if isinstance(other, Dirac):\n support_ = self.parameters[\"support\"] + other.parameters[\"support\"]\n self.parameters[\"support\"] = support_\n return self\n else:\n return NotImplemented\n\n def __isub__(self, other):\n if isinstance(other, Dirac):\n support_ = self.parameters[\"support\"] - other.parameters[\"support\"]\n self.parameters[\"support\"] = support_\n return self\n else:\n return NotImplemented\n\n def __imul__(self, other):\n if isinstance(other, Dirac):\n support_ = self.parameters[\"support\"] * other.parameters[\"support\"]\n self.parameters[\"support\"] = support_\n return self\n else:\n return NotImplemented\n\n def __imatmul__(self, other):\n if isinstance(other, Dirac):\n support_ = self.parameters[\"support\"] @ other.parameters[\"support\"]\n self.parameters[\"support\"] = support_\n return self\n else:\n return NotImplemented\n\n def __itruediv__(self, other):\n if isinstance(other, Dirac):\n support_ = operator.truediv(\n self.parameters[\"support\"], other.parameters[\"support\"]\n )\n self.parameters[\"support\"] = support_\n return self\n else:\n return NotImplemented\n\n def __ipow__(self, power, modulo=None):\n if isinstance(power, Dirac):\n support_ = pow(\n self.parameters[\"support\"], power.parameters[\"support\"], modulo\n )\n self.parameters[\"support\"] = support_\n return self\n else:\n return NotImplemented\n\n # Unary arithmetic operations\n\n def __neg__(self):\n self.parameters[\"support\"] = operator.neg(self.parameters[\"support\"])\n return self\n\n def __pos__(self):\n self.parameters[\"support\"] = operator.pos(self.parameters[\"support\"])\n return self\n\n def __abs__(self):\n self.parameters[\"support\"] = operator.abs(self.parameters[\"support\"])\n return self\n\n def __invert__(self):\n support_ = self.parameters[\"support\"]\n self.parameters[\"support\"] = operator.invert(support_)\n return self\n" ]
[ [ "numpy.any", "numpy.tile", "numpy.isscalar", "numpy.repeat" ] ]
SJTU-Det/R3Det
[ "3e092fa65dee2b9f7722b0985b3791811a1de5ae" ]
[ "libs/configs/DOTA1.0/r3det_plusplus/cfgs_res50_dota_r3det_plusplus_v8.py" ]
[ "# -*- coding: utf-8 -*-\nfrom __future__ import division, print_function, absolute_import\nimport os\nimport tensorflow as tf\nimport math\n\n\"\"\"\nv3 + weight\n\nThis is your result for task 1:\n\n mAP: 0.694278910759864\n ap of each class:\n plane:0.88866159706304,\n baseball-diamond:0.7860352276239824,\n bridge:0.47338301497690105,\n ground-track-field:0.6216372729671545,\n small-vehicle:0.6994177931102508,\n large-vehicle:0.7458671012655077,\n ship:0.785294772102568,\n tennis-court:0.9075708653156096,\n basketball-court:0.7834021499469714,\n storage-tank:0.8172385380195397,\n soccer-ball-field:0.5645662115849255,\n roundabout:0.6018272737599449,\n harbor:0.5750654725229614,\n swimming-pool:0.6652388936929979,\n helicopter:0.49897747744560683\n\nThe submitted information is :\n\nDescription: RetinaNet_DOTA_R3Det_plusplus_2x_20200405_108w\nUsername: SJTU-Det\nInstitute: SJTU\nEmailadress: [email protected]\nTeamMembers: yangxue\n\n\"\"\"\n\n# ------------------------------------------------\nVERSION = 'RetinaNet_DOTA_R3Det_plusplus_2x_20200405'\nNET_NAME = 'resnet50_v1d' # 'MobilenetV2'\nADD_BOX_IN_TENSORBOARD = True\n\n# ---------------------------------------- System_config\nROOT_PATH = os.path.abspath('../')\nprint(20*\"++--\")\nprint(ROOT_PATH)\nGPU_GROUP = \"0,1,2,3\"\nNUM_GPU = len(GPU_GROUP.strip().split(','))\nSHOW_TRAIN_INFO_INTE = 20\nSMRY_ITER = 200\nSAVE_WEIGHTS_INTE = 27000 * 2\n\nSUMMARY_PATH = ROOT_PATH + '/output/summary'\nTEST_SAVE_PATH = ROOT_PATH + '/tools/test_result'\n\nif NET_NAME.startswith(\"resnet\"):\n weights_name = NET_NAME\nelif NET_NAME.startswith(\"MobilenetV2\"):\n weights_name = \"mobilenet/mobilenet_v2_1.0_224\"\nelse:\n raise Exception('net name must in [resnet_v1_101, resnet_v1_50, MobilenetV2]')\n\nPRETRAINED_CKPT = ROOT_PATH + '/data/pretrained_weights/' + weights_name + '.ckpt'\nTRAINED_CKPT = os.path.join(ROOT_PATH, 'output/trained_weights')\nEVALUATE_DIR = ROOT_PATH + '/output/evaluate_result_pickle/'\n\n# ------------------------------------------ Train config\nRESTORE_FROM_RPN = False\nFIXED_BLOCKS = 1 # allow 0~3\nFREEZE_BLOCKS = [True, False, False, False, False] # for gluoncv backbone\nUSE_07_METRIC = True\n\nMUTILPY_BIAS_GRADIENT = 2.0 # if None, will not multipy\nGRADIENT_CLIPPING_BY_NORM = 10.0 # if None, will not clip\n\nCLS_WEIGHT = 1.0\nREG_WEIGHT = 1.0\nUSE_IOU_FACTOR = False\n\nBATCH_SIZE = 1\nEPSILON = 1e-5\nMOMENTUM = 0.9\nLR = 5e-4\nDECAY_STEP = [SAVE_WEIGHTS_INTE*12, SAVE_WEIGHTS_INTE*16, SAVE_WEIGHTS_INTE*20]\nMAX_ITERATION = SAVE_WEIGHTS_INTE*20\nWARM_SETP = int(1.0 / 4.0 * SAVE_WEIGHTS_INTE)\n\n# -------------------------------------------- Data_preprocess_config\nDATASET_NAME = 'DOTA' # 'pascal', 'coco'\nPIXEL_MEAN = [123.68, 116.779, 103.939] # R, G, B. In tf, channel is RGB. In openCV, channel is BGR\nPIXEL_MEAN_ = [0.485, 0.456, 0.406]\nPIXEL_STD = [0.229, 0.224, 0.225] # R, G, B. In tf, channel is RGB. 
In openCV, channel is BGR\nIMG_SHORT_SIDE_LEN = 800\nIMG_MAX_LENGTH = 800\nCLASS_NUM = 15\n\nIMG_ROTATE = False\nRGB2GRAY = False\nVERTICAL_FLIP = False\nHORIZONTAL_FLIP = True\nIMAGE_PYRAMID = False\n\n# --------------------------------------------- Network_config\nSUBNETS_WEIGHTS_INITIALIZER = tf.random_normal_initializer(mean=0.0, stddev=0.01, seed=None)\nSUBNETS_BIAS_INITIALIZER = tf.constant_initializer(value=0.0)\nPROBABILITY = 0.01\nFINAL_CONV_BIAS_INITIALIZER = tf.constant_initializer(value=-math.log((1.0 - PROBABILITY) / PROBABILITY))\nWEIGHT_DECAY = 1e-4\nUSE_GN = False\nNUM_SUBNET_CONV = 4\nNUM_REFINE_STAGE = 1\nUSE_RELU = False\nFPN_CHANNEL = 256\n\n# ---------------------------------------------Anchor config\nLEVEL = ['P3', 'P4', 'P5', 'P6', 'P7']\nBASE_ANCHOR_SIZE_LIST = [32, 64, 128, 256, 512]\nANCHOR_STRIDE = [8, 16, 32, 64, 128]\nANCHOR_SCALES = [2 ** 0, 2 ** (1.0 / 3.0), 2 ** (2.0 / 3.0)]\nANCHOR_RATIOS = [1, 1 / 2, 2., 1 / 3., 3., 5., 1 / 5.]\nANCHOR_ANGLES = [-90, -75, -60, -45, -30, -15]\nANCHOR_SCALE_FACTORS = None\nUSE_CENTER_OFFSET = True\nMETHOD = 'H'\nUSE_ANGLE_COND = False\nANGLE_RANGE = 90\n\n# --------------------------------------------RPN config\nSHARE_NET = True\nUSE_P5 = True\nIOU_POSITIVE_THRESHOLD = 0.5\nIOU_NEGATIVE_THRESHOLD = 0.4\nREFINE_IOU_POSITIVE_THRESHOLD = [0.6, 0.7]\nREFINE_IOU_NEGATIVE_THRESHOLD = [0.5, 0.6]\n\nNMS = True\nNMS_IOU_THRESHOLD = 0.1\nMAXIMUM_DETECTIONS = 100\nFILTERED_SCORE = 0.05\nVIS_SCORE = 0.4\n\n# --------------------------------------------MASK config\nUSE_SUPERVISED_MASK = True\nMASK_TYPE = 'r' # r or h\nBINARY_MASK = False\nSIGMOID_ON_DOT = False\nMASK_ACT_FET = True # weather use mask generate 256 channels to dot feat.\nGENERATE_MASK_LIST = [\"P3\", \"P4\", \"P5\", \"P6\", \"P7\"]\nADDITION_LAYERS = [1, 1, 1, 1, 1] # add 4 layer to generate P2_mask, 2 layer to generate P3_mask\nENLAEGE_RF_LIST = [\"P3\", \"P4\", \"P5\", \"P6\", \"P7\"]\nSUPERVISED_MASK_LOSS_WEIGHT = 1.0\n" ]
[ [ "tensorflow.random_normal_initializer", "tensorflow.constant_initializer" ] ]
bbrzycki/setigen
[ "3106c32a629c76c71768ea02b7661474e1cf7ff6" ]
[ "setigen/distributions.py" ]
[ "import numpy as np\n\nfwhm_m = 2 * np.sqrt(2 * np.log(2))\n\ndef fwhm(sigma):\n \"\"\"\n Get full width at half maximum (FWHM) for a provided sigma / \n standard deviation, assuming a Gaussian distribution.\n \"\"\"\n return fwhm_m * sigma\n \n\ndef gaussian(x_mean, x_std, shape):\n return np.random.normal(x_mean, x_std, shape)\n\n\ndef truncated_gaussian(x_mean, x_std, x_min, shape):\n \"\"\"\n Sample from a normal distribution, but enforces a minimum value.\n \"\"\"\n return np.maximum(gaussian(x_mean, x_std, shape), x_min)\n\n\ndef chi2(x_mean, chi2_df, shape):\n \"\"\"\n Chi-squared distribution centered at a specific mean.\n \n Parameters\n ----------\n x_mean : float\n chi2_df : int\n Degrees of freedom for chi-squared\n shape : list\n Shape of output noise array\n \n Returns\n -------\n dist : ndarray\n Array of chi-squared noise\n \"\"\"\n return np.random.chisquare(df=chi2_df, size=shape) * x_mean / chi2_df" ]
[ [ "numpy.random.normal", "numpy.random.chisquare", "numpy.log" ] ]
mdblackledge/SimpleITK-Image-Symmetry
[ "b18343394852a1514bf45ddb9078e27c4d6f6718" ]
[ "image_symmetry.py" ]
[ "import SimpleITK as sitk\nimport numpy as np\nfrom scipy.optimize import minimize\nfrom pywt import wavedecn\n\nclass ImageSymmetry(object):\n\n def plane_normalised(self, plane):\n if not type(plane) is np.ndarray:\n plane = np.array(plane)\n if not np.abs(np.sum(plane[0:-1]**2) - 1.0) < 1e-10:\n return False\n return True\n\n def normalise_plane(self, plane):\n norm = np.sqrt(np.sum(np.array(plane[0:-1])**2))\n return np.array(plane)/norm\n\n def cartesian_2_polar(self, plane):\n if not self.plane_normalised(plane):\n raise Exception(\"Input plane contents not normalised\")\n plane_polar = np.zeros(len(plane)-1)\n plane_polar[-1] = plane[-1]\n plane_polar[-2] = np.arcsin(plane[-2])\n for i in range(len(plane_polar)-3, -1, -1):\n plane_polar[i] = np.arcsin(plane[i+1] / np.prod(np.cos(plane_polar[i+1:-1])))\n return plane_polar\n\n def polar_2_cartesian(self, plane_polar):\n plane = np.zeros(len(plane_polar)+1)\n plane[0] = np.prod(np.cos(plane_polar[0:-1]))\n for i in range(1, len(plane)-2):\n plane[i] = np.sin(plane_polar[i-1]) * np.prod(np.cos(plane_polar[i:-1]))\n plane[-2] = np.sin(plane_polar[-2])\n plane[-1] = plane_polar[-1]\n return plane\n\n def __reflection_cost__(self, plane_polar, im):\n plane = self.polar_2_cartesian(plane_polar)\n imN = self.reflect_image(plane, im)\n cost = np.mean(np.abs(sitk.GetArrayFromImage(im-imN)))\n return cost\n\n def reflect_image(self, plane, im):\n trans = self.reflection_transform(plane)\n imN = sitk.Resample(im, im, trans, sitk.sitkLinear, 0.0, im.GetPixelID())\n return imN\n\n def plane_of_reflection(self, im, plane=None, levels=(2, 0)):\n if plane is None:\n plane = np.zeros(len(im.GetSize())+1)\n plane[0] = 1.0\n if not self.plane_normalised(plane):\n raise Exception(\"Input plane is not normalised\")\n origin = im.GetOrigin()\n shape = np.array(im.GetSize())\n spacing = np.array(im.GetSpacing())\n plane_polar = self.cartesian_2_polar(plane)\n for level in levels:\n arr = wavedecn(sitk.GetArrayFromImage(im), 'db1', level=level)[0]\n im_ = sitk.GetImageFromArray(arr)\n im_.SetSpacing(shape / arr.shape[::-1] * spacing)\n im_.SetOrigin(origin + 0.5 * (im_.GetSpacing() - spacing))\n plane_polar = minimize(self.__reflection_cost__, plane_polar, (im_), method='Nelder-Mead', tol=1e-10).x\n plane = self.polar_2_cartesian(plane_polar)\n return plane\n\n def reflection_matrix(self, plane):\n mat = np.zeros((len(plane), len(plane)))\n for i in range(len(plane)-1):\n for j in range(len(plane)):\n if i == j:\n mat[i, j] = 1 - 2 * plane[i] * plane[j]\n else:\n mat[i, j] = - 2 * plane[i] * plane[j]\n mat[-1, -1] = 1.0\n return mat\n\n def reflection_transform(self, plane):\n trans_arr = self.reflection_matrix(plane)\n trans = sitk.AffineTransform(len(plane)-1)\n trans_params = []\n for i in range(len(plane)-1):\n trans_params = np.r_[trans_params, trans_arr[i, 0:-1].ravel()]\n trans_params = np.r_[trans_params, trans_arr[0:-1, -1].ravel()]\n trans.SetParameters(trans_params)\n return trans\n\n def plane_2d(self, x, plane):\n a = plane[0]\n b = plane[1]\n c = plane[2]\n return (a * x + c) / (-1. 
* b)\n\n def plane(self, X, plane):\n d = plane[-1]\n plane = plane[0:-1]\n return (np.einsum(\"ij,j->i\", X, plane[0:-2]) + d)/(-1.*plane[-1])\n\n\nif __name__ == \"__main__\":\n\n from scipy.misc import face\n import matplotlib.pyplot as pl\n\n image_sym = ImageSymmetry()\n\n # Create a mock image with symmetry\n arr = face(gray=True).astype('float')\n arr = np.pad(arr, ((arr.shape[0], arr.shape[0]), (arr.shape[1], arr.shape[1])), 'constant', constant_values=0.0)\n\n im = sitk.GetImageFromArray(arr)\n im.SetOrigin((-arr.shape[1]/2, -arr.shape[0]/2))\n plane = image_sym.normalise_plane([1.0, 0.5, 100])\n trans = image_sym.reflection_transform(plane)\n im_reflected = sitk.Resample(im, im, trans, sitk.sitkLinear, 0.0, im.GetPixelID())\n im = im + im_reflected\n\n # Initialise the plane as something different and try to fit\n plane_init = [0.80, 0.7, 0.22]\n plane_init = image_sym.normalise_plane(plane_init)\n plane_est = image_sym.plane_of_reflection(im, plane_init, levels=[4])\n print('Initial plane: ', plane_init)\n print('Estimated plane: ', plane_est)\n print('True plane: ', plane)\n\n # Show the result\n f = pl.figure()\n pl.imshow(sitk.GetArrayFromImage(im),\n cmap = 'gray',\n origin='lower',\n extent = (-arr.shape[1]/2, arr.shape[1]/2, -arr.shape[0]/2, arr.shape[0]/2))\n x = np.linspace(-arr.shape[1]/2, arr.shape[1]/2, 100)\n y = image_sym.plane_2d(x, plane)\n pl.plot(x, y, 'r-', label = \"Truth\")\n y_ = image_sym.plane_2d(x, plane_init)\n pl.plot(x, y_, 'b-', label = \"Init.\")\n y__ = image_sym.plane_2d(x, plane_est)\n pl.plot(x, y__, 'g--', label = \"Est.\")\n pl.plot((0, 0), (0, 0), 'ro')\n pl.xlim(-arr.shape[1]/2, arr.shape[1]/2)\n pl.ylim(-arr.shape[0]/2, arr.shape[0]/2)\n pl.legend(loc = 1)\n pl.show()\n" ]
[ [ "numpy.sum", "matplotlib.pyplot.legend", "numpy.arcsin", "numpy.sin", "numpy.einsum", "matplotlib.pyplot.figure", "scipy.misc.face", "scipy.optimize.minimize", "numpy.cos", "matplotlib.pyplot.xlim", "matplotlib.pyplot.show", "matplotlib.pyplot.ylim", "numpy.array", "matplotlib.pyplot.plot", "numpy.pad", "numpy.linspace" ] ]
shbe-aau/multi-pose-estimation
[ "22cea6cd09684fe655fb2214bc14856f589048e1" ]
[ "multi-pose/utils/sundermeyer/pysixd/view_sampler.py" ]
[ "# Author: Tomas Hodan ([email protected])\n# Center for Machine Perception, Czech Technical University in Prague\n\n# Samples views from a sphere.\n\nimport math\nimport numpy as np\n#import transform\nfrom utils.sundermeyer.pysixd import transform\n\ndef calc_2d_bbox(xs, ys, im_size):\n bbTL = (max(xs.min() - 1, 0),\n max(ys.min() - 1, 0))\n bbBR = (min(xs.max() + 1, im_size[0] - 1),\n min(ys.max() + 1, im_size[1] - 1))\n return [bbTL[0], bbTL[1], bbBR[0] - bbTL[0], bbBR[1] - bbTL[1]]\n\n\n\ndef hinter_sampling(min_n_pts, radius=1):\n '''\n Sphere sampling based on refining icosahedron as described in:\n Hinterstoisser et al., Simultaneous Recognition and Homography Extraction of\n Local Patches with a Simple Linear Classifier, BMVC 2008\n\n :param min_n_pts: Minimum required number of points on the whole view sphere.\n :param radius: Radius of the view sphere.\n :return: 3D points on the sphere surface and a list that indicates on which\n refinement level the points were created.\n '''\n\n # Get vertices and faces of icosahedron\n a, b, c = 0.0, 1.0, (1.0 + math.sqrt(5.0)) / 2.0\n pts = [(-b, c, a), (b, c, a), (-b, -c, a), (b, -c, a), (a, -b, c), (a, b, c),\n (a, -b, -c), (a, b, -c), (c, a, -b), (c, a, b), (-c, a, -b), (-c, a, b)]\n faces = [(0, 11, 5), (0, 5, 1), (0, 1, 7), (0, 7, 10), (0, 10, 11), (1, 5, 9),\n (5, 11, 4), (11, 10, 2), (10, 7, 6), (7, 1, 8), (3, 9, 4), (3, 4, 2),\n (3, 2, 6), (3, 6, 8), (3, 8, 9), (4, 9, 5), (2, 4, 11), (6, 2, 10),\n (8, 6, 7), (9, 8, 1)]\n\n # Refinement level on which the points were created\n pts_level = [0 for _ in range(len(pts))]\n\n ref_level = 0\n while len(pts) < min_n_pts:\n ref_level += 1\n edge_pt_map = {} # Mapping from an edge to a newly added point on that edge\n faces_new = [] # New set of faces\n\n # Each face is replaced by 4 new smaller faces\n for face in faces:\n pt_inds = list(face) # List of point IDs involved in the new faces\n for i in range(3):\n # Add a new point if this edge hasn't been processed yet,\n # or get ID of the already added point.\n edge = (face[i], face[(i + 1) % 3])\n edge = (min(edge), max(edge))\n if edge not in list(edge_pt_map.keys()):\n pt_new_id = len(pts)\n edge_pt_map[edge] = pt_new_id\n pt_inds.append(pt_new_id)\n\n pt_new = 0.5 * (np.array(pts[edge[0]]) + np.array(pts[edge[1]]))\n pts.append(pt_new.tolist())\n pts_level.append(ref_level)\n else:\n pt_inds.append(edge_pt_map[edge])\n\n # Replace the current face with 4 new faces\n faces_new += [(pt_inds[0], pt_inds[3], pt_inds[5]),\n (pt_inds[3], pt_inds[1], pt_inds[4]),\n (pt_inds[3], pt_inds[4], pt_inds[5]),\n (pt_inds[5], pt_inds[4], pt_inds[2])]\n faces = faces_new\n\n # Project the points to a sphere\n pts = np.array(pts)\n pts *= np.reshape(radius / np.linalg.norm(pts, axis=1), (pts.shape[0], 1))\n\n # Collect point connections\n pt_conns = {}\n for face in faces:\n for i in range(len(face)):\n pt_conns.setdefault(face[i], set()).add(face[(i + 1) % len(face)])\n pt_conns[face[i]].add(face[(i + 2) % len(face)])\n\n # Order the points - starting from the top one and adding the connected points\n # sorted by azimuth\n top_pt_id = np.argmax(pts[:, 2])\n pts_ordered = []\n pts_todo = [top_pt_id]\n pts_done = [False for _ in range(pts.shape[0])]\n\n def calc_azimuth(x, y):\n two_pi = 2.0 * math.pi\n return (math.atan2(y, x) + two_pi) % two_pi\n\n while len(pts_ordered) != pts.shape[0]:\n # Sort by azimuth\n pts_todo = sorted(pts_todo, key=lambda i: calc_azimuth(pts[i][0], pts[i][1]))\n pts_todo_new = []\n for pt_id in pts_todo:\n 
pts_ordered.append(pt_id)\n pts_done[pt_id] = True\n pts_todo_new += [i for i in pt_conns[pt_id]] # Find the connected points\n\n # Points to be processed in the next iteration\n pts_todo = [i for i in set(pts_todo_new) if not pts_done[i]]\n\n # Re-order the points and faces\n pts = pts[np.array(pts_ordered), :]\n pts_level = [pts_level[i] for i in pts_ordered]\n pts_order = np.zeros((pts.shape[0],))\n pts_order[np.array(pts_ordered)] = np.arange(pts.shape[0])\n for face_id in range(len(faces)):\n faces[face_id] = [pts_order[i] for i in faces[face_id]]\n\n # import inout\n # inout.save_ply('output/hinter_sampling.ply', pts=pts, faces=np.array(faces))\n\n return pts, pts_level\n\ndef sample_views(min_n_views, radius=1,\n azimuth_range=(0, 2 * math.pi),\n elev_range=(-0.5 * math.pi, 0.5 * math.pi)):\n '''\n Viewpoint sampling from a view sphere.\n\n :param min_n_views: Minimum required number of views on the whole view sphere.\n :param radius: Radius of the view sphere.\n :param azimuth_range: Azimuth range from which the viewpoints are sampled.\n :param elev_range: Elevation range from which the viewpoints are sampled.\n :return: List of views, each represented by a 3x3 rotation matrix and\n a 3x1 translation vector.\n '''\n\n # Get points on a sphere\n if True:\n pts, pts_level = hinter_sampling(min_n_views, radius=radius)\n else:\n pts = fibonacci_sampling(min_n_views + 1, radius=radius)\n pts_level = [0 for _ in range(len(pts))]\n\n views = []\n for pt in pts:\n # Azimuth from (0, 2 * pi)\n azimuth = math.atan2(pt[1], pt[0])\n if azimuth < 0:\n azimuth += 2.0 * math.pi\n\n # Elevation from (-0.5 * pi, 0.5 * pi)\n a = np.linalg.norm(pt)\n b = np.linalg.norm([pt[0], pt[1], 0])\n elev = math.acos(b / a)\n if pt[2] < 0:\n elev = -elev\n\n # if hemisphere and (pt[2] < 0 or pt[0] < 0 or pt[1] < 0):\n if not (azimuth_range[0] <= azimuth <= azimuth_range[1] and\n elev_range[0] <= elev <= elev_range[1]):\n continue\n\n # Rotation matrix\n # The code was adopted from gluLookAt function (uses OpenGL coordinate system):\n # [1] http://stackoverflow.com/questions/5717654/glulookat-explanation\n # [2] https://www.opengl.org/wiki/GluLookAt_code\n f = -np.array(pt) # Forward direction\n f /= np.linalg.norm(f)\n u = np.array([0.0, 0.0, 1.0]) # Up direction\n s = np.cross(f, u) # Side direction\n if np.count_nonzero(s) == 0:\n # f and u are parallel, i.e. we are looking along or against Z axis\n s = np.array([1.0, 0.0, 0.0])\n s /= np.linalg.norm(s)\n u = np.cross(s, f) # Recompute up\n R = np.array([[s[0], s[1], s[2]],\n [u[0], u[1], u[2]],\n [-f[0], -f[1], -f[2]]])\n\n # Convert from OpenGL to OpenCV coordinate system\n R_yz_flip = transform.rotation_matrix(math.pi, [1, 0, 0])[:3, :3]\n R = R_yz_flip.dot(R)\n\n # Translation vector\n t = -R.dot(np.array(pt).reshape((3, 1)))\n\n views.append({'R': R, 't': t})\n\n return views, pts_level\n" ]
[ [ "numpy.zeros", "numpy.cross", "numpy.argmax", "numpy.count_nonzero", "numpy.arange", "numpy.array", "numpy.linalg.norm" ] ]
burcgokden/SDPA-Transformer-Wrapper
[ "81371d1bd7d9ae26a70a549740539242f1a76199" ]
[ "nmt_data_prep.py" ]
[ "\nimport logging\n\nimport tensorflow_datasets as tfds\nimport tensorflow as tf\nimport tensorflow_text as text\n\nlogging.getLogger('tensorflow').setLevel(logging.ERROR)\n\n\nclass src_tgt_data_prep:\n '''\n Prepares data for encoder-decoder architecture for machine translation task\n default inputs are for portuguese to english dataset from TED Talks Open Translation Project.\n '''\n def __init__(self,\n src_lang='pt',\n tgt_lang='en',\n BUFFER_SIZE=20000,\n BATCH_SIZE = 64,\n dataset_file='ted_hrlr_translate/pt_to_en',\n load_dataset=True,\n train_percent=None,\n model_name = \"./ted_hrlr_translate_pt_en_tokenizer\",\n revert_order=False,\n shuffle_set=True,\n shuffle_files=True,\n MAX_LENGTH=None,\n verbose=False):\n '''\n This init method asks for tokenizer source and target object loaded and ready to provide.\n The dataset may have order reverted, this method does the conversion to intended source target order.\n\n Args:\n src_lang: source language abbreviation as string\n tgt_lang: target language abbreviation as string\n BUFFER_SIZE: Buffer size for shuffling\n BATCH_SIZE: batch size for dataset\n dataset_file: path to tensorflow dataset\n load_dataset: if True load the dataset\n train_percent: Percentage of train data to be loaded. 1-100. None loads all training data.\n model_name: file path for tokenizer model.\n revert_order: If True, it reverts the order of language pairs in dataset_file. Reverted order should match\n src_lang/tgt_lang assignment.\n shuffle_set:If True shuffle the dataset while loading\n shuffle_files: shuffle dataset files while loading\n MAX_LENGTH: Maximum number of tokens in each sentence.\n verbose: If True print out more details.\n\n Returns batched, tokenized, filtered train, validation datasets and test dataset. 
Tokenizer methods are accessible\n through instance of this class object\n '''\n\n self.BUFFER_SIZE=BUFFER_SIZE\n self.BATCH_SIZE=BATCH_SIZE\n self.MAX_LENGTH = MAX_LENGTH\n self.model_name = model_name\n self.revert_order=revert_order\n self.src_lang = src_lang\n self.tgt_lang = tgt_lang\n self.tokenizers_src, self.tokenizers_tgt, self.tokenizers = self.load_tokenizer()\n\n #load dataset\n if load_dataset:\n print(\"LOADING DATASET\")\n if train_percent:\n #load only percentage of train data\n examples, metadata = tfds.load(dataset_file,\n split=[f'train[:{train_percent}%]', 'validation', 'test'],\n with_info=True, as_supervised=True, shuffle_files=shuffle_files)\n else:\n #load all data\n examples, metadata = tfds.load(dataset_file,\n split=['train', 'validation', 'test'],\n with_info=True, as_supervised=True, shuffle_files=shuffle_files)\n\n if self.revert_order:\n #revert the order if intended source and target language orders are reversed\n #tokenizer source and tokenizer target are intended values.\n print(f\"REVERTING ORDER OF DATASET TUPLES TO (SRC, TGT) : {self.src_lang},{self.tgt_lang}\")\n self.train_examples = examples[0].map(lambda dsl1, dsl2: [dsl2, dsl1])\n self.val_examples = examples[1].map(lambda dsl1, dsl2: [dsl2, dsl1])\n self.test_examples=examples[2].map(lambda dsl1, dsl2: [dsl2, dsl1])\n self.examples = examples\n self.metadata = metadata\n else:\n print(f\"ORDER OF DATASET TUPLES (SRC, TGT) : {self.src_lang},{self.tgt_lang}\")\n self.train_examples = examples[0]\n self.val_examples = examples[1]\n self.test_examples=examples[2]\n self.examples=None\n self.metadata=metadata\n else:\n print(\"SKIPPED LOADING DATASET\")\n\n #print some info about tokenizer model\n load_tokenizer_model= self.tokenizers_src and self.tokenizers_tgt\n if load_tokenizer_model:\n print(\"SOURCE AND TARGET TOKENIZERS INFO\")\n print(f\"Methods for source lang: {self.src_lang}\")\n print([item for item in dir(self.tokenizers_src) if not item.startswith('_')])\n print((f\"Methods for tgt lang: {self.tgt_lang}\"))\n print([item for item in dir(self.tokenizers_tgt) if not item.startswith('_')])\n else:\n print(\"PLEASE PROVIDE TOKENIZERS CORRECTLY\")\n\n if self.MAX_LENGTH is None:\n #create batched and tokenized datasets.\n print(\"CREATING SHUFFLED BATCHED DATASETS FOR TRAINING AND VALIDATION\")\n self.train_batches=self.make_batches(self.train_examples, map_tokenize=load_tokenizer_model, shuffle_set=shuffle_set)\n self.val_batches=self.make_batches(self.val_examples, map_tokenize=load_tokenizer_model, shuffle_set=False)\n self.test_examples = self.test_examples.prefetch(tf.data.AUTOTUNE)\n else:\n self.train_batches=self.make_padded_batches(self.train_examples, shuffle_set=shuffle_set)\n self.val_batches=self.make_padded_batches(self.val_examples, shuffle_set=False)\n self.test_examples=self.filter_test(self.test_examples)\n if verbose:\n #these operations are very slow so for large datasets should be avoided.\n print(f\"FILTERED BATCHED TRAIN DATASET ELEMENT COUNT: {self.dataset_batch_cardinality(self.train_batches)*self.BATCH_SIZE}\")\n print(f\"FILTERED BATCHED VAL DATASET ELEMENT COUNT: {self.dataset_batch_cardinality(self.val_batches)*self.BATCH_SIZE}\")\n\n @staticmethod\n def dataset_batch_cardinality(ds):\n cnt = 0\n for _ in ds:\n cnt += 1\n return cnt\n\n def filter_test(self, test_ds):\n '''\n The test needs to be first tokenized,\n filter for token length and then detokenized.\n '''\n\n print(f\"ORIGINAL TEST DATASET LENGTH: {len(test_ds)}\")\n\n 
test_ds=test_ds.batch(1).map(self.tokenize_pairs_src_tgt)\n test_ds=test_ds.unbatch().filter(self.filter_max_length)\n test_ds=test_ds.batch(1).map(self.detokenize_pairs_src_tgt)\n test_ds=test_ds.unbatch().prefetch(tf.data.AUTOTUNE)\n\n for ts in test_ds.take(3):\n print(f\"DETOKENIZED TEST SAMPLE LESS THAN LENGTH {self.MAX_LENGTH}: {ts}\")\n print(f\"FILTERED TEST LENGTH: {self.dataset_batch_cardinality(test_ds)}\")\n\n return test_ds\n\n def detokenize_pairs_src_tgt(self, src, tgt):\n\n src = self.tokenizers_src.detokenize(src)\n tgt = self.tokenizers_tgt.detokenize(tgt)\n\n return src, tgt\n\n\n\n def load_tokenizer(self):\n '''\n Run this first to get tokenizers pairs for intended source and target language.\n Returns source tokenizer, target tokenizer and tokenizer object\n '''\n print(f\"LOADING TOKENIZER AT {self.model_name}\")\n tokenizers = tf.saved_model.load(self.model_name)\n print(\"THE TOKENIZER LANGUAGES AVAILABLE ARE:\")\n print([item for item in dir(tokenizers) if not item.startswith('_')])\n tokenizers_src=getattr(tokenizers, self.src_lang, None)\n tokenizers_tgt=getattr(tokenizers, self.tgt_lang, None)\n\n return tokenizers_src, tokenizers_tgt, tokenizers\n\n\n\n def tokenize_pairs_src_tgt(self, src, tgt):\n '''\n Use tokenizer model to create tokenized pairs.\n '''\n src = self.tokenizers_src.tokenize(src)\n # Convert from ragged to dense, padding with zeros.\n src = src.to_tensor()\n\n tgt = self.tokenizers_tgt.tokenize(tgt)\n # Convert from ragged to dense, padding with zeros.\n tgt = tgt.to_tensor()\n\n return src, tgt\n\n def make_batches(self, ds, map_tokenize=True, shuffle_set=True):\n '''\n method to create dataset batches and map each element with tokenizer model\n it takes a dataset that contains lang1, lang2 pairs.\n '''\n #shuffle dataset and make batches\n ds_batched=ds\n if shuffle_set:\n ds_batched = ds_batched.shuffle(self.BUFFER_SIZE)\n\n ds_batched=ds_batched.batch(self.BATCH_SIZE)\n if map_tokenize:\n ds_batched = ds_batched.map(self.tokenize_pairs_src_tgt, num_parallel_calls=tf.data.AUTOTUNE)\n\n ds_batched=ds_batched.prefetch(tf.data.AUTOTUNE)\n print(\"Dataset element spec:\", ds_batched.element_spec)\n\n return ds_batched\n\n def filter_max_length(self, x, y):\n return tf.logical_and(tf.size(x) <= self.MAX_LENGTH,\n tf.size(y) <= self.MAX_LENGTH)\n\n def make_padded_batches(self, ds, shuffle_set=True):\n '''\n If a max length is specified, the dataset is filtered, padded then batched.\n '''\n\n ds_batched = ds.batch(1)\n ds_batched = ds_batched.map(self.tokenize_pairs_src_tgt, num_parallel_calls=tf.data.AUTOTUNE)\n ds_batched=ds_batched.unbatch()\n if shuffle_set:\n ds_batched=ds_batched.shuffle(self.BUFFER_SIZE)\n ds_batched=ds_batched.filter(self.filter_max_length).padded_batch(self.BATCH_SIZE, padded_shapes=(self.MAX_LENGTH, self.MAX_LENGTH))\n ds_batched = ds_batched.prefetch(tf.data.AUTOTUNE)\n\n return ds_batched\n\ndef download_tokenizer_model(model_name = \"ted_hrlr_translate_pt_en_converter\", cache_dir=\".\"):\n '''\n Downloads a pretrained tokenizer model to a cache dir where model can be loaded from.\n Can be used once to download the model. model_name needs to match exactly the name of the model.\n '''\n\n tf.keras.utils.get_file(\n f\"{model_name}.zip\",\n f\"https://storage.googleapis.com/download.tensorflow.org/models/{model_name}.zip\",\n cache_dir=cache_dir, cache_subdir='', extract=True\n )\n" ]
[ [ "tensorflow.saved_model.load", "tensorflow.keras.utils.get_file", "tensorflow.size" ] ]
rostyboost/scipy
[ "2f5aa264724099c03772ed784e7a947d2bea8398" ]
[ "scipy/sparse/linalg/tests/test_matfuncs.py" ]
[ "#\n# Created by: Pearu Peterson, March 2002\n#\n\"\"\" Test functions for scipy.linalg.matfuncs module\n\n\"\"\"\nfrom __future__ import division, print_function, absolute_import\n\nimport math\n\nimport numpy as np\nfrom numpy import array, eye, exp, random\nfrom numpy.linalg import matrix_power\nfrom numpy.testing import (\n assert_allclose, assert_, assert_array_almost_equal, assert_equal,\n assert_array_almost_equal_nulp)\nfrom scipy._lib._numpy_compat import suppress_warnings\n\nfrom scipy.sparse import csc_matrix, SparseEfficiencyWarning\nfrom scipy.sparse.construct import eye as speye\nfrom scipy.sparse.linalg.matfuncs import (expm, _expm,\n ProductOperator, MatrixPowerOperator,\n _onenorm_matrix_power_nnm)\nfrom scipy.linalg import logm\nfrom scipy.special import factorial\nimport scipy.sparse\nimport scipy.sparse.linalg\n\n\ndef _burkardt_13_power(n, p):\n \"\"\"\n A helper function for testing matrix functions.\n\n Parameters\n ----------\n n : integer greater than 1\n Order of the square matrix to be returned.\n p : non-negative integer\n Power of the matrix.\n\n Returns\n -------\n out : ndarray representing a square matrix\n A Forsythe matrix of order n, raised to the power p.\n\n \"\"\"\n # Input validation.\n if n != int(n) or n < 2:\n raise ValueError('n must be an integer greater than 1')\n n = int(n)\n if p != int(p) or p < 0:\n raise ValueError('p must be a non-negative integer')\n p = int(p)\n\n # Construct the matrix explicitly.\n a, b = divmod(p, n)\n large = np.power(10.0, -n*a)\n small = large * np.power(10.0, -n)\n return np.diag([large]*(n-b), b) + np.diag([small]*b, b-n)\n\n\ndef test_onenorm_matrix_power_nnm():\n np.random.seed(1234)\n for n in range(1, 5):\n for p in range(5):\n M = np.random.random((n, n))\n Mp = np.linalg.matrix_power(M, p)\n observed = _onenorm_matrix_power_nnm(M, p)\n expected = np.linalg.norm(Mp, 1)\n assert_allclose(observed, expected)\n\n\nclass TestExpM(object):\n def test_zero_ndarray(self):\n a = array([[0.,0],[0,0]])\n assert_array_almost_equal(expm(a),[[1,0],[0,1]])\n\n def test_zero_sparse(self):\n a = csc_matrix([[0.,0],[0,0]])\n assert_array_almost_equal(expm(a).toarray(),[[1,0],[0,1]])\n\n def test_zero_matrix(self):\n a = np.matrix([[0.,0],[0,0]])\n assert_array_almost_equal(expm(a),[[1,0],[0,1]])\n\n def test_misc_types(self):\n A = expm(np.array([[1]]))\n assert_allclose(expm(((1,),)), A)\n assert_allclose(expm([[1]]), A)\n assert_allclose(expm(np.matrix([[1]])), A)\n assert_allclose(expm(np.array([[1]])), A)\n assert_allclose(expm(csc_matrix([[1]])).A, A)\n B = expm(np.array([[1j]]))\n assert_allclose(expm(((1j,),)), B)\n assert_allclose(expm([[1j]]), B)\n assert_allclose(expm(np.matrix([[1j]])), B)\n assert_allclose(expm(csc_matrix([[1j]])).A, B)\n\n def test_bidiagonal_sparse(self):\n A = csc_matrix([\n [1, 3, 0],\n [0, 1, 5],\n [0, 0, 2]], dtype=float)\n e1 = math.exp(1)\n e2 = math.exp(2)\n expected = np.array([\n [e1, 3*e1, 15*(e2 - 2*e1)],\n [0, e1, 5*(e2 - e1)],\n [0, 0, e2]], dtype=float)\n observed = expm(A).toarray()\n assert_array_almost_equal(observed, expected)\n\n def test_padecases_dtype_float(self):\n for dtype in [np.float32, np.float64]:\n for scale in [1e-2, 1e-1, 5e-1, 1, 10]:\n A = scale * eye(3, dtype=dtype)\n observed = expm(A)\n expected = exp(scale) * eye(3, dtype=dtype)\n assert_array_almost_equal_nulp(observed, expected, nulp=100)\n\n def test_padecases_dtype_complex(self):\n for dtype in [np.complex64, np.complex128]:\n for scale in [1e-2, 1e-1, 5e-1, 1, 10]:\n A = scale * eye(3, dtype=dtype)\n 
observed = expm(A)\n expected = exp(scale) * eye(3, dtype=dtype)\n assert_array_almost_equal_nulp(observed, expected, nulp=100)\n\n def test_padecases_dtype_sparse_float(self):\n # float32 and complex64 lead to errors in spsolve/UMFpack\n dtype = np.float64\n for scale in [1e-2, 1e-1, 5e-1, 1, 10]:\n a = scale * speye(3, 3, dtype=dtype, format='csc')\n e = exp(scale) * eye(3, dtype=dtype)\n with suppress_warnings() as sup:\n sup.filter(SparseEfficiencyWarning,\n \"Changing the sparsity structure of a csc_matrix is expensive.\")\n exact_onenorm = _expm(a, use_exact_onenorm=True).toarray()\n inexact_onenorm = _expm(a, use_exact_onenorm=False).toarray()\n assert_array_almost_equal_nulp(exact_onenorm, e, nulp=100)\n assert_array_almost_equal_nulp(inexact_onenorm, e, nulp=100)\n\n def test_padecases_dtype_sparse_complex(self):\n # float32 and complex64 lead to errors in spsolve/UMFpack\n dtype = np.complex128\n for scale in [1e-2, 1e-1, 5e-1, 1, 10]:\n a = scale * speye(3, 3, dtype=dtype, format='csc')\n e = exp(scale) * eye(3, dtype=dtype)\n with suppress_warnings() as sup:\n sup.filter(SparseEfficiencyWarning,\n \"Changing the sparsity structure of a csc_matrix is expensive.\")\n assert_array_almost_equal_nulp(expm(a).toarray(), e, nulp=100)\n\n def test_logm_consistency(self):\n random.seed(1234)\n for dtype in [np.float64, np.complex128]:\n for n in range(1, 10):\n for scale in [1e-4, 1e-3, 1e-2, 1e-1, 1, 1e1, 1e2]:\n # make logm(A) be of a given scale\n A = (eye(n) + random.rand(n, n) * scale).astype(dtype)\n if np.iscomplexobj(A):\n A = A + 1j * random.rand(n, n) * scale\n assert_array_almost_equal(expm(logm(A)), A)\n\n def test_integer_matrix(self):\n Q = np.array([\n [-3, 1, 1, 1],\n [1, -3, 1, 1],\n [1, 1, -3, 1],\n [1, 1, 1, -3]])\n assert_allclose(expm(Q), expm(1.0 * Q))\n\n def test_triangularity_perturbation(self):\n # Experiment (1) of\n # Awad H. Al-Mohy and Nicholas J. 
Higham (2012)\n # Improved Inverse Scaling and Squaring Algorithms\n # for the Matrix Logarithm.\n A = np.array([\n [3.2346e-1, 3e4, 3e4, 3e4],\n [0, 3.0089e-1, 3e4, 3e4],\n [0, 0, 3.221e-1, 3e4],\n [0, 0, 0, 3.0744e-1]],\n dtype=float)\n A_logm = np.array([\n [-1.12867982029050462e+00, 9.61418377142025565e+04,\n -4.52485573953179264e+09, 2.92496941103871812e+14],\n [0.00000000000000000e+00, -1.20101052953082288e+00,\n 9.63469687211303099e+04, -4.68104828911105442e+09],\n [0.00000000000000000e+00, 0.00000000000000000e+00,\n -1.13289322264498393e+00, 9.53249183094775653e+04],\n [0.00000000000000000e+00, 0.00000000000000000e+00,\n 0.00000000000000000e+00, -1.17947533272554850e+00]],\n dtype=float)\n assert_allclose(expm(A_logm), A, rtol=1e-4)\n\n # Perturb the upper triangular matrix by tiny amounts,\n # so that it becomes technically not upper triangular.\n random.seed(1234)\n tiny = 1e-17\n A_logm_perturbed = A_logm.copy()\n A_logm_perturbed[1, 0] = tiny\n A_expm_logm_perturbed = expm(A_logm_perturbed)\n rtol = 1e-4\n atol = 100 * tiny\n assert_(not np.allclose(A_expm_logm_perturbed, A, rtol=rtol, atol=atol))\n\n def test_burkardt_1(self):\n # This matrix is diagonal.\n # The calculation of the matrix exponential is simple.\n #\n # This is the first of a series of matrix exponential tests\n # collected by John Burkardt from the following sources.\n #\n # Alan Laub,\n # Review of \"Linear System Theory\" by Joao Hespanha,\n # SIAM Review,\n # Volume 52, Number 4, December 2010, pages 779--781.\n #\n # Cleve Moler and Charles Van Loan,\n # Nineteen Dubious Ways to Compute the Exponential of a Matrix,\n # Twenty-Five Years Later,\n # SIAM Review,\n # Volume 45, Number 1, March 2003, pages 3--49.\n #\n # Cleve Moler,\n # Cleve's Corner: A Balancing Act for the Matrix Exponential,\n # 23 July 2012.\n #\n # Robert Ward,\n # Numerical computation of the matrix exponential\n # with accuracy estimate,\n # SIAM Journal on Numerical Analysis,\n # Volume 14, Number 4, September 1977, pages 600--610.\n exp1 = np.exp(1)\n exp2 = np.exp(2)\n A = np.array([\n [1, 0],\n [0, 2],\n ], dtype=float)\n desired = np.array([\n [exp1, 0],\n [0, exp2],\n ], dtype=float)\n actual = expm(A)\n assert_allclose(actual, desired)\n\n def test_burkardt_2(self):\n # This matrix is symmetric.\n # The calculation of the matrix exponential is straightforward.\n A = np.array([\n [1, 3],\n [3, 2],\n ], dtype=float)\n desired = np.array([\n [39.322809708033859, 46.166301438885753],\n [46.166301438885768, 54.711576854329110],\n ], dtype=float)\n actual = expm(A)\n assert_allclose(actual, desired)\n\n def test_burkardt_3(self):\n # This example is due to Laub.\n # This matrix is ill-suited for the Taylor series approach.\n # As powers of A are computed, the entries blow up too quickly.\n exp1 = np.exp(1)\n exp39 = np.exp(39)\n A = np.array([\n [0, 1],\n [-39, -40],\n ], dtype=float)\n desired = np.array([\n [\n 39/(38*exp1) - 1/(38*exp39),\n -np.expm1(-38) / (38*exp1)],\n [\n 39*np.expm1(-38) / (38*exp1),\n -1/(38*exp1) + 39/(38*exp39)],\n ], dtype=float)\n actual = expm(A)\n assert_allclose(actual, desired)\n\n def test_burkardt_4(self):\n # This example is due to Moler and Van Loan.\n # The example will cause problems for the series summation approach,\n # as well as for diagonal Pade approximations.\n A = np.array([\n [-49, 24],\n [-64, 31],\n ], dtype=float)\n U = np.array([[3, 1], [4, 2]], dtype=float)\n V = np.array([[1, -1/2], [-2, 3/2]], dtype=float)\n w = np.array([-17, -1], dtype=float)\n desired = np.dot(U * np.exp(w), 
V)\n actual = expm(A)\n assert_allclose(actual, desired)\n\n def test_burkardt_5(self):\n # This example is due to Moler and Van Loan.\n # This matrix is strictly upper triangular\n # All powers of A are zero beyond some (low) limit.\n # This example will cause problems for Pade approximations.\n A = np.array([\n [0, 6, 0, 0],\n [0, 0, 6, 0],\n [0, 0, 0, 6],\n [0, 0, 0, 0],\n ], dtype=float)\n desired = np.array([\n [1, 6, 18, 36],\n [0, 1, 6, 18],\n [0, 0, 1, 6],\n [0, 0, 0, 1],\n ], dtype=float)\n actual = expm(A)\n assert_allclose(actual, desired)\n\n def test_burkardt_6(self):\n # This example is due to Moler and Van Loan.\n # This matrix does not have a complete set of eigenvectors.\n # That means the eigenvector approach will fail.\n exp1 = np.exp(1)\n A = np.array([\n [1, 1],\n [0, 1],\n ], dtype=float)\n desired = np.array([\n [exp1, exp1],\n [0, exp1],\n ], dtype=float)\n actual = expm(A)\n assert_allclose(actual, desired)\n\n def test_burkardt_7(self):\n # This example is due to Moler and Van Loan.\n # This matrix is very close to example 5.\n # Mathematically, it has a complete set of eigenvectors.\n # Numerically, however, the calculation will be suspect.\n exp1 = np.exp(1)\n eps = np.spacing(1)\n A = np.array([\n [1 + eps, 1],\n [0, 1 - eps],\n ], dtype=float)\n desired = np.array([\n [exp1, exp1],\n [0, exp1],\n ], dtype=float)\n actual = expm(A)\n assert_allclose(actual, desired)\n\n def test_burkardt_8(self):\n # This matrix was an example in Wikipedia.\n exp4 = np.exp(4)\n exp16 = np.exp(16)\n A = np.array([\n [21, 17, 6],\n [-5, -1, -6],\n [4, 4, 16],\n ], dtype=float)\n desired = np.array([\n [13*exp16 - exp4, 13*exp16 - 5*exp4, 2*exp16 - 2*exp4],\n [-9*exp16 + exp4, -9*exp16 + 5*exp4, -2*exp16 + 2*exp4],\n [16*exp16, 16*exp16, 4*exp16],\n ], dtype=float) * 0.25\n actual = expm(A)\n assert_allclose(actual, desired)\n\n def test_burkardt_9(self):\n # This matrix is due to the NAG Library.\n # It is an example for function F01ECF.\n A = np.array([\n [1, 2, 2, 2],\n [3, 1, 1, 2],\n [3, 2, 1, 2],\n [3, 3, 3, 1],\n ], dtype=float)\n desired = np.array([\n [740.7038, 610.8500, 542.2743, 549.1753],\n [731.2510, 603.5524, 535.0884, 542.2743],\n [823.7630, 679.4257, 603.5524, 610.8500],\n [998.4355, 823.7630, 731.2510, 740.7038],\n ], dtype=float)\n actual = expm(A)\n assert_allclose(actual, desired)\n\n def test_burkardt_10(self):\n # This is Ward's example #1.\n # It is defective and nonderogatory.\n A = np.array([\n [4, 2, 0],\n [1, 4, 1],\n [1, 1, 4],\n ], dtype=float)\n assert_allclose(sorted(scipy.linalg.eigvals(A)), (3, 3, 6))\n desired = np.array([\n [147.8666224463699, 183.7651386463682, 71.79703239999647],\n [127.7810855231823, 183.7651386463682, 91.88256932318415],\n [127.7810855231824, 163.6796017231806, 111.9681062463718],\n ], dtype=float)\n actual = expm(A)\n assert_allclose(actual, desired)\n\n def test_burkardt_11(self):\n # This is Ward's example #2.\n # It is a symmetric matrix.\n A = np.array([\n [29.87942128909879, 0.7815750847907159, -2.289519314033932],\n [0.7815750847907159, 25.72656945571064, 8.680737820540137],\n [-2.289519314033932, 8.680737820540137, 34.39400925519054],\n ], dtype=float)\n assert_allclose(scipy.linalg.eigvalsh(A), (20, 30, 40))\n desired = np.array([\n [\n 5.496313853692378E+15,\n -1.823188097200898E+16,\n -3.047577080858001E+16],\n [\n -1.823188097200899E+16,\n 6.060522870222108E+16,\n 1.012918429302482E+17],\n [\n -3.047577080858001E+16,\n 1.012918429302482E+17,\n 1.692944112408493E+17],\n ], dtype=float)\n actual = expm(A)\n 
assert_allclose(actual, desired)\n\n def test_burkardt_12(self):\n # This is Ward's example #3.\n # Ward's algorithm has difficulty estimating the accuracy\n # of its results.\n A = np.array([\n [-131, 19, 18],\n [-390, 56, 54],\n [-387, 57, 52],\n ], dtype=float)\n assert_allclose(sorted(scipy.linalg.eigvals(A)), (-20, -2, -1))\n desired = np.array([\n [-1.509644158793135, 0.3678794391096522, 0.1353352811751005],\n [-5.632570799891469, 1.471517758499875, 0.4060058435250609],\n [-4.934938326088363, 1.103638317328798, 0.5413411267617766],\n ], dtype=float)\n actual = expm(A)\n assert_allclose(actual, desired)\n\n def test_burkardt_13(self):\n # This is Ward's example #4.\n # This is a version of the Forsythe matrix.\n # The eigenvector problem is badly conditioned.\n # Ward's algorithm has difficulty esimating the accuracy\n # of its results for this problem.\n #\n # Check the construction of one instance of this family of matrices.\n A4_actual = _burkardt_13_power(4, 1)\n A4_desired = [[0, 1, 0, 0],\n [0, 0, 1, 0],\n [0, 0, 0, 1],\n [1e-4, 0, 0, 0]]\n assert_allclose(A4_actual, A4_desired)\n # Check the expm for a few instances.\n for n in (2, 3, 4, 10):\n # Approximate expm using Taylor series.\n # This works well for this matrix family\n # because each matrix in the summation,\n # even before dividing by the factorial,\n # is entrywise positive with max entry 10**(-floor(p/n)*n).\n k = max(1, int(np.ceil(16/n)))\n desired = np.zeros((n, n), dtype=float)\n for p in range(n*k):\n Ap = _burkardt_13_power(n, p)\n assert_equal(np.min(Ap), 0)\n assert_allclose(np.max(Ap), np.power(10, -np.floor(p/n)*n))\n desired += Ap / factorial(p)\n actual = expm(_burkardt_13_power(n, 1))\n assert_allclose(actual, desired)\n\n def test_burkardt_14(self):\n # This is Moler's example.\n # This badly scaled matrix caused problems for MATLAB's expm().\n A = np.array([\n [0, 1e-8, 0],\n [-(2e10 + 4e8/6.), -3, 2e10],\n [200./3., 0, -200./3.],\n ], dtype=float)\n desired = np.array([\n [0.446849468283175, 1.54044157383952e-09, 0.462811453558774],\n [-5743067.77947947, -0.0152830038686819, -4526542.71278401],\n [0.447722977849494, 1.54270484519591e-09, 0.463480648837651],\n ], dtype=float)\n actual = expm(A)\n assert_allclose(actual, desired)\n\n\nclass TestOperators(object):\n\n def test_product_operator(self):\n random.seed(1234)\n n = 5\n k = 2\n nsamples = 10\n for i in range(nsamples):\n A = np.random.randn(n, n)\n B = np.random.randn(n, n)\n C = np.random.randn(n, n)\n D = np.random.randn(n, k)\n op = ProductOperator(A, B, C)\n assert_allclose(op.matmat(D), A.dot(B).dot(C).dot(D))\n assert_allclose(op.T.matmat(D), (A.dot(B).dot(C)).T.dot(D))\n\n def test_matrix_power_operator(self):\n random.seed(1234)\n n = 5\n k = 2\n p = 3\n nsamples = 10\n for i in range(nsamples):\n A = np.random.randn(n, n)\n B = np.random.randn(n, k)\n op = MatrixPowerOperator(A, p)\n assert_allclose(op.matmat(B), matrix_power(A, p).dot(B))\n assert_allclose(op.T.matmat(B), matrix_power(A, p).T.dot(B))\n\n" ]
[ [ "scipy.special.factorial", "numpy.spacing", "numpy.linalg.matrix_power", "numpy.diag", "numpy.random.seed", "numpy.allclose", "scipy.sparse.csc_matrix", "scipy._lib._numpy_compat.suppress_warnings", "numpy.random.rand", "scipy.sparse.linalg.matfuncs.ProductOperator", "numpy.expm1", "numpy.eye", "numpy.ceil", "numpy.zeros", "numpy.testing.assert_array_almost_equal_nulp", "scipy.sparse.linalg.matfuncs._expm", "numpy.testing.assert_array_almost_equal", "numpy.power", "numpy.max", "numpy.min", "scipy.linalg.logm", "numpy.iscomplexobj", "scipy.sparse.linalg.matfuncs.MatrixPowerOperator", "numpy.linalg.norm", "scipy.sparse.linalg.matfuncs._onenorm_matrix_power_nnm", "scipy.sparse.construct.eye", "numpy.matrix", "numpy.random.randn", "numpy.floor", "numpy.exp", "numpy.random.random", "numpy.testing.assert_allclose", "numpy.array", "scipy.sparse.linalg.matfuncs.expm" ] ]
Oriolac/data-utils
[ "87423d7f7f408c26ea31cbeb7a55a77a55a9ee27" ]
[ "src/dataut/visual.py" ]
[ "\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport seaborn as sns\n\ndef show_corr(X, mask=True, figsize=(7,7)):\n fig, ax = plt.subplots(figsize=figsize)\n corr = X.corr()\n mask = np.triu(np.ones_like(corr, dtype=bool)) if mask else np.ones_like(corr, dtype=bool)\n sns.heatmap(corr, mask=mask, square=True, annot=True, ax=ax)\n plt.show()" ]
[ [ "matplotlib.pyplot.show", "numpy.ones_like", "matplotlib.pyplot.subplots" ] ]
wingkitlee0/lifelines
[ "46a225ab1d8845f0366921d2b61151fb9a69d398" ]
[ "lifelines/fitters/kaplan_meier_fitter.py" ]
[ "# -*- coding: utf-8 -*-\nimport functools\nimport warnings\nimport numpy as np\nimport pandas as pd\n\nfrom lifelines.fitters import UnivariateFitter\nfrom lifelines.utils import (\n _preprocess_inputs,\n _additive_estimate,\n _to_1d_array,\n StatError,\n inv_normal_cdf,\n median_survival_times,\n qth_survival_time,\n check_nans_or_infs,\n StatisticalWarning,\n coalesce,\n CensoringType,\n)\nfrom lifelines.plotting import plot_loglogs, _plot_estimate\n\n\nclass KaplanMeierFitter(UnivariateFitter):\n\n \"\"\"\n Class for fitting the Kaplan-Meier estimate for the survival function.\n\n Parameters\n ----------\n alpha: float, option (default=0.05)\n The alpha value associated with the confidence intervals.\n\n\n Examples\n --------\n >>> from lifelines import KaplanMeierFitter\n >>> from lifelines.datasets import load_waltons\n >>> waltons = load_waltons()\n >>> kmf = KaplanMeierFitter()\n >>> kmf.fit(waltons['T'], waltons['E'])\n >>> kmf.plot()\n\n\n Attributes\n ----------\n survival_function_ : DataFrame\n The estimated survival function (with custom timeline if provided)\n median_ : float\n The estimated median time to event. np.inf if doesn't exist.\n confidence_interval_ : DataFrame\n The lower and upper confidence intervals for the survival function. An alias of\n ``confidence_interval_survival_function_``\n confidence_interval_survival_function_ : DataFrame\n The lower and upper confidence intervals for the survival function. An alias of\n ``confidence_interval_``\n cumumlative_density_ : DataFrame\n The estimated cumulative density function (with custom timeline if provided)\n confidence_interval_cumulative_density_ : DataFrame\n The lower and upper confidence intervals for the cumulative density\n durations: array\n The durations provided\n event_observed: array\n The event_observed variable provided\n timeline: array\n The time line to use for plotting and indexing\n entry: array or None\n The entry array provided, or None\n event_table: DataFrame\n A summary of the life table\n \"\"\"\n\n @CensoringType.right_censoring\n def fit(\n self,\n durations,\n event_observed=None,\n timeline=None,\n entry=None,\n label=\"KM_estimate\",\n left_censorship=False,\n alpha=None,\n ci_labels=None,\n weights=None,\n ): # pylint: disable=too-many-arguments,too-many-locals\n \"\"\"\n Fit the model to a right-censored dataset\n\n Parameters\n ----------\n durations: an array, list, pd.DataFrame or pd.Series\n length n -- duration subject was observed for\n event_observed: an array, list, pd.DataFrame, or pd.Series, optional\n True if the the death was observed, False if the event was lost (right-censored). Defaults all True if event_observed==None\n timeline: an array, list, pd.DataFrame, or pd.Series, optional\n return the best estimate at the values in timelines (postively increasing)\n entry: an array, list, pd.DataFrame, or pd.Series, optional\n relative time when a subject entered the study. This is useful for left-truncated (not left-censored) observations. If None, all members of the population\n entered study when they were \"born\".\n label: string, optional\n a string to name the column of the estimate.\n alpha: float, optional\n the alpha value in the confidence intervals. Overrides the initializing alpha for this call to fit only.\n left_censorship: bool, optional (default=False)\n Deprecated, use ``fit_left_censoring``\n ci_labels: tuple, optional\n add custom column names to the generated confidence intervals as a length-2 list: [<lower-bound name>, <upper-bound name>]. 
Default: <label>_lower_<1-alpha/2>\n weights: an array, list, pd.DataFrame, or pd.Series, optional\n if providing a weighted dataset. For example, instead\n of providing every subject as a single element of `durations` and `event_observed`, one could\n weigh subject differently.\n\n Returns\n -------\n self: KaplanMeierFitter\n self with new properties like ``survival_function_``, ``plot()``, ``median``\n\n \"\"\"\n if left_censorship:\n warnings.warn(\n \"kwarg left_censorship is deprecated and will be removed in a future release. Please use ``.fit_left_censoring`` instead.\",\n DeprecationWarning,\n )\n\n return self._fit(durations, event_observed, timeline, entry, label, alpha, ci_labels, weights)\n\n @CensoringType.left_censoring\n def fit_left_censoring(\n self,\n durations,\n event_observed=None,\n timeline=None,\n entry=None,\n label=\"KM_estimate\",\n alpha=None,\n ci_labels=None,\n weights=None,\n ):\n \"\"\"\n Fit the model to a left-censored dataset\n\n Parameters\n ----------\n durations: an array, list, pd.DataFrame or pd.Series\n length n -- duration subject was observed for\n event_observed: an array, list, pd.DataFrame, or pd.Series, optional\n True if the the death was observed, False if the event was lost (right-censored). Defaults all True if event_observed==None\n timeline: an array, list, pd.DataFrame, or pd.Series, optional\n return the best estimate at the values in timelines (postively increasing)\n entry: an array, list, pd.DataFrame, or pd.Series, optional\n relative time when a subject entered the study. This is useful for left-truncated (not left-censored) observations. If None, all members of the population\n entered study when they were \"born\".\n label: string, optional\n a string to name the column of the estimate.\n alpha: float, optional\n the alpha value in the confidence intervals. Overrides the initializing alpha for this call to fit only.\n left_censorship: bool, optional (default=False)\n Deprecated, use ``fit_left_censoring``\n ci_labels: tuple, optional\n add custom column names to the generated confidence intervals as a length-2 list: [<lower-bound name>, <upper-bound name>]. Default: <label>_lower_<1-alpha/2>\n weights: an array, list, pd.DataFrame, or pd.Series, optional\n if providing a weighted dataset. For example, instead\n of providing every subject as a single element of `durations` and `event_observed`, one could\n weigh subject differently.\n\n Returns\n -------\n self: KaplanMeierFitter\n self with new properties like ``survival_function_``, ``plot()``, ``median``\n\n \"\"\"\n return self._fit(durations, event_observed, timeline, entry, label, alpha, ci_labels, weights)\n\n def _fit(\n self,\n durations,\n event_observed=None,\n timeline=None,\n entry=None,\n label=\"KM_estimate\",\n alpha=None,\n ci_labels=None,\n weights=None,\n ): # pylint: disable=too-many-arguments,too-many-locals\n \"\"\"\n Parameters\n ----------\n durations: an array, list, pd.DataFrame or pd.Series\n length n -- duration subject was observed for\n event_observed: an array, list, pd.DataFrame, or pd.Series, optional\n True if the the death was observed, False if the event was lost (right-censored). Defaults all True if event_observed==None\n timeline: an array, list, pd.DataFrame, or pd.Series, optional\n return the best estimate at the values in timelines (postively increasing)\n entry: an array, list, pd.DataFrame, or pd.Series, optional\n relative time when a subject entered the study. This is useful for left-truncated (not left-censored) observations. 
If None, all members of the population\n entered study when they were \"born\".\n label: string, optional\n a string to name the column of the estimate.\n alpha: float, optional\n the alpha value in the confidence intervals. Overrides the initializing alpha for this call to fit only.\n left_censorship: bool, optional (default=False)\n True if durations and event_observed refer to left censorship events. Default False\n ci_labels: tuple, optional\n add custom column names to the generated confidence intervals as a length-2 list: [<lower-bound name>, <upper-bound name>]. Default: <label>_lower_<1-alpha/2>\n weights: an array, list, pd.DataFrame, or pd.Series, optional\n if providing a weighted dataset. For example, instead\n of providing every subject as a single element of `durations` and `event_observed`, one could\n weigh subject differently.\n\n Returns\n -------\n self: KaplanMeierFitter\n self with new properties like ``survival_function_``, ``plot()``, ``median``\n\n \"\"\"\n self._check_values(durations)\n if event_observed is not None:\n self._check_values(event_observed)\n\n self._label = label\n\n if weights is not None:\n weights = np.asarray(weights)\n if (weights.astype(int) != weights).any():\n warnings.warn(\n \"\"\"It looks like your weights are not integers, possibly propensity scores then?\n It's important to know that the naive variance estimates of the coefficients are biased. Instead use Monte Carlo to\n estimate the variances. See paper \"Variance estimation when using inverse probability of treatment weighting (IPTW) with survival analysis\"\n or \"Adjusted Kaplan-Meier estimator and log-rank test with inverse probability of treatment weighting for survival data.\"\n \"\"\",\n StatisticalWarning,\n )\n else:\n weights = np.ones_like(durations, dtype=float)\n\n # if the user is interested in left-censorship, we return the cumulative_density_, no survival_function_,\n is_left_censoring = CensoringType.is_left_censoring(self)\n primary_estimate_name = \"survival_function_\" if not is_left_censoring else \"cumulative_density_\"\n secondary_estimate_name = \"cumulative_density_\" if not is_left_censoring else \"survival_function_\"\n\n self.durations, self.event_observed, self.timeline, self.entry, self.event_table, self.weights = _preprocess_inputs(\n durations, event_observed, timeline, entry, weights\n )\n\n alpha = alpha if alpha else self.alpha\n log_estimate, cumulative_sq_ = _additive_estimate(\n self.event_table, self.timeline, self._additive_f, self._additive_var, is_left_censoring\n )\n\n if entry is not None:\n # a serious problem with KM is that when the sample size is small and there are too few early\n # truncation times, it may happen that is the number of patients at risk and the number of deaths is the same.\n # we adjust for this using the Breslow-Fleming-Harrington estimator\n n = self.event_table.shape[0]\n net_population = (self.event_table[\"entrance\"] - self.event_table[\"removed\"]).cumsum()\n if net_population.iloc[: int(n / 2)].min() == 0:\n ix = net_population.iloc[: int(n / 2)].idxmin()\n raise StatError(\n \"\"\"There are too few early truncation times and too many events. S(t)==0 for all t>%g. 
Recommend BreslowFlemingHarringtonFitter.\"\"\"\n % ix\n )\n\n # estimation\n setattr(self, primary_estimate_name, pd.DataFrame(np.exp(log_estimate), columns=[self._label]))\n setattr(self, secondary_estimate_name, pd.DataFrame(1 - np.exp(log_estimate), columns=[self._label]))\n\n self.__estimate = getattr(self, primary_estimate_name)\n self.confidence_interval_ = self._bounds(cumulative_sq_[:, None], alpha, ci_labels)\n self._median = median_survival_times(self.survival_function_)\n self.percentile = functools.partial(qth_survival_time, model_or_survival_function=self.survival_function_)\n self._cumulative_sq_ = cumulative_sq_\n\n setattr(self, \"confidence_interval_\" + primary_estimate_name, self.confidence_interval_)\n setattr(self, \"confidence_interval_\" + secondary_estimate_name, 1 - self.confidence_interval_)\n\n # estimation methods\n self._estimation_method = primary_estimate_name\n self._estimate_name = primary_estimate_name\n self._update_docstrings()\n\n return self\n\n @property\n def median_(self):\n warnings.warn(\n \"\"\"Please use `median_survival_time_` property instead. Future property `median_` will be removed.\"\"\",\n FutureWarning,\n )\n return self._median\n\n @property\n def median_survival_time_(self):\n return self._median\n\n def _check_values(self, array):\n check_nans_or_infs(array)\n\n def plot_loglogs(self, *args, **kwargs):\n r\"\"\"\n Plot :math:`\\log(S(t))` against :math:`\\log(t)`. Same arguments as ``.plot``.\n \"\"\"\n return plot_loglogs(self, *args, **kwargs)\n\n def survival_function_at_times(self, times, label=None):\n \"\"\"\n Return a Pandas series of the predicted survival value at specific times\n\n Parameters\n -----------\n times: iterable or float\n\n Returns\n --------\n pd.Series\n\n \"\"\"\n label = coalesce(label, self._label)\n return pd.Series(self.predict(times), index=_to_1d_array(times), name=label)\n\n def cumulative_density_at_times(self, times, label=None):\n \"\"\"\n Return a Pandas series of the predicted cumulative density at specific times\n\n Parameters\n -----------\n times: iterable or float\n\n Returns\n --------\n pd.Series\n\n \"\"\"\n label = coalesce(label, self._label)\n return pd.Series(1 - self.predict(times), index=_to_1d_array(times), name=label)\n\n def plot_survival_function(self, **kwargs):\n \"\"\"Alias of ``plot``\"\"\"\n return _plot_estimate(self, estimate=\"survival_function_\", **kwargs)\n\n def plot_cumulative_density(self, **kwargs):\n \"\"\"\n Plots a pretty figure of {0}.{1}\n\n Matplotlib plot arguments can be passed in inside the kwargs, plus\n\n Parameters\n -----------\n show_censors: bool\n place markers at censorship events. Default: False\n censor_styles: bool\n If show_censors, this dictionary will be passed into the plot call.\n ci_alpha: bool\n the transparency level of the confidence interval. Default: 0.3\n ci_force_lines: bool\n force the confidence intervals to be line plots (versus default shaded areas). Default: False\n ci_show: bool\n show confidence intervals. Default: True\n ci_legend: bool\n if ci_force_lines is True, this is a boolean flag to add the lines' labels to the legend. Default: False\n at_risk_counts: bool\n show group sizes at time points. See function ``add_at_risk_counts`` for details. Default: False\n loc: slice\n specify a time-based subsection of the curves to plot, ex:\n\n >>> model.plot(loc=slice(0.,10.))\n\n will plot the time values between t=0. 
and t=10.\n iloc: slice\n specify a location-based subsection of the curves to plot, ex:\n\n >>> model.plot(iloc=slice(0,10))\n\n will plot the first 10 time points.\n invert_y_axis: bool\n boolean to invert the y-axis, useful to show cumulative graphs instead of survival graphs. (Deprecated, use ``plot_cumulative_density()``)\n\n Returns\n -------\n ax:\n a pyplot axis object\n \"\"\"\n return _plot_estimate(\n self,\n estimate=self.cumulative_density_,\n confidence_intervals=self.confidence_interval_cumulative_density_,\n **kwargs\n )\n\n def _bounds(self, cumulative_sq_, alpha, ci_labels):\n # This method calculates confidence intervals using the exponential Greenwood formula.\n # See https://www.math.wustl.edu/%7Esawyer/handouts/greenwood.pdf\n z = inv_normal_cdf(1 - alpha / 2)\n df = pd.DataFrame(index=self.timeline)\n v = np.log(self.__estimate.values)\n\n if ci_labels is None:\n ci_labels = [\"%s_upper_%g\" % (self._label, 1 - alpha), \"%s_lower_%g\" % (self._label, 1 - alpha)]\n assert len(ci_labels) == 2, \"ci_labels should be a length 2 array.\"\n\n df[ci_labels[0]] = np.exp(-np.exp(np.log(-v) + z * np.sqrt(cumulative_sq_) / v))\n df[ci_labels[1]] = np.exp(-np.exp(np.log(-v) - z * np.sqrt(cumulative_sq_) / v))\n return df\n\n def _additive_f(self, population, deaths):\n np.seterr(invalid=\"ignore\", divide=\"ignore\")\n return np.log(population - deaths) - np.log(population)\n\n def _additive_var(self, population, deaths):\n np.seterr(divide=\"ignore\")\n population = population.astype(\"uint64\")\n return (deaths / (population * (population - deaths))).replace([np.inf], 0)\n\n def plot_cumulative_hazard(self, **kwargs):\n raise NotImplementedError(\n \"The Kaplan-Meier estimator is not used to estimate the cumulative hazard. Try the NelsonAalenFitter or any other parametric model\"\n )\n\n def plot_hazard(self, **kwargs):\n raise NotImplementedError(\n \"The Kaplan-Meier estimator is not used to estimate the hazard. Try the NelsonAalenFitter or any other parametric model\"\n )\n" ]
[ [ "pandas.DataFrame", "numpy.ones_like", "numpy.asarray", "numpy.exp", "numpy.seterr", "numpy.log", "numpy.sqrt" ] ]
whfh3900/Tacotron-2-korea-example
[ "2799394f14e5d52bed2e5f7495bbd89e020a350a" ]
[ "wavenet_vocoder/feeder.py" ]
[ "import os\nimport threading\nimport time\n\nimport numpy as np\nimport tensorflow as tf\nfrom datasets import audio\nfrom infolog import log\nfrom keras.utils import np_utils\nfrom sklearn.model_selection import train_test_split\n\nfrom .util import is_mulaw_quantize, is_scalar_input\n\n\n\n_batches_per_group = 64\n\n\nclass Feeder:\n\t\"\"\"\n\t\tFeeds batches of data into queue in a background thread.\n\t\"\"\"\n\tdef __init__(self, coordinator, metadata_filename, base_dir, hparams):\n\t\tsuper(Feeder, self).__init__()\n\n\t\tself._coord = coordinator\n\t\tself._hparams = hparams\n\t\tself._train_offset = 0\n\t\tself._test_offset = 0\n\n\t\tif hparams.symmetric_mels:\n\t\t\tself._spec_pad = -hparams.max_abs_value\n\t\telse:\n\t\t\tself._spec_pad = 0.\n\n\t\t#Base directory of the project (to map files from different locations)\n\t\tself._base_dir = base_dir\n\n\t\t#Load metadata\n\t\tself._data_dir = os.path.dirname(metadata_filename)\n\t\twith open(metadata_filename, 'r', encoding='utf-8') as f:\n\t\t\tself._metadata = [line.strip().split('|') for line in f]\n\n\t\t#Train test split\n\t\tif hparams.wavenet_test_size is None:\n\t\t\tassert hparams.wavenet_test_batches is not None\n\n\t\ttest_size = (hparams.wavenet_test_size if hparams.wavenet_test_size is not None\n\t\t\telse hparams.wavenet_test_batches * hparams.wavenet_batch_size)\n\t\tindices = np.arange(len(self._metadata))\n\t\ttrain_indices, test_indices = train_test_split(indices,\n\t\t\ttest_size=test_size, random_state=hparams.wavenet_data_random_state)\n\n\t\t#Make sure test size is a multiple of batch size else round up\n\t\tlen_test_indices = _round_down(len(test_indices), hparams.wavenet_batch_size)\n\t\textra_test = test_indices[len_test_indices:]\n\t\ttest_indices = test_indices[:len_test_indices]\n\t\ttrain_indices = np.concatenate([train_indices, extra_test])\n\n\t\tself._train_meta = list(np.array(self._metadata)[train_indices])\n\t\tself._test_meta = list(np.array(self._metadata)[test_indices])\n\n\t\tself.test_steps = len(self._test_meta) // hparams.wavenet_batch_size\n\n\t\tif hparams.wavenet_test_size is None:\n\t\t\tassert hparams.wavenet_test_batches == self.test_steps\n\n\t\t#Get conditioning status\n\t\tself.local_condition, self.global_condition = self._check_conditions()\n\n\t\twith tf.device('/cpu:0'):\n\t\t\t# Create placeholders for inputs and targets. 
Don't specify batch size because we want\n\t\t\t# to be able to feed different batch sizes at eval time.\n\t\t\tif is_scalar_input(hparams.input_type):\n\t\t\t\tinput_placeholder = tf.placeholder(tf.float32, shape=(None, 1, None), name='audio_inputs')\n\t\t\t\ttarget_placeholder = tf.placeholder(tf.float32, shape=(None, None, 1), name='audio_targets')\n\t\t\t\ttarget_type = tf.float32\n\t\t\telse:\n\t\t\t\tinput_placeholder = tf.placeholder(tf.float32, shape=(None, hparams.quantize_channels, None), name='audio_inputs')\n\t\t\t\ttarget_placeholder = tf.placeholder(tf.int32, shape=(None, None, 1), name='audio_targets')\n\t\t\t\ttarget_type = tf.int32\n\n\t\t\tself._placeholders = [\n\t\t\tinput_placeholder,\n\t\t\ttarget_placeholder,\n\t\t\ttf.placeholder(tf.int32, shape=(None, ), name='input_lengths'),\n\t\t\t]\n\n\t\t\tqueue_types = [tf.float32, target_type, tf.int32]\n\n\t\t\tif self.local_condition:\n\t\t\t\tself._placeholders.append(tf.placeholder(tf.float32, shape=(None, hparams.num_mels, None), name='local_condition_features'))\n\t\t\t\tqueue_types.append(tf.float32)\n\t\t\tif self.global_condition:\n\t\t\t\tself._placeholders.append(tf.placeholder(tf.int32, shape=(None, 1), name='global_condition_features'))\n\t\t\t\tqueue_types.append(tf.int32)\n\n\t\t\t# Create queue for buffering data\n\t\t\tqueue = tf.FIFOQueue(8, queue_types, name='input_queue')\n\t\t\tself._enqueue_op = queue.enqueue(self._placeholders)\n\t\t\tvariables = queue.dequeue()\n\n\t\t\tself.inputs = variables[0]\n\t\t\tself.inputs.set_shape(self._placeholders[0].shape)\n\t\t\tself.targets = variables[1]\n\t\t\tself.targets.set_shape(self._placeholders[1].shape)\n\t\t\tself.input_lengths = variables[2]\n\t\t\tself.input_lengths.set_shape(self._placeholders[2].shape)\n\n\t\t\tidx = 3\n\n\t\t\t#If local conditioning disabled override c inputs with None\n\t\t\tif hparams.cin_channels < 0:\n\t\t\t\tself.local_condition_features = None\n\t\t\telse:\n\t\t\t\tself.local_condition_features = variables[idx]\n\t\t\t\tself.local_condition_features.set_shape(self._placeholders[idx].shape)\n\t\t\t\tidx += 1\n\n\t\t\t#If global conditioning disabled override g inputs with None\n\t\t\tif hparams.gin_channels < 0:\n\t\t\t\tself.global_condition_features = None\n\t\t\telse:\n\t\t\t\tself.global_condition_features = variables[idx]\n\t\t\t\tself.global_condition_features.set_shape(self._placeholders[idx].shape)\n\n\t\t\t# Create queue for buffering eval data\n\t\t\teval_queue = tf.FIFOQueue(1, queue_types, name='eval_queue')\n\t\t\tself._eval_enqueue_op = eval_queue.enqueue(self._placeholders)\n\t\t\teval_variables = eval_queue.dequeue()\n\n\t\t\tself.eval_inputs = eval_variables[0]\n\t\t\tself.eval_inputs.set_shape(self._placeholders[0].shape)\n\t\t\tself.eval_targets = eval_variables[1]\n\t\t\tself.eval_targets.set_shape(self._placeholders[1].shape)\n\t\t\tself.eval_input_lengths = eval_variables[2]\n\t\t\tself.eval_input_lengths.set_shape(self._placeholders[2].shape)\n\n\t\t\teval_idx = 3\n\n\t\t\t#If local conditioning disabled override c inputs with None\n\t\t\tif hparams.cin_channels < 0:\n\t\t\t\tself.eval_local_condition_features = None\n\t\t\telse:\n\t\t\t\tself.eval_local_condition_features = eval_variables[eval_idx]\n\t\t\t\tself.eval_local_condition_features.set_shape(self._placeholders[eval_idx].shape)\n\t\t\t\teval_idx += 1\n\n\t\t\t#If global conditioning disabled override g inputs with None\n\t\t\tif hparams.gin_channels < 0:\n\t\t\t\tself.eval_global_condition_features = 
None\n\t\t\telse:\n\t\t\t\tself.eval_global_condition_features = eval_variables[eval_idx]\n\t\t\t\tself.eval_global_condition_features.set_shape(self._placeholders[eval_idx].shape)\n\n\n\tdef start_threads(self, session):\n\t\tself._session = session\n\t\tthread = threading.Thread(name='background', target=self._enqueue_next_train_group)\n\t\tthread.daemon = True #Thread will close when parent quits\n\t\tthread.start()\n\n\t\tthread = threading.Thread(name='background', target=self._enqueue_next_test_group)\n\t\tthread.daemon = True #Thread will close when parent quits\n\t\tthread.start()\n\n\tdef _get_test_groups(self):\n\t\tmeta = self._test_meta[self._test_offset]\n\t\tself._test_offset += 1\n\n\t\tif self._hparams.train_with_GTA:\n\t\t\tmel_file = meta[2]\n\t\telse:\n\t\t\tmel_file = meta[1]\n\t\taudio_file = meta[0]\n\n\t\tinput_data = np.load(os.path.join(self._base_dir, audio_file))\n\n\t\tif self.local_condition:\n\t\t\tlocal_condition_features = np.load(os.path.join(self._base_dir, mel_file))\n\t\telse:\n\t\t\tlocal_condition_features = None\n\n\t\tif self.global_condition:\n\t\t\tglobal_condition_features = meta[3]\n\t\t\tif global_condition_features == '<no_g>':\n\t\t\t\traise RuntimeError('Please redo the wavenet preprocessing (or GTA synthesis) to assign global condition features!')\n\t\telse:\n\t\t\tglobal_condition_features = None\n\n\t\treturn (input_data, local_condition_features, global_condition_features, len(input_data))\n\n\tdef make_test_batches(self):\n\t\tstart = time.time()\n\n\t\t#Read one example for evaluation\n\t\tn = 1\n\n\t\t#Test on entire test set (one sample at an evaluation step)\n\t\texamples = [self._get_test_groups() for i in range(len(self._test_meta))]\n\t\tbatches = [examples[i: i+n] for i in range(0, len(examples), n)]\n\t\tnp.random.shuffle(batches)\n\n\t\tprint('\\nGenerated {} test batches of size {} in {:.3f} sec'.format(len(batches), n, time.time() - start))\n\t\tlog('\\nGenerated {} test batches of size {} in {:.3f} sec'.format(len(batches), n, time.time() - start))\n\t\treturn batches\n\n\tdef _enqueue_next_train_group(self):\n\t\twhile not self._coord.should_stop():\n\t\t\tstart = time.time()\n\n\t\t\t# Read a group of examples\n\t\t\tn = self._hparams.wavenet_batch_size\n\t\t\texamples = [self._get_next_example() for i in range(n * _batches_per_group)]\n\n\t\t\t# Bucket examples base on similiar output length for efficiency\n\t\t\texamples.sort(key=lambda x: x[-1])\n\t\t\tbatches = [examples[i: i+n] for i in range(0, len(examples), n)]\n\t\t\tnp.random.shuffle(batches)\n\n\t\t\tprint('\\nGenerated {} train batches of size {} in {:.3f} sec'.format(len(batches), n, time.time() - start))\n\t\t\tlog('\\nGenerated {} train batches of size {} in {:.3f} sec'.format(len(batches), n, time.time() - start))\n\t\t\tfor batch in batches:\n\t\t\t\tfeed_dict = dict(zip(self._placeholders, self._prepare_batch(batch)))\n\t\t\t\tself._session.run(self._enqueue_op, feed_dict=feed_dict)\n\n\tdef _enqueue_next_test_group(self):\n\t\ttest_batches = self.make_test_batches()\n\t\twhile not self._coord.should_stop():\n\t\t\tfor batch in test_batches:\n\t\t\t\tfeed_dict = dict(zip(self._placeholders, self._prepare_batch(batch)))\n\t\t\t\tself._session.run(self._eval_enqueue_op, feed_dict=feed_dict)\n\n\tdef _get_next_example(self):\n\t\t'''Get a single example (input, output, len_output) from disk\n\t\t'''\n\t\tif self._train_offset >= len(self._train_meta):\n\t\t\tself._train_offset = 0\n\t\t\tnp.random.shuffle(self._train_meta)\n\t\tmeta = 
self._train_meta[self._train_offset]\n\t\tself._train_offset += 1\n\n\t\tif self._hparams.train_with_GTA:\n\t\t\tmel_file = meta[2]\n\t\t\tif 'linear' in mel_file:\n\t\t\t\traise RuntimeError('Linear spectrogram files selected instead of GTA mels, did you specify the wrong metadata?')\n\t\telse:\n\t\t\tmel_file = meta[1]\n\t\taudio_file = meta[0]\n\n\t\tinput_data = np.load(os.path.join(self._base_dir, audio_file))\n\n\t\tif self.local_condition:\n\t\t\tlocal_condition_features = np.load(os.path.join(self._base_dir, mel_file))\n\t\telse:\n\t\t\tlocal_condition_features = None\n\n\t\tif self.global_condition:\n\t\t\tglobal_condition_features = meta[3]\n\t\t\tif global_condition_features == '<no_g>':\n\t\t\t\traise RuntimeError('Please redo the wavenet preprocessing (or GTA synthesis) to assign global condition features!')\n\t\telse:\n\t\t\tglobal_condition_features = None\n\n\t\treturn (input_data, local_condition_features, global_condition_features, len(input_data))\n\n\n\tdef _prepare_batch(self, batches):\n\t\tassert 0 == len(batches) % self._hparams.wavenet_num_gpus\n\t\tsize_per_device = int(len(batches) / self._hparams.wavenet_num_gpus)\n\t\tnp.random.shuffle(batches)\n\n\t\t#Limit time steps to save GPU Memory usage\n\t\tmax_time_steps = self._limit_time()\n\t\t#Adjust time resolution for upsampling\n\t\tbatches = self._adjust_time_resolution(batches, self.local_condition, max_time_steps)\n\n\t\t#time lengths\n\t\tinput_lengths = np.asarray([len(x[0]) for x in batches], np.int32)\n\t\tmax_input_length = max(input_lengths)\n\n\t\t#Since all inputs/targets will have the same lengths for all GPUs, we can simply treat all GPUs batches as one big batch and stack all data. (fixed length)\n\t\tinputs = self._prepare_inputs([x[0] for x in batches], max_input_length)\n\t\ttargets = self._prepare_targets([x[0] for x in batches], max_input_length)\n\t\tlocal_condition_features = self._prepare_local_conditions(self.local_condition, [x[1] for x in batches])\n\t\tglobal_condition_features = self._prepare_global_conditions(self.global_condition, [x[2] for x in batches])\n\n\t\t#Create final batches\n\t\tnew_batches = (inputs, targets, input_lengths)\n\t\tif local_condition_features is not None:\n\t\t\tnew_batches += (local_condition_features, )\n\t\tif global_condition_features is not None:\n\t\t\tnew_batches += (global_condition_features, )\n\n\t\treturn new_batches\n\n\tdef _prepare_inputs(self, inputs, maxlen):\n\t\tif is_mulaw_quantize(self._hparams.input_type):\n\t\t\t#[batch_size, time_steps, quantize_channels]\n\t\t\tx_batch = np.stack([_pad_inputs(np_utils.to_categorical(\n\t\t\t\tx, num_classes=self._hparams.quantize_channels), maxlen) for x in inputs]).astype(np.float32)\n\t\telse:\n\t\t\t#[batch_size, time_steps, 1]\n\t\t\tx_batch = np.stack([_pad_inputs(x.reshape(-1, 1), maxlen) for x in inputs]).astype(np.float32)\n\t\tassert len(x_batch.shape) == 3\n\t\t#Convert to channels first [batch_size, quantize_channels (or 1), time_steps]\n\t\tx_batch = np.transpose(x_batch, (0, 2, 1))\n\t\treturn x_batch\n\n\tdef _prepare_targets(self, targets, maxlen):\n\t\t#[batch_size, time_steps]\n\t\tif is_mulaw_quantize(self._hparams.input_type):\n\t\t\ty_batch = np.stack([_pad_targets(x, maxlen) for x in targets]).astype(np.int32)\n\t\telse:\n\t\t\ty_batch = np.stack([_pad_targets(x, maxlen) for x in targets]).astype(np.float32)\n\t\tassert len(y_batch.shape) == 2\n\t\t#Add extra axis (make 3 dimension)\n\t\ty_batch = np.expand_dims(y_batch, axis=-1)\n\t\treturn y_batch\n\n\tdef 
_prepare_local_conditions(self, local_condition, c_features):\n\t\tif local_condition:\n\t\t\tmaxlen = max([len(x) for x in c_features])\n\t\t\t#[-max, max] or [0,max]\n\t\t\tT2_output_range = (-self._hparams.max_abs_value, self._hparams.max_abs_value) if self._hparams.symmetric_mels else (0, self._hparams.max_abs_value)\n\n\t\t\tif self._hparams.clip_for_wavenet:\n\t\t\t\tc_features = [np.clip(x, T2_output_range[0], T2_output_range[1]) for x in c_features]\n\t\t\t\t\n\t\t\tc_batch = np.stack([_pad_inputs(x, maxlen, _pad=T2_output_range[0]) for x in c_features]).astype(np.float32)\n\t\t\tassert len(c_batch.shape) == 3\n\t\t\t#[batch_size, c_channels, time_steps]\n\t\t\tc_batch = np.transpose(c_batch, (0, 2, 1))\n\n\t\t\tif self._hparams.normalize_for_wavenet:\n\t\t\t\t#rerange to [0, 1]\n\t\t\t\tc_batch = _interp(c_batch, T2_output_range).astype(np.float32)\n\n\t\telse:\n\t\t\tc_batch = None\n\n\t\treturn c_batch\n\n\tdef _prepare_global_conditions(self, global_condition, g_features):\n\t\tif global_condition:\n\t\t\tg_batch = np.array(g_features).astype(np.int32).reshape(-1, 1)\n\n\t\telse:\n\t\t\tg_batch = None\n\n\t\treturn g_batch\n\n\tdef _check_conditions(self):\n\t\tlocal_condition = self._hparams.cin_channels > 0\n\t\tglobal_condition = self._hparams.gin_channels > 0\n\t\treturn local_condition, global_condition\n\n\tdef _limit_time(self):\n\t\t'''Limit time resolution to save GPU memory.\n\t\t'''\n\t\tif self._hparams.max_time_sec is not None:\n\t\t\treturn int(self._hparams.max_time_sec * self._hparams.sample_rate)\n\n\t\telif self._hparams.max_time_steps is not None:\n\t\t\treturn self._hparams.max_time_steps\n\n\t\telse:\n\t\t\treturn None\n\n\tdef _adjust_time_resolution(self, batch, local_condition, max_time_steps):\n\t\t'''Adjust time resolution between audio and local condition\n\t\t'''\n\t\tif local_condition:\n\t\t\tnew_batch = []\n\t\t\tfor b in batch:\n\t\t\t\tx, c, g, l = b\n\t\t\t\tself._assert_ready_for_upsample(x, c)\n\t\t\t\tif max_time_steps is not None:\n\t\t\t\t\tmax_steps = _ensure_divisible(max_time_steps, audio.get_hop_size(self._hparams), True)\n\t\t\t\t\tif len(x) > max_time_steps:\n\t\t\t\t\t\tmax_time_frames = max_steps // audio.get_hop_size(self._hparams)\n\t\t\t\t\t\tstart = np.random.randint(0, len(c) - max_time_frames)\n\t\t\t\t\t\ttime_start = start * audio.get_hop_size(self._hparams)\n\t\t\t\t\t\tx = x[time_start: time_start + max_time_frames * audio.get_hop_size(self._hparams)]\n\t\t\t\t\t\tc = c[start: start + max_time_frames, :]\n\t\t\t\t\t\tself._assert_ready_for_upsample(x, c)\n\n\t\t\t\tnew_batch.append((x, c, g, l))\n\t\t\treturn new_batch\n\n\t\telse:\n\t\t\tnew_batch = []\n\t\t\tfor b in batch:\n\t\t\t\tx, c, g, l = b\n\t\t\t\tx = audio.trim_silence(x, hparams)\n\t\t\t\tif max_time_steps is not None and len(x) > max_time_steps:\n\t\t\t\t\tstart = np.random.randint(0, len(c) - max_time_steps)\n\t\t\t\t\tx = x[start: start + max_time_steps]\n\t\t\t\tnew_batch.append((x, c, g, l))\n\t\t\treturn new_batch\n\n\tdef _assert_ready_for_upsample(self, x, c):\n\t\tassert len(x) % len(c) == 0 and len(x) // len(c) == audio.get_hop_size(self._hparams)\n\n\ndef _pad_inputs(x, maxlen, _pad=0):\n\treturn np.pad(x, [(0, maxlen - len(x)), (0, 0)], mode='constant', constant_values=_pad)\n\ndef _pad_targets(x, maxlen, _pad=0):\n\treturn np.pad(x, (0, maxlen - len(x)), mode='constant', constant_values=_pad)\n\ndef _round_up(x, multiple):\n\tremainder = x % multiple\n\treturn x if remainder == 0 else x + multiple - remainder\n\ndef _round_down(x, 
multiple):\n\tremainder = x % multiple\n\treturn x if remainder == 0 else x - remainder\n\ndef _ensure_divisible(length, divisible_by=256, lower=True):\n\tif length % divisible_by == 0:\n\t\treturn length\n\tif lower:\n\t\treturn length - length % divisible_by\n\telse:\n\t\treturn length + (divisible_by - length % divisible_by)\n\ndef _interp(feats, in_range):\n\t#rescales from [-max, max] (or [0, max]) to [0, 1]\n\treturn (feats - in_range[0]) / (in_range[1] - in_range[0])\n" ]
[ [ "tensorflow.placeholder", "numpy.random.shuffle", "numpy.transpose", "tensorflow.device", "tensorflow.FIFOQueue", "numpy.expand_dims", "numpy.clip", "numpy.array", "numpy.concatenate", "sklearn.model_selection.train_test_split" ] ]
Juanlu001/numpy
[ "1e494f1e283340d545b1c7c15dded04a4aaae939" ]
[ "numpy/core/tests/test_regression.py" ]
[ "from __future__ import division, absolute_import, print_function\n\nimport copy\nimport pickle\nimport sys\nimport platform\nimport gc\nimport warnings\nimport tempfile\nfrom os import path\nfrom io import BytesIO\nfrom itertools import chain\n\nimport numpy as np\nfrom numpy.testing import (\n run_module_suite, assert_, assert_equal, IS_PYPY,\n assert_almost_equal, assert_array_equal, assert_array_almost_equal,\n assert_raises, assert_warns, dec, suppress_warnings,\n _assert_valid_refcount, HAS_REFCOUNT,\n )\nfrom numpy.compat import asbytes, asunicode, long\n\n\nclass TestRegression(object):\n def test_invalid_round(self):\n # Ticket #3\n v = 4.7599999999999998\n assert_array_equal(np.array([v]), np.array(v))\n\n def test_mem_empty(self):\n # Ticket #7\n np.empty((1,), dtype=[('x', np.int64)])\n\n def test_pickle_transposed(self):\n # Ticket #16\n a = np.transpose(np.array([[2, 9], [7, 0], [3, 8]]))\n f = BytesIO()\n pickle.dump(a, f)\n f.seek(0)\n b = pickle.load(f)\n f.close()\n assert_array_equal(a, b)\n\n def test_typeNA(self):\n # Ticket #31\n assert_equal(np.typeNA[np.int64], 'Int64')\n assert_equal(np.typeNA[np.uint64], 'UInt64')\n\n def test_dtype_names(self):\n # Ticket #35\n # Should succeed\n np.dtype([(('name', 'label'), np.int32, 3)])\n\n def test_reduce(self):\n # Ticket #40\n assert_almost_equal(np.add.reduce([1., .5], dtype=None), 1.5)\n\n def test_zeros_order(self):\n # Ticket #43\n np.zeros([3], int, 'C')\n np.zeros([3], order='C')\n np.zeros([3], int, order='C')\n\n def test_asarray_with_order(self):\n # Check that nothing is done when order='F' and array C/F-contiguous\n a = np.ones(2)\n assert_(a is np.asarray(a, order='F'))\n\n def test_ravel_with_order(self):\n # Check that ravel works when order='F' and array C/F-contiguous\n a = np.ones(2)\n assert_(not a.ravel('F').flags.owndata)\n\n def test_sort_bigendian(self):\n # Ticket #47\n a = np.linspace(0, 10, 11)\n c = a.astype(np.dtype('<f8'))\n c.sort()\n assert_array_almost_equal(c, a)\n\n def test_negative_nd_indexing(self):\n # Ticket #49\n c = np.arange(125).reshape((5, 5, 5))\n origidx = np.array([-1, 0, 1])\n idx = np.array(origidx)\n c[idx]\n assert_array_equal(idx, origidx)\n\n def test_char_dump(self):\n # Ticket #50\n f = BytesIO()\n ca = np.char.array(np.arange(1000, 1010), itemsize=4)\n ca.dump(f)\n f.seek(0)\n ca = np.load(f)\n f.close()\n\n def test_noncontiguous_fill(self):\n # Ticket #58.\n a = np.zeros((5, 3))\n b = a[:, :2,]\n\n def rs():\n b.shape = (10,)\n\n assert_raises(AttributeError, rs)\n\n def test_bool(self):\n # Ticket #60\n np.bool_(1) # Should succeed\n\n def test_indexing1(self):\n # Ticket #64\n descr = [('x', [('y', [('z', 'c16', (2,)),]),]),]\n buffer = ((([6j, 4j],),),)\n h = np.array(buffer, dtype=descr)\n h['x']['y']['z']\n\n def test_indexing2(self):\n # Ticket #65\n descr = [('x', 'i4', (2,))]\n buffer = ([3, 2],)\n h = np.array(buffer, dtype=descr)\n h['x']\n\n def test_round(self):\n # Ticket #67\n x = np.array([1+2j])\n assert_almost_equal(x**(-1), [1/(1+2j)])\n\n def test_scalar_compare(self):\n # Trac Ticket #72\n # https://github.com/numpy/numpy/issues/565\n a = np.array(['test', 'auto'])\n assert_array_equal(a == 'auto', np.array([False, True]))\n assert_(a[1] == 'auto')\n assert_(a[0] != 'auto')\n b = np.linspace(0, 10, 11)\n # This should return true for now, but will eventually raise an error:\n with suppress_warnings() as sup:\n sup.filter(FutureWarning)\n assert_(b != 'auto')\n assert_(b[0] != 'auto')\n\n def test_unicode_swapping(self):\n # Ticket #79\n ulen = 
1\n ucs_value = u'\\U0010FFFF'\n ua = np.array([[[ucs_value*ulen]*2]*3]*4, dtype='U%s' % ulen)\n ua.newbyteorder() # Should succeed.\n\n def test_object_array_fill(self):\n # Ticket #86\n x = np.zeros(1, 'O')\n x.fill([])\n\n def test_mem_dtype_align(self):\n # Ticket #93\n assert_raises(TypeError, np.dtype,\n {'names':['a'], 'formats':['foo']}, align=1)\n\n @dec.knownfailureif((sys.version_info[0] >= 3) or\n (sys.platform == \"win32\" and\n platform.architecture()[0] == \"64bit\"),\n \"numpy.intp('0xff', 16) not supported on Py3, \"\n \"as it does not inherit from Python int\")\n def test_intp(self):\n # Ticket #99\n i_width = np.int_(0).nbytes*2 - 1\n np.intp('0x' + 'f'*i_width, 16)\n assert_raises(OverflowError, np.intp, '0x' + 'f'*(i_width+1), 16)\n assert_raises(ValueError, np.intp, '0x1', 32)\n assert_equal(255, np.intp('0xFF', 16))\n assert_equal(1024, np.intp(1024))\n\n def test_endian_bool_indexing(self):\n # Ticket #105\n a = np.arange(10., dtype='>f8')\n b = np.arange(10., dtype='<f8')\n xa = np.where((a > 2) & (a < 6))\n xb = np.where((b > 2) & (b < 6))\n ya = ((a > 2) & (a < 6))\n yb = ((b > 2) & (b < 6))\n assert_array_almost_equal(xa, ya.nonzero())\n assert_array_almost_equal(xb, yb.nonzero())\n assert_(np.all(a[ya] > 0.5))\n assert_(np.all(b[yb] > 0.5))\n\n def test_endian_where(self):\n # GitHub issue #369\n net = np.zeros(3, dtype='>f4')\n net[1] = 0.00458849\n net[2] = 0.605202\n max_net = net.max()\n test = np.where(net <= 0., max_net, net)\n correct = np.array([ 0.60520202, 0.00458849, 0.60520202])\n assert_array_almost_equal(test, correct)\n\n def test_endian_recarray(self):\n # Ticket #2185\n dt = np.dtype([\n ('head', '>u4'),\n ('data', '>u4', 2),\n ])\n buf = np.recarray(1, dtype=dt)\n buf[0]['head'] = 1\n buf[0]['data'][:] = [1, 1]\n\n h = buf[0]['head']\n d = buf[0]['data'][0]\n buf[0]['head'] = h\n buf[0]['data'][0] = d\n assert_(buf[0]['head'] == 1)\n\n def test_mem_dot(self):\n # Ticket #106\n x = np.random.randn(0, 1)\n y = np.random.randn(10, 1)\n # Dummy array to detect bad memory access:\n _z = np.ones(10)\n _dummy = np.empty((0, 10))\n z = np.lib.stride_tricks.as_strided(_z, _dummy.shape, _dummy.strides)\n np.dot(x, np.transpose(y), out=z)\n assert_equal(_z, np.ones(10))\n # Do the same for the built-in dot:\n np.core.multiarray.dot(x, np.transpose(y), out=z)\n assert_equal(_z, np.ones(10))\n\n def test_arange_endian(self):\n # Ticket #111\n ref = np.arange(10)\n x = np.arange(10, dtype='<f8')\n assert_array_equal(ref, x)\n x = np.arange(10, dtype='>f8')\n assert_array_equal(ref, x)\n\n def test_argmax(self):\n # Ticket #119\n a = np.random.normal(0, 1, (4, 5, 6, 7, 8))\n for i in range(a.ndim):\n a.argmax(i) # Should succeed\n\n def test_mem_divmod(self):\n # Ticket #126\n for i in range(10):\n divmod(np.array([i])[0], 10)\n\n def test_hstack_invalid_dims(self):\n # Ticket #128\n x = np.arange(9).reshape((3, 3))\n y = np.array([0, 0, 0])\n assert_raises(ValueError, np.hstack, (x, y))\n\n def test_squeeze_type(self):\n # Ticket #133\n a = np.array([3])\n b = np.array(3)\n assert_(type(a.squeeze()) is np.ndarray)\n assert_(type(b.squeeze()) is np.ndarray)\n\n def test_add_identity(self):\n # Ticket #143\n assert_equal(0, np.add.identity)\n\n def test_numpy_float_python_long_addition(self):\n # Check that numpy float and python longs can be added correctly.\n a = np.float_(23.) + 2**135\n assert_equal(a, 23. 
+ 2**135)\n\n def test_binary_repr_0(self):\n # Ticket #151\n assert_equal('0', np.binary_repr(0))\n\n def test_rec_iterate(self):\n # Ticket #160\n descr = np.dtype([('i', int), ('f', float), ('s', '|S3')])\n x = np.rec.array([(1, 1.1, '1.0'),\n (2, 2.2, '2.0')], dtype=descr)\n x[0].tolist()\n [i for i in x[0]]\n\n def test_unicode_string_comparison(self):\n # Ticket #190\n a = np.array('hello', np.unicode_)\n b = np.array('world')\n a == b\n\n def test_tobytes_FORTRANORDER_discontiguous(self):\n # Fix in r2836\n # Create non-contiguous Fortran ordered array\n x = np.array(np.random.rand(3, 3), order='F')[:, :2]\n assert_array_almost_equal(x.ravel(), np.frombuffer(x.tobytes()))\n\n def test_flat_assignment(self):\n # Correct behaviour of ticket #194\n x = np.empty((3, 1))\n x.flat = np.arange(3)\n assert_array_almost_equal(x, [[0], [1], [2]])\n x.flat = np.arange(3, dtype=float)\n assert_array_almost_equal(x, [[0], [1], [2]])\n\n def test_broadcast_flat_assignment(self):\n # Ticket #194\n x = np.empty((3, 1))\n\n def bfa():\n x[:] = np.arange(3)\n\n def bfb():\n x[:] = np.arange(3, dtype=float)\n\n assert_raises(ValueError, bfa)\n assert_raises(ValueError, bfb)\n\n def test_nonarray_assignment(self):\n # See also Issue gh-2870, test for non-array assignment\n # and equivalent unsafe casted array assignment\n a = np.arange(10)\n b = np.ones(10, dtype=bool)\n r = np.arange(10)\n\n def assign(a, b, c):\n a[b] = c\n\n assert_raises(ValueError, assign, a, b, np.nan)\n a[b] = np.array(np.nan) # but not this.\n assert_raises(ValueError, assign, a, r, np.nan)\n a[r] = np.array(np.nan)\n\n def test_unpickle_dtype_with_object(self):\n # Implemented in r2840\n dt = np.dtype([('x', int), ('y', np.object_), ('z', 'O')])\n f = BytesIO()\n pickle.dump(dt, f)\n f.seek(0)\n dt_ = pickle.load(f)\n f.close()\n assert_equal(dt, dt_)\n\n def test_mem_array_creation_invalid_specification(self):\n # Ticket #196\n dt = np.dtype([('x', int), ('y', np.object_)])\n # Wrong way\n assert_raises(ValueError, np.array, [1, 'object'], dt)\n # Correct way\n np.array([(1, 'object')], dt)\n\n def test_recarray_single_element(self):\n # Ticket #202\n a = np.array([1, 2, 3], dtype=np.int32)\n b = a.copy()\n r = np.rec.array(a, shape=1, formats=['3i4'], names=['d'])\n assert_array_equal(a, b)\n assert_equal(a, r[0][0])\n\n def test_zero_sized_array_indexing(self):\n # Ticket #205\n tmp = np.array([])\n\n def index_tmp():\n tmp[np.array(10)]\n\n assert_raises(IndexError, index_tmp)\n\n def test_chararray_rstrip(self):\n # Ticket #222\n x = np.chararray((1,), 5)\n x[0] = b'a '\n x = x.rstrip()\n assert_equal(x[0], b'a')\n\n def test_object_array_shape(self):\n # Ticket #239\n assert_equal(np.array([[1, 2], 3, 4], dtype=object).shape, (3,))\n assert_equal(np.array([[1, 2], [3, 4]], dtype=object).shape, (2, 2))\n assert_equal(np.array([(1, 2), (3, 4)], dtype=object).shape, (2, 2))\n assert_equal(np.array([], dtype=object).shape, (0,))\n assert_equal(np.array([[], [], []], dtype=object).shape, (3, 0))\n assert_equal(np.array([[3, 4], [5, 6], None], dtype=object).shape, (3,))\n\n def test_mem_around(self):\n # Ticket #243\n x = np.zeros((1,))\n y = [0]\n decimal = 6\n np.around(abs(x-y), decimal) <= 10.0**(-decimal)\n\n def test_character_array_strip(self):\n # Ticket #246\n x = np.char.array((\"x\", \"x \", \"x \"))\n for c in x:\n assert_equal(c, \"x\")\n\n def test_lexsort(self):\n # Lexsort memory error\n v = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10])\n assert_equal(np.lexsort(v), 0)\n\n def test_lexsort_invalid_sequence(self):\n 
# Issue gh-4123\n class BuggySequence(object):\n def __len__(self):\n return 4\n\n def __getitem__(self, key):\n raise KeyError\n\n assert_raises(KeyError, np.lexsort, BuggySequence())\n\n def test_pickle_py2_bytes_encoding(self):\n # Check that arrays and scalars pickled on Py2 are\n # unpickleable on Py3 using encoding='bytes'\n\n test_data = [\n # (original, py2_pickle)\n (np.unicode_('\\u6f2c'),\n b\"cnumpy.core.multiarray\\nscalar\\np0\\n(cnumpy\\ndtype\\np1\\n\"\n b\"(S'U1'\\np2\\nI0\\nI1\\ntp3\\nRp4\\n(I3\\nS'<'\\np5\\nNNNI4\\nI4\\n\"\n b\"I0\\ntp6\\nbS',o\\\\x00\\\\x00'\\np7\\ntp8\\nRp9\\n.\"),\n\n (np.array([9e123], dtype=np.float64),\n b\"cnumpy.core.multiarray\\n_reconstruct\\np0\\n(cnumpy\\nndarray\\n\"\n b\"p1\\n(I0\\ntp2\\nS'b'\\np3\\ntp4\\nRp5\\n(I1\\n(I1\\ntp6\\ncnumpy\\ndtype\\n\"\n b\"p7\\n(S'f8'\\np8\\nI0\\nI1\\ntp9\\nRp10\\n(I3\\nS'<'\\np11\\nNNNI-1\\nI-1\\n\"\n b\"I0\\ntp12\\nbI00\\nS'O\\\\x81\\\\xb7Z\\\\xaa:\\\\xabY'\\np13\\ntp14\\nb.\"),\n\n (np.array([(9e123,)], dtype=[('name', float)]),\n b\"cnumpy.core.multiarray\\n_reconstruct\\np0\\n(cnumpy\\nndarray\\np1\\n\"\n b\"(I0\\ntp2\\nS'b'\\np3\\ntp4\\nRp5\\n(I1\\n(I1\\ntp6\\ncnumpy\\ndtype\\np7\\n\"\n b\"(S'V8'\\np8\\nI0\\nI1\\ntp9\\nRp10\\n(I3\\nS'|'\\np11\\nN(S'name'\\np12\\ntp13\\n\"\n b\"(dp14\\ng12\\n(g7\\n(S'f8'\\np15\\nI0\\nI1\\ntp16\\nRp17\\n(I3\\nS'<'\\np18\\nNNNI-1\\n\"\n b\"I-1\\nI0\\ntp19\\nbI0\\ntp20\\nsI8\\nI1\\nI0\\ntp21\\n\"\n b\"bI00\\nS'O\\\\x81\\\\xb7Z\\\\xaa:\\\\xabY'\\np22\\ntp23\\nb.\"),\n ]\n\n if sys.version_info[:2] >= (3, 4):\n # encoding='bytes' was added in Py3.4\n for original, data in test_data:\n result = pickle.loads(data, encoding='bytes')\n assert_equal(result, original)\n\n if isinstance(result, np.ndarray) and result.dtype.names:\n for name in result.dtype.names:\n assert_(isinstance(name, str))\n\n def test_pickle_dtype(self):\n # Ticket #251\n pickle.dumps(float)\n\n def test_swap_real(self):\n # Ticket #265\n assert_equal(np.arange(4, dtype='>c8').imag.max(), 0.0)\n assert_equal(np.arange(4, dtype='<c8').imag.max(), 0.0)\n assert_equal(np.arange(4, dtype='>c8').real.max(), 3.0)\n assert_equal(np.arange(4, dtype='<c8').real.max(), 3.0)\n\n def test_object_array_from_list(self):\n # Ticket #270\n assert_(np.array([1, 'A', None]).shape == (3,))\n\n def test_multiple_assign(self):\n # Ticket #273\n a = np.zeros((3, 1), int)\n a[[1, 2]] = 1\n\n def test_empty_array_type(self):\n assert_equal(np.array([]).dtype, np.zeros(0).dtype)\n\n def test_void_copyswap(self):\n dt = np.dtype([('one', '<i4'), ('two', '<i4')])\n x = np.array((1, 2), dtype=dt)\n x = x.byteswap()\n assert_(x['one'] > 1 and x['two'] > 2)\n\n def test_method_args(self):\n # Make sure methods and functions have same default axis\n # keyword and arguments\n funcs1 = ['argmax', 'argmin', 'sum', ('product', 'prod'),\n ('sometrue', 'any'),\n ('alltrue', 'all'), 'cumsum', ('cumproduct', 'cumprod'),\n 'ptp', 'cumprod', 'prod', 'std', 'var', 'mean',\n 'round', 'min', 'max', 'argsort', 'sort']\n funcs2 = ['compress', 'take', 'repeat']\n\n for func in funcs1:\n arr = np.random.rand(8, 7)\n arr2 = arr.copy()\n if isinstance(func, tuple):\n func_meth = func[1]\n func = func[0]\n else:\n func_meth = func\n res1 = getattr(arr, func_meth)()\n res2 = getattr(np, func)(arr2)\n if res1 is None:\n res1 = arr\n\n if res1.dtype.kind in 'uib':\n assert_((res1 == res2).all(), func)\n else:\n assert_(abs(res1-res2).max() < 1e-8, func)\n\n for func in funcs2:\n arr1 = np.random.rand(8, 7)\n arr2 = np.random.rand(8, 7)\n res1 = None\n if func == 
'compress':\n arr1 = arr1.ravel()\n res1 = getattr(arr2, func)(arr1)\n else:\n arr2 = (15*arr2).astype(int).ravel()\n if res1 is None:\n res1 = getattr(arr1, func)(arr2)\n res2 = getattr(np, func)(arr1, arr2)\n assert_(abs(res1-res2).max() < 1e-8, func)\n\n def test_mem_lexsort_strings(self):\n # Ticket #298\n lst = ['abc', 'cde', 'fgh']\n np.lexsort((lst,))\n\n def test_fancy_index(self):\n # Ticket #302\n x = np.array([1, 2])[np.array([0])]\n assert_equal(x.shape, (1,))\n\n def test_recarray_copy(self):\n # Ticket #312\n dt = [('x', np.int16), ('y', np.float64)]\n ra = np.array([(1, 2.3)], dtype=dt)\n rb = np.rec.array(ra, dtype=dt)\n rb['x'] = 2.\n assert_(ra['x'] != rb['x'])\n\n def test_rec_fromarray(self):\n # Ticket #322\n x1 = np.array([[1, 2], [3, 4], [5, 6]])\n x2 = np.array(['a', 'dd', 'xyz'])\n x3 = np.array([1.1, 2, 3])\n np.rec.fromarrays([x1, x2, x3], formats=\"(2,)i4,a3,f8\")\n\n def test_object_array_assign(self):\n x = np.empty((2, 2), object)\n x.flat[2] = (1, 2, 3)\n assert_equal(x.flat[2], (1, 2, 3))\n\n def test_ndmin_float64(self):\n # Ticket #324\n x = np.array([1, 2, 3], dtype=np.float64)\n assert_equal(np.array(x, dtype=np.float32, ndmin=2).ndim, 2)\n assert_equal(np.array(x, dtype=np.float64, ndmin=2).ndim, 2)\n\n def test_ndmin_order(self):\n # Issue #465 and related checks\n assert_(np.array([1, 2], order='C', ndmin=3).flags.c_contiguous)\n assert_(np.array([1, 2], order='F', ndmin=3).flags.f_contiguous)\n assert_(np.array(np.ones((2, 2), order='F'), ndmin=3).flags.f_contiguous)\n assert_(np.array(np.ones((2, 2), order='C'), ndmin=3).flags.c_contiguous)\n\n def test_mem_axis_minimization(self):\n # Ticket #327\n data = np.arange(5)\n data = np.add.outer(data, data)\n\n def test_mem_float_imag(self):\n # Ticket #330\n np.float64(1.0).imag\n\n def test_dtype_tuple(self):\n # Ticket #334\n assert_(np.dtype('i4') == np.dtype(('i4', ())))\n\n def test_dtype_posttuple(self):\n # Ticket #335\n np.dtype([('col1', '()i4')])\n\n def test_numeric_carray_compare(self):\n # Ticket #341\n assert_equal(np.array(['X'], 'c'), b'X')\n\n def test_string_array_size(self):\n # Ticket #342\n assert_raises(ValueError,\n np.array, [['X'], ['X', 'X', 'X']], '|S1')\n\n def test_dtype_repr(self):\n # Ticket #344\n dt1 = np.dtype(('uint32', 2))\n dt2 = np.dtype(('uint32', (2,)))\n assert_equal(dt1.__repr__(), dt2.__repr__())\n\n def test_reshape_order(self):\n # Make sure reshape order works.\n a = np.arange(6).reshape(2, 3, order='F')\n assert_equal(a, [[0, 2, 4], [1, 3, 5]])\n a = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])\n b = a[:, 1]\n assert_equal(b.reshape(2, 2, order='F'), [[2, 6], [4, 8]])\n\n def test_reshape_zero_strides(self):\n # Issue #380, test reshaping of zero strided arrays\n a = np.ones(1)\n a = np.lib.stride_tricks.as_strided(a, shape=(5,), strides=(0,))\n assert_(a.reshape(5, 1).strides[0] == 0)\n\n def test_reshape_zero_size(self):\n # GitHub Issue #2700, setting shape failed for 0-sized arrays\n a = np.ones((0, 2))\n a.shape = (-1, 2)\n\n # Cannot test if NPY_RELAXED_STRIDES_CHECKING changes the strides.\n # With NPY_RELAXED_STRIDES_CHECKING the test becomes superfluous.\n @dec.skipif(np.ones(1).strides[0] == np.iinfo(np.intp).max)\n def test_reshape_trailing_ones_strides(self):\n # GitHub issue gh-2949, bad strides for trailing ones of new shape\n a = np.zeros(12, dtype=np.int32)[::2] # not contiguous\n strides_c = (16, 8, 8, 8)\n strides_f = (8, 24, 48, 48)\n assert_equal(a.reshape(3, 2, 1, 1).strides, strides_c)\n assert_equal(a.reshape(3, 2, 1, 1, 
order='F').strides, strides_f)\n assert_equal(np.array(0, dtype=np.int32).reshape(1, 1).strides, (4, 4))\n\n def test_repeat_discont(self):\n # Ticket #352\n a = np.arange(12).reshape(4, 3)[:, 2]\n assert_equal(a.repeat(3), [2, 2, 2, 5, 5, 5, 8, 8, 8, 11, 11, 11])\n\n def test_array_index(self):\n # Make sure optimization is not called in this case.\n a = np.array([1, 2, 3])\n a2 = np.array([[1, 2, 3]])\n assert_equal(a[np.where(a == 3)], a2[np.where(a2 == 3)])\n\n def test_object_argmax(self):\n a = np.array([1, 2, 3], dtype=object)\n assert_(a.argmax() == 2)\n\n def test_recarray_fields(self):\n # Ticket #372\n dt0 = np.dtype([('f0', 'i4'), ('f1', 'i4')])\n dt1 = np.dtype([('f0', 'i8'), ('f1', 'i8')])\n for a in [np.array([(1, 2), (3, 4)], \"i4,i4\"),\n np.rec.array([(1, 2), (3, 4)], \"i4,i4\"),\n np.rec.array([(1, 2), (3, 4)]),\n np.rec.fromarrays([(1, 2), (3, 4)], \"i4,i4\"),\n np.rec.fromarrays([(1, 2), (3, 4)])]:\n assert_(a.dtype in [dt0, dt1])\n\n def test_random_shuffle(self):\n # Ticket #374\n a = np.arange(5).reshape((5, 1))\n b = a.copy()\n np.random.shuffle(b)\n assert_equal(np.sort(b, axis=0), a)\n\n def test_refcount_vdot(self):\n # Changeset #3443\n _assert_valid_refcount(np.vdot)\n\n def test_startswith(self):\n ca = np.char.array(['Hi', 'There'])\n assert_equal(ca.startswith('H'), [True, False])\n\n def test_noncommutative_reduce_accumulate(self):\n # Ticket #413\n tosubtract = np.arange(5)\n todivide = np.array([2.0, 0.5, 0.25])\n assert_equal(np.subtract.reduce(tosubtract), -10)\n assert_equal(np.divide.reduce(todivide), 16.0)\n assert_array_equal(np.subtract.accumulate(tosubtract),\n np.array([0, -1, -3, -6, -10]))\n assert_array_equal(np.divide.accumulate(todivide),\n np.array([2., 4., 16.]))\n\n def test_convolve_empty(self):\n # Convolve should raise an error for empty input array.\n assert_raises(ValueError, np.convolve, [], [1])\n assert_raises(ValueError, np.convolve, [1], [])\n\n def test_multidim_byteswap(self):\n # Ticket #449\n r = np.array([(1, (0, 1, 2))], dtype=\"i2,3i2\")\n assert_array_equal(r.byteswap(),\n np.array([(256, (0, 256, 512))], r.dtype))\n\n def test_string_NULL(self):\n # Changeset 3557\n assert_equal(np.array(\"a\\x00\\x0b\\x0c\\x00\").item(),\n 'a\\x00\\x0b\\x0c')\n\n def test_junk_in_string_fields_of_recarray(self):\n # Ticket #483\n r = np.array([[b'abc']], dtype=[('var1', '|S20')])\n assert_(asbytes(r['var1'][0][0]) == b'abc')\n\n def test_take_output(self):\n # Ensure that 'take' honours output parameter.\n x = np.arange(12).reshape((3, 4))\n a = np.take(x, [0, 2], axis=1)\n b = np.zeros_like(a)\n np.take(x, [0, 2], axis=1, out=b)\n assert_array_equal(a, b)\n\n def test_take_object_fail(self):\n # Issue gh-3001\n d = 123.\n a = np.array([d, 1], dtype=object)\n if HAS_REFCOUNT:\n ref_d = sys.getrefcount(d)\n try:\n a.take([0, 100])\n except IndexError:\n pass\n if HAS_REFCOUNT:\n assert_(ref_d == sys.getrefcount(d))\n\n def test_array_str_64bit(self):\n # Ticket #501\n s = np.array([1, np.nan], dtype=np.float64)\n with np.errstate(all='raise'):\n np.array_str(s) # Should succeed\n\n def test_frompyfunc_endian(self):\n # Ticket #503\n from math import radians\n uradians = np.frompyfunc(radians, 1, 1)\n big_endian = np.array([83.4, 83.5], dtype='>f8')\n little_endian = np.array([83.4, 83.5], dtype='<f8')\n assert_almost_equal(uradians(big_endian).astype(float),\n uradians(little_endian).astype(float))\n\n def test_mem_string_arr(self):\n # Ticket #514\n s = \"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\"\n t = []\n np.hstack((t, s))\n\n def 
test_arr_transpose(self):\n # Ticket #516\n x = np.random.rand(*(2,)*16)\n x.transpose(list(range(16))) # Should succeed\n\n def test_string_mergesort(self):\n # Ticket #540\n x = np.array(['a']*32)\n assert_array_equal(x.argsort(kind='m'), np.arange(32))\n\n def test_argmax_byteorder(self):\n # Ticket #546\n a = np.arange(3, dtype='>f')\n assert_(a[a.argmax()] == a.max())\n\n def test_rand_seed(self):\n # Ticket #555\n for l in np.arange(4):\n np.random.seed(l)\n\n def test_mem_deallocation_leak(self):\n # Ticket #562\n a = np.zeros(5, dtype=float)\n b = np.array(a, dtype=float)\n del a, b\n\n def test_mem_on_invalid_dtype(self):\n \"Ticket #583\"\n assert_raises(ValueError, np.fromiter, [['12', ''], ['13', '']], str)\n\n def test_dot_negative_stride(self):\n # Ticket #588\n x = np.array([[1, 5, 25, 125., 625]])\n y = np.array([[20.], [160.], [640.], [1280.], [1024.]])\n z = y[::-1].copy()\n y2 = y[::-1]\n assert_equal(np.dot(x, z), np.dot(x, y2))\n\n def test_object_casting(self):\n # This used to trigger the object-type version of\n # the bitwise_or operation, because float64 -> object\n # casting succeeds\n def rs():\n x = np.ones([484, 286])\n y = np.zeros([484, 286])\n x |= y\n\n assert_raises(TypeError, rs)\n\n def test_unicode_scalar(self):\n # Ticket #600\n x = np.array([\"DROND\", \"DROND1\"], dtype=\"U6\")\n el = x[1]\n new = pickle.loads(pickle.dumps(el))\n assert_equal(new, el)\n\n def test_arange_non_native_dtype(self):\n # Ticket #616\n for T in ('>f4', '<f4'):\n dt = np.dtype(T)\n assert_equal(np.arange(0, dtype=dt).dtype, dt)\n assert_equal(np.arange(0.5, dtype=dt).dtype, dt)\n assert_equal(np.arange(5, dtype=dt).dtype, dt)\n\n def test_bool_flat_indexing_invalid_nr_elements(self):\n s = np.ones(10, dtype=float)\n x = np.array((15,), dtype=float)\n\n def ia(x, s, v):\n x[(s > 0)] = v\n\n assert_raises(IndexError, ia, x, s, np.zeros(9, dtype=float))\n assert_raises(IndexError, ia, x, s, np.zeros(11, dtype=float))\n\n # Old special case (different code path):\n assert_raises(ValueError, ia, x.flat, s, np.zeros(9, dtype=float))\n assert_raises(ValueError, ia, x.flat, s, np.zeros(11, dtype=float))\n\n def test_mem_scalar_indexing(self):\n # Ticket #603\n x = np.array([0], dtype=float)\n index = np.array(0, dtype=np.int32)\n x[index]\n\n def test_binary_repr_0_width(self):\n assert_equal(np.binary_repr(0, width=3), '000')\n\n def test_fromstring(self):\n assert_equal(np.fromstring(\"12:09:09\", dtype=int, sep=\":\"),\n [12, 9, 9])\n\n def test_searchsorted_variable_length(self):\n x = np.array(['a', 'aa', 'b'])\n y = np.array(['d', 'e'])\n assert_equal(x.searchsorted(y), [3, 3])\n\n def test_string_argsort_with_zeros(self):\n # Check argsort for strings containing zeros.\n x = np.frombuffer(b\"\\x00\\x02\\x00\\x01\", dtype=\"|S2\")\n assert_array_equal(x.argsort(kind='m'), np.array([1, 0]))\n assert_array_equal(x.argsort(kind='q'), np.array([1, 0]))\n\n def test_string_sort_with_zeros(self):\n # Check sort for strings containing zeros.\n x = np.frombuffer(b\"\\x00\\x02\\x00\\x01\", dtype=\"|S2\")\n y = np.frombuffer(b\"\\x00\\x01\\x00\\x02\", dtype=\"|S2\")\n assert_array_equal(np.sort(x, kind=\"q\"), y)\n\n def test_copy_detection_zero_dim(self):\n # Ticket #658\n np.indices((0, 3, 4)).T.reshape(-1, 3)\n\n def test_flat_byteorder(self):\n # Ticket #657\n x = np.arange(10)\n assert_array_equal(x.astype('>i4'), x.astype('<i4').flat[:])\n assert_array_equal(x.astype('>i4').flat[:], x.astype('<i4'))\n\n def test_uint64_from_negative(self):\n assert_equal(np.uint64(-2), 
np.uint64(18446744073709551614))\n\n def test_sign_bit(self):\n x = np.array([0, -0.0, 0])\n assert_equal(str(np.abs(x)), '[0. 0. 0.]')\n\n def test_flat_index_byteswap(self):\n for dt in (np.dtype('<i4'), np.dtype('>i4')):\n x = np.array([-1, 0, 1], dtype=dt)\n assert_equal(x.flat[0].dtype, x[0].dtype)\n\n def test_copy_detection_corner_case(self):\n # Ticket #658\n np.indices((0, 3, 4)).T.reshape(-1, 3)\n\n # Cannot test if NPY_RELAXED_STRIDES_CHECKING changes the strides.\n # With NPY_RELAXED_STRIDES_CHECKING the test becomes superfluous,\n # 0-sized reshape itself is tested elsewhere.\n @dec.skipif(np.ones(1).strides[0] == np.iinfo(np.intp).max)\n def test_copy_detection_corner_case2(self):\n # Ticket #771: strides are not set correctly when reshaping 0-sized\n # arrays\n b = np.indices((0, 3, 4)).T.reshape(-1, 3)\n assert_equal(b.strides, (3 * b.itemsize, b.itemsize))\n\n def test_object_array_refcounting(self):\n # Ticket #633\n if not hasattr(sys, 'getrefcount'):\n return\n\n # NB. this is probably CPython-specific\n\n cnt = sys.getrefcount\n\n a = object()\n b = object()\n c = object()\n\n cnt0_a = cnt(a)\n cnt0_b = cnt(b)\n cnt0_c = cnt(c)\n\n # -- 0d -> 1-d broadcast slice assignment\n\n arr = np.zeros(5, dtype=np.object_)\n\n arr[:] = a\n assert_equal(cnt(a), cnt0_a + 5)\n\n arr[:] = b\n assert_equal(cnt(a), cnt0_a)\n assert_equal(cnt(b), cnt0_b + 5)\n\n arr[:2] = c\n assert_equal(cnt(b), cnt0_b + 3)\n assert_equal(cnt(c), cnt0_c + 2)\n\n del arr\n\n # -- 1-d -> 2-d broadcast slice assignment\n\n arr = np.zeros((5, 2), dtype=np.object_)\n arr0 = np.zeros(2, dtype=np.object_)\n\n arr0[0] = a\n assert_(cnt(a) == cnt0_a + 1)\n arr0[1] = b\n assert_(cnt(b) == cnt0_b + 1)\n\n arr[:, :] = arr0\n assert_(cnt(a) == cnt0_a + 6)\n assert_(cnt(b) == cnt0_b + 6)\n\n arr[:, 0] = None\n assert_(cnt(a) == cnt0_a + 1)\n\n del arr, arr0\n\n # -- 2-d copying + flattening\n\n arr = np.zeros((5, 2), dtype=np.object_)\n\n arr[:, 0] = a\n arr[:, 1] = b\n assert_(cnt(a) == cnt0_a + 5)\n assert_(cnt(b) == cnt0_b + 5)\n\n arr2 = arr.copy()\n assert_(cnt(a) == cnt0_a + 10)\n assert_(cnt(b) == cnt0_b + 10)\n\n arr2 = arr[:, 0].copy()\n assert_(cnt(a) == cnt0_a + 10)\n assert_(cnt(b) == cnt0_b + 5)\n\n arr2 = arr.flatten()\n assert_(cnt(a) == cnt0_a + 10)\n assert_(cnt(b) == cnt0_b + 10)\n\n del arr, arr2\n\n # -- concatenate, repeat, take, choose\n\n arr1 = np.zeros((5, 1), dtype=np.object_)\n arr2 = np.zeros((5, 1), dtype=np.object_)\n\n arr1[...] = a\n arr2[...] 
= b\n assert_(cnt(a) == cnt0_a + 5)\n assert_(cnt(b) == cnt0_b + 5)\n\n tmp = np.concatenate((arr1, arr2))\n assert_(cnt(a) == cnt0_a + 5 + 5)\n assert_(cnt(b) == cnt0_b + 5 + 5)\n\n tmp = arr1.repeat(3, axis=0)\n assert_(cnt(a) == cnt0_a + 5 + 3*5)\n\n tmp = arr1.take([1, 2, 3], axis=0)\n assert_(cnt(a) == cnt0_a + 5 + 3)\n\n x = np.array([[0], [1], [0], [1], [1]], int)\n tmp = x.choose(arr1, arr2)\n assert_(cnt(a) == cnt0_a + 5 + 2)\n assert_(cnt(b) == cnt0_b + 5 + 3)\n\n del tmp # Avoid pyflakes unused variable warning\n\n def test_mem_custom_float_to_array(self):\n # Ticket 702\n class MyFloat(object):\n def __float__(self):\n return 1.0\n\n tmp = np.atleast_1d([MyFloat()])\n tmp.astype(float) # Should succeed\n\n def test_object_array_refcount_self_assign(self):\n # Ticket #711\n class VictimObject(object):\n deleted = False\n\n def __del__(self):\n self.deleted = True\n\n d = VictimObject()\n arr = np.zeros(5, dtype=np.object_)\n arr[:] = d\n del d\n arr[:] = arr # refcount of 'd' might hit zero here\n assert_(not arr[0].deleted)\n arr[:] = arr # trying to induce a segfault by doing it again...\n assert_(not arr[0].deleted)\n\n def test_mem_fromiter_invalid_dtype_string(self):\n x = [1, 2, 3]\n assert_raises(ValueError,\n np.fromiter, [xi for xi in x], dtype='S')\n\n def test_reduce_big_object_array(self):\n # Ticket #713\n oldsize = np.setbufsize(10*16)\n a = np.array([None]*161, object)\n assert_(not np.any(a))\n np.setbufsize(oldsize)\n\n def test_mem_0d_array_index(self):\n # Ticket #714\n np.zeros(10)[np.array(0)]\n\n def test_floats_from_string(self):\n # Ticket #640, floats from string\n fsingle = np.single('1.234')\n fdouble = np.double('1.234')\n flongdouble = np.longdouble('1.234')\n assert_almost_equal(fsingle, 1.234)\n assert_almost_equal(fdouble, 1.234)\n assert_almost_equal(flongdouble, 1.234)\n\n def test_nonnative_endian_fill(self):\n # Non-native endian arrays were incorrectly filled with scalars\n # before r5034.\n if sys.byteorder == 'little':\n dtype = np.dtype('>i4')\n else:\n dtype = np.dtype('<i4')\n x = np.empty([1], dtype=dtype)\n x.fill(1)\n assert_equal(x, np.array([1], dtype=dtype))\n\n def test_dot_alignment_sse2(self):\n # Test for ticket #551, changeset r5140\n x = np.zeros((30, 40))\n y = pickle.loads(pickle.dumps(x))\n # y is now typically not aligned on a 8-byte boundary\n z = np.ones((1, y.shape[0]))\n # This shouldn't cause a segmentation fault:\n np.dot(z, y)\n\n def test_astype_copy(self):\n # Ticket #788, changeset r5155\n # The test data file was generated by scipy.io.savemat.\n # The dtype is float64, but the isbuiltin attribute is 0.\n data_dir = path.join(path.dirname(__file__), 'data')\n filename = path.join(data_dir, \"astype_copy.pkl\")\n if sys.version_info[0] >= 3:\n f = open(filename, 'rb')\n xp = pickle.load(f, encoding='latin1')\n f.close()\n else:\n f = open(filename)\n xp = pickle.load(f)\n f.close()\n xpd = xp.astype(np.float64)\n assert_((xp.__array_interface__['data'][0] !=\n xpd.__array_interface__['data'][0]))\n\n def test_compress_small_type(self):\n # Ticket #789, changeset 5217.\n # compress with out argument segfaulted if cannot cast safely\n import numpy as np\n a = np.array([[1, 2], [3, 4]])\n b = np.zeros((2, 1), dtype=np.single)\n try:\n a.compress([True, False], axis=1, out=b)\n raise AssertionError(\"compress with an out which cannot be \"\n \"safely casted should not return \"\n \"successfully\")\n except TypeError:\n pass\n\n def test_attributes(self):\n # Ticket #791\n class TestArray(np.ndarray):\n def 
__new__(cls, data, info):\n result = np.array(data)\n result = result.view(cls)\n result.info = info\n return result\n\n def __array_finalize__(self, obj):\n self.info = getattr(obj, 'info', '')\n\n dat = TestArray([[1, 2, 3, 4], [5, 6, 7, 8]], 'jubba')\n assert_(dat.info == 'jubba')\n dat.resize((4, 2))\n assert_(dat.info == 'jubba')\n dat.sort()\n assert_(dat.info == 'jubba')\n dat.fill(2)\n assert_(dat.info == 'jubba')\n dat.put([2, 3, 4], [6, 3, 4])\n assert_(dat.info == 'jubba')\n dat.setfield(4, np.int32, 0)\n assert_(dat.info == 'jubba')\n dat.setflags()\n assert_(dat.info == 'jubba')\n assert_(dat.all(1).info == 'jubba')\n assert_(dat.any(1).info == 'jubba')\n assert_(dat.argmax(1).info == 'jubba')\n assert_(dat.argmin(1).info == 'jubba')\n assert_(dat.argsort(1).info == 'jubba')\n assert_(dat.astype(TestArray).info == 'jubba')\n assert_(dat.byteswap().info == 'jubba')\n assert_(dat.clip(2, 7).info == 'jubba')\n assert_(dat.compress([0, 1, 1]).info == 'jubba')\n assert_(dat.conj().info == 'jubba')\n assert_(dat.conjugate().info == 'jubba')\n assert_(dat.copy().info == 'jubba')\n dat2 = TestArray([2, 3, 1, 0], 'jubba')\n choices = [[0, 1, 2, 3], [10, 11, 12, 13],\n [20, 21, 22, 23], [30, 31, 32, 33]]\n assert_(dat2.choose(choices).info == 'jubba')\n assert_(dat.cumprod(1).info == 'jubba')\n assert_(dat.cumsum(1).info == 'jubba')\n assert_(dat.diagonal().info == 'jubba')\n assert_(dat.flatten().info == 'jubba')\n assert_(dat.getfield(np.int32, 0).info == 'jubba')\n assert_(dat.imag.info == 'jubba')\n assert_(dat.max(1).info == 'jubba')\n assert_(dat.mean(1).info == 'jubba')\n assert_(dat.min(1).info == 'jubba')\n assert_(dat.newbyteorder().info == 'jubba')\n assert_(dat.prod(1).info == 'jubba')\n assert_(dat.ptp(1).info == 'jubba')\n assert_(dat.ravel().info == 'jubba')\n assert_(dat.real.info == 'jubba')\n assert_(dat.repeat(2).info == 'jubba')\n assert_(dat.reshape((2, 4)).info == 'jubba')\n assert_(dat.round().info == 'jubba')\n assert_(dat.squeeze().info == 'jubba')\n assert_(dat.std(1).info == 'jubba')\n assert_(dat.sum(1).info == 'jubba')\n assert_(dat.swapaxes(0, 1).info == 'jubba')\n assert_(dat.take([2, 3, 5]).info == 'jubba')\n assert_(dat.transpose().info == 'jubba')\n assert_(dat.T.info == 'jubba')\n assert_(dat.var(1).info == 'jubba')\n assert_(dat.view(TestArray).info == 'jubba')\n # These methods do not preserve subclasses\n assert_(type(dat.nonzero()[0]) is np.ndarray)\n assert_(type(dat.nonzero()[1]) is np.ndarray)\n\n def test_recarray_tolist(self):\n # Ticket #793, changeset r5215\n # Comparisons fail for NaN, so we can't use random memory\n # for the test.\n buf = np.zeros(40, dtype=np.int8)\n a = np.recarray(2, formats=\"i4,f8,f8\", names=\"id,x,y\", buf=buf)\n b = a.tolist()\n assert_( a[0].tolist() == b[0])\n assert_( a[1].tolist() == b[1])\n\n def test_nonscalar_item_method(self):\n # Make sure that .item() fails graciously when it should\n a = np.arange(5)\n assert_raises(ValueError, a.item)\n\n def test_char_array_creation(self):\n a = np.array('123', dtype='c')\n b = np.array([b'1', b'2', b'3'])\n assert_equal(a, b)\n\n def test_unaligned_unicode_access(self):\n # Ticket #825\n for i in range(1, 9):\n msg = 'unicode offset: %d chars' % i\n t = np.dtype([('a', 'S%d' % i), ('b', 'U2')])\n x = np.array([(b'a', u'b')], dtype=t)\n if sys.version_info[0] >= 3:\n assert_equal(str(x), \"[(b'a', 'b')]\", err_msg=msg)\n else:\n assert_equal(str(x), \"[('a', u'b')]\", err_msg=msg)\n\n def test_sign_for_complex_nan(self):\n # Ticket 794.\n with 
np.errstate(invalid='ignore'):\n C = np.array([-np.inf, -2+1j, 0, 2-1j, np.inf, np.nan])\n have = np.sign(C)\n want = np.array([-1+0j, -1+0j, 0+0j, 1+0j, 1+0j, np.nan])\n assert_equal(have, want)\n\n def test_for_equal_names(self):\n # Ticket #674\n dt = np.dtype([('foo', float), ('bar', float)])\n a = np.zeros(10, dt)\n b = list(a.dtype.names)\n b[0] = \"notfoo\"\n a.dtype.names = b\n assert_(a.dtype.names[0] == \"notfoo\")\n assert_(a.dtype.names[1] == \"bar\")\n\n def test_for_object_scalar_creation(self):\n # Ticket #816\n a = np.object_()\n b = np.object_(3)\n b2 = np.object_(3.0)\n c = np.object_([4, 5])\n d = np.object_([None, {}, []])\n assert_(a is None)\n assert_(type(b) is int)\n assert_(type(b2) is float)\n assert_(type(c) is np.ndarray)\n assert_(c.dtype == object)\n assert_(d.dtype == object)\n\n def test_array_resize_method_system_error(self):\n # Ticket #840 - order should be an invalid keyword.\n x = np.array([[0, 1], [2, 3]])\n assert_raises(TypeError, x.resize, (2, 2), order='C')\n\n def test_for_zero_length_in_choose(self):\n \"Ticket #882\"\n a = np.array(1)\n assert_raises(ValueError, lambda x: x.choose([]), a)\n\n def test_array_ndmin_overflow(self):\n \"Ticket #947.\"\n assert_raises(ValueError, lambda: np.array([1], ndmin=33))\n\n def test_void_scalar_with_titles(self):\n # No ticket\n data = [('john', 4), ('mary', 5)]\n dtype1 = [(('source:yy', 'name'), 'O'), (('source:xx', 'id'), int)]\n arr = np.array(data, dtype=dtype1)\n assert_(arr[0][0] == 'john')\n assert_(arr[0][1] == 4)\n\n def test_void_scalar_constructor(self):\n #Issue #1550\n\n #Create test string data, construct void scalar from data and assert\n #that void scalar contains original data.\n test_string = np.array(\"test\")\n test_string_void_scalar = np.core.multiarray.scalar(\n np.dtype((\"V\", test_string.dtype.itemsize)), test_string.tobytes())\n\n assert_(test_string_void_scalar.view(test_string.dtype) == test_string)\n\n #Create record scalar, construct from data and assert that\n #reconstructed scalar is correct.\n test_record = np.ones((), \"i,i\")\n test_record_void_scalar = np.core.multiarray.scalar(\n test_record.dtype, test_record.tobytes())\n\n assert_(test_record_void_scalar == test_record)\n\n #Test pickle and unpickle of void and record scalars\n assert_(pickle.loads(pickle.dumps(test_string)) == test_string)\n assert_(pickle.loads(pickle.dumps(test_record)) == test_record)\n\n def test_blasdot_uninitialized_memory(self):\n # Ticket #950\n for m in [0, 1, 2]:\n for n in [0, 1, 2]:\n for k in range(3):\n # Try to ensure that x->data contains non-zero floats\n x = np.array([123456789e199], dtype=np.float64)\n if IS_PYPY:\n x.resize((m, 0), refcheck=False)\n else:\n x.resize((m, 0))\n y = np.array([123456789e199], dtype=np.float64)\n if IS_PYPY:\n y.resize((0, n), refcheck=False)\n else:\n y.resize((0, n))\n\n # `dot` should just return zero (m, n) matrix\n z = np.dot(x, y)\n assert_(np.all(z == 0))\n assert_(z.shape == (m, n))\n\n def test_zeros(self):\n # Regression test for #1061.\n # Set a size which cannot fit into a 64 bits signed integer\n sz = 2 ** 64\n good = 'Maximum allowed dimension exceeded'\n try:\n np.empty(sz)\n except ValueError as e:\n if not str(e) == good:\n self.fail(\"Got msg '%s', expected '%s'\" % (e, good))\n except Exception as e:\n self.fail(\"Got exception of type %s instead of ValueError\" % type(e))\n\n def test_huge_arange(self):\n # Regression test for #1062.\n # Set a size which cannot fit into a 64 bits signed integer\n sz = 2 ** 64\n good = 'Maximum 
allowed size exceeded'\n try:\n np.arange(sz)\n assert_(np.size == sz)\n except ValueError as e:\n if not str(e) == good:\n self.fail(\"Got msg '%s', expected '%s'\" % (e, good))\n except Exception as e:\n self.fail(\"Got exception of type %s instead of ValueError\" % type(e))\n\n def test_fromiter_bytes(self):\n # Ticket #1058\n a = np.fromiter(list(range(10)), dtype='b')\n b = np.fromiter(list(range(10)), dtype='B')\n assert_(np.alltrue(a == np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])))\n assert_(np.alltrue(b == np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])))\n\n def test_array_from_sequence_scalar_array(self):\n # Ticket #1078: segfaults when creating an array with a sequence of\n # 0d arrays.\n a = np.array((np.ones(2), np.array(2)))\n assert_equal(a.shape, (2,))\n assert_equal(a.dtype, np.dtype(object))\n assert_equal(a[0], np.ones(2))\n assert_equal(a[1], np.array(2))\n\n a = np.array(((1,), np.array(1)))\n assert_equal(a.shape, (2,))\n assert_equal(a.dtype, np.dtype(object))\n assert_equal(a[0], (1,))\n assert_equal(a[1], np.array(1))\n\n def test_array_from_sequence_scalar_array2(self):\n # Ticket #1081: weird array with strange input...\n t = np.array([np.array([]), np.array(0, object)])\n assert_equal(t.shape, (2,))\n assert_equal(t.dtype, np.dtype(object))\n\n def test_array_too_big(self):\n # Ticket #1080.\n assert_raises(ValueError, np.zeros, [975]*7, np.int8)\n assert_raises(ValueError, np.zeros, [26244]*5, np.int8)\n\n def test_dtype_keyerrors_(self):\n # Ticket #1106.\n dt = np.dtype([('f1', np.uint)])\n assert_raises(KeyError, dt.__getitem__, \"f2\")\n assert_raises(IndexError, dt.__getitem__, 1)\n assert_raises(ValueError, dt.__getitem__, 0.0)\n\n def test_lexsort_buffer_length(self):\n # Ticket #1217, don't segfault.\n a = np.ones(100, dtype=np.int8)\n b = np.ones(100, dtype=np.int32)\n i = np.lexsort((a[::-1], b))\n assert_equal(i, np.arange(100, dtype=int))\n\n def test_object_array_to_fixed_string(self):\n # Ticket #1235.\n a = np.array(['abcdefgh', 'ijklmnop'], dtype=np.object_)\n b = np.array(a, dtype=(np.str_, 8))\n assert_equal(a, b)\n c = np.array(a, dtype=(np.str_, 5))\n assert_equal(c, np.array(['abcde', 'ijklm']))\n d = np.array(a, dtype=(np.str_, 12))\n assert_equal(a, d)\n e = np.empty((2, ), dtype=(np.str_, 8))\n e[:] = a[:]\n assert_equal(a, e)\n\n def test_unicode_to_string_cast(self):\n # Ticket #1240.\n a = np.array([[u'abc', u'\\u03a3'],\n [u'asdf', u'erw']],\n dtype='U')\n assert_raises(UnicodeEncodeError, np.array, a, 'S4')\n\n def test_mixed_string_unicode_array_creation(self):\n a = np.array(['1234', u'123'])\n assert_(a.itemsize == 16)\n a = np.array([u'123', '1234'])\n assert_(a.itemsize == 16)\n a = np.array(['1234', u'123', '12345'])\n assert_(a.itemsize == 20)\n a = np.array([u'123', '1234', u'12345'])\n assert_(a.itemsize == 20)\n a = np.array([u'123', '1234', u'1234'])\n assert_(a.itemsize == 16)\n\n def test_misaligned_objects_segfault(self):\n # Ticket #1198 and #1267\n a1 = np.zeros((10,), dtype='O,c')\n a2 = np.array(['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j'], 'S10')\n a1['f0'] = a2\n repr(a1)\n np.argmax(a1['f0'])\n a1['f0'][1] = \"FOO\"\n a1['f0'] = \"FOO\"\n np.array(a1['f0'], dtype='S')\n np.nonzero(a1['f0'])\n a1.sort()\n copy.deepcopy(a1)\n\n def test_misaligned_scalars_segfault(self):\n # Ticket #1267\n s1 = np.array(('a', 'Foo'), dtype='c,O')\n s2 = np.array(('b', 'Bar'), dtype='c,O')\n s1['f1'] = s2['f1']\n s1['f1'] = 'Baz'\n\n def test_misaligned_dot_product_objects(self):\n # Ticket #1267\n # This didn't require a fix, but 
it's worth testing anyway, because\n # it may fail if .dot stops enforcing the arrays to be BEHAVED\n a = np.array([[(1, 'a'), (0, 'a')], [(0, 'a'), (1, 'a')]], dtype='O,c')\n b = np.array([[(4, 'a'), (1, 'a')], [(2, 'a'), (2, 'a')]], dtype='O,c')\n np.dot(a['f0'], b['f0'])\n\n def test_byteswap_complex_scalar(self):\n # Ticket #1259 and gh-441\n for dtype in [np.dtype('<'+t) for t in np.typecodes['Complex']]:\n z = np.array([2.2-1.1j], dtype)\n x = z[0] # always native-endian\n y = x.byteswap()\n if x.dtype.byteorder == z.dtype.byteorder:\n # little-endian machine\n assert_equal(x, np.frombuffer(y.tobytes(), dtype=dtype.newbyteorder()))\n else:\n # big-endian machine\n assert_equal(x, np.frombuffer(y.tobytes(), dtype=dtype))\n # double check real and imaginary parts:\n assert_equal(x.real, y.real.byteswap())\n assert_equal(x.imag, y.imag.byteswap())\n\n def test_structured_arrays_with_objects1(self):\n # Ticket #1299\n stra = 'aaaa'\n strb = 'bbbb'\n x = np.array([[(0, stra), (1, strb)]], 'i8,O')\n x[x.nonzero()] = x.ravel()[:1]\n assert_(x[0, 1] == x[0, 0])\n\n @dec.skipif(not HAS_REFCOUNT, \"python has no sys.getrefcount\")\n def test_structured_arrays_with_objects2(self):\n # Ticket #1299 second test\n stra = 'aaaa'\n strb = 'bbbb'\n numb = sys.getrefcount(strb)\n numa = sys.getrefcount(stra)\n x = np.array([[(0, stra), (1, strb)]], 'i8,O')\n x[x.nonzero()] = x.ravel()[:1]\n assert_(sys.getrefcount(strb) == numb)\n assert_(sys.getrefcount(stra) == numa + 2)\n\n def test_duplicate_title_and_name(self):\n # Ticket #1254\n dtspec = [(('a', 'a'), 'i'), ('b', 'i')]\n assert_raises(ValueError, np.dtype, dtspec)\n\n def test_signed_integer_division_overflow(self):\n # Ticket #1317.\n def test_type(t):\n min = np.array([np.iinfo(t).min])\n min //= -1\n\n with np.errstate(divide=\"ignore\"):\n for t in (np.int8, np.int16, np.int32, np.int64, int, np.long):\n test_type(t)\n\n def test_buffer_hashlib(self):\n try:\n from hashlib import md5\n except ImportError:\n from md5 import new as md5\n\n x = np.array([1, 2, 3], dtype=np.dtype('<i4'))\n assert_equal(md5(x).hexdigest(), '2a1dd1e1e59d0a384c26951e316cd7e6')\n\n def test_0d_string_scalar(self):\n # Bug #1436; the following should succeed\n np.asarray('x', '>c')\n\n def test_log1p_compiler_shenanigans(self):\n # Check if log1p is behaving on 32 bit intel systems.\n assert_(np.isfinite(np.log1p(np.exp2(-53))))\n\n def test_fromiter_comparison(self):\n a = np.fromiter(list(range(10)), dtype='b')\n b = np.fromiter(list(range(10)), dtype='B')\n assert_(np.alltrue(a == np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])))\n assert_(np.alltrue(b == np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])))\n\n def test_fromstring_crash(self):\n # Ticket #1345: the following should not cause a crash\n np.fromstring(b'aa, aa, 1.0', sep=',')\n\n def test_ticket_1539(self):\n dtypes = [x for x in np.typeDict.values()\n if (issubclass(x, np.number)\n and not issubclass(x, np.timedelta64))]\n a = np.array([], np.bool_) # not x[0] because it is unordered\n failures = []\n\n for x in dtypes:\n b = a.astype(x)\n for y in dtypes:\n c = a.astype(y)\n try:\n np.dot(b, c)\n except TypeError:\n failures.append((x, y))\n if failures:\n raise AssertionError(\"Failures: %r\" % failures)\n\n def test_ticket_1538(self):\n x = np.finfo(np.float32)\n for name in 'eps epsneg max min resolution tiny'.split():\n assert_equal(type(getattr(x, name)), np.float32,\n err_msg=name)\n\n def test_ticket_1434(self):\n # Check that the out= argument in var and std has an effect\n data = np.array(((1, 2, 3), (4, 
5, 6), (7, 8, 9)))\n out = np.zeros((3,))\n\n ret = data.var(axis=1, out=out)\n assert_(ret is out)\n assert_array_equal(ret, data.var(axis=1))\n\n ret = data.std(axis=1, out=out)\n assert_(ret is out)\n assert_array_equal(ret, data.std(axis=1))\n\n def test_complex_nan_maximum(self):\n cnan = complex(0, np.nan)\n assert_equal(np.maximum(1, cnan), cnan)\n\n def test_subclass_int_tuple_assignment(self):\n # ticket #1563\n class Subclass(np.ndarray):\n def __new__(cls, i):\n return np.ones((i,)).view(cls)\n\n x = Subclass(5)\n x[(0,)] = 2 # shouldn't raise an exception\n assert_equal(x[0], 2)\n\n def test_ufunc_no_unnecessary_views(self):\n # ticket #1548\n class Subclass(np.ndarray):\n pass\n x = np.array([1, 2, 3]).view(Subclass)\n y = np.add(x, x, x)\n assert_equal(id(x), id(y))\n\n @dec.skipif(not HAS_REFCOUNT, \"python has no sys.getrefcount\")\n def test_take_refcount(self):\n # ticket #939\n a = np.arange(16, dtype=float)\n a.shape = (4, 4)\n lut = np.ones((5 + 3, 4), float)\n rgba = np.empty(shape=a.shape + (4,), dtype=lut.dtype)\n c1 = sys.getrefcount(rgba)\n try:\n lut.take(a, axis=0, mode='clip', out=rgba)\n except TypeError:\n pass\n c2 = sys.getrefcount(rgba)\n assert_equal(c1, c2)\n\n def test_fromfile_tofile_seeks(self):\n # On Python 3, tofile/fromfile used to get (#1610) the Python\n # file handle out of sync\n f0 = tempfile.NamedTemporaryFile()\n f = f0.file\n f.write(np.arange(255, dtype='u1').tobytes())\n\n f.seek(20)\n ret = np.fromfile(f, count=4, dtype='u1')\n assert_equal(ret, np.array([20, 21, 22, 23], dtype='u1'))\n assert_equal(f.tell(), 24)\n\n f.seek(40)\n np.array([1, 2, 3], dtype='u1').tofile(f)\n assert_equal(f.tell(), 43)\n\n f.seek(40)\n data = f.read(3)\n assert_equal(data, b\"\\x01\\x02\\x03\")\n\n f.seek(80)\n f.read(4)\n data = np.fromfile(f, dtype='u1', count=4)\n assert_equal(data, np.array([84, 85, 86, 87], dtype='u1'))\n\n f.close()\n\n def test_complex_scalar_warning(self):\n for tp in [np.csingle, np.cdouble, np.clongdouble]:\n x = tp(1+2j)\n assert_warns(np.ComplexWarning, float, x)\n with suppress_warnings() as sup:\n sup.filter(np.ComplexWarning)\n assert_equal(float(x), float(x.real))\n\n def test_complex_scalar_complex_cast(self):\n for tp in [np.csingle, np.cdouble, np.clongdouble]:\n x = tp(1+2j)\n assert_equal(complex(x), 1+2j)\n\n def test_complex_boolean_cast(self):\n # Ticket #2218\n for tp in [np.csingle, np.cdouble, np.clongdouble]:\n x = np.array([0, 0+0.5j, 0.5+0j], dtype=tp)\n assert_equal(x.astype(bool), np.array([0, 1, 1], dtype=bool))\n assert_(np.any(x))\n assert_(np.all(x[1:]))\n\n def test_uint_int_conversion(self):\n x = 2**64 - 1\n assert_equal(int(np.uint64(x)), x)\n\n def test_duplicate_field_names_assign(self):\n ra = np.fromiter(((i*3, i*2) for i in range(10)), dtype='i8,f8')\n ra.dtype.names = ('f1', 'f2')\n repr(ra) # should not cause a segmentation fault\n assert_raises(ValueError, setattr, ra.dtype, 'names', ('f1', 'f1'))\n\n def test_eq_string_and_object_array(self):\n # From e-mail thread \"__eq__ with str and object\" (Keith Goodman)\n a1 = np.array(['a', 'b'], dtype=object)\n a2 = np.array(['a', 'c'])\n assert_array_equal(a1 == a2, [True, False])\n assert_array_equal(a2 == a1, [True, False])\n\n def test_nonzero_byteswap(self):\n a = np.array([0x80000000, 0x00000080, 0], dtype=np.uint32)\n a.dtype = np.float32\n assert_equal(a.nonzero()[0], [1])\n a = a.byteswap().newbyteorder()\n assert_equal(a.nonzero()[0], [1]) # [0] if nonzero() ignores swap\n\n def test_find_common_type_boolean(self):\n # Ticket #1695\n 
assert_(np.find_common_type([], ['?', '?']) == '?')\n\n def test_empty_mul(self):\n a = np.array([1.])\n a[1:1] *= 2\n assert_equal(a, [1.])\n\n def test_array_side_effect(self):\n # The second use of itemsize was throwing an exception because in\n # ctors.c, discover_itemsize was calling PyObject_Length without\n # checking the return code. This failed to get the length of the\n # number 2, and the exception hung around until something checked\n # PyErr_Occurred() and returned an error.\n assert_equal(np.dtype('S10').itemsize, 10)\n np.array([['abc', 2], ['long ', '0123456789']], dtype=np.string_)\n assert_equal(np.dtype('S10').itemsize, 10)\n\n def test_any_float(self):\n # all and any for floats\n a = np.array([0.1, 0.9])\n assert_(np.any(a))\n assert_(np.all(a))\n\n def test_large_float_sum(self):\n a = np.arange(10000, dtype='f')\n assert_equal(a.sum(dtype='d'), a.astype('d').sum())\n\n def test_ufunc_casting_out(self):\n a = np.array(1.0, dtype=np.float32)\n b = np.array(1.0, dtype=np.float64)\n c = np.array(1.0, dtype=np.float32)\n np.add(a, b, out=c)\n assert_equal(c, 2.0)\n\n def test_array_scalar_contiguous(self):\n # Array scalars are both C and Fortran contiguous\n assert_(np.array(1.0).flags.c_contiguous)\n assert_(np.array(1.0).flags.f_contiguous)\n assert_(np.array(np.float32(1.0)).flags.c_contiguous)\n assert_(np.array(np.float32(1.0)).flags.f_contiguous)\n\n def test_squeeze_contiguous(self):\n # Similar to GitHub issue #387\n a = np.zeros((1, 2)).squeeze()\n b = np.zeros((2, 2, 2), order='F')[:, :, ::2].squeeze()\n assert_(a.flags.c_contiguous)\n assert_(a.flags.f_contiguous)\n assert_(b.flags.f_contiguous)\n\n def test_reduce_contiguous(self):\n # GitHub issue #387\n a = np.add.reduce(np.zeros((2, 1, 2)), (0, 1))\n b = np.add.reduce(np.zeros((2, 1, 2)), 1)\n assert_(a.flags.c_contiguous)\n assert_(a.flags.f_contiguous)\n assert_(b.flags.c_contiguous)\n\n def test_object_array_self_reference(self):\n # Object arrays with references to themselves can cause problems\n a = np.array(0, dtype=object)\n a[()] = a\n assert_raises(TypeError, int, a)\n assert_raises(TypeError, long, a)\n assert_raises(TypeError, float, a)\n assert_raises(TypeError, oct, a)\n assert_raises(TypeError, hex, a)\n\n # Test the same for a circular reference.\n b = np.array(a, dtype=object)\n a[()] = b\n assert_raises(TypeError, int, a)\n # NumPy has no tp_traverse currently, so circular references\n # cannot be detected. So resolve it:\n a[()] = 0\n\n # This was causing a to become like the above\n a = np.array(0, dtype=object)\n a[...] 
+= 1\n assert_equal(a, 1)\n\n def test_object_array_self_copy(self):\n # An object array being copied into itself DECREF'ed before INCREF'ing\n # causing segmentation faults (gh-3787)\n a = np.array(object(), dtype=object)\n np.copyto(a, a)\n if HAS_REFCOUNT:\n assert_(sys.getrefcount(a[()]) == 2)\n a[()].__class__ # will segfault if object was deleted\n\n def test_zerosize_accumulate(self):\n \"Ticket #1733\"\n x = np.array([[42, 0]], dtype=np.uint32)\n assert_equal(np.add.accumulate(x[:-1, 0]), [])\n\n def test_objectarray_setfield(self):\n # Setfield should not overwrite Object fields with non-Object data\n x = np.array([1, 2, 3], dtype=object)\n assert_raises(TypeError, x.setfield, 4, np.int32, 0)\n\n def test_setting_rank0_string(self):\n \"Ticket #1736\"\n s1 = b\"hello1\"\n s2 = b\"hello2\"\n a = np.zeros((), dtype=\"S10\")\n a[()] = s1\n assert_equal(a, np.array(s1))\n a[()] = np.array(s2)\n assert_equal(a, np.array(s2))\n\n a = np.zeros((), dtype='f4')\n a[()] = 3\n assert_equal(a, np.array(3))\n a[()] = np.array(4)\n assert_equal(a, np.array(4))\n\n def test_string_astype(self):\n \"Ticket #1748\"\n s1 = b'black'\n s2 = b'white'\n s3 = b'other'\n a = np.array([[s1], [s2], [s3]])\n assert_equal(a.dtype, np.dtype('S5'))\n b = a.astype(np.dtype('S0'))\n assert_equal(b.dtype, np.dtype('S5'))\n\n def test_ticket_1756(self):\n # Ticket #1756\n s = b'0123456789abcdef'\n a = np.array([s]*5)\n for i in range(1, 17):\n a1 = np.array(a, \"|S%d\" % i)\n a2 = np.array([s[:i]]*5)\n assert_equal(a1, a2)\n\n def test_fields_strides(self):\n \"gh-2355\"\n r = np.frombuffer(b'abcdefghijklmnop'*4*3, dtype='i4,(2,3)u2')\n assert_equal(r[0:3:2]['f1'], r['f1'][0:3:2])\n assert_equal(r[0:3:2]['f1'][0], r[0:3:2][0]['f1'])\n assert_equal(r[0:3:2]['f1'][0][()], r[0:3:2][0]['f1'][()])\n assert_equal(r[0:3:2]['f1'][0].strides, r[0:3:2][0]['f1'].strides)\n\n def test_alignment_update(self):\n # Check that alignment flag is updated on stride setting\n a = np.arange(10)\n assert_(a.flags.aligned)\n a.strides = 3\n assert_(not a.flags.aligned)\n\n def test_ticket_1770(self):\n \"Should not segfault on python 3k\"\n import numpy as np\n try:\n a = np.zeros((1,), dtype=[('f1', 'f')])\n a['f1'] = 1\n a['f2'] = 1\n except ValueError:\n pass\n except Exception:\n raise AssertionError\n\n def test_ticket_1608(self):\n \"x.flat shouldn't modify data\"\n x = np.array([[1, 2], [3, 4]]).T\n np.array(x.flat)\n assert_equal(x, [[1, 3], [2, 4]])\n\n def test_pickle_string_overwrite(self):\n import re\n\n data = np.array([1], dtype='b')\n blob = pickle.dumps(data, protocol=1)\n data = pickle.loads(blob)\n\n # Check that loads does not clobber interned strings\n s = re.sub(\"a(.)\", \"\\x01\\\\1\", \"a_\")\n assert_equal(s[0], \"\\x01\")\n data[0] = 0xbb\n s = re.sub(\"a(.)\", \"\\x01\\\\1\", \"a_\")\n assert_equal(s[0], \"\\x01\")\n\n def test_pickle_bytes_overwrite(self):\n if sys.version_info[0] >= 3:\n data = np.array([1], dtype='b')\n data = pickle.loads(pickle.dumps(data))\n data[0] = 0xdd\n bytestring = \"\\x01 \".encode('ascii')\n assert_equal(bytestring[0:1], '\\x01'.encode('ascii'))\n\n def test_pickle_py2_array_latin1_hack(self):\n # Check that unpickling hacks in Py3 that support\n # encoding='latin1' work correctly.\n\n # Python2 output for pickle.dumps(numpy.array([129], dtype='b'))\n data = (b\"cnumpy.core.multiarray\\n_reconstruct\\np0\\n(cnumpy\\nndarray\\np1\\n(I0\\n\"\n b\"tp2\\nS'b'\\np3\\ntp4\\nRp5\\n(I1\\n(I1\\ntp6\\ncnumpy\\ndtype\\np7\\n(S'i1'\\np8\\n\"\n 
b\"I0\\nI1\\ntp9\\nRp10\\n(I3\\nS'|'\\np11\\nNNNI-1\\nI-1\\nI0\\ntp12\\nbI00\\nS'\\\\x81'\\n\"\n b\"p13\\ntp14\\nb.\")\n if sys.version_info[0] >= 3:\n # This should work:\n result = pickle.loads(data, encoding='latin1')\n assert_array_equal(result, np.array([129], dtype='b'))\n # Should not segfault:\n assert_raises(Exception, pickle.loads, data, encoding='koi8-r')\n\n def test_pickle_py2_scalar_latin1_hack(self):\n # Check that scalar unpickling hack in Py3 that supports\n # encoding='latin1' work correctly.\n\n # Python2 output for pickle.dumps(...)\n datas = [\n # (original, python2_pickle, koi8r_validity)\n (np.unicode_('\\u6bd2'),\n (b\"cnumpy.core.multiarray\\nscalar\\np0\\n(cnumpy\\ndtype\\np1\\n\"\n b\"(S'U1'\\np2\\nI0\\nI1\\ntp3\\nRp4\\n(I3\\nS'<'\\np5\\nNNNI4\\nI4\\nI0\\n\"\n b\"tp6\\nbS'\\\\xd2k\\\\x00\\\\x00'\\np7\\ntp8\\nRp9\\n.\"),\n 'invalid'),\n\n (np.float64(9e123),\n (b\"cnumpy.core.multiarray\\nscalar\\np0\\n(cnumpy\\ndtype\\np1\\n(S'f8'\\n\"\n b\"p2\\nI0\\nI1\\ntp3\\nRp4\\n(I3\\nS'<'\\np5\\nNNNI-1\\nI-1\\nI0\\ntp6\\n\"\n b\"bS'O\\\\x81\\\\xb7Z\\\\xaa:\\\\xabY'\\np7\\ntp8\\nRp9\\n.\"),\n 'invalid'),\n\n (np.bytes_(b'\\x9c'), # different 8-bit code point in KOI8-R vs latin1\n (b\"cnumpy.core.multiarray\\nscalar\\np0\\n(cnumpy\\ndtype\\np1\\n(S'S1'\\np2\\n\"\n b\"I0\\nI1\\ntp3\\nRp4\\n(I3\\nS'|'\\np5\\nNNNI1\\nI1\\nI0\\ntp6\\nbS'\\\\x9c'\\np7\\n\"\n b\"tp8\\nRp9\\n.\"),\n 'different'),\n ]\n if sys.version_info[0] >= 3:\n for original, data, koi8r_validity in datas:\n result = pickle.loads(data, encoding='latin1')\n assert_equal(result, original)\n\n # Decoding under non-latin1 encoding (e.g.) KOI8-R can\n # produce bad results, but should not segfault.\n if koi8r_validity == 'different':\n # Unicode code points happen to lie within latin1,\n # but are different in koi8-r, resulting to silent\n # bogus results\n result = pickle.loads(data, encoding='koi8-r')\n assert_(result != original)\n elif koi8r_validity == 'invalid':\n # Unicode code points outside latin1, so results\n # to an encoding exception\n assert_raises(ValueError, pickle.loads, data, encoding='koi8-r')\n else:\n raise ValueError(koi8r_validity)\n\n def test_structured_type_to_object(self):\n a_rec = np.array([(0, 1), (3, 2)], dtype='i4,i8')\n a_obj = np.empty((2,), dtype=object)\n a_obj[0] = (0, 1)\n a_obj[1] = (3, 2)\n # astype records -> object\n assert_equal(a_rec.astype(object), a_obj)\n # '=' records -> object\n b = np.empty_like(a_obj)\n b[...] = a_rec\n assert_equal(b, a_obj)\n # '=' object -> records\n b = np.empty_like(a_rec)\n b[...] = a_obj\n assert_equal(b, a_rec)\n\n def test_assign_obj_listoflists(self):\n # Ticket # 1870\n # The inner list should get assigned to the object elements\n a = np.zeros(4, dtype=object)\n b = a.copy()\n a[0] = [1]\n a[1] = [2]\n a[2] = [3]\n a[3] = [4]\n b[...] = [[1], [2], [3], [4]]\n assert_equal(a, b)\n # The first dimension should get broadcast\n a = np.zeros((2, 2), dtype=object)\n a[...] 
= [[1, 2]]\n assert_equal(a, [[1, 2], [1, 2]])\n\n def test_memoryleak(self):\n # Ticket #1917 - ensure that array data doesn't leak\n for i in range(1000):\n # 100MB times 1000 would give 100GB of memory usage if it leaks\n a = np.empty((100000000,), dtype='i1')\n del a\n\n @dec.skipif(not HAS_REFCOUNT, \"python has no sys.getrefcount\")\n def test_ufunc_reduce_memoryleak(self):\n a = np.arange(6)\n acnt = sys.getrefcount(a)\n np.add.reduce(a)\n assert_equal(sys.getrefcount(a), acnt)\n\n def test_search_sorted_invalid_arguments(self):\n # Ticket #2021, should not segfault.\n x = np.arange(0, 4, dtype='datetime64[D]')\n assert_raises(TypeError, x.searchsorted, 1)\n\n def test_string_truncation(self):\n # Ticket #1990 - Data can be truncated in creation of an array from a\n # mixed sequence of numeric values and strings\n for val in [True, 1234, 123.4, complex(1, 234)]:\n for tostr in [asunicode, asbytes]:\n b = np.array([val, tostr('xx')])\n assert_equal(tostr(b[0]), tostr(val))\n b = np.array([tostr('xx'), val])\n assert_equal(tostr(b[1]), tostr(val))\n\n # test also with longer strings\n b = np.array([val, tostr('xxxxxxxxxx')])\n assert_equal(tostr(b[0]), tostr(val))\n b = np.array([tostr('xxxxxxxxxx'), val])\n assert_equal(tostr(b[1]), tostr(val))\n\n def test_string_truncation_ucs2(self):\n # Ticket #2081. Python compiled with two byte unicode\n # can lead to truncation if itemsize is not properly\n # adjusted for NumPy's four byte unicode.\n if sys.version_info[0] >= 3:\n a = np.array(['abcd'])\n else:\n a = np.array([u'abcd'])\n assert_equal(a.dtype.itemsize, 16)\n\n def test_unique_stable(self):\n # Ticket #2063 must always choose stable sort for argsort to\n # get consistent results\n v = np.array(([0]*5 + [1]*6 + [2]*6)*4)\n res = np.unique(v, return_index=True)\n tgt = (np.array([0, 1, 2]), np.array([ 0, 5, 11]))\n assert_equal(res, tgt)\n\n def test_unicode_alloc_dealloc_match(self):\n # Ticket #1578, the mismatch only showed up when running\n # python-debug for python versions >= 2.7, and then as\n # a core dump and error message.\n a = np.array(['abc'], dtype=np.unicode)[0]\n del a\n\n def test_refcount_error_in_clip(self):\n # Ticket #1588\n a = np.zeros((2,), dtype='>i2').clip(min=0)\n x = a + a\n # This used to segfault:\n y = str(x)\n # Check the final string:\n assert_(y == \"[0 0]\")\n\n def test_searchsorted_wrong_dtype(self):\n # Ticket #2189, it used to segfault, so we check that it raises the\n # proper exception.\n a = np.array([('a', 1)], dtype='S1, int')\n assert_raises(TypeError, np.searchsorted, a, 1.2)\n # Ticket #2066, similar problem:\n dtype = np.format_parser(['i4', 'i4'], [], [])\n a = np.recarray((2, ), dtype)\n assert_raises(TypeError, np.searchsorted, a, 1)\n\n def test_complex64_alignment(self):\n # Issue gh-2668 (trac 2076), segfault on sparc due to misalignment\n dtt = np.complex64\n arr = np.arange(10, dtype=dtt)\n # 2D array\n arr2 = np.reshape(arr, (2, 5))\n # Fortran write followed by (C or F) read caused bus error\n data_str = arr2.tobytes('F')\n data_back = np.ndarray(arr2.shape,\n arr2.dtype,\n buffer=data_str,\n order='F')\n assert_array_equal(arr2, data_back)\n\n def test_structured_count_nonzero(self):\n arr = np.array([0, 1]).astype('i4, (2)i4')[:1]\n count = np.count_nonzero(arr)\n assert_equal(count, 0)\n\n def test_copymodule_preserves_f_contiguity(self):\n a = np.empty((2, 2), order='F')\n b = copy.copy(a)\n c = copy.deepcopy(a)\n assert_(b.flags.fortran)\n assert_(b.flags.f_contiguous)\n assert_(c.flags.fortran)\n 
assert_(c.flags.f_contiguous)\n\n def test_fortran_order_buffer(self):\n import numpy as np\n a = np.array([['Hello', 'Foob']], dtype='U5', order='F')\n arr = np.ndarray(shape=[1, 2, 5], dtype='U1', buffer=a)\n arr2 = np.array([[[u'H', u'e', u'l', u'l', u'o'],\n [u'F', u'o', u'o', u'b', u'']]])\n assert_array_equal(arr, arr2)\n\n def test_assign_from_sequence_error(self):\n # Ticket #4024.\n arr = np.array([1, 2, 3])\n assert_raises(ValueError, arr.__setitem__, slice(None), [9, 9])\n arr.__setitem__(slice(None), [9])\n assert_equal(arr, [9, 9, 9])\n\n def test_format_on_flex_array_element(self):\n # Ticket #4369.\n dt = np.dtype([('date', '<M8[D]'), ('val', '<f8')])\n arr = np.array([('2000-01-01', 1)], dt)\n formatted = '{0}'.format(arr[0])\n assert_equal(formatted, str(arr[0]))\n\n def test_deepcopy_on_0d_array(self):\n # Ticket #3311.\n arr = np.array(3)\n arr_cp = copy.deepcopy(arr)\n\n assert_equal(arr, arr_cp)\n assert_equal(arr.shape, arr_cp.shape)\n assert_equal(int(arr), int(arr_cp))\n assert_(arr is not arr_cp)\n assert_(isinstance(arr_cp, type(arr)))\n\n def test_deepcopy_F_order_object_array(self):\n # Ticket #6456.\n a = {'a': 1}\n b = {'b': 2}\n arr = np.array([[a, b], [a, b]], order='F')\n arr_cp = copy.deepcopy(arr)\n\n assert_equal(arr, arr_cp)\n assert_(arr is not arr_cp)\n # Ensure that we have actually copied the item.\n assert_(arr[0, 1] is not arr_cp[1, 1])\n # Ensure we are allowed to have references to the same object.\n assert_(arr[0, 1] is arr[1, 1])\n # Check the references hold for the copied objects.\n assert_(arr_cp[0, 1] is arr_cp[1, 1])\n\n def test_deepcopy_empty_object_array(self):\n # Ticket #8536.\n # Deepcopy should succeed\n a = np.array([], dtype=object)\n b = copy.deepcopy(a)\n assert_(a.shape == b.shape)\n\n def test_bool_subscript_crash(self):\n # gh-4494\n c = np.rec.array([(1, 2, 3), (4, 5, 6)])\n masked = c[np.array([True, False])]\n base = masked.base\n del masked, c\n base.dtype\n\n def test_richcompare_crash(self):\n # gh-4613\n import operator as op\n\n # dummy class where __array__ throws exception\n class Foo(object):\n __array_priority__ = 1002\n\n def __array__(self, *args, **kwargs):\n raise Exception()\n\n rhs = Foo()\n lhs = np.array(1)\n for f in [op.lt, op.le, op.gt, op.ge]:\n if sys.version_info[0] >= 3:\n assert_raises(TypeError, f, lhs, rhs)\n elif not sys.py3kwarning:\n # With -3 switch in python 2, DeprecationWarning is raised\n # which we are not interested in\n f(lhs, rhs)\n assert_(not op.eq(lhs, rhs))\n assert_(op.ne(lhs, rhs))\n\n def test_richcompare_scalar_and_subclass(self):\n # gh-4709\n class Foo(np.ndarray):\n def __eq__(self, other):\n return \"OK\"\n\n x = np.array([1, 2, 3]).view(Foo)\n assert_equal(10 == x, \"OK\")\n assert_equal(np.int32(10) == x, \"OK\")\n assert_equal(np.array([10]) == x, \"OK\")\n\n def test_pickle_empty_string(self):\n # gh-3926\n\n import pickle\n test_string = np.string_('')\n assert_equal(pickle.loads(pickle.dumps(test_string)), test_string)\n\n def test_frompyfunc_many_args(self):\n # gh-5672\n\n def passer(*args):\n pass\n\n assert_raises(ValueError, np.frompyfunc, passer, 32, 1)\n\n def test_repeat_broadcasting(self):\n # gh-5743\n a = np.arange(60).reshape(3, 4, 5)\n for axis in chain(range(-a.ndim, a.ndim), [None]):\n assert_equal(a.repeat(2, axis=axis), a.repeat([2], axis=axis))\n\n def test_frompyfunc_nout_0(self):\n # gh-2014\n\n def f(x):\n x[0], x[-1] = x[-1], x[0]\n\n uf = np.frompyfunc(f, 1, 0)\n a = np.array([[1, 2, 3], [4, 5], [6, 7, 8, 9]])\n assert_equal(uf(a), ())\n 
assert_array_equal(a, [[3, 2, 1], [5, 4], [9, 7, 8, 6]])\n\n @dec.skipif(not HAS_REFCOUNT, \"python has no sys.getrefcount\")\n def test_leak_in_structured_dtype_comparison(self):\n # gh-6250\n recordtype = np.dtype([('a', np.float64),\n ('b', np.int32),\n ('d', (str, 5))])\n\n # Simple case\n a = np.zeros(2, dtype=recordtype)\n for i in range(100):\n a == a\n assert_(sys.getrefcount(a) < 10)\n\n # The case in the bug report.\n before = sys.getrefcount(a)\n u, v = a[0], a[1]\n u == v\n del u, v\n gc.collect()\n after = sys.getrefcount(a)\n assert_equal(before, after)\n\n def test_empty_percentile(self):\n # gh-6530 / gh-6553\n assert_array_equal(np.percentile(np.arange(10), []), np.array([]))\n\n def test_void_compare_segfault(self):\n # gh-6922. The following should not segfault\n a = np.ones(3, dtype=[('object', 'O'), ('int', '<i2')])\n a.sort()\n\n def test_reshape_size_overflow(self):\n # gh-7455\n a = np.ones(20)[::2]\n if np.dtype(np.intp).itemsize == 8:\n # 64 bit. The following are the prime factors of 2**63 + 5,\n # plus a leading 2, so when multiplied together as int64,\n # the result overflows to a total size of 10.\n new_shape = (2, 13, 419, 691, 823, 2977518503)\n else:\n # 32 bit. The following are the prime factors of 2**31 + 5,\n # plus a leading 2, so when multiplied together as int32,\n # the result overflows to a total size of 10.\n new_shape = (2, 7, 7, 43826197)\n assert_raises(ValueError, a.reshape, new_shape)\n\n def test_invalid_structured_dtypes(self):\n # gh-2865\n # mapping python objects to other dtypes\n assert_raises(ValueError, np.dtype, ('O', [('name', 'i8')]))\n assert_raises(ValueError, np.dtype, ('i8', [('name', 'O')]))\n assert_raises(ValueError, np.dtype,\n ('i8', [('name', [('name', 'O')])]))\n assert_raises(ValueError, np.dtype, ([('a', 'i4'), ('b', 'i4')], 'O'))\n assert_raises(ValueError, np.dtype, ('i8', 'O'))\n # wrong number/type of tuple elements in dict\n assert_raises(ValueError, np.dtype,\n ('i', {'name': ('i', 0, 'title', 'oops')}))\n assert_raises(ValueError, np.dtype,\n ('i', {'name': ('i', 'wrongtype', 'title')}))\n # disallowed as of 1.13\n assert_raises(ValueError, np.dtype,\n ([('a', 'O'), ('b', 'O')], [('c', 'O'), ('d', 'O')]))\n # allowed as a special case due to existing use, see gh-2798\n a = np.ones(1, dtype=('O', [('name', 'O')]))\n assert_equal(a[0], 1)\n\n def test_correct_hash_dict(self):\n # gh-8887 - __hash__ would be None despite tp_hash being set\n all_types = set(np.typeDict.values()) - {np.void}\n for t in all_types:\n val = t()\n\n try:\n hash(val)\n except TypeError as e:\n assert_equal(t.__hash__, None)\n else:\n assert_(t.__hash__ != None)\n\n def test_scalar_copy(self):\n scalar_types = set(np.sctypeDict.values())\n values = {\n np.void: b\"a\",\n np.bytes_: b\"a\",\n np.unicode_: \"a\",\n np.datetime64: \"2017-08-25\",\n }\n for sctype in scalar_types:\n item = sctype(values.get(sctype, 1))\n item2 = copy.copy(item)\n assert_equal(item, item2)\n\n\nif __name__ == \"__main__\":\n run_module_suite()\n" ]
[ [ "numpy.ones", "numpy.testing.assert_equal", "numpy.any", "numpy.asarray", "numpy.testing.assert_warns", "numpy.transpose", "numpy.abs", "numpy.bool_", "numpy.testing.dec.skipif", "numpy.bytes_", "numpy.unique", "numpy.typeDict.values", "numpy.string_", "numpy.float32", "numpy.errstate", "numpy.rec.fromarrays", "numpy.unicode_", "numpy.random.normal", "numpy.array", "numpy.dot", "numpy.subtract.reduce", "numpy.find_common_type", "numpy.random.seed", "numpy.add", "numpy.add.reduce", "numpy.char.array", "numpy.reshape", "numpy.fromstring", "numpy.binary_repr", "numpy.load", "numpy.single", "numpy.argmax", "numpy.uint64", "numpy.divide.reduce", "numpy.copyto", "numpy.hstack", "numpy.count_nonzero", "numpy.int32", "numpy.finfo", "numpy.intp", "numpy.testing.assert_raises", "numpy.zeros_like", "numpy.random.shuffle", "numpy.format_parser", "numpy.concatenate", "numpy.rec.array", "numpy.array_str", "numpy.frombuffer", "numpy.take", "numpy.testing.run_module_suite", "numpy.int_", "numpy.add.outer", "numpy.frompyfunc", "numpy.float64", "numpy.empty_like", "numpy.sctypeDict.values", "numpy.random.rand", "numpy.nonzero", "numpy.fromfile", "numpy.float_", "numpy.zeros", "numpy.arange", "numpy.lexsort", "numpy.testing.assert_array_almost_equal", "numpy.recarray", "numpy.chararray", "numpy.sort", "numpy.divide.accumulate", "numpy.random.randn", "numpy.compat.asbytes", "numpy.subtract.accumulate", "numpy.setbufsize", "numpy.dtype", "numpy.testing._assert_valid_refcount", "numpy.object_", "numpy.testing.assert_almost_equal", "numpy.testing.assert_array_equal", "numpy.ndarray", "numpy.where", "numpy.linspace", "numpy.longdouble", "numpy.double", "numpy.exp2", "numpy.lib.stride_tricks.as_strided", "numpy.all", "numpy.indices", "numpy.maximum", "numpy.sign", "numpy.empty", "numpy.add.accumulate", "numpy.iinfo", "numpy.testing.suppress_warnings", "numpy.testing.assert_" ] ]
Talendar/qdeep
[ "7228edc9cc7d7e6c6bc59e93a3eb726fda15704d" ]
[ "qdeep/dqn/agent.py" ]
[ "\"\"\" DQN agent implementation.\n\nHeavily based on: https://github.com/deepmind/acme/blob/master/acme/agents/tf/dqn/agent.py\n\"\"\"\n\nimport copy\nfrom typing import Optional, List, Dict\n\nimport numpy as np\nimport reverb\nimport sonnet as snt\nimport tensorflow as tf\nimport trfl\nfrom acme import datasets\nfrom acme import specs\nfrom acme.adders import reverb as adders\nfrom acme.agents import agent\nfrom acme.agents.tf import actors\nfrom acme.tf import utils as tf2_utils\nfrom acme.utils import loggers\n\nfrom qdeep.dqn import learning\n\n\nclass DQNAgent(agent.Agent):\n \"\"\" DQN agent.\n\n This implements a single-process DQN agent. This is a simple Q-learning\n algorithm that inserts N-step transitions into a replay buffer, and\n periodically updates its policy by sampling these transitions using\n prioritization.\n\n Args:\n environment_spec: description of the actions, observations, etc.\n network: the online Q network (the one being optimized)\n batch_size: batch size for updates.\n prefetch_size: size to prefetch from replay.\n target_update_period: number of learner steps to perform before\n updating the target networks.\n samples_per_insert: number of samples to take from replay for every\n insert that is made.\n min_replay_size: minimum replay size before updating. This and all\n following arguments are related to dataset construction and will be\n ignored if a dataset argument is passed.\n max_replay_size: maximum replay size.\n importance_sampling_exponent: power to which importance weights are\n raised before normalizing.\n priority_exponent: exponent used in prioritized sampling.\n n_step: number of steps to squash into a single transition.\n epsilon: probability of taking a random action; ignored if a policy\n network is given.\n learning_rate: learning rate for the q-network update.\n discount: discount to use for TD updates.\n logger: logger object to be used by learner.\n max_gradient_norm: used for gradient clipping.\n expert_data: List of dictionaries containing the expert data to be added\n to the agent's replay memory. Each dictionary represents and episode\n and must have two keys: \"first\" and \"mid\". The \"first\" key's value\n must be a `TimeStep` object of the type `StepType.FIRST`. The \"mid\"\n key's value, on the other hand, must be a list containing tuples\n with, respectively, an action and a `TimeStep` object.\n \"\"\"\n\n def __init__(\n self,\n environment_spec: specs.EnvironmentSpec,\n network: snt.Module,\n batch_size: int = 32,\n prefetch_size: int = 4,\n target_update_period: int = 100,\n samples_per_insert: float = 32.0,\n min_replay_size: int = 1000,\n max_replay_size: int = 100000,\n importance_sampling_exponent: float = 0.2,\n priority_exponent: float = 0.6,\n n_step: int = 5,\n epsilon: Optional[float] = 0.05,\n learning_rate: float = 1e-3,\n discount: float = 0.99,\n logger: loggers.Logger = None,\n max_gradient_norm: Optional[float] = None,\n expert_data: List[Dict] = None,\n ) -> None:\n \"\"\" Initialize the agent. \"\"\"\n\n # Create a replay server to add data to. 
This uses no limiter behavior\n # in order to allow the Agent interface to handle it.\n replay_table = reverb.Table(\n name=adders.DEFAULT_PRIORITY_TABLE,\n sampler=reverb.selectors.Prioritized(priority_exponent),\n remover=reverb.selectors.Fifo(),\n max_size=max_replay_size,\n rate_limiter=reverb.rate_limiters.MinSize(1),\n signature=adders.NStepTransitionAdder.signature(environment_spec))\n self._server = reverb.Server([replay_table], port=None)\n\n # The adder is used to insert observations into replay.\n address = f'localhost:{self._server.port}'\n adder = adders.NStepTransitionAdder(\n client=reverb.Client(address),\n n_step=n_step,\n discount=discount)\n\n # Adding expert data to the replay memory:\n if expert_data is not None:\n for d in expert_data:\n adder.add_first(d[\"first\"])\n for (action, next_ts) in d[\"mid\"]:\n adder.add(np.int32(action), next_ts)\n\n # The dataset provides an interface to sample from replay.\n replay_client = reverb.TFClient(address)\n dataset = datasets.make_reverb_dataset(\n server_address=address,\n batch_size=batch_size,\n prefetch_size=prefetch_size)\n\n # Creating the epsilon greedy policy network:\n epsilon = tf.Variable(epsilon)\n policy_network = snt.Sequential([\n network,\n lambda q: trfl.epsilon_greedy(q, epsilon=epsilon).sample(),\n ])\n\n # Create a target network.\n target_network = copy.deepcopy(network)\n\n # Ensure that we create the variables before proceeding (maybe not\n # needed).\n tf2_utils.create_variables(network, [environment_spec.observations])\n tf2_utils.create_variables(target_network,\n [environment_spec.observations])\n\n # Create the actor which defines how we take actions.\n actor = actors.FeedForwardActor(policy_network, adder)\n\n # The learner updates the parameters (and initializes them).\n learner = learning.DQNLearner(\n network=network,\n target_network=target_network,\n discount=discount,\n importance_sampling_exponent=importance_sampling_exponent,\n learning_rate=learning_rate,\n target_update_period=target_update_period,\n dataset=dataset,\n replay_client=replay_client,\n max_gradient_norm=max_gradient_norm,\n logger=logger,\n )\n\n super().__init__(\n actor=actor,\n learner=learner,\n min_observations=max(batch_size, min_replay_size),\n observations_per_step=float(batch_size) / samples_per_insert)\n" ]
[ [ "numpy.int32", "tensorflow.Variable" ] ]
sflender/autogluon
[ "058398b61d1b2011f56a9dce149b0989adbbb04a" ]
[ "core/src/autogluon/core/models/ensemble/bagged_ensemble_model.py" ]
[ "import copy\nimport logging\nimport os\nimport time\nfrom collections import Counter\nfrom statistics import mean\nfrom functools import reduce\n\nimport numpy as np\nimport pandas as pd\n\nfrom ...constants import MULTICLASS, REGRESSION, SOFTCLASS, REFIT_FULL_SUFFIX\nfrom ...utils.exceptions import TimeLimitExceeded\nfrom ...utils.loaders import load_pkl\nfrom ...utils.savers import save_pkl\nfrom ...utils.utils import generate_kfold, _compute_fi_with_stddev\n\nfrom ..abstract.abstract_model import AbstractModel\n\nlogger = logging.getLogger(__name__)\n\n\n# TODO: Add metadata object with info like score on each model, train time on each model, etc.\nclass BaggedEnsembleModel(AbstractModel):\n \"\"\"\n Bagged ensemble meta-model which fits a given model multiple times across different splits of the training data.\n \"\"\"\n _oof_filename = 'oof.pkl'\n\n def __init__(self, model_base: AbstractModel, random_state=0, **kwargs):\n self.model_base = model_base\n self._child_type = type(self.model_base)\n self.models = []\n self._oof_pred_proba = None\n self._oof_pred_model_repeats = None\n self._n_repeats = 0 # Number of n_repeats with at least 1 model fit, if kfold=5 and 8 models have been fit, _n_repeats is 2\n self._n_repeats_finished = 0 # Number of n_repeats finished, if kfold=5 and 8 models have been fit, _n_repeats_finished is 1\n self._k_fold_end = 0 # Number of models fit in current n_repeat (0 if completed), if kfold=5 and 8 models have been fit, _k_fold_end is 3\n self._k = None # k models per n_repeat, equivalent to kfold value\n self._k_per_n_repeat = [] # k-fold used for each n_repeat. == [5, 10, 3] if first kfold was 5, second was 10, and third was 3\n self._random_state = random_state\n self.low_memory = True\n self.bagged_mode = None\n\n try:\n feature_metadata = self.model_base.feature_metadata\n except:\n feature_metadata = None\n\n eval_metric = kwargs.pop('eval_metric', self.model_base.eval_metric)\n stopping_metric = kwargs.pop('stopping_metric', self.model_base.stopping_metric)\n\n super().__init__(problem_type=self.model_base.problem_type, eval_metric=eval_metric, stopping_metric=stopping_metric, feature_metadata=feature_metadata, **kwargs)\n\n def _set_default_params(self):\n default_params = {'save_bag_folds': True}\n for param, val in default_params.items():\n self._set_default_param_value(param, val)\n super()._set_default_params()\n\n def is_valid(self):\n return self.is_fit() and (self._n_repeats == self._n_repeats_finished)\n\n def can_infer(self):\n return self.is_fit() and self.params.get('save_bag_folds', True)\n\n def is_stratified(self):\n if self.problem_type == REGRESSION or self.problem_type == SOFTCLASS:\n return False\n else:\n return True\n\n def is_fit(self):\n return len(self.models) != 0\n\n # TODO: This assumes bagged ensemble has a complete k_fold and no partial k_fold models, this is likely fine but will act incorrectly if called when only a partial k_fold has been completed\n # Solving this is memory intensive, requires all oof_pred_probas from all n_repeats, so its probably not worth it.\n @property\n def oof_pred_proba(self):\n # TODO: Require is_valid == True (add option param to ignore is_valid)\n return self._oof_pred_proba_func(self._oof_pred_proba, self._oof_pred_model_repeats)\n\n @staticmethod\n def _oof_pred_proba_func(oof_pred_proba, oof_pred_model_repeats):\n oof_pred_model_repeats_without_0 = np.where(oof_pred_model_repeats == 0, 1, oof_pred_model_repeats)\n if oof_pred_proba.ndim == 2:\n oof_pred_model_repeats_without_0 = 
oof_pred_model_repeats_without_0[:, None]\n return oof_pred_proba / oof_pred_model_repeats_without_0\n\n def preprocess(self, X, preprocess_nonadaptive=True, model=None, **kwargs):\n if preprocess_nonadaptive:\n if model is None:\n if not self.models:\n return X\n model = self.models[0]\n model = self.load_child(model)\n return model.preprocess(X, preprocess_stateful=False)\n else:\n return X\n\n def _fit(self, X_train, y_train, k_fold=5, k_fold_start=0, k_fold_end=None, n_repeats=1, n_repeat_start=0, time_limit=None, **kwargs):\n if k_fold < 1:\n k_fold = 1\n if k_fold_end is None:\n k_fold_end = k_fold\n\n if self._oof_pred_proba is None and (k_fold_start != 0 or n_repeat_start != 0):\n self._load_oof()\n if n_repeat_start != self._n_repeats_finished:\n raise ValueError(f'n_repeat_start must equal self._n_repeats_finished, values: ({n_repeat_start}, {self._n_repeats_finished})')\n if n_repeats <= n_repeat_start:\n raise ValueError(f'n_repeats must be greater than n_repeat_start, values: ({n_repeats}, {n_repeat_start})')\n if k_fold_start != self._k_fold_end:\n raise ValueError(f'k_fold_start must equal previous k_fold_end, values: ({k_fold_start}, {self._k_fold_end})')\n if k_fold_start >= k_fold_end:\n # TODO: Remove this limitation if n_repeats > 1\n raise ValueError(f'k_fold_end must be greater than k_fold_start, values: ({k_fold_end}, {k_fold_start})')\n if (n_repeats - n_repeat_start) > 1 and k_fold_end != k_fold:\n # TODO: Remove this limitation\n raise ValueError(f'k_fold_end must equal k_fold when (n_repeats - n_repeat_start) > 1, values: ({k_fold_end}, {k_fold})')\n if self._k is not None and self._k != k_fold:\n raise ValueError(f'k_fold must equal previously fit k_fold value for the current n_repeat, values: (({k_fold}, {self._k})')\n fold_start = n_repeat_start * k_fold + k_fold_start\n fold_end = (n_repeats - 1) * k_fold + k_fold_end\n time_start = time.time()\n\n model_base = self._get_model_base()\n if self.features is not None:\n model_base.features = self.features\n model_base.feature_metadata = self.feature_metadata # TODO: Don't pass this here\n\n if self.model_base is not None:\n self.save_model_base(self.model_base)\n self.model_base = None\n\n if k_fold == 1:\n if self._n_repeats != 0:\n raise ValueError(f'n_repeats must equal 0 when fitting a single model with k_fold < 2, values: ({self._n_repeats}, {k_fold})')\n model_base.set_contexts(path_context=self.path + model_base.name + os.path.sep)\n time_start_fit = time.time()\n model_base.fit(X_train=X_train, y_train=y_train, time_limit=time_limit, **kwargs)\n model_base.fit_time = time.time() - time_start_fit\n model_base.predict_time = None\n self._oof_pred_proba = model_base.predict_proba(X=X_train) # TODO: Cheater value, will be overfit to valid set\n self._oof_pred_model_repeats = np.ones(shape=len(X_train), dtype=np.uint8)\n self._n_repeats = 1\n self._n_repeats_finished = 1\n self._k_per_n_repeat = [1]\n self.bagged_mode = False\n model_base.reduce_memory_size(remove_fit=True, remove_info=False, requires_save=True)\n if not self.params.get('save_bag_folds', True):\n model_base.model = None\n if self.low_memory:\n self.save_child(model_base, verbose=False)\n self.models = [model_base.name]\n else:\n self.models = [model_base]\n self._add_child_times_to_bag(model=model_base)\n return\n\n # TODO: Preprocess data here instead of repeatedly\n kfolds = generate_kfold(X=X_train, y=y_train, n_splits=k_fold, stratified=self.is_stratified(), random_state=self._random_state, n_repeats=n_repeats)\n\n oof_pred_proba, 
oof_pred_model_repeats = self._construct_empty_oof(X=X_train, y=y_train)\n\n models = []\n folds_to_fit = fold_end - fold_start\n for j in range(n_repeat_start, n_repeats): # For each n_repeat\n cur_repeat_count = j - n_repeat_start\n fold_start_n_repeat = fold_start + cur_repeat_count * k_fold\n fold_end_n_repeat = min(fold_start_n_repeat + k_fold, fold_end)\n # TODO: Consider moving model fit inner for loop to a function to simply this code\n for i in range(fold_start_n_repeat, fold_end_n_repeat): # For each fold\n folds_finished = i - fold_start\n folds_left = fold_end - i\n fold = kfolds[i]\n time_elapsed = time.time() - time_start\n if time_limit is not None:\n time_left = time_limit - time_elapsed\n required_time_per_fold = time_left / folds_left\n time_limit_fold = required_time_per_fold * 0.8\n if folds_finished > 0:\n expected_time_required = time_elapsed * folds_to_fit / folds_finished\n expected_remaining_time_required = expected_time_required * folds_left / folds_to_fit\n if expected_remaining_time_required > time_left:\n raise TimeLimitExceeded\n if time_left <= 0:\n raise TimeLimitExceeded\n else:\n time_limit_fold = None\n\n time_start_fold = time.time()\n train_index, val_index = fold\n X_train_fold, X_val_fold = X_train.iloc[train_index, :], X_train.iloc[val_index, :]\n y_train_fold, y_val_fold = y_train.iloc[train_index], y_train.iloc[val_index]\n fold_model = copy.deepcopy(model_base)\n fold_model.name = f'{fold_model.name}_F{i+1}'\n fold_model.set_contexts(self.path + fold_model.name + os.path.sep)\n fold_model.fit(X_train=X_train_fold, y_train=y_train_fold, X_val=X_val_fold, y_val=y_val_fold, time_limit=time_limit_fold, **kwargs)\n time_train_end_fold = time.time()\n if time_limit is not None: # Check to avoid unnecessarily predicting and saving a model when an Exception is going to be raised later\n if i != (fold_end - 1):\n time_elapsed = time.time() - time_start\n time_left = time_limit - time_elapsed\n expected_time_required = time_elapsed * folds_to_fit / (folds_finished + 1)\n expected_remaining_time_required = expected_time_required * (folds_left - 1) / folds_to_fit\n if expected_remaining_time_required > time_left:\n raise TimeLimitExceeded\n pred_proba = fold_model.predict_proba(X_val_fold)\n time_predict_end_fold = time.time()\n fold_model.fit_time = time_train_end_fold - time_start_fold\n fold_model.predict_time = time_predict_end_fold - time_train_end_fold\n fold_model.val_score = fold_model.score_with_y_pred_proba(y=y_val_fold, y_pred_proba=pred_proba)\n fold_model.reduce_memory_size(remove_fit=True, remove_info=False, requires_save=True)\n if not self.params.get('save_bag_folds', True):\n fold_model.model = None\n if self.low_memory:\n self.save_child(fold_model, verbose=False)\n models.append(fold_model.name)\n else:\n models.append(fold_model)\n oof_pred_proba[val_index] += pred_proba\n oof_pred_model_repeats[val_index] += 1\n self._add_child_times_to_bag(model=fold_model)\n if (fold_end_n_repeat != fold_end) or (k_fold == k_fold_end):\n self._k_per_n_repeat.append(k_fold)\n self.models += models\n\n self.bagged_mode = True\n\n if self._oof_pred_proba is None:\n self._oof_pred_proba = oof_pred_proba\n self._oof_pred_model_repeats = oof_pred_model_repeats\n else:\n self._oof_pred_proba += oof_pred_proba\n self._oof_pred_model_repeats += oof_pred_model_repeats\n\n self._n_repeats = n_repeats\n if k_fold == k_fold_end:\n self._k = None\n self._k_fold_end = 0\n self._n_repeats_finished = self._n_repeats\n else:\n self._k = k_fold\n self._k_fold_end = 
k_fold_end\n self._n_repeats_finished = self._n_repeats - 1\n\n def predict_proba(self, X, normalize=None, **kwargs):\n model = self.load_child(self.models[0])\n X = self.preprocess(X, model=model, **kwargs)\n pred_proba = model.predict_proba(X=X, preprocess_nonadaptive=False, normalize=normalize)\n for model in self.models[1:]:\n model = self.load_child(model)\n pred_proba += model.predict_proba(X=X, preprocess_nonadaptive=False, normalize=normalize)\n pred_proba = pred_proba / len(self.models)\n\n return pred_proba\n\n def _predict_proba(self, X, normalize=False, **kwargs):\n return self.predict_proba(X=X, normalize=normalize, **kwargs)\n\n def score_with_oof(self, y):\n self._load_oof()\n valid_indices = self._oof_pred_model_repeats > 0\n y = y[valid_indices]\n y_pred_proba = self.oof_pred_proba[valid_indices]\n\n return self.score_with_y_pred_proba(y=y, y_pred_proba=y_pred_proba)\n\n # TODO: Augment to generate OOF after shuffling each column in X (Batching), this is the fastest way.\n # TODO: v0.1 Reduce logging clutter during OOF importance calculation (Currently logs separately for each child)\n # Generates OOF predictions from pre-trained bagged models, assuming X and y are in the same row order as used in .fit(X, y)\n def compute_feature_importance(self, X, y, features=None, is_oof=True, time_limit=None, silent=False, **kwargs) -> pd.DataFrame:\n if features is None:\n features = self.load_child(model=self.models[0]).features\n if not is_oof:\n return super().compute_feature_importance(X, y, features=features, time_limit=time_limit, silent=silent, **kwargs)\n fi_fold_list = []\n model_index = 0\n num_children = len(self.models)\n if time_limit is not None:\n time_limit_per_child = time_limit / num_children\n else:\n time_limit_per_child = None\n if not silent:\n logging_message = f'Computing feature importance via permutation shuffling for {len(features)} features using out-of-fold (OOF) data aggregated across {num_children} child models...'\n if time_limit is not None:\n logging_message = f'{logging_message} Time limit: {time_limit}s...'\n logger.log(20, logging_message)\n\n time_start = time.time()\n early_stop = False\n children_completed = 0\n log_final_suffix = ''\n for n_repeat, k in enumerate(self._k_per_n_repeat):\n if is_oof:\n if not self.bagged_mode:\n raise AssertionError('Model trained with no validation data cannot get feature importances on training data, please specify new test data to compute feature importances (model=%s)' % self.name)\n kfolds = generate_kfold(X=X, y=y, n_splits=k, stratified=self.is_stratified(), random_state=self._random_state, n_repeats=n_repeat + 1)\n cur_kfolds = kfolds[n_repeat * k:(n_repeat+1) * k]\n else:\n cur_kfolds = [(None, list(range(len(X))))]*k\n for i, fold in enumerate(cur_kfolds):\n _, test_index = fold\n model = self.load_child(self.models[model_index + i])\n fi_fold = model.compute_feature_importance(X=X.iloc[test_index, :], y=y.iloc[test_index], features=features, time_limit=time_limit_per_child,\n silent=silent, log_prefix='\\t', importance_as_list=True, **kwargs)\n fi_fold_list.append(fi_fold)\n\n children_completed += 1\n if time_limit is not None and children_completed != num_children:\n time_now = time.time()\n time_left = time_limit - (time_now - time_start)\n time_child_average = (time_now - time_start) / children_completed\n if time_left < (time_child_average * 1.1):\n log_final_suffix = f' (Early stopping due to lack of time...)'\n early_stop = True\n break\n if early_stop:\n break\n model_index += k\n # TODO: DON'T 
THROW AWAY SAMPLES! USE LARGER N\n fi_list_dict = dict()\n for val in fi_fold_list:\n val = val['importance'].to_dict() # TODO: Don't throw away stddev information of children\n for key in val:\n if key not in fi_list_dict:\n fi_list_dict[key] = []\n fi_list_dict[key] += val[key]\n fi_df = _compute_fi_with_stddev(fi_list_dict)\n\n if not silent:\n logger.log(20, f'\\t{round(time.time() - time_start, 2)}s\\t= Actual runtime (Completed {children_completed} of {num_children} children){log_final_suffix}')\n\n return fi_df\n\n def load_child(self, model, verbose=False) -> AbstractModel:\n if isinstance(model, str):\n child_path = self.create_contexts(self.path + model + os.path.sep)\n return self._child_type.load(path=child_path, verbose=verbose)\n else:\n return model\n\n def save_child(self, model, verbose=False):\n child = self.load_child(model)\n child.set_contexts(self.path + child.name + os.path.sep)\n child.save(verbose=verbose)\n\n # TODO: Multiply epochs/n_iterations by some value (such as 1.1) to account for having more training data than bagged models\n def convert_to_refit_full_template(self):\n init_args = self._get_init_args()\n init_args['hyperparameters']['save_bag_folds'] = True # refit full models must save folds\n model_base_name_orig = init_args['model_base'].name\n init_args['model_base'] = self.convert_to_refitfull_template_child()\n model_base_name_new = init_args['model_base'].name\n if model_base_name_orig in init_args['name'] and model_base_name_orig != model_base_name_new:\n init_args['name'] = init_args['name'].replace(model_base_name_orig, model_base_name_new, 1)\n else:\n init_args['name'] = init_args['name'] + '_FULL'\n\n model_full_template = self.__class__(**init_args)\n return model_full_template\n\n def convert_to_refitfull_template_child(self):\n compressed_params = self._get_compressed_params()\n child_compressed = copy.deepcopy(self._get_model_base())\n child_compressed.feature_metadata = self.feature_metadata # TODO: Don't pass this here\n child_compressed.params = compressed_params\n child_compressed.name = child_compressed.name + REFIT_FULL_SUFFIX\n child_compressed.set_contexts(self.path_root + child_compressed.name + os.path.sep)\n return child_compressed\n\n def _get_init_args(self):\n init_args = dict(\n model_base=self._get_model_base(),\n random_state=self._random_state,\n )\n init_args.update(super()._get_init_args())\n init_args.pop('problem_type')\n init_args.pop('feature_metadata')\n return init_args\n\n def _get_compressed_params(self, model_params_list=None):\n if model_params_list is None:\n model_params_list = [\n self.load_child(child).get_trained_params()\n for child in self.models\n ]\n\n model_params_compressed = dict()\n for param in model_params_list[0].keys():\n model_param_vals = [model_params[param] for model_params in model_params_list]\n if all(isinstance(val, bool) for val in model_param_vals):\n counter = Counter(model_param_vals)\n compressed_val = counter.most_common(1)[0][0]\n elif all(isinstance(val, int) for val in model_param_vals):\n compressed_val = round(mean(model_param_vals))\n elif all(isinstance(val, float) for val in model_param_vals):\n compressed_val = mean(model_param_vals)\n else:\n try:\n counter = Counter(model_param_vals)\n compressed_val = counter.most_common(1)[0][0]\n except TypeError:\n compressed_val = model_param_vals[0]\n model_params_compressed[param] = compressed_val\n return model_params_compressed\n\n def _get_compressed_params_trained(self):\n model_params_list = [\n 
self.load_child(child).params_trained\n for child in self.models\n ]\n return self._get_compressed_params(model_params_list=model_params_list)\n\n def _get_model_base(self):\n if self.model_base is None:\n return self.load_model_base()\n else:\n return self.model_base\n\n def _add_child_times_to_bag(self, model):\n if self.fit_time is None:\n self.fit_time = model.fit_time\n else:\n self.fit_time += model.fit_time\n\n if self.predict_time is None:\n self.predict_time = model.predict_time\n else:\n self.predict_time += model.predict_time\n\n @classmethod\n def load(cls, path: str, reset_paths=True, low_memory=True, load_oof=False, verbose=True):\n model = super().load(path=path, reset_paths=reset_paths, verbose=verbose)\n if not low_memory:\n model.persist_child_models(reset_paths=reset_paths)\n if load_oof:\n model._load_oof()\n return model\n\n @classmethod\n def load_oof(cls, path, verbose=True):\n try:\n oof = load_pkl.load(path=path + 'utils' + os.path.sep + cls._oof_filename, verbose=verbose)\n oof_pred_proba = oof['_oof_pred_proba']\n oof_pred_model_repeats = oof['_oof_pred_model_repeats']\n except FileNotFoundError:\n model = cls.load(path=path, reset_paths=True, verbose=verbose)\n model._load_oof()\n oof_pred_proba = model._oof_pred_proba\n oof_pred_model_repeats = model._oof_pred_model_repeats\n return cls._oof_pred_proba_func(oof_pred_proba=oof_pred_proba, oof_pred_model_repeats=oof_pred_model_repeats)\n\n def _load_oof(self):\n if self._oof_pred_proba is not None:\n pass\n else:\n oof = load_pkl.load(path=self.path + 'utils' + os.path.sep + self._oof_filename)\n self._oof_pred_proba = oof['_oof_pred_proba']\n self._oof_pred_model_repeats = oof['_oof_pred_model_repeats']\n\n def persist_child_models(self, reset_paths=True):\n for i, model_name in enumerate(self.models):\n if isinstance(model_name, str):\n child_path = self.create_contexts(self.path + model_name + os.path.sep)\n child_model = self._child_type.load(path=child_path, reset_paths=reset_paths, verbose=True)\n self.models[i] = child_model\n\n def load_model_base(self):\n return load_pkl.load(path=self.path + 'utils' + os.path.sep + 'model_template.pkl')\n\n def save_model_base(self, model_base):\n save_pkl.save(path=self.path + 'utils' + os.path.sep + 'model_template.pkl', object=model_base)\n\n def save(self, path=None, verbose=True, save_oof=True, save_children=False) -> str:\n if path is None:\n path = self.path\n\n if save_children:\n model_names = []\n for child in self.models:\n child = self.load_child(child)\n child.set_contexts(path + child.name + os.path.sep)\n child.save(verbose=False)\n model_names.append(child.name)\n self.models = model_names\n\n if save_oof and self._oof_pred_proba is not None:\n save_pkl.save(path=path + 'utils' + os.path.sep + self._oof_filename, object={\n '_oof_pred_proba': self._oof_pred_proba,\n '_oof_pred_model_repeats': self._oof_pred_model_repeats,\n })\n self._oof_pred_proba = None\n self._oof_pred_model_repeats = None\n\n return super().save(path=path, verbose=verbose)\n\n # If `remove_fit_stack=True`, variables will be removed that are required to fit more folds and to fit new stacker models which use this model as a base model.\n # This includes OOF variables.\n def reduce_memory_size(self, remove_fit_stack=False, remove_fit=True, remove_info=False, requires_save=True, reduce_children=False, **kwargs):\n super().reduce_memory_size(remove_fit=remove_fit, remove_info=remove_info, requires_save=requires_save, **kwargs)\n if remove_fit_stack:\n try:\n os.remove(self.path + 'utils' 
+ os.path.sep + self._oof_filename)\n except FileNotFoundError:\n pass\n if requires_save:\n self._oof_pred_proba = None\n self._oof_pred_model_repeats = None\n try:\n os.remove(self.path + 'utils' + os.path.sep + 'model_template.pkl')\n except FileNotFoundError:\n pass\n if requires_save:\n self.model_base = None\n try:\n os.rmdir(self.path + 'utils')\n except OSError:\n pass\n if reduce_children:\n for model in self.models:\n model = self.load_child(model)\n model.reduce_memory_size(remove_fit=remove_fit, remove_info=remove_info, requires_save=requires_save, **kwargs)\n if requires_save and self.low_memory:\n self.save_child(model=model)\n\n def _get_model_names(self):\n model_names = []\n for model in self.models:\n if isinstance(model, str):\n model_names.append(model)\n else:\n model_names.append(model.name)\n return model_names\n\n def get_info(self):\n info = super().get_info()\n children_info = self._get_child_info()\n child_memory_sizes = [child['memory_size'] for child in children_info.values()]\n sum_memory_size_child = sum(child_memory_sizes)\n if child_memory_sizes:\n max_memory_size_child = max(child_memory_sizes)\n else:\n max_memory_size_child = 0\n if self.low_memory:\n max_memory_size = info['memory_size'] + sum_memory_size_child\n min_memory_size = info['memory_size'] + max_memory_size_child\n else:\n max_memory_size = info['memory_size']\n min_memory_size = info['memory_size'] - sum_memory_size_child + max_memory_size_child\n\n bagged_info = dict(\n child_model_type=self._child_type.__name__,\n num_child_models=len(self.models),\n child_model_names=self._get_model_names(),\n _n_repeats=self._n_repeats,\n # _n_repeats_finished=self._n_repeats_finished, # commented out because these are too technical\n # _k_fold_end=self._k_fold_end,\n # _k=self._k,\n _k_per_n_repeat=self._k_per_n_repeat,\n _random_state=self._random_state,\n low_memory=self.low_memory, # If True, then model will attempt to use at most min_memory_size memory by having at most one child in memory. 
If False, model will use max_memory_size memory.\n bagged_mode=self.bagged_mode,\n max_memory_size=max_memory_size, # Memory used when all children are loaded into memory at once.\n min_memory_size=min_memory_size, # Memory used when only the largest child is loaded into memory.\n child_hyperparameters=self._get_model_base().params,\n child_hyperparameters_fit = self._get_compressed_params_trained(),\n child_ag_args_fit = self._get_model_base().params_aux,\n )\n info['bagged_info'] = bagged_info\n info['children_info'] = children_info\n\n child_features_full = list(set().union(*[child['features'] for child in children_info.values()]))\n info['features'] = child_features_full\n info['num_features'] = len(child_features_full)\n\n return info\n\n def get_memory_size(self):\n models = self.models\n self.models = None\n memory_size = super().get_memory_size()\n self.models = models\n return memory_size\n\n def _get_child_info(self):\n child_info_dict = dict()\n for model in self.models:\n if isinstance(model, str):\n child_path = self.create_contexts(self.path + model + os.path.sep)\n child_info_dict[model] = self._child_type.load_info(child_path)\n else:\n child_info_dict[model.name] = model.get_info()\n return child_info_dict\n\n def _construct_empty_oof(self, X, y):\n if self.problem_type == MULTICLASS:\n oof_pred_proba = np.zeros(shape=(len(X), len(y.unique())), dtype=np.float32)\n elif self.problem_type == SOFTCLASS:\n oof_pred_proba = np.zeros(shape=y.shape, dtype=np.float32)\n else:\n oof_pred_proba = np.zeros(shape=len(X), dtype=np.float32)\n oof_pred_model_repeats = np.zeros(shape=len(X), dtype=np.uint8)\n return oof_pred_proba, oof_pred_model_repeats\n\n def _preprocess_fit_resources(self, silent=False, **kwargs):\n \"\"\"Pass along to child models to avoid altering up-front\"\"\"\n return kwargs\n\n # TODO: Currently double disk usage, saving model in HPO and also saving model in bag\n def _hyperparameter_tune(self, X_train, y_train, k_fold, scheduler_options, preprocess_kwargs=None, **kwargs):\n if len(self.models) != 0:\n raise ValueError('self.models must be empty to call hyperparameter_tune, value: %s' % self.models)\n\n self.model_base.feature_metadata = self.feature_metadata # TODO: Move this\n\n # TODO: Preprocess data here instead of repeatedly\n if preprocess_kwargs is None:\n preprocess_kwargs = dict()\n X_train = self.preprocess(X=X_train, preprocess=False, fit=True, **preprocess_kwargs)\n kfolds = generate_kfold(X=X_train, y=y_train, n_splits=k_fold, stratified=self.is_stratified(), random_state=self._random_state, n_repeats=1)\n\n train_index, test_index = kfolds[0]\n X_train_fold, X_val_fold = X_train.iloc[train_index, :], X_train.iloc[test_index, :]\n y_train_fold, y_val_fold = y_train.iloc[train_index], y_train.iloc[test_index]\n orig_time = scheduler_options[1]['time_out']\n scheduler_options[1]['time_out'] = orig_time * 0.8 # TODO: Scheduler doesn't early stop on final model, this is a safety net. 
Scheduler should be updated to early stop\n hpo_models, hpo_model_performances, hpo_results = self.model_base.hyperparameter_tune(X_train=X_train_fold, y_train=y_train_fold, X_val=X_val_fold, y_val=y_val_fold, scheduler_options=scheduler_options, **kwargs)\n scheduler_options[1]['time_out'] = orig_time\n\n bags = {}\n bags_performance = {}\n for i, (model_name, model_path) in enumerate(hpo_models.items()):\n child: AbstractModel = self._child_type.load(path=model_path)\n y_pred_proba = child.predict_proba(X_val_fold)\n\n # TODO: Create new Ensemble Here\n bag = copy.deepcopy(self)\n bag.name = bag.name + os.path.sep + str(i)\n bag.set_contexts(self.path_root + bag.name + os.path.sep)\n\n oof_pred_proba, oof_pred_model_repeats = self._construct_empty_oof(X=X_train, y=y_train)\n oof_pred_proba[test_index] += y_pred_proba\n oof_pred_model_repeats[test_index] += 1\n\n bag.model_base = None\n child.set_contexts(bag.path + child.name + os.path.sep)\n bag.save_model_base(child.convert_to_template())\n\n bag._k = k_fold\n bag._k_fold_end = 1\n bag._n_repeats = 1\n bag._oof_pred_proba = oof_pred_proba\n bag._oof_pred_model_repeats = oof_pred_model_repeats\n child.name = child.name + '_fold_0'\n child.set_contexts(bag.path + child.name + os.path.sep)\n if not self.params.get('save_bag_folds', True):\n child.model = None\n if bag.low_memory:\n bag.save_child(child, verbose=False)\n bag.models.append(child.name)\n else:\n bag.models.append(child)\n bag.val_score = child.val_score\n bag._add_child_times_to_bag(model=child)\n\n bag.save()\n bags[bag.name] = bag.path\n bags_performance[bag.name] = bag.val_score\n\n # TODO: hpo_results likely not correct because no renames\n return bags, bags_performance, hpo_results\n" ]
[ [ "numpy.where", "numpy.zeros" ] ]
baidut/PatchVQ
[ "040486b6342dfd36695f1daea0b5c4d77d728a23" ]
[ "fastiqa/browser.py" ]
[ "\"\"\"\nWhat you should know about browser:\n\n* (Browser() << KonIQ) + (Browser() << CLIVE) -- close the first one will close all\n* sync mode: show results of different methods. (qmap comparison, or one showing qmap, one showing the results, very flexible)\n\n# Browser(methods=['PaQ2PiQ-BM', 'PaQ2PiQ-RM']) << KonIQ\n\n# Browser() << KoNViD\n\"\"\"\n\nfrom .bunch import IqaDataBunch\nfrom cached_property import cached_property\n# IqaData, Rois0123Label, cached_property\nfrom pathlib import Path\nimport os, io\nfrom tkinter import *\nfrom tkinter import messagebox\nfrom PIL import Image, ImageTk # put it after tkinter to overwrite tkinter.Image\nimport numpy as np # np.roll\nimport logging\n\n\"\"\"\n# %% browse one database\nfrom fastiqa.bunches.iqa.im2mos import *\nfrom fastiqa.iqa import *\ndls = Im2MOS(bs=2)\ndls.bs\ndls.df\ndls.get_df()\n# %%\ndls2 = dls << CLIVE\ndls2.bs\ndls.bs\n\n# %%\n# dls.show_batch()\ndls.bs\n\n\n# %%\nself = IqaDataBunch() << CLIVE\nself.df\n# %%\nfrom fastiqa.iqa import *\nfrom fastiqa.browser import *\nself = Browser() << LIVE_FB_IQA # CLIVE\nself\n\npropobj = getattr(self.__class__, 'index', None)\npropobj\n# %%\nself.df\n\nself.reload()\nprint(self._df_view)\nself.df\n# %%\n\n# NOTE: exit to run next browser\nBrowser(KonIQ)\nBrowser(FLIVE)\nBrowser(FLIVE640)\n\n# %% browse multiple database at the same time\nfrom fastiqa.gui import *; Browser(FLIVE640) + Browser(CLIVE) + Browser(KonIQ)\n\n\n# %%\nfrom fastiqa.browser import *\nfrom fastiqa.iqa import *\n# Browser() << KonIQ\n(Browser() << KonIQ) + (Browser() << CLIVE)\n# %%\na.label_types\na.label_col\n# Browser() << CLIVE\n#\n\n\n# %%\n\n\nfrom fastiqa.vqa import *\n\n\n\n# Browser << KonIQ << CLIVE\nVidBrowser() << KoNViD\n# %%\n\"\"\"\n\nclass Browser(IqaDataBunch):\n # TODO label_types: also show predictions\n pred = None\n fn = None\n img = None\n tk_img = None\n canvas = None\n _index = 0\n percent = 1 # 100%\n cfg_rectangle = {}\n hide_scores = False\n opt_bbox_width = [4, 0, 1]\n out_dir = Path('')\n _df_view = None\n width = None\n height = None\n # label_types = None # 'mos', # 'mos', 'PaQ2PiQ', 'NIQE'\n label_range = None # map the mos to (0, 100)\n roi_col = [[\"left\", \"top\", \"right\", \"bottom\"]]\n\n @cached_property\n def label_cols(self):\n return self.label_col if isinstance( self.label_col, (list, tuple) ) else [self.label_col]\n\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n\n def get_df(self):\n df = super().get_df()\n self.opt_label_col = self.label_cols\n self.roi_col = np.array(self.roi_col).reshape(-1,4)\n self.opt_roi_col_idx = list(range(len(self.roi_col)))\n # if self.width is not None: # and not isinstance(self.label_col, (list, tuple)):\n if len(self.opt_label_col) == 1: # no roi location\n df['left'] = 0\n df['top'] = 0\n if self.width is not None:\n df['right'] = self.width\n df['bottom'] = self.height\n else:\n df['bottom'] = df['height']\n df['right'] = df['width']\n if self.label_range is not None:\n print('scores are mapped to (0, 100) for browsing')\n min, max = self.label_range\n for col in self.opt_label_col:\n df[col] = (df[col] - min )*100/(max-min)\n\n if self.pred is not None:\n print('sort by pred error')\n if len(self.pred) != len(df): # only valid set\n df = df[df.is_valid]\n\n df['pred'] = self.pred\n assert len(self.pred) == len(df), 'number of predictions does not match with number of actual values'\n assert len(df[df['pred'].isna()]) == 0, \"self.pred = df['output'].tolist()\"\n df['pred_err'] = df['pred'] - df[self.opt_label_col[0]]\n 
df = df.sort_values(by='pred_err', ignore_index=True) # pred > target, pred < target\n return df\n\n def __add__(self, other):\n other.window = Toplevel(master=self.window)\n other.load_frame()\n return self\n\n def load_frame(self):\n self.reload()\n self.frame = Frame(self.window, width=500, height=400, bd=1)\n self.frame.pack()\n self.frame.bind(\"<Key>\", self.on_key) # canvas covered by image don't response to key press...\n self.frame.bind(\"<Left>\", self.prev)\n self.frame.bind(\"<Right>\", self.next)\n self.frame.bind(\"<Up>\", self.prev_mode)\n self.frame.bind(\"<Down>\", self.next_mode)\n self.frame.bind(\"<Escape>\", self.exit)\n self.canvas = Canvas(self.frame)\n # self.canvas.bind(\"<Button-1>\", self.callback)\n self.frame.focus_set()\n self.window.protocol(\"WM_DELETE_WINDOW\", self.exit)\n self.show()\n\n @cached_property\n def window(self):\n return Tk()\n\n def _repr_html_(self):\n self.load_frame()\n return self.window.mainloop()\n\n @property\n def index(self):\n return self._index\n\n @index.setter # __setattr__ conflict\n def index(self, value):\n logging.debug('index:', value)\n self._index = int(value) % len(self._df_view)\n\n def show(self):\n # suffix\n # zscore? prefix\n #\n def add_bbox(roi_col_idx):\n #x1, x2 = self._df_view['left' + suffix][self.index], self._df_view['right' + suffix][self.index]\n # y1, y2 = self._df_view['top' + suffix][self.index], self._df_view['bottom' + suffix][self.index]\n roi_col = self.roi_col[roi_col_idx]\n x1, y1, x2, y2 = self._df_view.loc[self.index, roi_col].tolist()\n\n color = 'lightgreen' if roi_col_idx == self.opt_roi_col_idx[0] else 'yellow'\n self.canvas.create_rectangle(x1, y1, x2, y2, outline=color, width=self.opt_bbox_width[0], **self.cfg_rectangle)\n\n if not self.hide_scores:\n # TODO self.label_cols[0] mos or zscore (add score_mode)\n # show all predictions? mos, zscore, pred\n # assert type(self.label_col) != list\n s = f\"{self._df_view[self.label_cols[roi_col_idx]][self.index]:.1f}\"\n # if len(self.opt_roi_col_idx)==1 and self.pred is not None:\n if roi_col_idx==0 and self.pred is not None: # image score\n s = f\"Actual: {s} / Predication: {self.pred[self.index]:.1f}\" # load from the table!!!!\n text = self.canvas.create_text((x1, y1), anchor=NW, text=s)\n r = self.canvas.create_rectangle(self.canvas.bbox(text), fill=color, outline=color)\n self.canvas.tag_lower(r, text)\n\n self.fn = self._df_view[self.fn_col][self.index]\n file = self.path / self.folder / (str(self.fn)+self.fn_suffix) # some database (e.g. 
KoNViD, AVA) contain fn typed int, convert it first\n self.img = self.open_image(file)\n width, height = self.img.size\n # PIL image\n self.tk_img = ImageTk.PhotoImage(self.img)\n # tk_img = ImageTk.PhotoImage(im)\n # self.canvas.itemconfig(self.image_on_canvas, image=tk_img)\n # then it will be optimized, showing nothing\n\n self.canvas.delete(\"all\")\n self.canvas.config(width=width, height=height)\n\n self.canvas.create_image(0, 0, image=self.tk_img, anchor=NW)\n\n # only for Rois0123Label\n # if isinstance(self.label, Rois0123Label):\n for idx in self.opt_roi_col_idx:\n add_bbox(idx)\n # add_bbox('_image')\n # add_bbox('_patch_1')\n # add_bbox('_patch_2')\n # add_bbox('_patch_3')\n\n # self.image_on_canvas =\n # self.canvas.itemconfig(self.image_on_canvas, image=self.tk_img)\n #\n # self.canvas.coords(self.patch1_on_canvas,\n # self._df_view.left_patch_1[self.index],\n # self._df_view.top_patch_1[self.index],\n # self._df_view.right_patch_1[self.index],\n # self._df_view.bottom_patch_1[self.index],\n # )\n\n self.canvas.pack()\n fn = self._df_view[self.fn_col][self.index]\n self.window.title(f'[{width}x{height}]({self.index + 1}/{len(self._df_view)}: {self.percent * 100:.2f}%) {fn}')\n\n # some API to custom your browser\n def open_image(self, file):\n \"\"\"\n\n :param file:\n :return: a PIL image\n \"\"\"\n return Image.open(file) # \"../data/FLIVE/EE371R/cj23478+019.jpg\"\n # if self.apply_img_proc: im = self.img_proc(im)\n\n\n def prev(self, event=None):\n self.index -= 1\n self.show()\n\n def next(self, event=None):\n self.index += 1\n self.show()\n\n def prev_mode(self, event=None):\n self.opt_roi_col_idx = np.roll(self.opt_roi_col_idx, -1)\n self.show()\n\n def next_mode(self, event=None):\n self.opt_roi_col_idx = np.roll(self.opt_roi_col_idx, 1)\n self.show()\n\n # def reset(self, event):\n # self.valid_mos = None\n\n def exit(self, event=None):\n self.window.destroy()\n\n def filter(self, func):\n df = self._df_view[func(self._df_view)]\n if len(df) == 0:\n messagebox.showwarning(\"Warning\", \"No image found!\")\n else:\n self.percent = len(df) / len(self._df_view)\n self._df_view = df.reset_index() # otherwise index 0 will be dropped\n self._index = 0\n self.show()\n return self\n\n def save_image(self):\n # self.grab_image(self.canvas).save(self.fn)\n # https://stackoverflow.com/questions/41940945/saving-canvas-from-tkinter-to-file?rq=1\n ps = self.canvas.postscript(colormode='color')\n img = Image.open(io.BytesIO(ps.encode('utf-8')))\n img.save(self.out_dir / self.fn.rsplit('/', 1)[1], 'jpeg')\n\n def reload(self):\n self._df_view = self.df\n\n def on_key(self, event):\n self.frame.focus_set()\n # print(\"pressed\", repr(event.char))\n if event.char in [str(n) for n in range(10)]:\n self.reload()\n col_name = self.opt_label_col[0]\n # there might not be valid data\n self.filter(lambda x: x[col_name] // 10 == int(event.char))\n\n elif event.char is ' ':\n self.reload()\n self.show()\n elif event.char is 's': # save capture\n self.save_image()\n\n elif event.char is 'h': # hide score\n self.hide_scores = not self.hide_scores\n self.show()\n elif event.char is 'w': # i\n self.opt_bbox_width = np.roll(self.opt_bbox_width, 1)\n self.show()\n else:\n pass\n # print(self.index)\n\n\n # https://stackoverflow.com/questions/9886274/how-can-i-convert-canvas-content-to-an-image\n # def grab_image(self, widget):\n # x = self.window.winfo_rootx() + widget.winfo_x()\n # y = self.window.winfo_rooty() + widget.winfo_y()\n # x1 = x + widget.winfo_width()\n # y1 = y + 
widget.winfo_height()\n # return ImageGrab.grab().crop((x, y, x1, y1))\n # # .save(filename)\n\n def callback(self, event):\n self.frame.focus_set()\n print(\"clicked at\", event.x, event.y)\n print(self._df_view[self.fn_col][self.index])\n\n\n\nclass VidBrowser(Browser):\n def open_image(self, file):\n \"\"\"\n\n :param file:\n :return: a PIL image\n \"\"\"\n file = file/'image_00001.jpg'\n return Image.open(file) # \"../data/FLIVE/EE371R/cj23478+019.jpg\"\n # if self.apply_img_proc: im = self.img_proc(im)\n\n\"\"\"\nWontFix\n* support different backend: tkinter or matplotlib\n\nReference\n=========\n\nhttps://effbot.org/tkinterbook/tkinter-events-and-bindings.htm\n\nMatplotlib backbone\n===================\n\nhttps://matplotlib.org/gallery/animation/image_slices_viewer.html\n\n\nPySimpleGUI\n============\n\nPySimpleGUI is a wrapper for Tkinter and Qt (others on the way). The amount of code required to implement custom GUIs is much shorter using PySimpleGUI than if the same GUI were written directly using Tkinter or Qt.\n\nsudo apt-get install python-tk\nsudo apt-get install python3-tk\n\nhttps://github.com/PySimpleGUI/PySimpleGUI\n\nnot working here, cannot switch images\n\nTkinter\n========\n\nsudo apt-get install python3.6-tk\n\n\nwont support python 2\nfor browser only, support python 2\n\nimport sys\nif sys.version_info[0] == 3:\n # for Python3\n from tkinter import *\n # print(TclVersion)\nelse:\n # for Python2\n from Tkinter import *\n\n\"\"\"\n" ]
[ [ "numpy.array", "numpy.roll" ] ]
SamPaskewitz/statsrat
[ "3f970f1731b7ec2e22c36a49375619e6afb802a8" ]
[ "statsrat/exemplar/atn_update.py" ]
[ "import numpy as np\n\ndef null(sim, x, y, y_psb, rtrv, y_hat, y_lrn, x_ex, y_ex, n_x, n_y, ex_seen_yet, ex_counts, n_ex, sim_pars):\n '''\n Don't update attention (it remains constant).\n '''\n return 0\nnull.par_names = []\n\ndef gradient_ngsec(sim, x, y, y_psb, rtrv, y_hat, y_lrn, x_ex, y_ex, n_x, n_y, ex_seen_yet, ex_counts, n_ex, sim_pars):\n '''\n Gradient descent on total squared error (assuming separate attention weights for each exemplar)\n when rtrv = normalized_sim_ex_counts and sim = Gaussian.\n \n Notes\n -----\n I have double checked that the math is correct (SP, 4/14/2021).\n '''\n delta = y - y_hat\n # use loops to keep things simple for now\n update = sim_pars['atn_lrate_par']*sim_pars['decay_rate']*np.ones((n_ex, n_x))\n for m in range(n_ex):\n for n in range(n_x):\n sq_dist = (x[n] - x_ex[m, n])**2\n error_factor = np.sum(delta*(y_hat - y_ex[m, :]))\n update[m, n] *= rtrv[m]*sq_dist*error_factor\n return update\ngradient_ngsec.par_names = ['atn_lrate_par']\n\ndef gradient_ngsec_common(sim, x, y, y_psb, rtrv, y_hat, y_lrn, x_ex, y_ex, n_x, n_y, ex_seen_yet, ex_counts, n_ex, sim_pars):\n '''\n Gradient descent on total squared error (assuming common attention weights across exemplars)\n when rtrv = normalized_sim_ex_counts and sim = Gaussian.\n '''\n delta = y - y_hat\n # use loops to keep things simple for now\n update = -sim_pars['atn_lrate_par']*sim_pars['decay_rate']*np.ones((n_ex, n_x))\n for n in range(n_x):\n sq_dist = (x[n] - x_ex[:, n])**2\n rwsd = np.sum(rtrv*sq_dist) # retrieval weighted sum of sq_dist\n foo = y_ex*(rtrv*(sq_dist - rwsd)).reshape((n_ex, 1))\n ex_factor = np.sum(foo, axis = 0)\n update[:, n] *= np.sum(delta*ex_factor)\n return update\ngradient_ngsec_common.par_names = ['atn_lrate_par']\n\ndef gradient_ngsec_both(sim, x, y, y_psb, rtrv, y_hat, y_lrn, x_ex, y_ex, n_x, n_y, ex_seen_yet, ex_counts, n_ex, sim_pars):\n '''\n Gradient descent on total squared error when rtrv = normalized_sim_ex_counts and sim = Gaussian.\n Attention weights have two parts: one that is common across exemplars (for each cue) and one\n that is unique to each exemplar/cue.\n '''\n delta = y - y_hat\n # update for common part of weights\n update_c = -sim_pars['atn_lrate_par']*sim_pars['decay_rate']*np.ones((n_ex, n_x))\n for n in range(n_x):\n sq_dist = (x[n] - x_ex[:, n])**2\n rwsd = np.sum(rtrv*sq_dist) # retrieval weighted sum of sq_dist\n foo = y_ex*(rtrv*(sq_dist - rwsd)).reshape((n_ex, 1))\n ex_factor = np.sum(foo, axis = 0)\n update_c[:, n] *= np.sum(delta*ex_factor)\n \n # update for separate part of weights\n update_s = sim_pars['atn_lrate_par']*sim_pars['decay_rate']*np.ones((n_ex, n_x))\n for m in range(n_ex):\n for n in range(n_x):\n sq_dist = (x[n] - x_ex[m, n])**2\n error_factor = np.sum(delta*(y_hat - y_ex[m, :]))\n update_s[m, n] *= rtrv[m]*sq_dist*error_factor\n \n return update_c + update_s\ngradient_ngsec_both.par_names = ['atn_lrate_par']\n\ndef gradient_norm_cityblock_common(sim, x, y, y_psb, rtrv, y_hat, y_lrn, x_ex, y_ex, n_x, n_y, ex_seen_yet, ex_counts, n_ex, sim_pars):\n '''\n Gradient descent on total squared error (assuming common attention weights across exemplars)\n when rtrv = normalized_sim_ex_counts and sim = city_block (based on L1 distance).\n '''\n delta = y - y_hat\n # use loops to keep things simple for now\n update = -sim_pars['atn_lrate_par']*sim_pars['decay_rate']*np.ones((n_ex, n_x))\n for n in range(n_x):\n abs_dif = np.abs(x[n] - x_ex[:, n])\n rwsd = np.sum(rtrv*abs_dif) # retrieval weighted sum of sq_dist\n foo = 
y_ex*(rtrv*(abs_dif - rwsd)).reshape((n_ex, 1))\n ex_factor = np.sum(foo, axis = 0)\n update[:, n] *= np.sum(delta*ex_factor)\n return update\ngradient_norm_cityblock_common.par_names = ['atn_lrate_par']\n\ndef heuristic(sim, x, y, y_psb, rtrv, y_hat, y_lrn, x_ex, y_ex, n_x, n_y, ex_seen_yet, ex_counts, n_ex, sim_pars):\n '''\n Heuristic designed to adjust attention toward relevant stimuli.\n Each exemplar has a separate set of attention weights.\n Only the current exemplar's weights are adjusted.\n '''\n current = sim == 1 # assume that current exemplar has a similarity of 1, and no others do\n update = np.zeros((n_ex, n_x))\n for m in range(n_ex):\n if ex_seen_yet[m]:\n sq_y_dist = np.sum((y_ex[m, :] - y)**2)\n for n in range(n_x):\n sq_x_dist = (x_ex[m, n] - x[n])**2\n update[current, n] += sim_pars['atn_lrate_par']*sq_x_dist*sq_y_dist\n return update\nheuristic.par_names = ['atn_lrate_par']" ]
[ [ "numpy.ones", "numpy.sum", "numpy.abs", "numpy.zeros" ] ]
MohitChaudhari7/Machine-Learning-Using-Python
[ "ff368791133a54df098490d283daf30547b10e8e" ]
[ "Classification/logistic_regression.py" ]
[ "# Logistic Regression\n\n# Importing the libraries\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\n# Importing the dataset\ndataset = pd.read_csv('../Datasets/advertising.csv')\nind = dataset.iloc[:, [0, 2]].values #independent variables(daily time spent on the site and income)\ndep = dataset.iloc[:, -1].values #dependent variables\n\n# Splitting the dataset into the Training set and Test set\nfrom sklearn.model_selection import train_test_split\nind_train, ind_test, dep_train, dep_test = train_test_split(ind, dep, test_size = 0.2, random_state = 0)\n\n# Feature Scaling ,we do not scale the dep variable as it gives only 1 or 0\nfrom sklearn.preprocessing import StandardScaler\nsc = StandardScaler()\nind_train = sc.fit_transform(ind_train) #we fit the data to training set and not the test set\nind_test = sc.transform(ind_test)\n\n# Training the Logistic Regression model on the Training set\nfrom sklearn.linear_model import LogisticRegression\nclassifier = LogisticRegression(random_state = 0)\nclassifier.fit(ind_train, dep_train) #we train the classifier\n\ndep_pred = classifier.predict(ind_test) #we predict the test set results\n\n# read about plotting of contours here \"https://matplotlib.org/3.1.1/gallery/images_contours_and_fields/contour_image.html#sphx-glr-gallery-images-contours-and-fields-contour-image-py\"\n# Plotting the Training set results\nfrom matplotlib.colors import ListedColormap\nx, y = ind_train, dep_train\nX, Y = np.meshgrid(np.arange(start = x[:, 0].min() - 0.5, stop = x[:, 0].max() + 0.5, step = 0.01),\n np.arange(start = x[:, 1].min() - 0.5, stop = x[:, 1].max() + 0.5, step = 0.01))\nplt.xlim(X.min(), X.max())\nplt.ylim(Y.min(), Y.max())\nplt.contourf(X, Y, classifier.predict(np.array([X.ravel(), Y.ravel()]).T).reshape(X.shape),\n alpha = 0.5, cmap = ListedColormap(('red', 'blue')))\n#plotting the data points\nun_y =np.unique(y)\nfor i, j in enumerate(un_y):\n plt.scatter(x[y == j, 0], x[y == j, 1],c = ListedColormap(('red', 'blue'))(i), label = j)\nplt.title('Training Set Results')\nplt.xlabel('Daily time spent on the site')\nplt.ylabel('Income')\nplt.legend()\nplt.show()\n\n# Plotting the Test set results\nx, y = ind_test, dep_test\nX, Y = np.meshgrid(np.arange(start = x[:, 0].min() - 0.5, stop = x[:, 0].max() + 0.5, step = 0.01),\n np.arange(start = x[:, 1].min() - 0.5, stop = x[:, 1].max() + 0.5, step = 0.01))\nplt.xlim(X.min(), X.max())\nplt.ylim(Y.min(), Y.max())\nplt.contourf(X, Y, classifier.predict(np.array([X.ravel(), Y.ravel()]).T).reshape(X.shape),\n alpha = 0.5, cmap = ListedColormap(('red', 'blue')))\n#plotting the data points\nun_y =np.unique(y)\nfor i, j in enumerate(un_y):\n plt.scatter(x[y == j, 0], x[y == j, 1],c = ListedColormap(('red', 'blue'))(i), label = j)\nplt.title('Test Set results')\nplt.xlabel('Daily time spent on the site')\nplt.ylabel('Income')\nplt.legend()\nplt.show()\n\n# Confusion Matrix(this matrix contains the amount of datapoints those are in correct region and those are in incorrect region)\nfrom sklearn.metrics import confusion_matrix\nprint(confusion_matrix(dep_test, dep_pred))\n" ]
[ [ "matplotlib.pyplot.legend", "matplotlib.colors.ListedColormap", "pandas.read_csv", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.title", "sklearn.metrics.confusion_matrix", "matplotlib.pyplot.show", "sklearn.model_selection.train_test_split", "matplotlib.pyplot.ylabel", "sklearn.linear_model.LogisticRegression", "sklearn.preprocessing.StandardScaler", "numpy.unique" ] ]
vmarcin/FIT-projects
[ "69e3e0f1f271aefd3135f92a681738a4f1a24395" ]
[ "sui/dicewars/ai/dt/wpm_c.py" ]
[ "import numpy\nimport logging\n\nfrom ..utils import probability_of_successful_attack, sigmoid\nfrom ..utils import possible_attacks\n\nfrom dicewars.client.ai_driver import BattleCommand, EndTurnCommand\n\n\nclass AI:\n \"\"\"Agent using Win Probability Maximization (WPM) using logarithms\n of player scores and dice\n\n This agent estimates win probability given the current state of the game.\n As a feature to describe the state, a vector of logarithms of players' dice\n and scores is used. The agent choses such moves, that will have the highest\n improvement in the estimated probability.\n \"\"\"\n def __init__(self, player_name, board, players_order):\n \"\"\"\n Parameters\n ----------\n game : Game\n\n Attributes\n ----------\n players_order : list of int\n Names of players in the order they are playing, with the agent being first\n weights : dict of numpy.array\n Weights for estimating win probability\n largest_region: list of int\n Names of areas in the largest region\n \"\"\"\n self.player_name = player_name\n self.logger = logging.getLogger('AI')\n self.players = board.nb_players_alive()\n self.largest_region = []\n\n self.players_order = players_order\n while self.player_name != self.players_order[0]:\n self.players_order.append(self.players_order.pop(0))\n\n self.weights = {\n 2: numpy.array([1.30214778, 2.25563871, -1.30214778, -2.25563871]),\n 3: numpy.array([1.03427841, 0.50262886, -0.78619448, -0.31264667,\n -0.74070513, -0.3344083]),\n 4: numpy.array([1.04279419, 0.25416893, -0.64830571, -0.15321224,\n -0.64217824, -0.11354054, -0.59113493, -0.19902261]),\n 5: numpy.array([0.88792394, 0.23898045, -0.50630318, -0.10684734,\n -0.48406202, -0.12877724, -0.48004353, -0.17429738,\n -0.51195613, -0.12572176]),\n 6: numpy.array([0.84452717, 0.20915755, -0.4275969, -0.12319906,\n -0.438397, -0.11476484, -0.44610219, -0.10640943,\n -0.42926595, -0.15994294, -0.40215393, -0.12508173]),\n 7: numpy.array([0.77043331, 0.22744643, -0.34448306, -0.16104125,\n -0.34304867, -0.16545059, -0.36316993, -0.14238659,\n -0.37359036, -0.13535348, -0.34917492, -0.13725688,\n -0.36908313, -0.11803061]),\n 8: numpy.array([0.71518557, 0.2580538, -0.3303392, -0.13374949,\n -0.3288953, -0.16076534, -0.31261043, -0.14316612,\n -0.31785557, -0.16003507, -0.31410674, -0.16487769,\n -0.33290964, -0.12624279, -0.33843017, -0.14888412]),\n }[self.players]\n numpy.warnings.filterwarnings('ignore')\n\n def ai_turn(self, board, nb_moves_this_turn, nb_turns_this_game, time_left):\n \"\"\"AI agent's turn\n\n This agent estimates probability to win the game from the feature vector associated\n with the outcome of the move and chooses such that has highest improvement in the\n probability.\n \"\"\"\n self.board = board\n self.logger.debug(\"Looking for possible turns.\")\n turns = self.possible_turns()\n if turns and turns[0][0] != 'end':\n turn = turns[0]\n area_name = turn[0]\n self.logger.debug(\"Possible turn: {}\".format(turn))\n atk_area = self.board.get_area(turn[0])\n atk_power = atk_area.get_dice()\n\n if turn[2] >= -0.05 or atk_power == 8:\n return BattleCommand(turn[0], turn[1])\n\n if turns and turns[0][0] == 'end':\n for i in range(1, len(turns)):\n area_name = turns[i][0]\n atk_area = self.board.get_area(area_name)\n atk_power = atk_area.get_dice()\n if atk_power == 8:\n return BattleCommand(area_name, turns[i][1])\n\n self.logger.debug(\"Don't want to attack anymore.\")\n return EndTurnCommand()\n\n def get_features(self, end_turn=False):\n \"\"\"Get features associated with a move\n\n Parameters\n 
----------\n end_turn : bool\n The move is ending the turn\n\n Returns\n -------\n list of int\n \"\"\"\n features = []\n for p in self.players_order:\n score = numpy.log(self.get_score_by_player(p) + 1)\n if end_turn and p == self.player_name:\n dice = numpy.log(self.board.get_player_dice(p) + self.get_score_by_player(p) + 1)\n else:\n dice = numpy.log(self.board.get_player_dice(p) + 1)\n features.append(score)\n features.append(dice)\n return features\n\n def possible_turns(self):\n \"\"\"Get list of possible turns with the associated improvement\n in estimated win probability. The list is sorted in descending order\n with respect to the improvement.\n \"\"\"\n turns = []\n name = self.player_name\n\n features = self.get_features()\n wp_start = numpy.log(sigmoid(numpy.dot(numpy.array(features), self.weights)))\n\n end_features = self.get_features(end_turn=True)\n wp_end = numpy.log(sigmoid(numpy.dot(numpy.array(end_features), self.weights)))\n improvement = wp_end - wp_start\n\n turns.append(['end', 0, improvement])\n\n for source, target in possible_attacks(self.board, self.player_name):\n area_name = source.get_name()\n atk_power = source.get_dice()\n def_power = target.get_dice()\n opponent_name = target.get_owner_name()\n # check whether the attack would expand the largest region\n increase_score = False\n if area_name in self.largest_region:\n increase_score = True\n else:\n for n in target.get_adjacent_areas():\n if n in self.largest_region:\n increase_score = True\n break\n\n a_dice = self.board.get_player_dice(name)\n a_score = self.get_score_by_player(name)\n if increase_score:\n a_score += 1\n\n atk_dice = {\n \"current\": a_dice,\n \"win\": a_dice + a_score,\n \"loss\": a_dice + a_score - atk_power + 1,\n }\n\n d_dice = self.board.get_player_dice(opponent_name)\n def_dice = {\n \"loss\": d_dice,\n \"win\": d_dice - def_power,\n }\n\n atk_prob = probability_of_successful_attack(self.board, area_name, target.get_name())\n opponent_idx = self.players_order.index(opponent_name) * 2 + 1\n win_features = [d for d in features]\n win_features[1] = numpy.log(atk_dice[\"win\"] + 1)\n win_features[opponent_idx] = numpy.log(def_dice[\"win\"] + 1)\n\n loss_features = [d for d in features]\n loss_features[1] = numpy.log(atk_dice[\"loss\"] + 1)\n loss_features[opponent_idx] = numpy.log(def_dice[\"loss\"] + 1)\n\n wp_win = sigmoid(numpy.dot(numpy.array(win_features), self.weights))\n wp_loss = sigmoid(numpy.dot(numpy.array(loss_features), self.weights))\n\n wp_win = sigmoid(numpy.dot(numpy.array(win_features), self.weights))\n wp_loss = sigmoid(numpy.dot(numpy.array(loss_features), self.weights))\n total_prob = (wp_win * atk_prob) + (wp_loss * (1.0 - atk_prob))\n wp_atk = numpy.log(total_prob)\n\n improvement = wp_atk - wp_start\n turns.append([area_name, target.get_name(), improvement])\n\n return sorted(turns, key=lambda turn: turn[2], reverse=True)\n\n def get_score_by_player(self, player_name, skip_area=None):\n \"\"\"Get score of a player\n\n Parameters\n ----------\n player_name : int\n skip_area : int\n Name of an area to be excluded from the calculation\n\n Returns\n -------\n int\n score of the player\n \"\"\"\n players_regions = self.board.get_players_regions(self.player_name, skip_area=skip_area)\n max_region_size = max(len(region) for region in players_regions)\n\n return max_region_size\n\n def get_largest_region(self):\n \"\"\"Get size of the largest region, including the areas within\n\n Attributes\n ----------\n largest_region : list of int\n Names of areas in the largest 
region\n\n Returns\n -------\n int\n Number of areas in the largest region\n \"\"\"\n self.largest_region = []\n\n players_regions = self.board.get_players_regions(self.player_name)\n max_region_size = max(len(region) for region in players_regions)\n max_sized_regions = [region for region in players_regions if len(region) == max_region_size]\n\n for region in max_sized_regions:\n for area in region:\n self.largest_region.append(area)\n return max_region_size\n" ]
[ [ "numpy.log", "numpy.warnings.filterwarnings", "numpy.array" ] ]
XYHC-MMDA/Multi-modal-Multi-task-DA
[ "ed8297eb489d50c580795713cccb72bc958f406f" ]
[ "nuscenes/eval/common/loaders.py" ]
[ "# nuScenes dev-kit.\n# Code written by Oscar Beijbom, 2019.\n\nimport json\nfrom typing import Dict, Tuple\n\nimport numpy as np\nimport tqdm\nfrom pyquaternion import Quaternion\n\nfrom nuscenes import NuScenes\nfrom nuscenes.eval.common.data_classes import EvalBoxes\nfrom nuscenes.eval.detection.data_classes import DetectionBox\nfrom nuscenes.eval.detection.utils import category_to_detection_name\nfrom nuscenes.eval.tracking.data_classes import TrackingBox\nfrom nuscenes.eval.tracking.utils import category_to_tracking_name\nfrom nuscenes.utils.data_classes import Box\nfrom nuscenes.utils.geometry_utils import points_in_box\nfrom nuscenes.utils.splits import create_splits_scenes\n\n\ndef load_prediction(result_path: str, max_boxes_per_sample: int, box_cls, verbose: bool = False) \\\n -> Tuple[EvalBoxes, Dict]:\n \"\"\"\n Loads object predictions from file.\n :param result_path: Path to the .json result file provided by the user.\n :param max_boxes_per_sample: Maximim number of boxes allowed per sample.\n :param box_cls: Type of box to load, e.g. DetectionBox or TrackingBox.\n :param verbose: Whether to print messages to stdout.\n :return: The deserialized results and meta data.\n \"\"\"\n\n # Load from file and check that the format is correct.\n with open(result_path) as f:\n data = json.load(f)\n assert 'results' in data, 'Error: No field `results` in result file. Please note that the result format changed.' \\\n 'See https://www.nuscenes.org/object-detection for more information.'\n\n # Deserialize results and get meta data.\n all_results = EvalBoxes.deserialize(data['results'], box_cls)\n meta = data['meta']\n if verbose:\n print(\"Loaded results from {}. Found detections for {} samples.\"\n .format(result_path, len(all_results.sample_tokens)))\n\n # Check that each sample has no more than x predicted boxes.\n for sample_token in all_results.sample_tokens:\n assert len(all_results.boxes[sample_token]) <= max_boxes_per_sample, \\\n \"Error: Only <= %d boxes per sample allowed!\" % max_boxes_per_sample\n\n return all_results, meta\n\n\ndef load_merge_from_pkl(nusc: NuScenes, pkl_path: str, box_cls, verbose: bool = False) -> EvalBoxes:\n # Init.\n if box_cls == DetectionBox:\n attribute_map = {a['token']: a['name'] for a in nusc.attribute}\n\n if verbose:\n print('Loading annotations for {} split from nuScenes version: {}'.format(pkl_path, nusc.version))\n\n import mmcv\n infos = mmcv.load(pkl_path)['infos']\n samples = []\n for info in infos:\n samples.append(nusc.get('sample', info['token']))\n all_annotations = EvalBoxes()\n\n # Load annotations and filter predictions and annotations.\n merge_map = dict(car='vehicle',\n truck='vehicle',\n bus='vehicle',\n trailer='vehicle',\n construction_vehicle='vehicle',\n pedestrian='pedestrian',\n motorcycle='bike',\n bicycle='bike',\n traffic_cone='traffic_boundary',\n barrier='traffic_boundary')\n for sample in tqdm.tqdm(samples, leave=verbose):\n sample_token = sample['token']\n cam_token = sample['data']['CAM_FRONT']\n _, boxes_cam, _ = nusc.get_sample_data(cam_token)\n sample_annotation_tokens = [box.token for box in boxes_cam]\n\n # sample = nusc.get('sample', sample_token)\n # sample_annotation_tokens = sample['anns']\n\n sample_boxes = []\n for sample_annotation_token in sample_annotation_tokens:\n\n sample_annotation = nusc.get('sample_annotation', sample_annotation_token)\n if box_cls == DetectionBox:\n # Get label name in detection task and filter unused labels.\n detection_name = 
category_to_detection_name(sample_annotation['category_name'])\n if detection_name is None:\n continue\n detection_name = merge_map[detection_name]\n\n # Get attribute_name.\n attr_tokens = sample_annotation['attribute_tokens']\n attr_count = len(attr_tokens)\n if attr_count == 0:\n attribute_name = ''\n elif attr_count == 1:\n attribute_name = attribute_map[attr_tokens[0]]\n else:\n raise Exception('Error: GT annotations must not have more than one attribute!')\n\n sample_boxes.append(\n box_cls(\n sample_token=sample_token,\n translation=sample_annotation['translation'],\n size=sample_annotation['size'],\n rotation=sample_annotation['rotation'],\n velocity=nusc.box_velocity(sample_annotation['token'])[:2],\n num_pts=sample_annotation['num_lidar_pts'] + sample_annotation['num_radar_pts'],\n detection_name=detection_name,\n detection_score=-1.0, # GT samples do not have a score.\n attribute_name=attribute_name\n )\n )\n else:\n raise NotImplementedError('Error: Invalid box_cls %s!' % box_cls)\n\n all_annotations.add_boxes(sample_token, sample_boxes)\n\n if verbose:\n print(\"Loaded ground truth annotations for {} samples.\".format(len(all_annotations.sample_tokens)))\n\n return all_annotations\n\n\ndef load_pkl_front_cam(nusc: NuScenes, pkl_path: str, box_cls, verbose: bool = False) -> EvalBoxes:\n # Init.\n if box_cls == DetectionBox:\n attribute_map = {a['token']: a['name'] for a in nusc.attribute}\n\n if verbose:\n print('Loading annotations for {} split from nuScenes version: {}'.format(pkl_path, nusc.version))\n\n import mmcv\n infos = mmcv.load(pkl_path)['infos']\n samples = []\n for info in infos:\n samples.append(nusc.get('sample', info['token']))\n\n all_annotations = EvalBoxes()\n\n # Load annotations and filter predictions and annotations.\n for sample in tqdm.tqdm(samples, leave=verbose):\n sample_token = sample['token']\n cam_token = sample['data']['CAM_FRONT']\n _, boxes_cam, _ = nusc.get_sample_data(cam_token)\n sample_annotation_tokens = [box.token for box in boxes_cam]\n\n # sample = nusc.get('sample', sample_token)\n # sample_annotation_tokens = sample['anns']\n\n sample_boxes = []\n for sample_annotation_token in sample_annotation_tokens:\n\n sample_annotation = nusc.get('sample_annotation', sample_annotation_token)\n if box_cls == DetectionBox:\n # Get label name in detection task and filter unused labels.\n detection_name = category_to_detection_name(sample_annotation['category_name'])\n if detection_name is None:\n continue\n\n # Get attribute_name.\n attr_tokens = sample_annotation['attribute_tokens']\n attr_count = len(attr_tokens)\n if attr_count == 0:\n attribute_name = ''\n elif attr_count == 1:\n attribute_name = attribute_map[attr_tokens[0]]\n else:\n raise Exception('Error: GT annotations must not have more than one attribute!')\n\n sample_boxes.append(\n box_cls(\n sample_token=sample_token,\n translation=sample_annotation['translation'],\n size=sample_annotation['size'],\n rotation=sample_annotation['rotation'],\n velocity=nusc.box_velocity(sample_annotation['token'])[:2],\n num_pts=sample_annotation['num_lidar_pts'] + sample_annotation['num_radar_pts'],\n detection_name=detection_name,\n detection_score=-1.0, # GT samples do not have a score.\n attribute_name=attribute_name\n )\n )\n else:\n raise NotImplementedError('Error: Invalid box_cls %s!' 
% box_cls)\n\n all_annotations.add_boxes(sample_token, sample_boxes)\n\n if verbose:\n print(\"Loaded ground truth annotations for {} samples.\".format(len(all_annotations.sample_tokens)))\n\n return all_annotations\n\n\ndef load_gt_front_cam(nusc: NuScenes, eval_split: str, box_cls, verbose: bool = False) -> EvalBoxes:\n # Init.\n if box_cls == DetectionBox:\n attribute_map = {a['token']: a['name'] for a in nusc.attribute}\n\n if verbose:\n print('Loading annotations for {} split from nuScenes version: {}'.format(eval_split, nusc.version))\n\n # Only keep samples from this split.\n splits = create_splits_scenes()\n\n # Check compatibility of split with nusc_version.\n version = nusc.version\n if eval_split in {'train', 'val', 'train_detect', 'train_track'}:\n assert version.endswith('trainval'), \\\n 'Error: Requested split {} which is not compatible with NuScenes version {}'.format(eval_split, version)\n elif eval_split in {'mini_train', 'mini_val'}:\n assert version.endswith('mini'), \\\n 'Error: Requested split {} which is not compatible with NuScenes version {}'.format(eval_split, version)\n elif eval_split == 'test':\n assert version.endswith('test'), \\\n 'Error: Requested split {} which is not compatible with NuScenes version {}'.format(eval_split, version)\n else:\n raise ValueError('Error: Requested split {} which this function cannot map to the correct NuScenes version.'\n .format(eval_split))\n\n if eval_split == 'test':\n # Check that you aren't trying to cheat :).\n assert len(nusc.sample_annotation) > 0, \\\n 'Error: You are trying to evaluate on the test set but you do not have the annotations!'\n\n samples = []\n for sample in nusc.sample:\n scene_record = nusc.get('scene', sample['scene_token'])\n if scene_record['name'] in splits[eval_split]:\n samples.append(sample)\n\n all_annotations = EvalBoxes()\n\n # Load annotations and filter predictions and annotations.\n tracking_id_set = set()\n for sample in tqdm.tqdm(samples, leave=verbose):\n sample_token = sample['token']\n cam_token = sample['data']['CAM_FRONT']\n _, boxes_cam, _ = nusc.get_sample_data(cam_token)\n sample_annotation_tokens = [box.token for box in boxes_cam]\n\n # sample = nusc.get('sample', sample_token)\n # sample_annotation_tokens = sample['anns']\n\n sample_boxes = []\n for sample_annotation_token in sample_annotation_tokens:\n\n sample_annotation = nusc.get('sample_annotation', sample_annotation_token)\n if box_cls == DetectionBox:\n # Get label name in detection task and filter unused labels.\n detection_name = category_to_detection_name(sample_annotation['category_name'])\n if detection_name is None:\n continue\n\n # Get attribute_name.\n attr_tokens = sample_annotation['attribute_tokens']\n attr_count = len(attr_tokens)\n if attr_count == 0:\n attribute_name = ''\n elif attr_count == 1:\n attribute_name = attribute_map[attr_tokens[0]]\n else:\n raise Exception('Error: GT annotations must not have more than one attribute!')\n\n sample_boxes.append(\n box_cls(\n sample_token=sample_token,\n translation=sample_annotation['translation'],\n size=sample_annotation['size'],\n rotation=sample_annotation['rotation'],\n velocity=nusc.box_velocity(sample_annotation['token'])[:2],\n num_pts=sample_annotation['num_lidar_pts'] + sample_annotation['num_radar_pts'],\n detection_name=detection_name,\n detection_score=-1.0, # GT samples do not have a score.\n attribute_name=attribute_name\n )\n )\n elif box_cls == TrackingBox:\n # Use nuScenes token as tracking id.\n tracking_id = 
sample_annotation['instance_token']\n tracking_id_set.add(tracking_id)\n\n # Get label name in detection task and filter unused labels.\n tracking_name = category_to_tracking_name(sample_annotation['category_name'])\n if tracking_name is None:\n continue\n\n sample_boxes.append(\n box_cls(\n sample_token=sample_token,\n translation=sample_annotation['translation'],\n size=sample_annotation['size'],\n rotation=sample_annotation['rotation'],\n velocity=nusc.box_velocity(sample_annotation['token'])[:2],\n num_pts=sample_annotation['num_lidar_pts'] + sample_annotation['num_radar_pts'],\n tracking_id=tracking_id,\n tracking_name=tracking_name,\n tracking_score=-1.0 # GT samples do not have a score.\n )\n )\n else:\n raise NotImplementedError('Error: Invalid box_cls %s!' % box_cls)\n\n all_annotations.add_boxes(sample_token, sample_boxes)\n\n if verbose:\n print(\"Loaded ground truth annotations for {} samples.\".format(len(all_annotations.sample_tokens)))\n\n return all_annotations\n\n\ndef load_gt(nusc: NuScenes, eval_split: str, box_cls, verbose: bool = False) -> EvalBoxes:\n \"\"\"\n Loads ground truth boxes from DB.\n :param nusc: A NuScenes instance.\n :param eval_split: The evaluation split for which we load GT boxes.\n :param box_cls: Type of box to load, e.g. DetectionBox or TrackingBox.\n :param verbose: Whether to print messages to stdout.\n :return: The GT boxes.\n \"\"\"\n # Init.\n if box_cls == DetectionBox:\n attribute_map = {a['token']: a['name'] for a in nusc.attribute}\n\n if verbose:\n print('Loading annotations for {} split from nuScenes version: {}'.format(eval_split, nusc.version))\n # Read out all sample_tokens in DB.\n sample_tokens_all = [s['token'] for s in nusc.sample]\n assert len(sample_tokens_all) > 0, \"Error: Database has no samples!\"\n\n # Only keep samples from this split.\n splits = create_splits_scenes()\n\n # Check compatibility of split with nusc_version.\n version = nusc.version\n if eval_split in {'train', 'val', 'train_detect', 'train_track'}:\n assert version.endswith('trainval'), \\\n 'Error: Requested split {} which is not compatible with NuScenes version {}'.format(eval_split, version)\n elif eval_split in {'mini_train', 'mini_val'}:\n assert version.endswith('mini'), \\\n 'Error: Requested split {} which is not compatible with NuScenes version {}'.format(eval_split, version)\n elif eval_split == 'test':\n assert version.endswith('test'), \\\n 'Error: Requested split {} which is not compatible with NuScenes version {}'.format(eval_split, version)\n else:\n raise ValueError('Error: Requested split {} which this function cannot map to the correct NuScenes version.'\n .format(eval_split))\n\n if eval_split == 'test':\n # Check that you aren't trying to cheat :).\n assert len(nusc.sample_annotation) > 0, \\\n 'Error: You are trying to evaluate on the test set but you do not have the annotations!'\n\n sample_tokens = []\n for sample_token in sample_tokens_all:\n scene_token = nusc.get('sample', sample_token)['scene_token']\n scene_record = nusc.get('scene', scene_token)\n if scene_record['name'] in splits[eval_split]:\n sample_tokens.append(sample_token)\n\n all_annotations = EvalBoxes()\n\n # Load annotations and filter predictions and annotations.\n tracking_id_set = set()\n for sample_token in tqdm.tqdm(sample_tokens, leave=verbose):\n\n sample = nusc.get('sample', sample_token)\n sample_annotation_tokens = sample['anns']\n\n sample_boxes = []\n for sample_annotation_token in sample_annotation_tokens:\n\n sample_annotation = 
nusc.get('sample_annotation', sample_annotation_token)\n if box_cls == DetectionBox:\n # Get label name in detection task and filter unused labels.\n detection_name = category_to_detection_name(sample_annotation['category_name'])\n if detection_name is None:\n continue\n\n # Get attribute_name.\n attr_tokens = sample_annotation['attribute_tokens']\n attr_count = len(attr_tokens)\n if attr_count == 0:\n attribute_name = ''\n elif attr_count == 1:\n attribute_name = attribute_map[attr_tokens[0]]\n else:\n raise Exception('Error: GT annotations must not have more than one attribute!')\n\n sample_boxes.append(\n box_cls(\n sample_token=sample_token,\n translation=sample_annotation['translation'],\n size=sample_annotation['size'],\n rotation=sample_annotation['rotation'],\n velocity=nusc.box_velocity(sample_annotation['token'])[:2],\n num_pts=sample_annotation['num_lidar_pts'] + sample_annotation['num_radar_pts'],\n detection_name=detection_name,\n detection_score=-1.0, # GT samples do not have a score.\n attribute_name=attribute_name\n )\n )\n elif box_cls == TrackingBox:\n # Use nuScenes token as tracking id.\n tracking_id = sample_annotation['instance_token']\n tracking_id_set.add(tracking_id)\n\n # Get label name in detection task and filter unused labels.\n tracking_name = category_to_tracking_name(sample_annotation['category_name'])\n if tracking_name is None:\n continue\n\n sample_boxes.append(\n box_cls(\n sample_token=sample_token,\n translation=sample_annotation['translation'],\n size=sample_annotation['size'],\n rotation=sample_annotation['rotation'],\n velocity=nusc.box_velocity(sample_annotation['token'])[:2],\n num_pts=sample_annotation['num_lidar_pts'] + sample_annotation['num_radar_pts'],\n tracking_id=tracking_id,\n tracking_name=tracking_name,\n tracking_score=-1.0 # GT samples do not have a score.\n )\n )\n else:\n raise NotImplementedError('Error: Invalid box_cls %s!' 
% box_cls)\n\n all_annotations.add_boxes(sample_token, sample_boxes)\n\n if verbose:\n print(\"Loaded ground truth annotations for {} samples.\".format(len(all_annotations.sample_tokens)))\n\n return all_annotations\n\n\ndef add_center_dist(nusc: NuScenes,\n eval_boxes: EvalBoxes):\n \"\"\"\n Adds the cylindrical (xy) center distance from ego vehicle to each box.\n :param nusc: The NuScenes instance.\n :param eval_boxes: A set of boxes, either GT or predictions.\n :return: eval_boxes augmented with center distances.\n \"\"\"\n for sample_token in eval_boxes.sample_tokens:\n sample_rec = nusc.get('sample', sample_token)\n sd_record = nusc.get('sample_data', sample_rec['data']['LIDAR_TOP'])\n pose_record = nusc.get('ego_pose', sd_record['ego_pose_token'])\n cs_record = nusc.get('calibrated_sensor', sd_record['calibrated_sensor_token'])\n lidar2ego_rotation = cs_record['rotation']\n lidar2ego_translation = cs_record['translation']\n ego2global_rotation = pose_record['rotation']\n ego2global_translation = pose_record['translation']\n\n for box in eval_boxes[sample_token]:\n # Both boxes and ego pose are given in global coord system, so distance can be calculated directly.\n # Note that the z component of the ego pose is 0.\n center_ego = np.array(box.translation) - np.array(ego2global_translation)\n center_ego_tmp = np.dot(Quaternion(ego2global_rotation).inverse.rotation_matrix, center_ego)\n center_lidar_tmp = center_ego_tmp - np.array(lidar2ego_translation)\n center_lidar = np.dot(Quaternion(lidar2ego_rotation).inverse.rotation_matrix, center_lidar_tmp)\n \n if isinstance(box, DetectionBox) or isinstance(box, TrackingBox):\n box.ego_translation = tuple(center_ego)\n box.lidar_translation = tuple(center_lidar) \n else:\n raise NotImplementedError\n\n return eval_boxes\n\n\ndef filter_eval_boxes(nusc: NuScenes,\n eval_boxes: EvalBoxes,\n max_dist: Dict[str, float],\n verbose: bool = False) -> EvalBoxes:\n \"\"\"\n Applies filtering to boxes. Distance, bike-racks and points per box.\n :param nusc: An instance of the NuScenes class.\n :param eval_boxes: An instance of the EvalBoxes class.\n :param max_dist: Maps the detection name to the eval distance threshold for that class.\n :param verbose: Whether to print to stdout.\n \"\"\"\n # Retrieve box type for detectipn/tracking boxes.\n class_field = _get_box_class_field(eval_boxes)\n\n # Accumulators for number of filtered boxes.\n total, dist_filter, point_filter, bike_rack_filter = 0, 0, 0, 0\n for ind, sample_token in enumerate(eval_boxes.sample_tokens):\n\n # Filter on distance first.\n total += len(eval_boxes[sample_token])\n eval_boxes.boxes[sample_token] = [box for box in eval_boxes[sample_token] if\n box.ego_dist < max_dist[box.__getattribute__(class_field)]]\n dist_filter += len(eval_boxes[sample_token])\n\n # Then remove boxes with zero points in them. 
Eval boxes have -1 points by default.\n eval_boxes.boxes[sample_token] = [box for box in eval_boxes[sample_token] if not box.num_pts == 0]\n point_filter += len(eval_boxes[sample_token])\n\n # Perform bike-rack filtering.\n sample_anns = nusc.get('sample', sample_token)['anns']\n bikerack_recs = [nusc.get('sample_annotation', ann) for ann in sample_anns if\n nusc.get('sample_annotation', ann)['category_name'] == 'static_object.bicycle_rack']\n bikerack_boxes = [Box(rec['translation'], rec['size'], Quaternion(rec['rotation'])) for rec in bikerack_recs]\n filtered_boxes = []\n for box in eval_boxes[sample_token]:\n if box.__getattribute__(class_field) in ['bicycle', 'motorcycle']:\n in_a_bikerack = False\n for bikerack_box in bikerack_boxes:\n if np.sum(points_in_box(bikerack_box, np.expand_dims(np.array(box.translation), axis=1))) > 0:\n in_a_bikerack = True\n if not in_a_bikerack:\n filtered_boxes.append(box)\n else:\n filtered_boxes.append(box)\n\n eval_boxes.boxes[sample_token] = filtered_boxes\n bike_rack_filter += len(eval_boxes.boxes[sample_token])\n\n if verbose:\n print(\"=> Original number of boxes: %d\" % total)\n print(\"=> After distance based filtering: %d\" % dist_filter)\n print(\"=> After LIDAR points based filtering: %d\" % point_filter)\n print(\"=> After bike rack filtering: %d\" % bike_rack_filter)\n\n return eval_boxes\n\n\ndef filter_half_boxes(nusc: NuScenes,\n eval_boxes: EvalBoxes,\n max_dist: Dict[str, float],\n verbose: bool = False) -> EvalBoxes:\n \"\"\"\n Applies filtering to boxes. Distance, bike-racks and points per box.\n :param nusc: An instance of the NuScenes class.\n :param eval_boxes: An instance of the EvalBoxes class.\n :param max_dist: Maps the detection name to the eval distance threshold for that class.\n :param verbose: Whether to print to stdout.\n \"\"\"\n # Retrieve box type for detectipn/tracking boxes.\n class_field = _get_box_class_field(eval_boxes)\n\n # Accumulators for number of filtered boxes.\n total, dist_filter, point_filter, bike_rack_filter = 0, 0, 0, 0\n for ind, sample_token in enumerate(eval_boxes.sample_tokens):\n\n # Filter on distance first.\n total += len(eval_boxes[sample_token])\n eval_boxes.boxes[sample_token] = [box for box in eval_boxes[sample_token] if\n box.lidar_translation[1] > 0 and\n box.ego_dist < max_dist[box.__getattribute__(class_field)]]\n dist_filter += len(eval_boxes[sample_token])\n\n # Then remove boxes with zero points in them. 
Eval boxes have -1 points by default.\n eval_boxes.boxes[sample_token] = [box for box in eval_boxes[sample_token] if not box.num_pts == 0]\n point_filter += len(eval_boxes[sample_token])\n\n # Perform bike-rack filtering.\n sample_anns = nusc.get('sample', sample_token)['anns']\n bikerack_recs = [nusc.get('sample_annotation', ann) for ann in sample_anns if\n nusc.get('sample_annotation', ann)['category_name'] == 'static_object.bicycle_rack']\n bikerack_boxes = [Box(rec['translation'], rec['size'], Quaternion(rec['rotation'])) for rec in bikerack_recs]\n filtered_boxes = []\n for box in eval_boxes[sample_token]:\n if box.__getattribute__(class_field) in ['bicycle', 'motorcycle']:\n in_a_bikerack = False\n for bikerack_box in bikerack_boxes:\n if np.sum(points_in_box(bikerack_box, np.expand_dims(np.array(box.translation), axis=1))) > 0:\n in_a_bikerack = True\n if not in_a_bikerack:\n filtered_boxes.append(box)\n else:\n filtered_boxes.append(box)\n\n eval_boxes.boxes[sample_token] = filtered_boxes\n bike_rack_filter += len(eval_boxes.boxes[sample_token])\n\n if verbose:\n print(\"=> Original number of boxes: %d\" % total)\n print(\"=> After distance based filtering: %d\" % dist_filter)\n print(\"=> After LIDAR points based filtering: %d\" % point_filter)\n print(\"=> After bike rack filtering: %d\" % bike_rack_filter)\n\n return eval_boxes\n\n\ndef _get_box_class_field(eval_boxes: EvalBoxes) -> str:\n \"\"\"\n Retrieve the name of the class field in the boxes.\n This parses through all boxes until it finds a valid box.\n If there are no valid boxes, this function throws an exception.\n :param eval_boxes: The EvalBoxes used for evaluation.\n :return: The name of the class field in the boxes, e.g. detection_name or tracking_name.\n \"\"\"\n assert len(eval_boxes.boxes) > 0\n box = None\n for val in eval_boxes.boxes.values():\n if len(val) > 0:\n box = val[0]\n break\n if isinstance(box, DetectionBox):\n class_field = 'detection_name'\n elif isinstance(box, TrackingBox):\n class_field = 'tracking_name'\n else:\n raise Exception('Error: Invalid box type: %s' % box)\n\n return class_field\n" ]
[ [ "numpy.array" ] ]
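The filtering code in the record above (filter_eval_boxes / filter_half_boxes) applies the same staged pattern: drop boxes beyond a per-class distance threshold, drop boxes known to contain zero LIDAR points, then drop bicycle/motorcycle boxes that sit inside a bicycle-rack annotation. The sketch below illustrates only the first two stages with a plain dataclass; ToyBox, filter_toy_boxes and the max_dist values are hypothetical stand-ins, not the nuScenes devkit API, and the bike-rack stage is omitted because it needs the map annotations.

from dataclasses import dataclass
from typing import Dict, List

@dataclass
class ToyBox:                      # stand-in for DetectionBox (illustrative only)
    detection_name: str
    ego_dist: float                # distance from the ego vehicle in meters
    num_pts: int                   # LIDAR points inside the box (-1 = unknown)

def filter_toy_boxes(boxes: List[ToyBox], max_dist: Dict[str, float]) -> List[ToyBox]:
    # Stage 1: distance-based filtering with a per-class threshold.
    boxes = [b for b in boxes if b.ego_dist < max_dist[b.detection_name]]
    # Stage 2: remove boxes known to contain zero LIDAR points
    # (num_pts == -1 means "unknown" and is kept, matching the code above).
    boxes = [b for b in boxes if b.num_pts != 0]
    return boxes

if __name__ == "__main__":
    example = [ToyBox("car", 30.0, 12), ToyBox("car", 80.0, 5), ToyBox("bicycle", 10.0, 0)]
    print(filter_toy_boxes(example, {"car": 50.0, "bicycle": 40.0}))  # keeps only the first box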
wesleygas/urban-piriquito
[ "a07dacb8ecf1da40e15d7085ef4502a87e6894bf" ]
[ "interface.py" ]
[ "try:\n # Python2\n import Tkinter as tk\nexcept ImportError:\n # Python3\n import tkinter\n# needs Python25 or higher\n\nfrom functools import partial\nfrom signalTeste import *\nimport matplotlib.pyplot as plt\n#matplotlib.use('TkAgg')\n\nnum ='12'\nsig = signalMeu()\ndef numero():\n global num, sig\n def on_key_press(event):\n global num, sig\n num = repr(event.char)\n num = num[1:-1]\n print(int(num))\n tempo, sinal = sig.geraNum(int(num))\n sig.playSig(sinal)\n plt.close(\"all\")\n plt.plot(tempo[0:500],sinal[0:500])\n plt.show(block=False)\n \n \n def click(btn):\n global num, sig\n # test the button command click\n if(btn == \"exit\"):\n boot.quit()\n else:\n print(int(btn))\n tempo, sinal = sig.geraNum(int(btn), duration=2)\n sig.playSig(sinal)\n plt.close(\"all\")\n plt.plot(tempo[0:500],sinal[0:500])\n #tempo, sinal = sig.calcFFT(sinal, 48000)\n #plt.plot(tempo,sinal)\n plt.show(block=False)\n \n \n \n num = btn\n #return num\n\n boot = tkinter.Tk()\n\n boot['bg'] = 'green'\n # create a labeled frame for the keypad buttons\n # relief='groove' and labelanchor='nw' are default\n lf = tkinter.LabelFrame(boot, bd=8)\n lf.pack(padx=15, pady=15)\n # typical calculator button layout\n btn_list = [\n '1', '2', '3',\n '4', '5', '6',\n '7', '8', '9',\n '','0', 'exit']\n # create and position all buttons with a for-loop\n # r, c used for row, column grid values\n r = 1\n c = 0\n n = 0\n # list(range()) needed for Python3\n btn = list(range(len(btn_list)))\n for label in btn_list:\n # partial takes care of function and argument\n cmd = partial(click, label)\n # create the button\n btn[n] = tkinter.Button(lf, text=label, width=10, height=5, command=cmd)\n # position the button\n btn[n].grid(row=r, column=c)\n # increment button index\n n += 1\n # update row/column position\n c += 1\n if c == 3:\n c = 0\n r += 1\n\n frame = tkinter.Frame(boot, width=100, height=100)\n frame.bind(\"<KeyRelease-1>\", on_key_press)\n frame.bind(\"<KeyRelease-2>\", on_key_press)\n frame.bind(\"<KeyRelease-3>\", on_key_press)\n frame.bind(\"<KeyRelease-4>\", on_key_press)\n frame.bind(\"<KeyRelease-5>\", on_key_press)\n frame.bind(\"<KeyRelease-6>\", on_key_press)\n frame.bind(\"<KeyRelease-7>\", on_key_press)\n frame.bind(\"<KeyRelease-8>\", on_key_press)\n frame.bind(\"<KeyRelease-9>\", on_key_press)\n frame.bind(\"<KeyRelease-0>\", on_key_press)\n frame.pack()\n frame.focus_set()\n\n\n\n tk = boot\n tk.resizable(width=False, height=False)\n tk.mainloop()\n\n\n\n\nif __name__ == '__main__':\n numero()" ]
[ [ "matplotlib.pyplot.plot", "matplotlib.pyplot.show", "matplotlib.pyplot.close" ] ]
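The keypad GUI above calls sig.geraNum(digit) from signalTeste, which is not included in the record. Assuming it synthesizes a DTMF-style two-tone signal (the commented calcFFT call hints at a 48 kHz sample rate), a minimal stand-alone sketch could look like the following; the gera_num name, the frequency table and the sample rate are assumptions, not the module's actual implementation.

import numpy as np

# Standard DTMF (low, high) frequency pairs in Hz; mapping digits to these pairs is an
# assumption about what signalTeste.geraNum might do, since that module is not shown.
DTMF = {"1": (697, 1209), "2": (697, 1336), "3": (697, 1477),
        "4": (770, 1209), "5": (770, 1336), "6": (770, 1477),
        "7": (852, 1209), "8": (852, 1336), "9": (852, 1477),
        "0": (941, 1336)}

def gera_num(digit: str, duration: float = 1.0, fs: int = 48_000):
    """Return (t, signal) for a two-tone keypad digit."""
    f_low, f_high = DTMF[digit]
    t = np.linspace(0.0, duration, int(fs * duration), endpoint=False)
    signal = np.sin(2 * np.pi * f_low * t) + np.sin(2 * np.pi * f_high * t)
    return t, signal / np.abs(signal).max()   # normalize to [-1, 1]

t, s = gera_num("5", duration=2.0)
print(t[:3], s[:3])

The returned pair could then be plotted exactly as the GUI does with plt.plot(tempo[0:500], sinal[0:500]).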
fatiando-bot/verde
[ "5d008b0866c1792183ccd02e7ddc0e917f7ed6e1" ]
[ "tutorials/model_evaluation.py" ]
[ "\"\"\"\n.. _model_evaluation:\n\nEvaluating Performance\n======================\n\nThe Green's functions based interpolations in Verde are all linear regressions under the\nhood. This means that we can use some of the same tactics from\n:mod:`sklearn.model_selection` to evaluate our interpolator's performance. Once we have\na quantified measure of the quality of a given fitted gridder, we can use it to tune the\ngridder's parameters, like ``damping`` for a :class:`~verde.Spline` (see\n:ref:`model_selection`).\n\nVerde provides adaptations of common scikit-learn tools to work better with spatial\ndata. Let's use these tools to evaluate the performance of a :class:`~verde.Spline` on\nour sample air temperature data.\n\"\"\"\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport cartopy.crs as ccrs\nimport pyproj\nimport verde as vd\n\ndata = vd.datasets.fetch_texas_wind()\n\n# Use Mercator projection because Spline is a Cartesian gridder\nprojection = pyproj.Proj(proj=\"merc\", lat_ts=data.latitude.mean())\nproj_coords = projection(data.longitude.values, data.latitude.values)\n\nregion = vd.get_region((data.longitude, data.latitude))\n# For this data, we'll generate a grid with 15 arc-minute spacing\nspacing = 15 / 60\n\n########################################################################################\n# Splitting the data\n# ------------------\n#\n# We can't evaluate a gridder on the data that went into fitting it. The true test of a\n# model is if it can correctly predict data that it hasn't seen before. scikit-learn has\n# the :func:`sklearn.model_selection.train_test_split` function to separate a dataset\n# into two parts: one for fitting the model (called *training* data) and a separate one\n# for evaluating the model (called *testing* data). Using it with spatial data would\n# involve some tedious array conversions so Verde implements\n# :func:`verde.train_test_split` which does the same thing but takes coordinates and\n# data arrays instead.\n#\n# The split is done randomly so we specify a seed for the random number generator to\n# guarantee that we'll get the same result every time we run this example. You probably\n# don't want to do that for real data. We'll keep 30% of the data to use for testing\n# (``test_size=0.3``).\n\ntrain, test = vd.train_test_split(\n proj_coords, data.air_temperature_c, test_size=0.3, random_state=0\n)\n\n########################################################################################\n# The returned ``train`` and ``test`` variables are tuples containing coordinates, data,\n# and (optionally) weights arrays. 
Since we're not using weights, the third element of\n# the tuple will be ``None``:\nprint(train)\n\n\n########################################################################################\n#\nprint(test)\n\n########################################################################################\n# Let's plot these two datasets with different colors:\n\nplt.figure(figsize=(8, 6))\nax = plt.axes()\nax.set_title(\"Air temperature measurements for Texas\")\nax.plot(train[0][0], train[0][1], \".r\", label=\"train\")\nax.plot(test[0][0], test[0][1], \".b\", label=\"test\")\nax.legend()\nax.set_aspect(\"equal\")\nplt.tight_layout()\nplt.show()\n\n########################################################################################\n# We can pass the training dataset to the :meth:`~verde.base.BaseGridder.fit` method of\n# most gridders using Python's argument expansion using the ``*`` symbol.\n\nspline = vd.Spline()\nspline.fit(*train)\n\n########################################################################################\n# Let's plot the gridded result to see what it looks like. First, we'll create a\n# geographic grid:\ngrid = spline.grid(\n region=region,\n spacing=spacing,\n projection=projection,\n dims=[\"latitude\", \"longitude\"],\n data_names=[\"temperature\"],\n)\nprint(grid)\n\n########################################################################################\n# Then, we'll mask out grid points that are too far from any given data point and plot\n# the grid:\nmask = vd.distance_mask(\n (data.longitude, data.latitude),\n maxdist=3 * spacing * 111e3,\n coordinates=vd.grid_coordinates(region, spacing=spacing),\n projection=projection,\n)\ngrid = grid.where(mask)\n\nplt.figure(figsize=(8, 6))\nax = plt.axes(projection=ccrs.Mercator())\nax.set_title(\"Gridded temperature\")\npc = grid.temperature.plot.pcolormesh(\n ax=ax,\n cmap=\"plasma\",\n transform=ccrs.PlateCarree(),\n add_colorbar=False,\n add_labels=False,\n)\nplt.colorbar(pc).set_label(\"C\")\nax.plot(data.longitude, data.latitude, \".k\", markersize=1, transform=ccrs.PlateCarree())\nvd.datasets.setup_texas_wind_map(ax)\nplt.tight_layout()\nplt.show()\n\n########################################################################################\n# Scoring\n# --------\n#\n# Gridders in Verde implement the :meth:`~verde.base.BaseGridder.score` method that\n# calculates the `R² coefficient of determination\n# <https://en.wikipedia.org/wiki/Coefficient_of_determination>`__\n# for a given comparison dataset (``test`` in our case). The R² score is at most 1,\n# meaning a perfect prediction, but has no lower bound.\n\nscore = spline.score(*test)\nprint(\"R² score:\", score)\n\n########################################################################################\n# That's a good score meaning that our gridder is able to accurately predict data that\n# wasn't used in the gridding algorithm.\n#\n# .. caution::\n#\n# Once caveat for this score is that it is highly dependent on the particular split\n# that we made. 
Changing the random number generator seed in\n# :func:`verde.train_test_split` will result in a different score.\n\n# Use 1 as a seed instead of 0\ntrain_other, test_other = vd.train_test_split(\n    proj_coords, data.air_temperature_c, test_size=0.3, random_state=1\n)\n\nprint(\"R² score with seed 1:\", vd.Spline().fit(*train_other).score(*test_other))\n\n########################################################################################\n# Cross-validation\n# ----------------\n#\n# A more robust way of scoring the gridders is to use the function\n# :func:`verde.cross_val_score`, which (by default) uses a `k-fold cross-validation\n# <https://en.wikipedia.org/wiki/Cross-validation_(statistics)#k-fold_cross-validation>`__.\n# It will split the data *k* times and return the score on each *fold*. We\n# can then take a mean of these scores.\n\nscores = vd.cross_val_score(vd.Spline(), proj_coords, data.air_temperature_c)\nprint(\"k-fold scores:\", scores)\nprint(\"Mean score:\", np.mean(scores))\n\n########################################################################################\n# You can also use most cross-validation splitter classes from\n# :mod:`sklearn.model_selection` by specifying the ``cv`` argument. For example, if we\n# want to shuffle then split the data *n* times\n# (:class:`sklearn.model_selection.ShuffleSplit`):\n\nfrom sklearn.model_selection import ShuffleSplit\n\nshuffle = ShuffleSplit(n_splits=10, test_size=0.3, random_state=0)\n\nscores = vd.cross_val_score(\n    vd.Spline(), proj_coords, data.air_temperature_c, cv=shuffle\n)\nprint(\"shuffle scores:\", scores)\nprint(\"Mean score:\", np.mean(scores))\n\n########################################################################################\n# **That is not a very good score** so clearly the default arguments for\n# :class:`~verde.Spline` aren't suitable for this dataset. We could try different\n# combinations manually until we get a good score. A better way is to do this\n# automatically. In :ref:`model_selection` we'll go over how to do that.\n" ]
[ [ "sklearn.model_selection.ShuffleSplit", "matplotlib.pyplot.figure", "matplotlib.pyplot.tight_layout", "matplotlib.pyplot.axes", "matplotlib.pyplot.show", "matplotlib.pyplot.colorbar", "numpy.mean" ] ]
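The verde tutorial in this record depends on the fetched Texas wind dataset, so it cannot run in isolation. The sketch below reproduces the same evaluation workflow (hold-out R², default k-fold cross-validation, then ShuffleSplit) with scikit-learn on synthetic coordinates; Ridge is only a stand-in for vd.Spline and the data is made up, so the scores mean nothing beyond illustrating the API pattern.

import numpy as np
from sklearn.model_selection import train_test_split, cross_val_score, ShuffleSplit
from sklearn.linear_model import Ridge

# Synthetic stand-in for the Texas air-temperature data used in the tutorial above.
rng = np.random.default_rng(0)
X = rng.uniform(0, 100, size=(300, 2))           # "projected coordinates"
y = 0.05 * X[:, 0] - 0.03 * X[:, 1] + rng.normal(0, 0.5, 300)

X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=0)
# Ridge stands in for the tutorial's vd.Spline(); scores here are illustrative only.
model = Ridge(alpha=1.0).fit(X_train, y_train)
print("single-split R²:", model.score(X_test, y_test))

# k-fold (default) and shuffled cross-validation, mirroring the verde.cross_val_score usage.
print("5-fold R²:", cross_val_score(Ridge(alpha=1.0), X, y).mean())
shuffle = ShuffleSplit(n_splits=10, test_size=0.3, random_state=0)
print("shuffle R²:", cross_val_score(Ridge(alpha=1.0), X, y, cv=shuffle).mean())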
liangfok/oh-distro
[ "eeee1d832164adce667e56667dafc64a8d7b8cee" ]
[ "software/config/terrain/drc_rehearsal_F.py" ]
[ "import numpy as np\n\nblockName = 'cinderblock'\nblockSize = np.array([15 + 5/8.0, 15 + 3/8.0, 5 + 5/8.0]) * 0.0254 # meters\nblockTiltAngle = 15 # degrees\n\n\n# F=sloping up forward (+x), B=sloping up backward (-x),\n# R=sloping up rightward (-y), L=sloping up leftward (+y)\n# last row is closest to robot (robot is on bottom looking up)\n# column order is left-to-right on robot (+y to -y)\nblockTypes = [\n [ 'N', 'N', 'N', 'N' ],\n [ 'F', 'R', 'B', 'L' ],\n [ 'R', 'B', 'L', 'F' ],\n [ 'N', 'N', 'N', 'N' ],\n [ 'L', 'F', 'R', 'B' ],\n [ 'F', 'R', 'B', 'L' ]\n]\nblockTypes.reverse()\n\n# 0=ground level, 1=one cinderblock offset, etc\nblockLevels = [\n [ -0.9, -0.9, -0.9, -0.9 ],\n [ 0, 0, 0, 0 ],\n [ 0, 0, 0, 0 ],\n [ 1, 1, 1, 1 ],\n [ 0, 0, 0, 0 ],\n [ 0, 0, 0, 0 ]\n]\nblockLevels.reverse()\n\n# map between block types and (pitch,yaw) angles (degrees)\nblockAngleMap = { 'F': (15,180), 'B': (15,0), 'R': (15,90), 'L': (15,270), 'N': (0,0) }\n\n# TODO: this is just an example\n# which foot, block (row,col), offset (x,y), support\n# (row,col) refer to which block\n# (x,y) are offsets wrt the block center, in meters\n# support is an enum indicating foot support type\n# 0=heel-toe, 1=midfoot-toe, 2=heel-midfoot\nfootstepData = [\n [ 'right', (0,1), (-0.05, 0.08), 0 ],\n [ 'left', (0,0), (0.14, -0.11), 2 ],\n [ 'right', (1,1), (-0.02, 0.12), 0 ],\n [ 'left', (1,0), (0.04, -0.07), 0 ],\n [ 'right', (2,1), (-0.05, 0.11), 0 ],\n [ 'left', (2,0), (0.0, -0.08), 0 ],\n [ 'right', (2,1), (0.06, 0.11), 0 ],\n [ 'left', (3,0), (-0.03, -0.11), 0 ],\n [ 'right', (3,1), (0.03, 0.10), 0 ],\n [ 'left', (4,0), (-0.02, -0.10), 0 ],\n [ 'right', (4,1), (0.14, 0.07), 2 ],\n [ 'left', (5,0), (-0.02, -0.12), 0 ],\n [ 'right', (5,1), (0.05, 0.07), 0 ],\n]\n\nnumSteps = [7, -1]\n\n# where to stand, relative to front of first block\nstartingPosition = np.array([-0.39, 0.4, 0])\nstartingYaw = 0 # degrees\n\n# whether to lock lateral footstep offset\nforceZeroLateralFootstepOffset = False\n\nblockColor = [0.4, 0.6, 0.4]\nblockColorMatched = [0.5, 0.8, 0.5]\n" ]
[ [ "numpy.array" ] ]
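The terrain config above encodes each cinderblock's tilt through blockAngleMap and its height through blockLevels. A consumer of the config might expand those tables into per-block (pitch, yaw, z) poses roughly as sketched below; using level times block height for the vertical offset is an assumption about the downstream footstep planner, and only the first two rows are copied to keep the example short.

import numpy as np

block_height = (5 + 5 / 8.0) * 0.0254      # meters, last component of blockSize above
block_angle_map = {'F': (15, 180), 'B': (15, 0), 'R': (15, 90), 'L': (15, 270), 'N': (0, 0)}

block_types = [['N', 'N', 'N', 'N'],
               ['F', 'R', 'B', 'L']]
block_levels = [[-0.9, -0.9, -0.9, -0.9],
                [0, 0, 0, 0]]

# How a consumer of this config might expand it: one (pitch_deg, yaw_deg, z_m) per block.
# Using level * block_height for the z offset is an assumption, not taken from the file.
poses = [[(*block_angle_map[t], lvl * block_height)
          for t, lvl in zip(type_row, level_row)]
         for type_row, level_row in zip(block_types, block_levels)]
print(np.array(poses).shape)   # (2, 4, 3)
print(poses[1][0])             # 'F' block at level 0 -> (15, 180, 0.0)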
andrewsris/preProcessing
[ "70ec54e3f254faacea737f4cbb36a1294ce59417" ]
[ "stainNorm_Macenko.py" ]
[ "\"\"\"\nStain normalization based on the method of:\n\nM. Macenko et al., ‘A method for normalizing histology slides for quantitative analysis’, in 2009 IEEE International Symposium on Biomedical Imaging: From Nano to Macro, 2009, pp. 1107–1110.\n\nUses the spams package:\n\nhttp://spams-devel.gforge.inria.fr/index.html\n\nUse with python via e.g https://anaconda.org/conda-forge/python-spams\n\"\"\"\n\nfrom __future__ import division\n\nimport numpy as np\nimport stain_utils as ut\n\n\ndef get_stain_matrix(I, beta=0.15, alpha=1):\n \"\"\"\n Get stain matrix (2x3)\n :param I:\n :param beta:\n :param alpha:\n :return:\n \"\"\"\n OD = ut.RGB_to_OD(I).reshape((-1, 3))\n OD = (OD[(OD > beta).any(axis=1), :])\n _, V = np.linalg.eigh(np.cov(OD, rowvar=False))\n V = V[:, [2, 1]]\n if V[0, 0] < 0: V[:, 0] *= -1\n if V[0, 1] < 0: V[:, 1] *= -1\n That = np.dot(OD, V)\n phi = np.arctan2(That[:, 1], That[:, 0])\n minPhi = np.percentile(phi, alpha)\n maxPhi = np.percentile(phi, 100 - alpha)\n v1 = np.dot(V, np.array([np.cos(minPhi), np.sin(minPhi)]))\n v2 = np.dot(V, np.array([np.cos(maxPhi), np.sin(maxPhi)]))\n if v1[0] > v2[0]:\n HE = np.array([v1, v2])\n else:\n HE = np.array([v2, v1])\n return ut.normalize_rows(HE)\n\n\n###\n\nclass Normalizer(object):\n \"\"\"\n A stain normalization object\n \"\"\"\n\n def __init__(self):\n self.stain_matrix_target = None\n self.target_concentrations = None\n\n def fit(self, target):\n target = ut.standardize_brightness(target)\n self.stain_matrix_target = get_stain_matrix(target)\n self.target_concentrations = ut.get_concentrations(target, self.stain_matrix_target)\n\n def target_stains(self):\n return ut.OD_to_RGB(self.stain_matrix_target)\n\n def transform(self, I):\n I = ut.standardize_brightness(I)\n stain_matrix_source = get_stain_matrix(I)\n source_concentrations = ut.get_concentrations(I, stain_matrix_source)\n maxC_source = np.percentile(source_concentrations, 99, axis=0).reshape((1, 2))\n maxC_target = np.percentile(self.target_concentrations, 99, axis=0).reshape((1, 2))\n source_concentrations *= (maxC_target / maxC_source)\n return (255 * np.exp(-1 * np.dot(source_concentrations, self.stain_matrix_target).reshape(I.shape))).astype(\n np.uint8)\n\n def hematoxylin(self, I):\n I = ut.standardize_brightness(I)\n h, w, c = I.shape\n stain_matrix_source = get_stain_matrix(I)\n source_concentrations = ut.get_concentrations(I, stain_matrix_source)\n H = source_concentrations[:, 0].reshape(h, w)\n H = np.exp(-1 * H)\n return H\n" ]
[ [ "numpy.arctan2", "numpy.cos", "numpy.exp", "numpy.array", "numpy.sin", "numpy.dot", "numpy.percentile", "numpy.cov" ] ]
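get_stain_matrix in the record above leans on helpers from stain_utils (RGB_to_OD, normalize_rows, get_concentrations) that are not shown. A minimal sketch of the first two, under the common optical-density convention OD = -log10((I + 1) / 256), is given below; the exact constants and the log base are assumptions, since stain_utils may define them differently.

import numpy as np

def rgb_to_od(I):
    """Optical density of an RGB uint8 image; the +1 and /256 constants are a common
    convention and an assumption here, since stain_utils.RGB_to_OD may differ."""
    I = I.astype(np.float64)
    return -np.log10((I + 1.0) / 256.0)

def normalize_rows(A):
    """Scale each row of A to unit Euclidean norm, as ut.normalize_rows is used above."""
    return A / np.linalg.norm(A, axis=1, keepdims=True)

# Tiny smoke test on random "tissue" pixels.
rng = np.random.default_rng(0)
patch = rng.integers(0, 256, size=(4, 4, 3), dtype=np.uint8)
od = rgb_to_od(patch).reshape(-1, 3)
print(od.shape, normalize_rows(np.array([[3.0, 4.0, 0.0]])))  # -> (16, 3) [[0.6 0.8 0. ]]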
fjczx/OpenPCDet-lazurite
[ "e3f17ab17b2c295e1786e34c6feb86adffe84b49" ]
[ "lazurite/h5_merge.py" ]
[ "# -*- coding: utf-8 -*-\r\n# @Time : 4/3/2022 6:28 PM\r\n# @Author : Lazurite\r\n# @Email : [email protected]\r\n# @File : h5_create.py\r\n# @Software: PyCharm\r\nimport os\r\nimport tqdm\r\nimport h5py\r\nimport numpy as np\r\n\r\nh5_paths = [\"../data/nuscenes/v1.0-trainval/samples.h5\", \"../data/nuscenes/v1.0-trainval/sweeps.h5\"]\r\nh5_files = [h5py.File(path, \"r\") for path in h5_paths]\r\n\r\nh5_merge = h5py.File(\"../data/nuscenes/v1.0-trainval/samples_sweeps.h5\", \"w\")\r\nn_bin_data = h5_files[0][\"samples_data\"].shape[0] + h5_files[1][\"sweeps_data\"].shape[0]\r\n\r\nprint(\"Creating h5 file...\")\r\nprint(\"Number of bins:\", n_bin_data)\r\n\r\n\r\nnp_dt = h5py.special_dtype(vlen=np.dtype('float32'))\r\ndset = h5_merge.create_dataset(\"data\", shape=(n_bin_data, ), dtype=np_dt)\r\nstr_dt = h5py.special_dtype(vlen=str)\r\nname_map = h5_merge.create_dataset(\"name\", (n_bin_data), dtype=str_dt)\r\n\r\n\r\npbar = tqdm.tqdm(total=n_bin_data)\r\nlen_samples = h5_files[0][\"samples_data\"].shape[0]\r\nfor i in range(len_samples):\r\n dset[i] = h5_files[0][\"samples_data\"][i]\r\n name_map[i] = h5_files[0][\"samples_name\"][i]\r\n pbar.update(1)\r\nfor i in range(len_samples, n_bin_data):\r\n dset[i] = h5_files[1][\"sweeps_data\"][i - len_samples]\r\n name_map[i] = h5_files[1][\"sweeps_name\"][i - len_samples]\r\n pbar.update(1)\r\nh5_merge.close()\r\nprint(\"Done!\")\r\n" ]
[ [ "numpy.dtype" ] ]
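The merge script above copies two variable-length HDF5 datasets element by element into one ragged dataset. The sketch below shows the same pattern on toy in-memory files (h5py's core driver with backing_store=False), so the nuScenes-specific paths and the samples_data / sweeps_data names are not needed; the toy contents are invented.

import h5py
import numpy as np

vlen_f32 = h5py.vlen_dtype(np.dtype("float32"))   # same idea as special_dtype(vlen=...)

# Two in-memory source files standing in for samples.h5 / sweeps.h5 (toy data only).
src_a = h5py.File("a.h5", "w", driver="core", backing_store=False)
src_b = h5py.File("b.h5", "w", driver="core", backing_store=False)
src_a.create_dataset("data", (2,), dtype=vlen_f32)
src_b.create_dataset("data", (3,), dtype=vlen_f32)
for i in range(2):
    src_a["data"][i] = np.arange(i + 1, dtype="float32")
for i in range(3):
    src_b["data"][i] = np.full(2, i, dtype="float32")

# Merge: allocate one ragged dataset and copy both sources sequentially.
merged = h5py.File("merged.h5", "w", driver="core", backing_store=False)
n_total = src_a["data"].shape[0] + src_b["data"].shape[0]
dset = merged.create_dataset("data", (n_total,), dtype=vlen_f32)
for i in range(src_a["data"].shape[0]):
    dset[i] = src_a["data"][i]
offset = src_a["data"].shape[0]
for i in range(src_b["data"].shape[0]):
    dset[offset + i] = src_b["data"][i]
print([len(x) for x in dset])   # -> [1, 2, 2, 2, 2]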
saroad2/knapsack_solver
[ "7247e464019a1afcaea0b7e5e76bdc729f4b0a51" ]
[ "src/knapsack_solver/__main__.py" ]
[ "from pathlib import Path\n\nimport click\nimport numpy as np\n\nfrom knapsack_solver.knapsack_problem import KnapsackProblem\nfrom knapsack_solver.plots_util import save_plots\nfrom knapsack_solver.weight import Weight\n\n\[email protected]()\[email protected](\"output-directory\", type=click.Path(file_okay=False))\[email protected](\"--mean-mass\", type=float, default=5.0)\[email protected](\"--mass-std\", type=float, default=1.0)\[email protected](\"--mean-value\", type=float, default=5.0)\[email protected](\"--value-std\", type=float, default=1.0)\[email protected](\"--number-of-weights\", type=int, default=100)\[email protected](\"--generation-size\", type=int, default=50)\[email protected](\"--max-mass\", type=float, default=25)\[email protected](\"--max-iterations\", type=int, default=50)\[email protected](\"--mutation-rate\", type=float, default=0.1)\[email protected](\"--crossover-rate\", type=float, default=0.3)\ndef knapsack_solver_cli(\n output_directory,\n mean_mass,\n mass_std,\n mean_value,\n value_std,\n number_of_weights,\n max_mass,\n max_iterations,\n generation_size,\n mutation_rate,\n crossover_rate,\n):\n output_directory = Path(output_directory)\n output_directory.mkdir(parents=True, exist_ok=True)\n weights = [\n Weight.random(\n identification=i,\n mean_mass=mean_mass,\n mass_std=mass_std,\n mean_value=mean_value,\n value_std=value_std,\n )\n for i in range(1, number_of_weights + 1)\n ]\n problem = KnapsackProblem(\n weights=weights,\n max_mass=max_mass,\n generation_size=generation_size,\n mutation_rate=mutation_rate,\n crossover_rate=crossover_rate,\n )\n history = []\n generation = problem.create_generation()\n history.append(generation)\n with click.progressbar(np.arange(1, max_iterations + 1), show_pos=True) as bar:\n for i in bar:\n generation = problem.create_next_generation(generation, identification=i)\n history.append(generation)\n bar.label = f\"Best sack value: {generation.max_value():.2f}\"\n\n best_sack = generation.best_sack()\n click.echo(f\"Best sack: {best_sack}\")\n for weight in best_sack:\n click.echo(f\"\\t{weight}\")\n\n save_plots(\n output_directory=output_directory, weights=problem.weights, history=history\n )\n\n\nif __name__ == \"__main__\":\n knapsack_solver_cli()\n" ]
[ [ "numpy.arange" ] ]
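KnapsackProblem, Weight.random and the generation loop's internals are not shown in this record, so the sketch below is not the package's genetic algorithm. It only builds the same kind of random (mass, value) population from the CLI's default parameters and solves the mass-capped selection with a greedy value-per-mass heuristic, which can serve as a rough sanity check against the GA's best sack value.

import numpy as np

rng = np.random.default_rng(0)
masses = rng.normal(5.0, 1.0, size=100).clip(min=0.1)   # mirrors --mean-mass / --mass-std
values = rng.normal(5.0, 1.0, size=100).clip(min=0.0)   # mirrors --mean-value / --value-std
max_mass = 25.0

# Greedy value-per-mass baseline (NOT the package's genetic algorithm, just a reference point).
order = np.argsort(values / masses)[::-1]
chosen, total_mass, total_value = [], 0.0, 0.0
for i in order:
    if total_mass + masses[i] <= max_mass:
        chosen.append(int(i))
        total_mass += masses[i]
        total_value += values[i]
print(f"{len(chosen)} weights, mass={total_mass:.2f}, value={total_value:.2f}")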
kateyose/nrn-7.6.7
[ "603da174f660370abb425917cc5c64c3db03dcec" ]
[ "share/lib/python/neuron/crxd/rxd.py" ]
[ "from neuron import h, nrn, nrn_dll_sym \nfrom . import species, node, section1d, region\nfrom .nodelist import NodeList\nimport weakref\nimport numpy\nimport ctypes\nimport atexit\nfrom . import options\nfrom .rxdException import RxDException\nfrom . import initializer \nimport collections\nimport os\nfrom distutils import sysconfig\nimport uuid\nimport sys\nimport itertools\nfrom numpy.ctypeslib import ndpointer\nimport re\nimport platform\n# aliases to avoid repeatedly doing multiple hash-table lookups\n_numpy_array = numpy.array\n_numpy_zeros = numpy.zeros\n_species_get_all_species = species._get_all_species\n_node_get_states = node._get_states\n_section1d_transfer_to_legacy = section1d._transfer_to_legacy\n_ctypes_c_int = ctypes.c_int\n_weakref_ref = weakref.ref\n\n_external_solver = None\n_external_solver_initialized = False\n_windows_dll_files = []\n_windows_dll = []\n\n\n\nmake_time_ptr = nrn_dll_sym('make_time_ptr')\nmake_time_ptr.argtypes = [ctypes.py_object, ctypes.py_object]\nmake_time_ptr(h._ref_dt, h._ref_t)\n\n_double_ptr = ctypes.POINTER(ctypes.c_double)\n_int_ptr = ctypes.POINTER(_ctypes_c_int)\n_long_ptr = ctypes.POINTER(ctypes.c_long)\n\n\nfptr_prototype = ctypes.CFUNCTYPE(None)\nset_nonvint_block = nrn_dll_sym('set_nonvint_block')\nset_nonvint_block(nrn_dll_sym('rxd_nonvint_block'))\n\nset_setup = nrn_dll_sym('set_setup')\nset_setup.argtypes = [fptr_prototype]\nset_initialize = nrn_dll_sym('set_initialize')\nset_initialize.argtypes = [fptr_prototype]\n\nscatter_concentrations = nrn_dll_sym('scatter_concentrations')\n\n# Transfer extracellular concentrations to NEURON\n_fih_transfer_ecs = h.FInitializeHandler(1, scatter_concentrations)\n\n\nrxd_set_no_diffusion = nrn_dll_sym('rxd_set_no_diffusion')\n\nsetup_solver = nrn_dll_sym('setup_solver')\nsetup_solver.argtypes = [ndpointer(ctypes.c_double), ctypes.c_int, numpy.ctypeslib.ndpointer(numpy.int_, flags='contiguous'), ctypes.c_int, ctypes.py_object, ctypes.py_object]\n\n#states = None\n_set_num_threads = nrn_dll_sym('set_num_threads')\n_set_num_threads.argtypes = [ctypes.c_int]\n_get_num_threads = nrn_dll_sym('get_num_threads')\n_get_num_threads.restype = ctypes.c_int\n\n\nclear_rates = nrn_dll_sym('clear_rates')\nregister_rate = nrn_dll_sym('register_rate')\nregister_rate.argtypes = [ \n ctypes.c_int, #num species\n ctypes.c_int, #num regions\n ctypes.c_int, #num seg\n numpy.ctypeslib.ndpointer(ctypes.c_int, flags='contiguous'), #species ids\n ctypes.c_int, numpy.ctypeslib.ndpointer(ctypes.c_int, flags='contiguous'), #num ecs species\n numpy.ctypeslib.ndpointer(ctypes.c_int, flags='contiguous'), #ecs species ids\n ctypes.c_int, #num multicompartment reactions\n numpy.ctypeslib.ndpointer(ctypes.c_double, flags='contiguous'), #multicompartment multipliers\n ] #Reaction rate function\n\nsetup_currents = nrn_dll_sym('setup_currents')\nsetup_currents.argtypes = [\n ctypes.c_int, #number of membrane currents\n ctypes.c_int, #number induced currents\n ctypes.c_int, #number of nodes with membrane currents\n _int_ptr, #number of species involved in each membrane current\n _int_ptr, #charges of the species involved in each membrane current\n _int_ptr, #node indices\n _int_ptr, #node indices\n _double_ptr, #scaling (areas) of the fluxes\n _int_ptr, #charges for each species in each reation\n ctypes.POINTER(ctypes.py_object), #hoc pointers\n _int_ptr, #maps for membrane fluxes\n _int_ptr #maps for ecs fluxes\n]\n \n\nset_reaction_indices = nrn_dll_sym('set_reaction_indices')\nset_reaction_indices.argtypes = [ctypes.c_int, 
_int_ptr, _int_ptr, _int_ptr, \n _int_ptr,_int_ptr,_double_ptr, ctypes.c_int, _int_ptr, _int_ptr, _int_ptr,\n _int_ptr]\n\necs_register_reaction = nrn_dll_sym('ecs_register_reaction')\necs_register_reaction.argtype = [ctypes.c_int, ctypes.c_int, _int_ptr, fptr_prototype]\n\nset_euler_matrix = nrn_dll_sym('rxd_set_euler_matrix')\nset_euler_matrix.argtypes = [\n ctypes.c_int,\n ctypes.c_int,\n _long_ptr,\n _long_ptr,\n _double_ptr,\n numpy.ctypeslib.ndpointer(numpy.int_, flags='contiguous'),\n ctypes.c_int,\n numpy.ctypeslib.ndpointer(numpy.double, flags='contiguous'),\n]\nrxd_setup_curr_ptrs = nrn_dll_sym('rxd_setup_curr_ptrs')\nrxd_setup_curr_ptrs.argtypes = [\n ctypes.c_int,\n _int_ptr,\n numpy.ctypeslib.ndpointer(numpy.double, flags='contiguous'),\n ctypes.POINTER(ctypes.py_object),\n]\n\nrxd_setup_conc_ptrs = nrn_dll_sym('rxd_setup_conc_ptrs')\nrxd_setup_conc_ptrs.argtypes = [\n ctypes.c_int,\n _int_ptr,\n ctypes.POINTER(ctypes.py_object)\n]\n\n_c_headers = \"\"\"#include <math.h>\n/*Some functions supported by numpy that aren't included in math.h\n * names and arguments match the wrappers used in rxdmath.py\n */\ndouble factorial(const double);\ndouble degrees(const double);\nvoid radians(const double, double*);\ndouble log1p(const double);\n\"\"\"\n\ndef _list_to_cint_array(data):\n if data is None or len(data) == 0:\n return None\n else:\n return (ctypes.c_int * len(data))(*tuple(data))\n\ndef _list_to_cdouble_array(data):\n if data is None or len(data) == 0:\n return None\n else:\n return (ctypes.c_double * len(data))(*tuple(data))\n\ndef _list_to_clong_array(data):\n if data is None or len(data) == 0:\n return None\n else:\n return (ctypes.c_long * len(data))(*tuple(data))\n\ndef _list_to_pyobject_array(data):\n if data is None or len(data) == 0:\n return None\n else:\n return (ctypes.py_object * len(data))(*tuple(data))\n\ndef byeworld():\n # needed to prevent a seg-fault error at shutdown in at least some\n # combinations of NEURON and Python, which I think is due to objects\n # getting deleted out-of-order\n global _react_matrix_solver\n try:\n del _react_matrix_solver\n except NameError:\n # # if it already didn't exist, that's fine\n pass\n _windows_remove_dlls()\n \natexit.register(byeworld)\n\n# Faraday's constant (store to reduce number of lookups)\nFARADAY = h.FARADAY\n\n# converting from mM um^3 to molecules\n# = 6.02214129e23 * 1000. 
/ 1.e18 / 1000\n# = avogadro * (L / m^3) * (m^3 / um^3) * (mM / M)\n# value for avogardro's constant from NIST webpage, accessed 25 April 2012:\n# http://physics.nist.gov/cgi-bin/cuu/Value?na\n_conversion_factor = 602214.129\n\n\n_cvode_object = h.CVode()\n\nlast_diam_change_cnt = None\nlast_structure_change_cnt = None\n\n_linmodadd_c = None\n_diffusion_matrix = None\n_curr_scales = None\n_curr_ptrs = None\n_curr_indices = None\n\n_all_reactions = []\n\n_zero_volume_indices = numpy.ndarray(0, dtype=numpy.int_)\n_nonzero_volume_indices = []\n\nnrn_tree_solve = nrn_dll_sym('nrn_tree_solve')\nnrn_tree_solve.restype = None\n\n_dptr = _double_ptr\n\n_dimensions = collections.defaultdict(lambda: 1)\n_default_dx = 0.25\n_default_method = 'deterministic'\n\n#CRxD\n_diffusion_d = None\n_diffusion_a = None\n_diffusion_b = None\n_diffusion_p = None\n_cur_node_indices = None\n_diffusion_a_ptr, _diffusion_b_ptr, _diffusion_p_ptr = None, None, None\n\ndef set_solve_type(domain=None, dimension=None, dx=None, nsubseg=None, method=None):\n \"\"\"Specify the numerical discretization and solver options.\n \n domain -- a section or Python iterable of sections\"\"\"\n setting_default = False\n if domain is None:\n domain = h.allsec()\n setting_default = True\n elif isinstance(domain, nrn.Section):\n domain = [domain]\n \n # NOTE: These attributes are set on a per-nrn.Section basis; they cannot \n # assume Section1D objects exist because they might be specified before\n # those objects are created\n \n # domain is now always an iterable (or invalid)\n if method is not None:\n raise RxDException('using set_solve_type to specify method is not yet implemented')\n if dimension is not None:\n if dimension not in (1, 3):\n raise RxDException('invalid option to set_solve_type: dimension must be 1 or 3')\n factory = lambda: dimension\n if setting_default:\n _dimensions.default_factory = factory\n for sec in domain:\n _dimensions[sec] = dimension \n if dx is not None:\n raise RxDException('using set_solve_type to specify dx is not yet implemented')\n if nsubseg is not None:\n raise RxDException('using set_solve_type to specify nsubseg is not yet implemented')\n \n\ndef _unregister_reaction(r):\n global _all_reactions\n for i, r2 in enumerate(_all_reactions):\n if r2() == r:\n del _all_reactions[i]\n break\n\ndef _register_reaction(r):\n # TODO: should we search to make sure that (a weakref to) r hasn't already been added?\n global _all_reactions, _external_solver_initialized\n _all_reactions.append(_weakref_ref(r))\n _external_solver_initialized = False\n \ndef _after_advance():\n global last_diam_change_cnt\n last_diam_change_cnt = _diam_change_count.value\n \ndef re_init():\n \"\"\"reinitializes all rxd concentrations to match HOC values, updates matrices\"\"\"\n global _external_solver_initialized\n h.define_shape()\n \n if not species._has_3d:\n # TODO: if we do have 3D, make sure that we do the necessary parts of this\n \n # update current pointers\n section1d._purge_cptrs()\n for sr in list(_species_get_all_species().values()):\n s = sr()\n if s is not None:\n s._register_cptrs()\n \n # update matrix equations\n _setup_matrices()\n for sr in list(_species_get_all_species().values()):\n s = sr()\n if s is not None: s.re_init()\n # TODO: is this safe? 
\n _cvode_object.re_init()\n\n _external_solver_initialized = False\n \ndef _invalidate_matrices():\n # TODO: make a separate variable for this?\n global _diffusion_matrix, _external_solver_initialized, last_structure_change_cnt\n _diffusion_matrix = None\n last_structure_change_cnt = None\n _external_solver_initialized = False\n\n_rxd_offset = None\n\ndef _atolscale(y):\n real_index_lookup = {item: index for index, item in enumerate(_nonzero_volume_indices)}\n for sr in list(_species_get_all_species().values()):\n s = sr()\n if s is not None:\n shifted_i = [real_index_lookup[i] + _rxd_offset for i in s.indices() if i in real_index_lookup]\n y[shifted_i] *= s._atolscale\n\ndef _ode_count(offset):\n global _rxd_offset, last_structure_change_cnt, _structure_change_count\n initializer._do_init()\n _rxd_offset = offset - len(_nonzero_volume_indices)\n if _diffusion_matrix is None or last_structure_change_cnt != _structure_change_count.value: _setup_matrices()\n last_structure_change_cnt = _structure_change_count.value\n return len(_nonzero_volume_indices)\n\ndef _ode_reinit(y):\n y[_rxd_offset : _rxd_offset + len(_nonzero_volume_indices)] = _node_get_states()[_nonzero_volume_indices]\n\ndef _ode_fun(t, y, ydot):\n initializer.assert_initialized()\n lo = _rxd_offset\n hi = lo + len(_nonzero_volume_indices)\n if lo == hi: return\n states = _node_get_states().copy()\n states[_nonzero_volume_indices] = y[lo : hi]\n\n # need to fill in the zero volume states with the correct concentration\n # this assumes that states at the zero volume indices is zero (although that\n # assumption could be easily removed)\n #matrix = _scipy_sparse_dok_matrix((len(_zero_volume_indices), len(states)))\n \"\"\"\n for i, row in enumerate(_zero_volume_indices):\n d = _diffusion_matrix[row, row]\n if d:\n nzj = _diffusion_matrix[row].nonzero()[1]\n print 'nzj:', nzj\n for j in nzj:\n matrix[i, j] = -_diffusion_matrix[row, j] / d\n states[_zero_volume_indices] = matrix * states\n \"\"\"\n if len(_zero_volume_indices):\n states[_zero_volume_indices] = _mat_for_zero_volume_nodes * states\n \"\"\"\n for i in _zero_volume_indices:\n v = _diffusion_matrix[i] * states\n d = _diffusion_matrix[i, i]\n if d:\n states[i] = -v / d\n \"\"\"\n # TODO: make this so that the section1d parts use cptrs (can't do this directly for 3D because sum, but could maybe move that into the C)\n # the old way: _section1d_transfer_to_legacy()\n# for sr in _species_get_all_species().values():\n# s = sr()\n# if s is not None: s._transfer_to_legacy()\n\n \n if ydot is not None:\n # diffusion_matrix = - jacobian \n ydot[lo : hi] = (_rxd_reaction(states) - _diffusion_matrix * states)[_nonzero_volume_indices]\n \n states[_zero_volume_indices] = 0\n\n_rxd_induced_currents = None\n_memb_cur_ptrs= []\ndef _setup_memb_currents():\n global _memb_cur_ptrs\n initializer._do_init()\n # setup membrane fluxes from our stuff\n # TODO: cache the memb_cur_ptrs, memb_cur_charges, memb_net_charges, memb_cur_mapped\n # because won't change very often\n # need this; think it's because of initialization of mod files\n if _curr_indices is None: return\n SPECIES_ABSENT = -1\n # TODO: change so that this is only called when there are in fact currents\n rxd_memb_scales = []\n _memb_cur_ptrs = []\n memb_cur_charges = []\n memb_net_charges = []\n memb_cur_mapped = []\n memb_cur_mapped_ecs = []\n for rptr in _all_reactions:\n r = rptr()\n if r and r._membrane_flux:\n scales = r._memb_scales\n rxd_memb_scales.extend(scales)\n _memb_cur_ptrs += r._cur_ptrs\n memb_cur_mapped += 
r._cur_mapped\n memb_cur_mapped_ecs += r._cur_mapped_ecs\n memb_cur_charges += [r._cur_charges] * len(scales)\n memb_net_charges += [r._net_charges] * len(scales)\n ecs_map = [SPECIES_ABSENT if i is None else i for i in list(itertools.chain.from_iterable(itertools.chain.from_iterable(memb_cur_mapped_ecs)))]\n ics_map = [SPECIES_ABSENT if i is None else i for i in list(itertools.chain.from_iterable(itertools.chain.from_iterable(memb_cur_mapped)))]\n if _memb_cur_ptrs:\n cur_counts = [len(x) for x in memb_cur_mapped]\n num_currents = numpy.array(cur_counts).sum()\n setup_currents(len(_memb_cur_ptrs),\n num_currents,\n len(_curr_indices), # num_currents == len(_curr_indices) if no Extracellular\n _list_to_cint_array(cur_counts),\n _list_to_cint_array(memb_net_charges),\n _list_to_cint_array(_curr_indices),\n _list_to_cint_array(_cur_node_indices),\n _list_to_cdouble_array(rxd_memb_scales),\n _list_to_cint_array(list(itertools.chain.from_iterable(memb_cur_charges))),\n _list_to_pyobject_array(list(itertools.chain.from_iterable(_memb_cur_ptrs))),\n _list_to_cint_array(ics_map),\n _list_to_cint_array(ecs_map))\n \ndef _currents(rhs):\n return\n if rxd_memb_flux:\n # TODO: remove the asserts when this is verified to work\n assert(len(rxd_memb_flux) == len(_cur_node_indices))\n assert(len(rxd_memb_flux) == len(memb_cur_ptrs))\n assert(len(rxd_memb_flux) == len(memb_cur_charges))\n assert(len(rxd_memb_flux) == len(memb_net_charges))\n for flux, cur_ptrs, cur_charges, net_charge, i, cur_maps in zip(rxd_memb_flux, memb_cur_ptrs, memb_cur_charges, memb_net_charges, _cur_node_indices, memb_cur_mapped):\n rhs[i] -= net_charge * flux\n #import sys\n #sys.exit()\n # TODO: remove this assert when more thoroughly tested\n assert(len(cur_ptrs) == len(cur_maps))\n for ptr, charge, cur_map_i in zip(cur_ptrs, cur_charges, cur_maps):\n # this has the opposite sign of the above because positive\n # currents lower the membrane potential\n cur = charge * flux\n ptr[0] += cur\n for c in cur_map_i:\n _rxd_induced_currents[c] += cur\n #for sign, c in zip([-1, 1], cur_maps):\n # if c is not None:\n # _rxd_induced_currents[c] += sign * cur\n\n_last_m = None\n_last_preconditioner = None\n_fixed_step_count = 0\n\n\ndef _rxd_reaction(states):\n # TODO: this probably shouldn't be here\n # TODO: this was included in the 3d, probably shouldn't be there either\n # TODO: if its None and there is 3D... 
should we do anything special?\n if _diffusion_matrix is None and not species._has_3d: _setup_matrices()\n\n b = _numpy_zeros(len(states))\n \n \n if _curr_ptr_vector is not None:\n _curr_ptr_vector.gather(_curr_ptr_storage_nrn)\n b[_curr_indices] = _curr_scales * (_curr_ptr_storage - _rxd_induced_currents) \n \n b[_curr_indices] = _curr_scales * [ptr[0] for ptr in _curr_ptrs]\n\n # TODO: store weak references to the r._evaluate in addition to r so no\n # repeated lookups\n #for rptr in _all_reactions:\n # r = rptr()\n # if r:\n # indices, mult, rate = r._evaluate(states)\n # we split this in parts to allow for multiplicities and to allow stochastic to make the same changes in different places\n # for i, m in zip(indices, mult):\n # b[i] += m * rate\n\n node._apply_node_fluxes(b)\n return b\n \n_last_preconditioner_dt = 0\n_last_dt = None\n_last_m = None\n_diffusion_d = None\n_diffusion_a = None\n_diffusion_b = None\n_diffusion_p = None\n_cur_node_indices = None\n\n_diffusion_a_ptr, _diffusion_b_ptr, _diffusion_p_ptr = None, None, None\n\ndef _setup():\n initializer._do_init()\n # TODO: this is when I should resetup matrices (structure changed event)\n global _last_dt, _external_solver_initialized\n _last_dt = None\n _external_solver_initialized = False\n \n # Using C-code for reactions\n options.use_reaction_contribution_to_jacobian = False\n\ndef _find_librxdmath():\n import glob\n base_path = os.path.join(h.neuronhome(), \"..\", \"..\", platform.machine(), \"lib\", \"librxdmath\")\n success = False \n for extension in ['', '.dll', '.so', '.dylib']:\n dll = base_path + extension\n try:\n success = os.path.exists(dll) \n except:\n pass\n if success: break\n if not success:\n if sys.platform.lower().startswith(\"win\"):\n dll = os.path.join(h.neuronhome(), 'bin', 'librxdmath.dll')\n success = os.path.exists(dll)\n if not success:\n raise RxDException('unable to connect to the librxdmath library')\n return dll\n \ndef _c_compile(formula):\n filename = 'rxddll' + str(uuid.uuid1())\n with open(filename + '.c', 'w') as f:\n f.write(formula)\n math_library = '-lm'\n fpic = '-fPIC'\n try:\n gcc = os.environ[\"CC\"]\n except:\n #when running on windows try and used the gcc included with NEURON\n if sys.platform.lower().startswith(\"win\"):\n math_library = ''\n fpic = ''\n gcc = os.path.join(h.neuronhome(),\"mingw\",\"mingw64\",\"bin\",\"x86_64-w64-mingw32-gcc.exe\")\n if not os.path.isfile(gcc):\n raise RxDException(\"unable to locate a C compiler. 
Please `set CC=<path to C compiler>`\")\n else:\n gcc = \"gcc\"\n #TODO: Check this works on non-Linux machines\n gcc_cmd = \"%s -I%s -I%s \" % (gcc, sysconfig.get_python_inc(), os.path.join(h.neuronhome(), \"..\", \"..\", \"include\", \"nrn\"))\n gcc_cmd += \"-shared %s %s.c %s \" % (fpic, filename, _find_librxdmath())\n gcc_cmd += \"-o %s.so %s\" % (filename, math_library)\n if sys.platform.lower().startswith(\"win\"):\n my_path = os.getenv('PATH')\n os.putenv('PATH', my_path + ';' + os.path.join(h.neuronhome(),\"mingw\",\"mingw64\",\"bin\"))\n os.system(gcc_cmd)\n os.putenv('PATH', my_path)\n else:\n os.system(gcc_cmd)\n #TODO: Find a better way of letting the system locate librxdmath.so.0\n rxdmath_dll = ctypes.cdll[_find_librxdmath()]\n dll = ctypes.cdll['./%s.so' % filename]\n reaction = dll.reaction\n reaction.argtypes = [ctypes.POINTER(ctypes.c_double), ctypes.POINTER(ctypes.c_double)] \n reaction.restype = ctypes.c_double\n os.remove(filename + '.c')\n if sys.platform.lower().startswith(\"win\"):\n #cannot remove dll that are in use\n _windows_dll.append(weakref.ref(dll))\n _windows_dll_files.append(filename + \".so\")\n else:\n os.remove(filename + '.so')\n return reaction\n\n\ndef _conductance(d):\n pass\n \ndef _ode_jacobian(dt, t, ypred, fpred):\n #print '_ode_jacobian: dt = %g, last_dt = %r' % (dt, _last_dt)\n lo = _rxd_offset\n hi = lo + len(_nonzero_volume_indices) \n _reaction_matrix_setup(dt, ypred[lo : hi])\n\n_curr_ptr_vector = None\n_curr_ptr_storage = None\n_curr_ptr_storage_nrn = None\npinverse = None\n_cur_map = None\n_h_ptrvector = h.PtrVector\n_h_vector = h.Vector\n\n_structure_change_count = nrn_dll_sym('structure_change_cnt', _ctypes_c_int)\n_diam_change_count = nrn_dll_sym('diam_change_cnt', _ctypes_c_int)\n\ndef _donothing(): pass\n\ndef _update_node_data(force=False):\n global last_diam_change_cnt, last_structure_change_cnt, _curr_indices, _curr_scales, _curr_ptrs, _cur_map\n global _curr_ptr_vector, _curr_ptr_storage, _curr_ptr_storage_nrn\n if last_diam_change_cnt != _diam_change_count.value or _structure_change_count.value != last_structure_change_cnt or force:\n _cur_map = {}\n last_diam_change_cnt = _diam_change_count.value\n last_structure_change_cnt = _structure_change_count.value\n #if not species._has_3d:\n # TODO: merge this with the 3d/hybrid case?\n nsegs_changed = 0\n for sr in list(_species_get_all_species().values()):\n s = sr()\n if s is not None: nsegs_changed += s._update_node_data()\n if nsegs_changed:\n section1d._purge_cptrs()\n for sr in list(_species_get_all_species().values()):\n s = sr()\n if s is not None:\n s._update_region_indices(True)\n s._register_cptrs()\n if species._has_1d and species._1d_submatrix_n():\n volumes = node._get_data()[0]\n _zero_volume_indices = (numpy.where(volumes == 0)[0]).astype(numpy.int_)\n setup_solver(_node_get_states(), len(_node_get_states()), _zero_volume_indices, len(_zero_volume_indices), h._ref_t, h._ref_dt)\n # TODO: separate compiling reactions -- so the indices can be updated without recompiling\n _compile_reactions()\n\n #end#if\n for rptr in _all_reactions:\n r = rptr()\n if r is not None: r._update_indices()\n _curr_indices = []\n _curr_scales = []\n _curr_ptrs = []\n for sr in list(_species_get_all_species().values()):\n s = sr()\n if s is not None: s._setup_currents(_curr_indices, _curr_scales, _curr_ptrs, _cur_map)\n \n num = len(_curr_ptrs)\n if num:\n _curr_ptr_vector = _h_ptrvector(num)\n _curr_ptr_vector.ptr_update_callback(_donothing)\n for i, ptr in enumerate(_curr_ptrs):\n 
_curr_ptr_vector.pset(i, ptr)\n \n _curr_ptr_storage_nrn = _h_vector(num)\n _curr_ptr_storage = _curr_ptr_storage_nrn.as_numpy()\n else:\n _curr_ptr_vector = None\n\n #_curr_scales = _numpy_array(_curr_scales) \n\n\ndef _matrix_to_rxd_sparse(m):\n \"\"\"precondition: assumes m a numpy array\"\"\"\n nonzero_i, nonzero_j = list(zip(*list(m.keys())))\n nonzero_values = numpy.ascontiguousarray(list(m.values()), dtype=numpy.float64)\n\n # number of rows\n n = m.shape[1]\n\n return n, len(nonzero_i), numpy.ascontiguousarray(nonzero_i, dtype=numpy.int_), numpy.ascontiguousarray(nonzero_j, dtype=numpy.int_), nonzero_values\n\n\n_euler_matrix = None\n\n# TODO: make sure this does the right thing when the diffusion constant changes between two neighboring nodes\ndef _setup_matrices():\n global _curr_ptrs\n global _cur_node_indices\n global _zero_volume_indices\n\n # TODO: this sometimes seems to get called twice. Figure out why and fix, if possible.\n\n # if the shape has changed update the nodes\n _update_node_data()\n\n n = len(_node_get_states())\n \n #TODO: Replace with ADI version \n \"\"\"\n if species._has_3d:\n _euler_matrix = _scipy_sparse_dok_matrix((n, n), dtype=float)\n\n for sr in list(_species_get_all_species().values()):\n s = sr()\n if s is not None: s._setup_matrices3d(_euler_matrix)\n\n _diffusion_matrix = -_euler_matrix\n\n _euler_matrix = _euler_matrix.tocsr()\n _update_node_data(True)\n\n # NOTE: if we also have 1D, this will be replaced with the correct values below\n _zero_volume_indices = []\n _nonzero_volume_indices = list(range(len(_node_get_states())))\n \n \"\"\"\n if species._has_1d:\n n = species._1d_submatrix_n()\n # TODO: initialization is slow. track down why\n \n _last_dt = None\n \n for sr in list(_species_get_all_species().values()):\n s = sr()\n if s is not None:\n s._assign_parents()\n \n _update_node_data(True)\n\n volumes = node._get_data()[0]\n _zero_volume_indices = (numpy.where(volumes == 0)[0]).astype(numpy.int_)\n _nonzero_volume_indices = volumes.nonzero()[0]\n\n # remove old linearmodeladdition\n _linmodadd_cur = None\n \n if n: \n # create sparse matrix for C in cy'+gy=b\n c_diagonal = numpy.zeros(n,dtype=ctypes.c_double)\n # most entries are 1 except those corresponding to the 0 and 1 ends\n \n # create the matrix G\n #if not species._has_3d:\n # # if we have both, then put the 1D stuff into the matrix that already exists for 3D\n _diffusion_matrix = [dict() for idx in range(n)] \n for sr in list(_species_get_all_species().values()):\n s = sr()\n if s is not None:\n s._setup_diffusion_matrix(_diffusion_matrix)\n s._setup_c_matrix(c_diagonal)\n #print '_diffusion_matrix.shape = %r, n = %r, species._has_3d = %r' % (_diffusion_matrix.shape, n, species._has_3d)\n euler_matrix_i, euler_matrix_j, euler_matrix_nonzero = [], [], []\n for i in range(n):\n mat_i = _diffusion_matrix[i]\n euler_matrix_i.extend(itertools.repeat(i,len(mat_i)))\n euler_matrix_j.extend(mat_i.keys())\n euler_matrix_nonzero.extend(mat_i.values())\n euler_matrix_nnonzero = len(euler_matrix_nonzero)\n assert(len(euler_matrix_i) == len(euler_matrix_j) == len(euler_matrix_nonzero))\n # modify C for cases where no diffusive coupling of 0, 1 ends\n # TODO: is there a better way to handle no diffusion?\n #for i in range(n):\n # if not _diffusion_matrix[i, i]:\n # _linmodadd_c[i, i] = 1\n\n \n # setup for induced membrane currents\n _cur_node_indices = []\n\n for rptr in _all_reactions:\n r = rptr()\n if r is not None:\n r._setup_membrane_fluxes(_cur_node_indices, _cur_map)\n \n 
#_cvode_object.re_init() \n\n #if species._has_3d:\n # _euler_matrix = -_diffusion_matrix\n\n\n #TODO: Replace this this to handle 1d/3d hybrid models\n \"\"\"\n if species._has_1d and species._has_3d:\n # TODO: add connections to matrix; for now: find them\n hybrid_neighbors = collections.defaultdict(lambda: [])\n hybrid_diams = {}\n dxs = set()\n for sr in list(_species_get_all_species().values()):\n s = sr()\n if s is not None:\n if s._nodes and s._secs:\n # have both 1D and 3D, so find the neighbors\n # for each of the 3D sections, find the parent sections\n for r in s._regions:\n dxs.add(r._dx)\n for sec in r._secs3d:\n parent_seg = sec.trueparentseg()\n parent_sec = None if not parent_seg else parent_seg.sec\n # are any of these a match with a 1d section?\n if s._has_region_section(r, parent_sec):\n # this section has a 1d section that is a parent\n index1d, indices3d = _get_node_indices(s, r, sec, h.section_orientation(sec=sec), parent_sec, h.parent_connection(sec=sec))\n hybrid_neighbors[index1d] += indices3d\n hybrid_diams[index1d] = parent_seg.diam\n else:\n for sec1d in r._secs1d:\n parent_1d_seg = sec1d.trueparentseg()\n parent_1d = None if not parent_seg else parent_seg.sec\n if parent_1d == sec:\n # it is the parent of a 1d section\n index1d, indices3d = _get_node_indices(s, r, sec, h.parent_connection(sec=sec1d), sec1d, sec1d.orientation())\n hybrid_neighbors[index1d] += indices3d\n hybrid_diams[index1d] = parent_1d_seg.diam\n break\n elif parent_1d == parent_sec:\n # it connects to the parent of a 1d section\n index1d, indices3d = _get_node_indices(s, r, sec, h.section_orientation(sec=sec), sec1d, sec1d.orientation())\n hybrid_neighbors[index1d] += indices3d\n hybrid_diams[index1d] = parent_1d_seg.diam\n break\n if len(dxs) > 1:\n raise RxDException('currently require a unique value for dx')\n dx = dxs.pop()\n diffs = node._diffs\n n = len(_node_get_states())\n # TODO: validate that we're doing the right thing at boundaries\n for index1d in list(hybrid_neighbors.keys()):\n neighbors3d = set(hybrid_neighbors[index1d])\n # NOTE: splitting the connection area equally across all the connecting nodes\n area = (numpy.pi * 0.25 * hybrid_diams[index1d] ** 2) / len(neighbors3d)\n for i in neighbors3d:\n d = diffs[i]\n vol = node._volumes[i]\n rate = d * area / (vol * dx / 2.)\n # make the connections on the 3d side\n _euler_matrix[i, i] -= rate\n _euler_matrix[i, index1d] += rate\n # make the connections on the 1d side (scale by vol because conserving mass not volume)\n _euler_matrix[index1d, index1d] -= rate * vol\n _euler_matrix[index1d, i] += rate * vol\n #print 'index1d row sum:', sum(_euler_matrix[index1d, j] for j in xrange(n))\n #print 'index1d col sum:', sum(_euler_matrix[j, index1d] for j in xrange(n))\n \"\"\"\n #CRxD\n if n and euler_matrix_nnonzero > 0:\n _update_node_data()\n section1d._transfer_to_legacy()\n set_euler_matrix(n, euler_matrix_nnonzero,\n _list_to_clong_array(euler_matrix_i),\n _list_to_clong_array(euler_matrix_j),\n _list_to_cdouble_array(euler_matrix_nonzero),\n _zero_volume_indices,\n len(_zero_volume_indices),\n c_diagonal)\n else:\n rxd_set_no_diffusion()\n setup_solver(_node_get_states(), len(_node_get_states()), _zero_volume_indices, len(_zero_volume_indices), h._ref_t, h._ref_dt)\n \n if _curr_indices is not None and len(_curr_indices) > 0:\n rxd_setup_curr_ptrs(len(_curr_indices), _list_to_cint_array(_curr_indices),\n numpy.concatenate(_curr_scales), _list_to_pyobject_array(_curr_ptrs))\n\n if section1d._all_cindices is not None and 
len(section1d._all_cindices) > 0:\n rxd_setup_conc_ptrs(len(section1d._all_cindices), \n _list_to_cint_array(section1d._all_cindices), \n _list_to_pyobject_array(section1d._all_cptrs))\n\n # we do this last because of performance issues with changing sparsity of csr matrices\n \"\"\"\n if _diffusion_matrix is not None:\n _diffusion_matrix = _diffusion_matrix.tocsr()\n if _euler_matrix is not None:\n _euler_matrix = _euler_matrix.tocsr()\n\n if species._has_1d:\n if species._has_3d:\n _diffusion_matrix = -_euler_matrix\n n = species._1d_submatrix_n()\n if n:\n matrix = _diffusion_matrix[_zero_volume_indices].tocsr()\n indptr = matrix.indptr\n matrixdata = matrix.data\n count = len(_zero_volume_indices)\n for row, i in enumerate(_zero_volume_indices):\n d = _diffusion_matrix[i, i]\n if d:\n matrixdata[indptr[row] : indptr[row + 1]] /= -d\n matrix[row, i] = 0\n else:\n matrixdata[indptr[row] : indptr[row + 1]] = 0\n global _mat_for_zero_volume_nodes\n _mat_for_zero_volume_nodes = matrix\n # TODO: _mat_for_zero_volume_nodes is used for CVode.\n # Figure out if/how it has to be changed for hybrid 1D/3D sims (probably just augment with identity? or change how its used to avoid multiplying by I)\n \n \"\"\"\n \n \"\"\"\n if pt1 in indices:\n ileft = indices[pt1]\n dleft = (d + diffs[ileft]) * 0.5\n left = dleft * areal / (vol * dx)\n euler_matrix[index, ileft] += left\n euler_matrix[index, index] -= left\n if pt2 in indices:\n iright = indices[pt2]\n dright = (d + diffs[iright]) * 0.5\n right = dright * arear / (vol * dx)\n euler_matrix[index, iright] += right\n euler_matrix[index, index] -= right\n\"\"\" \n \n\n\ndef _get_node_indices(species, region, sec3d, x3d, sec1d, x1d):\n # TODO: remove need for this assumption\n assert(x1d in (0, 1))\n disc_indices = region._indices_from_sec_x(sec3d, x3d)\n #print '%r(%g) connects to the 1d section %r(%g)' % (sec3d, x3d, sec1d, x1d)\n #print 'disc indices: %r' % disc_indices\n indices3d = []\n for node in species._nodes:\n if node._r == region:\n for i, j, k in disc_indices:\n if node._i == i and node._j == j and node._k == k:\n indices3d.append(node._index)\n #print 'found node %d with coordinates (%g, %g, %g)' % (node._index, node.x3d, node.y3d, node.z3d)\n # discard duplicates...\n # TODO: really, need to figure out all the 3d nodes connecting to a given 1d endpoint, then unique that\n indices3d = list(set(indices3d))\n #print '3d matrix indices: %r' % indices3d\n # TODO: remove the need for this assertion\n if x1d == sec1d.orientation():\n # TODO: make this whole thing more efficient\n # the parent node is the nonzero index on the first row before the diagonal\n first_row = min([node._index for node in species.nodes(region)(sec1d)])\n for j in range(first_row):\n if _euler_matrix[first_row, j] != 0:\n index_1d = j\n break\n else:\n raise RxDException('should never get here; could not find parent')\n elif x1d == 1 - sec1d.orientation():\n # the ending zero-volume node is the one after the last node\n # TODO: make this more efficient\n index_1d = max([node._index for node in species.nodes(region)(sec1d)]) + 1\n else:\n raise RxDException('should never get here; _get_node_indices apparently only partly converted to allow connecting to 1d in middle')\n #print '1d index is %d' % index_1d\n \n return index_1d, indices3d\n\ndef _compile_reactions():\n #clear all previous reactions (intracellular & extracellular) and the\n #supporting indexes\n #_windows_remove_dlls()\n clear_rates()\n \n regions_inv = dict() #regions -> reactions that occur there\n 
species_by_region = dict()\n all_species_involed = set()\n location_count = 0\n \n ecs_regions_inv = dict()\n ecs_species_by_region = dict()\n ecs_all_species_involed = set()\n ecs_mc_species_involved = set() \n from . import rate, multiCompartmentReaction\n\n #Find sets of sections that contain the same regions\n from .region import _c_region\n matched_regions = [] # the different combinations of regions that arise in different sections\n for nrnsec in list(section1d._rxd_sec_lookup.keys()):\n set_of_regions = set() # a set of the regions that occur in a given section\n for sec in section1d._rxd_sec_lookup[nrnsec]:\n if sec(): set_of_regions.add(sec()._region)\n if set_of_regions not in matched_regions:\n matched_regions.append(set_of_regions)\n region._c_region_lookup = dict()\n \n #create a c_region instance for each of the unique sets of regions\n c_region_list = []\n for sets in matched_regions:\n c_region_list.append(_c_region(sets))\n \n\n for rptr in _all_reactions:\n r = rptr()\n if not r:\n continue\n\n #Find all the species involved\n if isinstance(r,rate.Rate):\n if not r._species():\n continue\n sptrs = set(list(r._involved_species) + [r._species])\n else:\n sptrs = set(list(r._involved_species) + r._dests + r._sources)\n \n #Find all the regions involved\n if isinstance(r, multiCompartmentReaction.MultiCompartmentReaction):\n if not hasattr(r._mult, 'flatten'):\n r._update_indices()\n react_regions = [s()._extracellular()._region for s in r._sources + r._dests if isinstance(s(),species.SpeciesOnExtracellular)] + [s()._region() for s in r._sources + r._dests if not isinstance(s(),species.SpeciesOnExtracellular)]\n react_regions += [sptr()._region() for sptr in sptrs if isinstance(sptr(),species.SpeciesOnRegion)]\n #if regions are specified - use those\n elif hasattr(r,'_active_regions'):\n react_regions = r._active_regions\n #Otherwise use all the regions where the species are\n else:\n react_regions = set()\n nsp = 0\n for sp in sptrs:\n s = sp()\n nsp += 1\n if isinstance(s,species.SpeciesOnRegion):\n react_regions.add(s._region())\n elif isinstance(s,species.SpeciesOnExtracellular):\n react_regions.add(s._extracellular()._region)\n elif isinstance(s,species._ExtracellularSpecies):\n react_regions.add(s._region)\n elif None not in s._regions:\n [react_regions.add(reg) for reg in s._regions + s._extracellular_regions]\n react_regions = list(react_regions)\n #Only regions where ALL the species are present -- unless it is a membrane\n #from collections import Counter\n #from . 
import geometry as geo\n #react_regions = [reg for reg, count in Counter(react_regions).iteritems() if count == nsp or isinstance(reg.geometry,geo.ScalableBorder)]\n #Any intracellular regions\n if not all([isinstance(x, region.Extracellular) for x in react_regions]):\n species_involved = []\n for sp in sptrs:\n s = sp()\n if not isinstance(s, species.SpeciesOnExtracellular):\n all_species_involed.add(s)\n species_involved.append(s)\n for reg in react_regions:\n if isinstance(reg, region.Extracellular):\n continue\n if reg in regions_inv:\n regions_inv[reg].append(rptr)\n else:\n regions_inv[reg] = [rptr]\n if reg in species_by_region:\n species_by_region[reg] = species_by_region[reg].union(species_involved)\n else:\n species_by_region[reg] = set(species_involved)\n for sec in reg._secs:\n location_count += sec.nseg\n #Any extracellular regions\n if any([isinstance(x, region.Extracellular) for x in react_regions]):\n #MultiCompartment - so can have both extracellular and intracellular regions\n if isinstance(r, multiCompartmentReaction.MultiCompartmentReaction):\n for sp in sptrs:\n s = sp()\n if isinstance(s,species._ExtracellularSpecies):\n ecs_mc_species_involved.add(s)\n elif isinstance(s,species.SpeciesOnExtracellular):\n ecs_mc_species_involved.add(s._extracellular())\n for reg in react_regions:\n if reg in list(ecs_species_by_region.keys()):\n ecs_species_by_region[reg] = ecs_species_by_region[reg].union(ecs_mc_species_involved)\n else:\n ecs_species_by_region[reg] = set(ecs_mc_species_involved)\n #Otherwise - reaction can only have extracellular regions\n else:\n ecs_species_involved = []\n for sp in sptrs:\n s = sp()\n ecs_all_species_involed.add(s)\n ecs_species_involved.append(s)\n if any([isinstance(x, region.Region) for x in react_regions]):\n raise RxDException(\"Error: an %s cannot have both Extracellular and Intracellular regions. Use a MultiCompartmentReaction or specify the desired region with the 'region=' keyword argument\", rptr().__class__)\n for reg in react_regions:\n if not isinstance(reg, region.Extracellular):\n continue\n\n if reg in ecs_regions_inv:\n ecs_regions_inv[reg].append(rptr)\n else:\n ecs_regions_inv[reg] = [rptr]\n if reg in ecs_species_by_region:\n ecs_species_by_region[reg] = ecs_species_by_region[reg].union(ecs_species_involved)\n else:\n ecs_species_by_region[reg] = set(ecs_species_involved)\n #Create lists of indexes for intracellular reactions and rates\n nseg_by_region = [] # a list of the number of segments for each region\n # a table for location,species -> state index\n location_index = []\n for reg in regions_inv:\n rptr = weakref.ref(reg)\n for c_region in region._c_region_lookup[rptr]:\n for react in regions_inv[reg]:\n c_region.add_reaction(react,rptr)\n c_region.add_species(species_by_region[reg])\n if reg in ecs_species_by_region:\n c_region.add_ecs_species(ecs_species_by_region[reg])\n\n # now setup the reactions\n setup_solver(_node_get_states(), len(_node_get_states()), _zero_volume_indices, len(_zero_volume_indices), h._ref_t, h._ref_dt)\n #if there are no reactions\n if location_count == 0 and len(ecs_regions_inv) == 0:\n return None\n \n #Setup intracellular and multicompartment reactions\n if location_count > 0:\n from . 
import rate, multiCompartmentReaction\n for creg in c_region_list:\n creg._initalize()\n mc_mult_count = 0\n mc_mult_list = []\n species_ids_used = numpy.zeros((creg.num_species,creg.num_regions),bool)\n flux_ids_used = numpy.zeros((creg.num_species,creg.num_regions),bool)\n ecs_species_ids_used = numpy.zeros((creg.num_ecs_species,creg.num_regions),bool)\n fxn_string = _c_headers \n fxn_string += 'void reaction(double** species, double** rhs, double* mult, double** species_ecs, double** rhs_ecs, double** flux)\\n{'\n # declare the \"rate\" variable if any reactions (non-rates)\n for rprt in list(creg._react_regions.keys()):\n if not isinstance(rprt(),rate.Rate):\n fxn_string += '\\n\\tdouble rate;'\n break\n for rptr in list(creg._react_regions.keys()):\n r = rptr()\n if isinstance(r,rate.Rate):\n s = r._species()\n species_id = creg._species_ids.get(s._id)\n if isinstance(s,species.SpeciesOnRegion):\n region_ids = [creg._region_ids.get(s._region()._id)]\n else:\n region_ids = creg._react_regions[rptr]\n for region_id in region_ids:\n rate_str = re.sub(r'species\\[(\\d+)\\]\\[(\\d+)\\]',lambda m: \"species[%i][%i]\" % (creg._species_ids.get(int(m.groups()[0])), creg._region_ids.get(int(m.groups()[1]))), r._rate)\n rate_str = re.sub(r'species\\[(\\d+)\\]\\[\\]',lambda m: \"species[%i][%i]\" % (creg._species_ids.get(int(m.groups()[0])), region_id), rate_str)\n operator = '+=' if species_ids_used[species_id][region_id] else '='\n fxn_string += \"\\n\\trhs[%d][%d] %s %s;\" % (species_id, region_id, operator, rate_str)\n species_ids_used[species_id][region_id] = True\n elif isinstance(r, multiCompartmentReaction.MultiCompartmentReaction):\n #Lookup the region_id for the reaction\n for sptr in r._sources + r._dests:\n if isinstance(sptr(),species.SpeciesOnExtracellular):\n continue\n region_id = creg._region_ids.get(sptr()._region()._id)\n rate_str = re.sub(r'species\\[(\\d+)\\]\\[(\\d+)\\]',lambda m: \"species[%i][%i]\" % (creg._species_ids.get(int(m.groups()[0])), creg._region_ids.get(int(m.groups()[1]))), r._rate)\n rate_str = re.sub(r'species\\[(\\d+)\\]\\[\\]',lambda m: \"species[%i][%i]\" % (creg._species_ids.get(int(m.groups()[0])), region_id), rate_str)\n rate_str = re.sub(r'species_ecs\\[(\\d+)\\]',lambda m: \"species_ecs[%i][%i]\" % (int(m.groups()[0]), region_id), rate_str)\n \n fxn_string += \"\\n\\trate = %s;\" % rate_str\n \n for sptr in r._sources + r._dests:\n s = sptr()\n if isinstance(s,species.SpeciesOnExtracellular):\n species_id = s._extracellular()._grid_id\n operator = '+=' if ecs_species_ids_used[species_id][region_id] else '='\n fxn_string += \"\\n\\trhs_ecs[%d][%d] %s mult[%d] * rate;\" % (species_id, region_id, operator, mc_mult_count)\n ecs_species_ids_used[species_id][region_id] = True\n else:\n species_id = creg._species_ids.get(s._id)\n region_id = creg._region_ids.get(sptr()._region()._id)\n operator = '+=' if species_ids_used[species_id][region_id] else '='\n fxn_string += \"\\n\\trhs[%d][%d] %s mult[%d] * rate;\" % (species_id, region_id, operator, mc_mult_count)\n species_ids_used[species_id][region_id] = True\n if r._membrane_flux:\n operator = '+=' if flux_ids_used[species_id][region_id] else '='\n fxn_string += \"\\n\\tif(flux) flux[%d][%d] %s rate;\" % (species_id, region_id, operator)\n flux_ids_used[species_id][region_id] = True\n #TODO: Fix problem if the whole region isn't part of the same aggregate c_region\n mc_mult_count += 1\n mc_mult_list.extend(r._mult.flatten())\n else:\n for region_id in creg._react_regions[rptr]:\n \n rate_str = 
re.sub(r'species\\[(\\d+)\\]\\[(\\d+)\\]',lambda m: \"species[%i][%i]\" % (creg._species_ids.get(int(m.groups()[0])), creg._region_ids.get(int(m.groups()[1]))), r._rate)\n rate_str = re.sub(r'species\\[(\\d+)\\]\\[\\]',lambda m: \"species[%i][%i]\" % (creg._species_ids.get(int(m.groups()[0])), region_id), rate_str)\n fxn_string += \"\\n\\trate = %s;\" % rate_str\n summed_mults = collections.defaultdict(lambda: 0)\n for (mult, sp) in zip(r._mult, r._sources + r._dests):\n summed_mults[creg._species_ids.get(sp()._id)] += mult\n for idx in sorted(summed_mults.keys()):\n operator = '+=' if species_ids_used[idx][region_id] else '='\n species_ids_used[idx][region_id] = True\n fxn_string += \"\\n\\trhs[%d][%d] %s (%g) * rate;\" % (idx, region_id, operator, summed_mults[idx])\n \n fxn_string += \"\\n}\\n\"\n register_rate(creg.num_species, creg.num_regions, creg.num_segments, creg.get_state_index(),\n creg.num_ecs_species, creg.get_ecs_species_ids(), creg.get_ecs_index(),\n mc_mult_count, numpy.array(mc_mult_list, dtype=ctypes.c_double),\n _c_compile(fxn_string))\n\n \n #Setup extracellular reactions\n if len(ecs_regions_inv) > 0:\n grid_ids = []\n all_gids = set() \n fxn_string = _c_headers \n #TODO: find the nrn include path in python\n #It is necessary for a couple of function in python that are not in math.h\n fxn_string += 'void reaction(double* species_ecs, double* rhs)\\n{'\n # declare the \"rate\" variable if any reactions (non-rates)\n for rptr in [r for rlist in list(ecs_regions_inv.values()) for r in rlist]:\n if not isinstance(rptr(),rate.Rate):\n fxn_string += '\\n\\tdouble rate;'\n break\n #get a list of all grid_ids invovled\n for rptr in [r for rlist in list(ecs_regions_inv.values()) for r in rlist]:\n if isinstance(rptr(),rate.Rate):\n for sp in [rptr()._species] + rptr()._involved_species_ecs:\n s = sp()[reg]._extracellular() if isinstance(sp(), species.Species) else sp()\n all_gids.add(sp()._extracellular()._grid_id if isinstance(s, species.SpeciesOnExtracellular) else s._grid_id)\n else:\n for sp in rptr()._sources + rptr()._dests + rptr()._involved_species_ecs:\n s = sp()[reg]._extracellular() if isinstance(sp(), species.Species) else sp()\n all_gids.add(sp()._extracellular()._grid_id if isinstance(s, species.SpeciesOnExtracellular) else s._grid_id)\n all_gids = list(all_gids)\n for reg in ecs_regions_inv:\n for rptr in ecs_regions_inv[reg]:\n r = rptr()\n rate_str = re.sub(r'species_ecs\\[(\\d+)\\]',lambda m: \"species_ecs[%i]\" % [pid for pid,gid in enumerate(all_gids) if gid == int(m.groups()[0])][0], r._rate_ecs)\n if isinstance(r,rate.Rate):\n s = r._species()\n #Get underlying rxd._ExtracellularSpecies for the grid_id\n if isinstance(s, species.Species):\n s = s[reg]._extracellular()\n elif isinstance(s, species.SpeciesOnExtracellular):\n s = s._extracellular()\n if s._grid_id in grid_ids:\n operator = '+=' \n else:\n operator = '='\n grid_ids.append(s._grid_id)\n pid = [pid for pid,gid in enumerate(all_gids) if gid == s._grid_id][0]\n fxn_string += \"\\n\\trhs[%d] %s %s;\" % (pid, operator, rate_str)\n else:\n idx=0\n fxn_string += \"\\n\\trate = %s;\" % rate_str\n for sp in r._sources + r._dests:\n s = sp()\n #Get underlying rxd._ExtracellularSpecies for the grid_id\n if isinstance(s, species.Species):\n s = s[reg]._extracellular()\n elif isinstance(s, species.SpeciesOnExtracellular):\n s = s._extracellular()\n if s._grid_id in grid_ids:\n operator = '+=' \n else:\n operator = '='\n grid_ids.append(s._grid_id)\n pid = [pid for pid,gid in enumerate(all_gids) if gid == 
s._grid_id][0]\n fxn_string += \"\\n\\trhs[%d] %s (%s)*rate;\" % (pid, operator, r._mult[idx])\n idx += 1\n fxn_string += \"\\n}\\n\"\n ecs_register_reaction(0, len(all_gids), _list_to_cint_array(all_gids), _c_compile(fxn_string))\n\ndef _init():\n if len(species._all_species) == 0:\n return None\n initializer._do_init()\n # TODO: check about the 0<x<1 problem alluded to in the documentation\n h.define_shape()\n\n # if the shape has changed update the nodes\n _update_node_data()\n \n if species._has_1d:\n section1d._purge_cptrs()\n \n for sr in list(_species_get_all_species().values()):\n s = sr()\n if s is not None:\n # TODO: are there issues with hybrid or 3D here? (I don't think so, but here's a bookmark just in case)\n s._register_cptrs()\n s._finitialize()\n _setup_matrices()\n _compile_reactions()\n _setup_memb_currents()\n\ndef _init_concentration():\n if len(species._all_species) == 0:\n return None\n for sr in list(_species_get_all_species().values()):\n s = sr()\n if s is not None:\n # TODO: are there issues with hybrid or 3D here? (I don't think so, but here's a bookmark just in case)\n s._finitialize()\n\n\n\n_has_nbs_registered = False\n_nbs = None\ndo_setup_matrices_fptr = None\ndef _do_nbs_register():\n global _has_nbs_registered, _nbs, _fih, _fih2, _fih3, do_setup_matrices_fptr\n \n if not _has_nbs_registered:\n #from neuron import nonvint_block_supervisor as _nbs\n\n _has_nbs_registered = True\n #_nbs.register(_callbacks) not used by crxd\n \n #\n # register the initialization handler and the ion register handler\n #\n _fih = h.FInitializeHandler(_init_concentration)\n _fih3 = h.FInitializeHandler(3, _init)\n\n set_setup_matrices = nrn_dll_sym('set_setup_matrices')\n set_setup_matrices.argtypes = [fptr_prototype]\n do_setup_matrices_fptr = fptr_prototype(_setup_matrices)\n set_setup_matrices(do_setup_matrices_fptr)\n\n _fih2 = h.FInitializeHandler(3, initializer._do_ion_register)\n\n\n #\n # register scatter/gather mechanisms\n #\n _cvode_object.extra_scatter_gather(0, _after_advance)\n \n\n# register the Python callbacks\ndo_setup_fptr = fptr_prototype(_setup)\ndo_initialize_fptr = fptr_prototype(_init)\nset_setup(do_setup_fptr)\nset_initialize(do_initialize_fptr)\n\ndef _windows_remove_dlls():\n global _windows_dll_files, _windows_dll\n for (dll_ptr,filepath) in zip(_windows_dll,_windows_dll_files):\n dll = dll_ptr()\n if dll:\n handle = dll._handle\n del dll\n ctypes.windll.kernel32.FreeLibrary(handle)\n os.remove(filepath)\n _windows_dll_files = []\n _windows_dll = []\n \n \ndef nthread(n=None):\n if(n):\n _set_num_threads(n)\n return _get_num_threads()\n" ]
[ [ "numpy.ctypeslib.ndpointer", "numpy.zeros", "numpy.concatenate", "numpy.ndarray", "numpy.where", "numpy.array", "numpy.ascontiguousarray" ] ]
alexlee-gk/visual_dynamics
[ "90227bb0d0aebb1989117b5c25ca311655ca7cc7" ]
[ "visual_dynamics/gui/gps_training_gui.py" ]
[ "\"\"\"\nGPS Training GUI\n\nThe GPS Training GUI is used to interact with the GPS algorithm during training.\nIt contains the below seven functionalities:\n\nAction Panel contains buttons for stop, reset, go, fail\nAction Status Textbox displays action status\nAlgorithm Status Textbox displays algorithm status\nCost Plot displays costs after each iteration\nAlgorithm Output Textbox displays algorithm output after each iteration\n3D Trajectory Visualizer displays 3D trajectories after each iteration\nImage Visualizer displays images received from a rostopic\n\nFor more detailed documentation, visit: rll.berkeley.edu/gps/gui\n\"\"\"\nimport time\nimport threading\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.gridspec as gridspec\n\nfrom gps.gui.config import config\nfrom gps.gui.action_panel import Action, ActionPanel\nfrom gps.gui.textbox import Textbox\nfrom gps.gui.mean_plotter import MeanPlotter\nfrom gps.gui.plotter_3d import Plotter3D\nfrom gps.gui.image_visualizer import ImageVisualizer\nfrom gps.gui.util import buffered_axis_limits, load_data_from_npz\n\nfrom gps.proto.gps_pb2 import END_EFFECTOR_POINTS\n\n# Needed for typechecks\nfrom gps.algorithm.algorithm_badmm import AlgorithmBADMM\nfrom gps.algorithm.algorithm_mdgps import AlgorithmMDGPS\n\nclass GPSTrainingGUI(object):\n\n def __init__(self, hyperparams):\n self._hyperparams = hyperparams\n self._log_filename = self._hyperparams['log_filename']\n if 'target_filename' in self._hyperparams:\n self._target_filename = self._hyperparams['target_filename']\n else:\n self._target_filename = None\n\n # GPS Training Status.\n self.mode = config['initial_mode'] # Modes: run, wait, end, request, process.\n self.request = None # Requests: stop, reset, go, fail, None.\n self.err_msg = None\n self._colors = {\n 'run': 'cyan',\n 'wait': 'orange',\n 'end': 'red',\n\n 'stop': 'red',\n 'reset': 'yellow',\n 'go': 'green',\n 'fail': 'magenta',\n }\n self._first_update = True\n\n # Actions.\n actions_arr = [\n Action('stop', 'stop', self.request_stop, axis_pos=0),\n Action('reset', 'reset', self.request_reset, axis_pos=1),\n Action('go', 'go', self.request_go, axis_pos=2),\n Action('fail', 'fail', self.request_fail, axis_pos=3),\n ]\n\n # Setup figure.\n plt.ion()\n plt.rcParams['toolbar'] = 'None'\n for key in plt.rcParams:\n if key.startswith('keymap.'):\n plt.rcParams[key] = ''\n\n self._fig = plt.figure(figsize=config['figsize'])\n self._fig.subplots_adjust(left=0.01, bottom=0.01, right=0.99, top=0.99,\n wspace=0, hspace=0)\n\n # Assign GUI component locations.\n self._gs = gridspec.GridSpec(16, 8)\n self._gs_action_panel = self._gs[0:2, 0:8]\n self._gs_action_output = self._gs[2:3, 0:4]\n self._gs_status_output = self._gs[3:4, 0:4]\n self._gs_cost_plotter = self._gs[2:4, 4:8]\n self._gs_algthm_output = self._gs[4:8, 0:8]\n if config['image_on']:\n self._gs_traj_visualizer = self._gs[8:16, 0:4]\n self._gs_image_visualizer = self._gs[8:16, 4:8]\n else:\n self._gs_traj_visualizer = self._gs[8:16, 0:8]\n\n # Create GUI components.\n self._action_panel = ActionPanel(self._fig, self._gs_action_panel, 1, 4, actions_arr)\n self._action_output = Textbox(self._fig, self._gs_action_output, border_on=True)\n self._status_output = Textbox(self._fig, self._gs_status_output, border_on=False)\n self._algthm_output = Textbox(self._fig, self._gs_algthm_output,\n max_display_size=config['algthm_output_max_display_size'],\n log_filename=self._log_filename,\n fontsize=config['algthm_output_fontsize'],\n font_family='monospace')\n 
self._cost_plotter = MeanPlotter(self._fig, self._gs_cost_plotter,\n color='blue', label='mean cost')\n self._traj_visualizer = Plotter3D(self._fig, self._gs_traj_visualizer,\n num_plots=self._hyperparams['conditions'])\n if config['image_on']:\n self._image_visualizer = ImageVisualizer(self._fig,\n self._gs_image_visualizer, cropsize=config['image_size'],\n rostopic=config['image_topic'], show_overlay_buttons=True)\n\n # Setup GUI components.\n self._algthm_output.log_text('\\n')\n self.set_output_text(self._hyperparams['info'])\n if config['initial_mode'] == 'run':\n self.run_mode()\n else:\n self.wait_mode()\n\n # Setup 3D Trajectory Visualizer plot titles and legends\n for m in range(self._hyperparams['conditions']):\n self._traj_visualizer.set_title(m, 'Condition %d' % (m))\n self._traj_visualizer.add_legend(linestyle='-', marker='None',\n color='green', label='Trajectory Samples')\n self._traj_visualizer.add_legend(linestyle='-', marker='None',\n color='blue', label='Policy Samples')\n self._traj_visualizer.add_legend(linestyle='None', marker='x',\n color=(0.5, 0, 0), label='LG Controller Means')\n self._traj_visualizer.add_legend(linestyle='-', marker='None',\n color='red', label='LG Controller Distributions')\n\n self._fig.canvas.draw()\n\n # Display calculating thread\n def display_calculating(delay, run_event):\n while True:\n if not run_event.is_set():\n run_event.wait()\n if run_event.is_set():\n self.set_status_text('Calculating.')\n time.sleep(delay)\n if run_event.is_set():\n self.set_status_text('Calculating..')\n time.sleep(delay)\n if run_event.is_set():\n self.set_status_text('Calculating...')\n time.sleep(delay)\n\n self._calculating_run = threading.Event()\n self._calculating_thread = threading.Thread(target=display_calculating,\n args=(1, self._calculating_run))\n self._calculating_thread.daemon = True\n self._calculating_thread.start()\n\n # GPS Training functions\n def request_stop(self, event=None):\n self.request_mode('stop')\n\n def request_reset(self, event=None):\n self.request_mode('reset')\n\n def request_go(self, event=None):\n self.request_mode('go')\n\n def request_fail(self, event=None):\n self.request_mode('fail')\n\n def request_mode(self, request):\n \"\"\"\n Sets the request mode (stop, reset, go, fail). 
The request is read by\n gps_main before sampling, and the appropriate action is taken.\n \"\"\"\n self.mode = 'request'\n self.request = request\n self.set_action_text(self.request + ' requested')\n self.set_action_bgcolor(self._colors[self.request], alpha=0.2)\n\n def process_mode(self):\n \"\"\"\n Completes the current request, after it is first read by gps_main.\n Displays visual confirmation that the request was processed,\n displays any error messages, and then switches into mode 'run' or 'wait'.\n \"\"\"\n self.mode = 'process'\n self.set_action_text(self.request + ' processed')\n self.set_action_bgcolor(self._colors[self.request], alpha=1.0)\n if self.err_msg:\n self.set_action_text(self.request + ' processed' + '\\nERROR: ' +\n self.err_msg)\n self.err_msg = None\n time.sleep(1.0)\n else:\n time.sleep(0.5)\n if self.request in ('stop', 'reset', 'fail'):\n self.wait_mode()\n elif self.request == 'go':\n self.run_mode()\n self.request = None\n\n def wait_mode(self):\n self.mode = 'wait'\n self.set_action_text('waiting')\n self.set_action_bgcolor(self._colors[self.mode], alpha=1.0)\n\n def run_mode(self):\n self.mode = 'run'\n self.set_action_text('running')\n self.set_action_bgcolor(self._colors[self.mode], alpha=1.0)\n\n def end_mode(self):\n self.mode = 'end'\n self.set_action_text('ended')\n self.set_action_bgcolor(self._colors[self.mode], alpha=1.0)\n\n def estop(self, event=None):\n self.set_action_text('estop: NOT IMPLEMENTED')\n\n # GUI functions\n def set_action_text(self, text):\n self._action_output.set_text(text)\n self._cost_plotter.draw_ticklabels() # redraw overflow ticklabels\n\n def set_action_bgcolor(self, color, alpha=1.0):\n self._action_output.set_bgcolor(color, alpha)\n self._cost_plotter.draw_ticklabels() # redraw overflow ticklabels\n\n def set_status_text(self, text):\n self._status_output.set_text(text)\n self._cost_plotter.draw_ticklabels() # redraw overflow ticklabels\n\n def set_output_text(self, text):\n self._algthm_output.set_text(text)\n self._cost_plotter.draw_ticklabels() # redraw overflow ticklabels\n\n def append_output_text(self, text):\n self._algthm_output.append_text(text)\n self._cost_plotter.draw_ticklabels() # redraw overflow ticklabels\n\n def start_display_calculating(self):\n self._calculating_run.set()\n\n def stop_display_calculating(self):\n self._calculating_run.clear()\n\n def set_image_overlays(self, condition):\n \"\"\"\n Sets up the image visualizer with what images to overlay if\n \"overlay_initial_image\" or \"overlay_target_image\" is pressed.\n \"\"\"\n if not config['image_on'] or not self._target_filename:\n return\n initial_image = load_data_from_npz(self._target_filename,\n config['image_overlay_actuator'], str(condition),\n 'initial', 'image', default=None)\n target_image = load_data_from_npz(self._target_filename,\n config['image_overlay_actuator'], str(condition),\n 'target', 'image', default=None)\n self._image_visualizer.set_initial_image(initial_image,\n alpha=config['image_overlay_alpha'])\n self._image_visualizer.set_target_image(target_image,\n alpha=config['image_overlay_alpha'])\n\n # Iteration update functions\n def update(self, itr, algorithm, agent, traj_sample_lists, pol_sample_lists):\n \"\"\"\n After each iteration, update the iteration data output, the cost plot,\n and the 3D trajectory visualizations (if end effector points exist).\n \"\"\"\n if self._first_update:\n self._output_column_titles(algorithm)\n self._first_update = False\n\n costs = [np.mean(np.sum(algorithm.prev[m].cs, axis=1)) for m in 
range(algorithm.M)]\n self._update_iteration_data(itr, algorithm, costs, pol_sample_lists)\n self._cost_plotter.update(costs, t=itr)\n if END_EFFECTOR_POINTS in agent.x_data_types:\n self._update_trajectory_visualizations(algorithm, agent,\n traj_sample_lists, pol_sample_lists)\n\n self._fig.canvas.draw()\n self._fig.canvas.flush_events() # Fixes bug in Qt4Agg backend\n\n def _output_column_titles(self, algorithm, policy_titles=False):\n \"\"\"\n Setup iteration data column titles: iteration, average cost, and for\n each condition the mean cost over samples, step size, linear Guassian\n controller entropies, and initial/final KL divergences for BADMM.\n \"\"\"\n self.set_output_text(self._hyperparams['experiment_name'])\n if isinstance(algorithm, AlgorithmMDGPS) or isinstance(algorithm, AlgorithmBADMM):\n condition_titles = '%3s | %8s %12s' % ('', '', '')\n itr_data_fields = '%3s | %8s %12s' % ('itr', 'avg_cost', 'avg_pol_cost')\n else:\n condition_titles = '%3s | %8s' % ('', '')\n itr_data_fields = '%3s | %8s' % ('itr', 'avg_cost')\n for m in range(algorithm.M):\n condition_titles += ' | %8s %9s %-7d' % ('', 'condition', m)\n itr_data_fields += ' | %8s %8s %8s' % (' cost ', ' step ', 'entropy ')\n if isinstance(algorithm, AlgorithmBADMM):\n condition_titles += ' %8s %8s %8s' % ('', '', '')\n itr_data_fields += ' %8s %8s %8s' % ('pol_cost', 'kl_div_i', 'kl_div_f')\n elif isinstance(algorithm, AlgorithmMDGPS):\n condition_titles += ' %8s' % ('')\n itr_data_fields += ' %8s' % ('pol_cost')\n self.append_output_text(condition_titles)\n self.append_output_text(itr_data_fields)\n\n def _update_iteration_data(self, itr, algorithm, costs, pol_sample_lists):\n \"\"\"\n Update iteration data information: iteration, average cost, and for\n each condition the mean cost over samples, step size, linear Guassian\n controller entropies, and initial/final KL divergences for BADMM.\n \"\"\"\n avg_cost = np.mean(costs)\n if pol_sample_lists is not None:\n test_idx = algorithm._hyperparams['test_conditions']\n # pol_sample_lists is a list of singletons\n samples = [sl[0] for sl in pol_sample_lists]\n pol_costs = [np.sum(algorithm.cost[idx].eval(s)[0])\n for s, idx in zip(samples, test_idx)]\n itr_data = '%3d | %8.2f %12.2f' % (itr, avg_cost, np.mean(pol_costs))\n else:\n itr_data = '%3d | %8.2f' % (itr, avg_cost)\n for m in range(algorithm.M):\n cost = costs[m]\n step = algorithm.prev[m].step_mult * algorithm.base_kl_step\n entropy = 2*np.sum(np.log(np.diagonal(algorithm.prev[m].traj_distr.chol_pol_covar,\n axis1=1, axis2=2)))\n itr_data += ' | %8.2f %8.2f %8.2f' % (cost, step, entropy)\n if isinstance(algorithm, AlgorithmBADMM):\n kl_div_i = algorithm.cur[m].pol_info.init_kl.mean()\n kl_div_f = algorithm.cur[m].pol_info.prev_kl.mean()\n itr_data += ' %8.2f %8.2f %8.2f' % (pol_costs[m], kl_div_i, kl_div_f)\n elif isinstance(algorithm, AlgorithmMDGPS):\n # TODO: Change for test/train better.\n if test_idx == algorithm._hyperparams['train_conditions']:\n itr_data += ' %8.2f' % (pol_costs[m])\n else:\n itr_data += ' %8s' % (\"N/A\")\n self.append_output_text(itr_data)\n\n def _update_trajectory_visualizations(self, algorithm, agent,\n traj_sample_lists, pol_sample_lists):\n \"\"\"\n Update 3D trajectory visualizations information: the trajectory samples,\n policy samples, and linear Gaussian controller means and covariances.\n \"\"\"\n xlim, ylim, zlim = self._calculate_3d_axis_limits(traj_sample_lists, pol_sample_lists)\n for m in range(algorithm.M):\n self._traj_visualizer.clear(m)\n 
self._traj_visualizer.set_lim(i=m, xlim=xlim, ylim=ylim, zlim=zlim)\n self._update_linear_gaussian_controller_plots(algorithm, agent, m)\n self._update_samples_plots(traj_sample_lists, m, 'green', 'Trajectory Samples')\n if pol_sample_lists:\n self._update_samples_plots(pol_sample_lists, m, 'blue', 'Policy Samples')\n self._traj_visualizer.draw() # this must be called explicitly\n\n def _calculate_3d_axis_limits(self, traj_sample_lists, pol_sample_lists):\n \"\"\"\n Calculate the 3D axis limits shared between trajectory plots,\n based on the minimum and maximum xyz values across all samples.\n \"\"\"\n all_eept = np.empty((0, 3))\n sample_lists = traj_sample_lists\n if pol_sample_lists:\n sample_lists += traj_sample_lists\n for sample_list in sample_lists:\n for sample in sample_list.get_samples():\n ee_pt = sample.get(END_EFFECTOR_POINTS)\n for i in range(ee_pt.shape[1]/3):\n ee_pt_i = ee_pt[:, 3*i+0:3*i+3]\n all_eept = np.r_[all_eept, ee_pt_i]\n min_xyz = np.amin(all_eept, axis=0)\n max_xyz = np.amax(all_eept, axis=0)\n xlim = buffered_axis_limits(min_xyz[0], max_xyz[0], buffer_factor=1.25)\n ylim = buffered_axis_limits(min_xyz[1], max_xyz[1], buffer_factor=1.25)\n zlim = buffered_axis_limits(min_xyz[2], max_xyz[2], buffer_factor=1.25)\n return xlim, ylim, zlim\n\n def _update_linear_gaussian_controller_plots(self, algorithm, agent, m):\n \"\"\"\n Update the linear Guassian controller plots with iteration data,\n for the mean and covariances of the end effector points.\n \"\"\"\n # Calculate mean and covariance for end effector points\n eept_idx = agent.get_idx_x(END_EFFECTOR_POINTS)\n start, end = eept_idx[0], eept_idx[-1]\n mu, sigma = algorithm.traj_opt.forward(algorithm.prev[m].traj_distr, algorithm.prev[m].traj_info)\n mu_eept, sigma_eept = mu[:, start:end+1], sigma[:, start:end+1, start:end+1]\n\n # Linear Gaussian Controller Distributions (Red)\n for i in range(mu_eept.shape[1]/3):\n mu, sigma = mu_eept[:, 3*i+0:3*i+3], sigma_eept[:, 3*i+0:3*i+3, 3*i+0:3*i+3]\n self._traj_visualizer.plot_3d_gaussian(i=m, mu=mu, sigma=sigma,\n edges=100, linestyle='-', linewidth=1.0, color='red',\n alpha=0.15, label='LG Controller Distributions')\n\n # Linear Gaussian Controller Means (Dark Red)\n for i in range(mu_eept.shape[1]/3):\n mu = mu_eept[:, 3*i+0:3*i+3]\n self._traj_visualizer.plot_3d_points(i=m, points=mu, linestyle='None',\n marker='x', markersize=5.0, markeredgewidth=1.0,\n color=(0.5, 0, 0), alpha=1.0, label='LG Controller Means')\n\n def _update_samples_plots(self, sample_lists, m, color, label):\n \"\"\"\n Update the samples plots with iteration data, for the trajectory samples\n and the policy samples.\n \"\"\"\n samples = sample_lists[m].get_samples()\n for sample in samples:\n ee_pt = sample.get(END_EFFECTOR_POINTS)\n for i in range(ee_pt.shape[1]/3):\n ee_pt_i = ee_pt[:, 3*i+0:3*i+3]\n self._traj_visualizer.plot_3d_points(m, ee_pt_i, color=color, label=label)\n\n def save_figure(self, filename):\n self._fig.savefig(filename)\n" ]
[ [ "numpy.sum", "numpy.empty", "matplotlib.pyplot.figure", "numpy.amin", "numpy.amax", "matplotlib.pyplot.ion", "numpy.diagonal", "matplotlib.gridspec.GridSpec", "numpy.mean" ] ]
milokhl/places-2017
[ "97d4500aacc35e8e55f31918c3fda081d6345c60" ]
[ "model/tensorflow/vgg_slim.py" ]
[ "# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Contains model definitions for versions of the Oxford VGG network.\nThese model definitions were introduced in the following technical report:\n Very Deep Convolutional Networks For Large-Scale Image Recognition\n Karen Simonyan and Andrew Zisserman\n arXiv technical report, 2015\n PDF: http://arxiv.org/pdf/1409.1556.pdf\n ILSVRC 2014 Slides: http://www.robots.ox.ac.uk/~karen/pdf/ILSVRC_2014.pdf\n CC-BY-4.0\nMore information can be obtained from the VGG website:\nwww.robots.ox.ac.uk/~vgg/research/very_deep/\nUsage:\n with slim.arg_scope(vgg.vgg_arg_scope()):\n outputs, end_points = vgg.vgg_a(inputs)\n with slim.arg_scope(vgg.vgg_arg_scope()):\n outputs, end_points = vgg.vgg_16(inputs)\n@@vgg_a\n@@vgg_16\n@@vgg_19\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom tensorflow.contrib import layers\nfrom tensorflow.contrib.framework.python.ops import arg_scope\nfrom tensorflow.contrib.layers.python.layers import layers as layers_lib\nfrom tensorflow.contrib.layers.python.layers import regularizers\nfrom tensorflow.contrib.layers.python.layers import utils\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import init_ops\nfrom tensorflow.python.ops import nn_ops\nfrom tensorflow.python.ops import variable_scope\n\n\ndef vgg_arg_scope(weight_decay=0.0005):\n \"\"\"Defines the VGG arg scope.\n Args:\n weight_decay: The l2 regularization coefficient.\n Returns:\n An arg_scope.\n \"\"\"\n with arg_scope(\n [layers.conv2d, layers_lib.fully_connected],\n activation_fn=nn_ops.relu,\n weights_regularizer=regularizers.l2_regularizer(weight_decay),\n biases_initializer=init_ops.zeros_initializer()):\n with arg_scope([layers.conv2d], padding='SAME') as arg_sc:\n return arg_sc\n\n\ndef vgg_a(inputs,\n num_classes=1000,\n is_training=True,\n dropout_keep_prob=0.5,\n spatial_squeeze=True,\n scope='vgg_a'):\n \"\"\"Oxford Net VGG 11-Layers version A Example.\n Note: All the fully_connected layers have been transformed to conv2d layers.\n To use in classification mode, resize input to 224x224.\n Args:\n inputs: a tensor of size [batch_size, height, width, channels].\n num_classes: number of predicted classes.\n is_training: whether or not the model is being trained.\n dropout_keep_prob: the probability that activations are kept in the dropout\n layers during training.\n spatial_squeeze: whether or not should squeeze the spatial dimensions of the\n outputs. 
Useful to remove unnecessary dimensions for classification.\n scope: Optional scope for the variables.\n Returns:\n the last op containing the log predictions and end_points dict.\n \"\"\"\n with variable_scope.variable_scope(scope, 'vgg_a', [inputs]) as sc:\n end_points_collection = sc.original_name_scope + '_end_points'\n # Collect outputs for conv2d, fully_connected and max_pool2d.\n with arg_scope(\n [layers.conv2d, layers_lib.max_pool2d],\n outputs_collections=end_points_collection):\n net = layers_lib.repeat(\n inputs, 1, layers.conv2d, 64, [3, 3], scope='conv1')\n net = layers_lib.max_pool2d(net, [2, 2], scope='pool1')\n net = layers_lib.repeat(net, 1, layers.conv2d, 128, [3, 3], scope='conv2')\n net = layers_lib.max_pool2d(net, [2, 2], scope='pool2')\n net = layers_lib.repeat(net, 2, layers.conv2d, 256, [3, 3], scope='conv3')\n net = layers_lib.max_pool2d(net, [2, 2], scope='pool3')\n net = layers_lib.repeat(net, 2, layers.conv2d, 512, [3, 3], scope='conv4')\n net = layers_lib.max_pool2d(net, [2, 2], scope='pool4')\n net = layers_lib.repeat(net, 2, layers.conv2d, 512, [3, 3], scope='conv5')\n net = layers_lib.max_pool2d(net, [2, 2], scope='pool5')\n # Use conv2d instead of fully_connected layers.\n net = layers.conv2d(net, 4096, [7, 7], padding='VALID', scope='fc6')\n net = layers_lib.dropout(\n net, dropout_keep_prob, is_training=is_training, scope='dropout6')\n net = layers.conv2d(net, 4096, [1, 1], scope='fc7')\n net = layers_lib.dropout(\n net, dropout_keep_prob, is_training=is_training, scope='dropout7')\n net = layers.conv2d(\n net,\n num_classes, [1, 1],\n activation_fn=None,\n normalizer_fn=None,\n scope='fc8')\n # Convert end_points_collection into a end_point dict.\n end_points = utils.convert_collection_to_dict(end_points_collection)\n if spatial_squeeze:\n net = array_ops.squeeze(net, [1, 2], name='fc8/squeezed')\n end_points[sc.name + '/fc8'] = net\n return net, end_points\n\n\nvgg_a.default_image_size = 224\n\n\ndef vgg_16(inputs,\n num_classes=1000,\n is_training=True,\n dropout_keep_prob=0.5,\n spatial_squeeze=True,\n scope='vgg_16'):\n \"\"\"Oxford Net VGG 16-Layers version D Example.\n Note: All the fully_connected layers have been transformed to conv2d layers.\n To use in classification mode, resize input to 224x224.\n Args:\n inputs: a tensor of size [batch_size, height, width, channels].\n num_classes: number of predicted classes.\n is_training: whether or not the model is being trained.\n dropout_keep_prob: the probability that activations are kept in the dropout\n layers during training.\n spatial_squeeze: whether or not should squeeze the spatial dimensions of the\n outputs. 
Useful to remove unnecessary dimensions for classification.\n scope: Optional scope for the variables.\n Returns:\n the last op containing the log predictions and end_points dict.\n \"\"\"\n with variable_scope.variable_scope(scope, 'vgg_16', [inputs]) as sc:\n end_points_collection = sc.original_name_scope + '_end_points'\n # Collect outputs for conv2d, fully_connected and max_pool2d.\n with arg_scope(\n [layers.conv2d, layers_lib.fully_connected, layers_lib.max_pool2d],\n outputs_collections=end_points_collection):\n net = layers_lib.repeat(inputs, 2, layers.conv2d, 64, (3, 3), scope='conv1')\n net = layers_lib.max_pool2d(net, [2, 2], scope='pool1')\n net = layers_lib.repeat(net, 2, layers.conv2d, 128, [3, 3], scope='conv2')\n net = layers_lib.max_pool2d(net, [2, 2], scope='pool2')\n net = layers_lib.repeat(net, 3, layers.conv2d, 256, [3, 3], scope='conv3')\n net = layers_lib.max_pool2d(net, [2, 2], scope='pool3')\n net = layers_lib.repeat(net, 3, layers.conv2d, 512, [3, 3], scope='conv4')\n net = layers_lib.max_pool2d(net, [2, 2], scope='pool4')\n net = layers_lib.repeat(net, 3, layers.conv2d, 512, [3, 3], scope='conv5')\n net = layers_lib.max_pool2d(net, [2, 2], scope='pool5')\n # Use conv2d instead of fully_connected layers.\n net = layers.conv2d(net, 4096, [7, 7], padding='VALID', scope='fc6')\n net = layers_lib.dropout(\n net, dropout_keep_prob, is_training=is_training, scope='dropout6')\n net = layers.conv2d(net, 4096, [1, 1], scope='fc7')\n net = layers_lib.dropout(\n net, dropout_keep_prob, is_training=is_training, scope='dropout7')\n net = layers.conv2d(\n net,\n num_classes, [1, 1],\n activation_fn=None,\n normalizer_fn=None,\n scope='fc8')\n # Convert end_points_collection into a end_point dict.\n end_points = utils.convert_collection_to_dict(end_points_collection)\n if spatial_squeeze:\n net = array_ops.squeeze(net, [1, 2], name='fc8/squeezed')\n end_points[sc.name + '/fc8'] = net\n return net, end_points\n\n\nvgg_16.default_image_size = 224\n\n\ndef vgg_19(inputs,\n num_classes=1000,\n is_training=True,\n dropout_keep_prob=0.5,\n spatial_squeeze=True,\n scope='vgg_19'):\n \"\"\"Oxford Net VGG 19-Layers version E Example.\n Note: All the fully_connected layers have been transformed to conv2d layers.\n To use in classification mode, resize input to 224x224.\n Args:\n inputs: a tensor of size [batch_size, height, width, channels].\n num_classes: number of predicted classes.\n is_training: whether or not the model is being trained.\n dropout_keep_prob: the probability that activations are kept in the dropout\n layers during training.\n spatial_squeeze: whether or not should squeeze the spatial dimensions of the\n outputs. 
Useful to remove unnecessary dimensions for classification.\n scope: Optional scope for the variables.\n Returns:\n the last op containing the log predictions and end_points dict.\n \"\"\"\n with variable_scope.variable_scope(scope, 'vgg_19', [inputs]) as sc:\n end_points_collection = sc.name + '_end_points'\n # Collect outputs for conv2d, fully_connected and max_pool2d.\n with arg_scope(\n [layers.conv2d, layers_lib.fully_connected, layers_lib.max_pool2d],\n outputs_collections=end_points_collection):\n net = layers_lib.repeat(inputs, 2, layers.conv2d, 64, [3, 3], scope='conv1')\n net = layers_lib.max_pool2d(net, [2, 2], scope='pool1')\n net = layers_lib.repeat(net, 2, layers.conv2d, 128, [3, 3], scope='conv2')\n net = layers_lib.max_pool2d(net, [2, 2], scope='pool2')\n net = layers_lib.repeat(net, 4, layers.conv2d, 256, [3, 3], scope='conv3')\n net = layers_lib.max_pool2d(net, [2, 2], scope='pool3')\n net = layers_lib.repeat(net, 4, layers.conv2d, 512, [3, 3], scope='conv4')\n net = layers_lib.max_pool2d(net, [2, 2], scope='pool4')\n net = layers_lib.repeat(net, 4, layers.conv2d, 512, [3, 3], scope='conv5')\n net = layers_lib.max_pool2d(net, [2, 2], scope='pool5')\n # Use conv2d instead of fully_connected layers.\n net = layers.conv2d(net, 4096, [7, 7], padding='VALID', scope='fc6')\n net = layers_lib.dropout(\n net, dropout_keep_prob, is_training=is_training, scope='dropout6')\n net = layers.conv2d(net, 4096, [1, 1], scope='fc7')\n net = layers_lib.dropout(\n net, dropout_keep_prob, is_training=is_training, scope='dropout7')\n net = layers.conv2d(\n net,\n num_classes, [1, 1],\n activation_fn=None,\n normalizer_fn=None,\n scope='fc8')\n # Convert end_points_collection into a end_point dict.\n end_points = utils.convert_collection_to_dict(end_points_collection)\n if spatial_squeeze:\n net = array_ops.squeeze(net, [1, 2], name='fc8/squeezed')\n end_points[sc.name + '/fc8'] = net\n return net, end_points\n\n\nvgg_19.default_image_size = 224\n\n# Alias\nvgg_d = vgg_16\nvgg_e = vgg_19" ]
[ [ "tensorflow.contrib.layers.python.layers.layers.dropout", "tensorflow.contrib.layers.python.layers.layers.repeat", "tensorflow.contrib.layers.python.layers.layers.max_pool2d", "tensorflow.contrib.layers.conv2d", "tensorflow.python.ops.variable_scope.variable_scope", "tensorflow.python.ops.array_ops.squeeze", "tensorflow.contrib.framework.python.ops.arg_scope", "tensorflow.contrib.layers.python.layers.regularizers.l2_regularizer", "tensorflow.python.ops.init_ops.zeros_initializer", "tensorflow.contrib.layers.python.layers.utils.convert_collection_to_dict" ] ]
ayueaa/Some-Spiders
[ "4cf085e55eab822c08d06b62099d1c235d1840ae" ]
[ "lianjia_chengjiao(可做模板)/pandans合并数据库表重写入.py" ]
[ "import pymongo\r\nimport pandas as pd\r\n\r\n#连接到数据库\r\n#连接到数据库\r\nclient = pymongo.MongoClient(\"localhost\",27017)\r\nlianjia = client[\"ershoufang\"]\r\ninfo = lianjia[\"lianjia_solded\"]\r\nlocation = lianjia['locations']\r\nnew_info = lianjia['cd_lianjia_solded_total_2']\r\n\r\n#将数据表1(包含原始10w+房源信息)转化为DataFrame\r\ndata1 = pd.DataFrame(list(info.find()))\r\nprint(data1.head())\r\n#将数据表2(包含7k+小区经纬度信息)转化为DataFrame\r\ndata2 = pd.DataFrame(list(location.find()))\r\nprint(data2.head())\r\n#多表查询,以house_name为共同键,向表一合并,与mysql的查询功能类似,得到合并后的DataFrame\r\nresult =pd.merge(data1,data2,left_on=\"village_name\", right_on='house_name', how=\"left\").drop(['_id_x','_id_y'],axis=\"columns\")\r\n#衔接上面代码,用于插入数据库,遍历插入的,不知道有没有简单的办法啊~\r\nfor i in range(len(result)):\r\n s = result.loc[i]\r\n#这里加了str()函数是无奈之举,DataFrame中的专有float64等数字格式使MongoDB无法识别,写入会报错,暂时先全部转换为字符串格式写入吧\r\n dic = {index:str(s[index]) for index in s.index}\r\n new_info.insert_one(dic)\r\n print(dic)" ]
[ [ "pandas.merge" ] ]
barentsen/photutils
[ "57cbe18c8c1b8b08c93daa3d5c8dd74c10c3daae" ]
[ "photutils/utils/tests/test_cutouts.py" ]
[ "# Licensed under a 3-clause BSD style license - see LICENSE.rst\nfrom __future__ import (absolute_import, division, print_function,\n unicode_literals)\n\nimport numpy as np\nfrom numpy.testing import assert_allclose\nfrom astropy.tests.helper import pytest\n\nfrom ..cutouts import cutout_footprint\n\n\nXCS = [25.7]\nYCS = [26.2]\nXSTDDEVS = [3.2, 4.0]\nYSTDDEVS = [5.7, 4.1]\nTHETAS = np.array([30., 45.]) * np.pi / 180.\nDATA = np.zeros((3, 3))\nDATA[0:2, 1] = 1.\nDATA[1, 0:2] = 1.\nDATA[1, 1] = 2.\n\n\nclass TestCutoutFootprint(object):\n def test_dataonly(self):\n data = np.ones((5, 5))\n position = (2, 2)\n result1 = cutout_footprint(data, position, 3)\n result2 = cutout_footprint(data, position, footprint=np.ones((3, 3)))\n assert_allclose(result1[:-2], result2[:-2])\n assert result1[-2] is None\n assert result2[-2] is None\n assert result1[-1] == result2[-1]\n\n def test_mask_error(self):\n data = error = np.ones((5, 5))\n mask = np.zeros_like(data, dtype=bool)\n position = (2, 2)\n box_size1 = 3\n box_size2 = (3, 3)\n footprint = np.ones((3, 3))\n result1 = cutout_footprint(data, position, box_size1, mask=mask,\n error=error)\n result2 = cutout_footprint(data, position, box_size2, mask=mask,\n error=error)\n result3 = cutout_footprint(data, position, box_size1,\n footprint=footprint, mask=mask,\n error=error)\n assert_allclose(result1[:-1], result2[:-1])\n assert_allclose(result1[:-1], result3[:-1])\n assert result1[-1] == result2[-1]\n\n def test_position_len(self):\n with pytest.raises(ValueError):\n cutout_footprint(np.ones((3, 3)), [1])\n\n def test_nofootprint(self):\n with pytest.raises(ValueError):\n cutout_footprint(np.ones((3, 3)), (1, 1), box_size=None,\n footprint=None)\n\n def test_wrongboxsize(self):\n with pytest.raises(ValueError):\n cutout_footprint(np.ones((3, 3)), (1, 1), box_size=(1, 2, 3))\n" ]
[ [ "numpy.ones", "numpy.zeros_like", "numpy.zeros", "numpy.testing.assert_allclose", "numpy.array" ] ]
jenniferxsj/CS5001_Project
[ "64e1ecec65a431e66aa83751453bba71fcc33b7c" ]
[ "Monitor_temp_hum.py" ]
[ "#Final Project for CS 5001\n#Brian Meyer\n#Shujun Xiao\n#Xiaoliang Xu\n\nimport Adafruit_DHT as DHT\nimport json\nimport time\nimport psutil\nimport twilio\nfrom twilio.rest import Client\nimport matplotlib.pyplot as plt\nimport csv\nfrom matplotlib import rcParams\nimport http.client\nimport urllib\n\n# Turn on the interactive mode\nplt.ion()\n# Creact 3 lists to hold all the inputted data.\nx = []\ny_tem = []\ny_hum = []\n\n# API Thingspeak - Brian\nkey = '66EU45C8K4SJUNCH'\nchannelID = '1353959'\n\n\n# Define sensor type and pin number. - Shujun\nsensor = DHT.DHT22\npin = 27\n\n# Writing the data to the csv file. - Shujun\ndef write_temp(temperature, humidity):\n with open(\"temp_humidity.csv\", \"a\") as log:\n log.write(\"{0},{1},{2}\\n\".format(time.strftime(\"%H:%M:%S\"),str(temperature),str(humidity)))\n\n# Read the csv file and draw a graph using matplotlib. - Shujun\ndef graph():\n with open(\"temp_humidity.csv\",\"r\") as csvfile:\n plots = csv.reader(csvfile, delimiter=\",\")\n for row in plots:\n if row[0] not in x:\n x.append(row[0])\n y_tem.append(int(float(row[1])))\n y_hum.append(int(float(row[2])))\n plt.clf() # wipe out the graph \n rcParams['figure.figsize'] = 20,6 # set the size of the canvas \n plt.plot(x, y_tem, label = \"Temperature\")\n plt.plot(x, y_hum, label = \"Humidity\")\n plt.xlabel(\"Time\")\n plt.ylabel(\"Reading\")\n plt.title(\"Temperature and Humidity Readings\")\n plt.legend(loc=1) # put the legends on the upper right of the graph\n plt.grid(True,linestyle=\":\") # Adding grid to the graph\n plt.draw() # draw out the graph\n\n#conditionals sending variables to API statements - Xiaolang\ndef checkAvgTempForAcSwitch(tempValues, threshold):\n '''\n checkAvgTempForAC takes a list temp values, compute the average temperature, \n compare it with the threshold. 
\n params:\n tempValues: a list of temp values\n threshold: the threshold of the average temperature \n return:\n a tuple of (average temperature, statement), where the statement is a string.\n if the average temperature > threshold, statement = \"Switching on AC\";\n otherwise \"Switching off AC\"\n '''\n avg = sum(tempValues) / len(tempValues)\n if avg > threshold:\n text=\"Switching on AC\"\n sendtoSMS(text) \n\n# Connect with twilio and sending out messages - Brian\ndef sendtoSMS(statement):\n account_sid = 'AC96c973f5b3e4b88eca097ef809acc0f6'\n auth_token = 'af6e9952608904435b84c4707d086efd'\n client = Client(account_sid, auth_token)\n\n message = client.messages.create(body= statement, from_='+18507714790', to='+15857332025')\n\n print(message.sid)\n\n# Connect with Thinkspeak, print out the readings and connection status.- Brian\ndef thingspeak(temperature, humidity):\n while True:\n params = urllib.parse.urlencode({'field1': temperature, 'field2': humidity, 'key':key }) \n headers = {\"Content-typZZe\": \"application/x-www-form-urlencoded\",\"Accept\": \"text/plain\"}\n conn = http.client.HTTPConnection(\"api.thingspeak.com:80\")\n try:\n conn.request(\"POST\", \"/update\", params, headers)\n response = conn.getresponse()\n print(response.status, response.reason)\n data = response.read()\n conn.close()\n except:\n print(\"connection failed\")\n break\n\nsendtoSMS(\"The program is starting to run!\")\nwhile True: \n temperature, humidity = DHT.read_retry(sensor, pin) # get readings from sensor\n print(\"Temperature is:\",temperature, \"\\nHumidity is:\",humidity)\n write_temp(temperature, humidity)\n graph()\n thingspeak(temperature, humidity)\n tempValues = y_tem\n threshold=32\n checkAvgTempForAcSwitch(tempValues, threshold)\n plt.pause(5) \nsendtoSMS(\"The program is stopped!\")" ]
[ [ "matplotlib.pyplot.plot", "matplotlib.pyplot.pause", "matplotlib.pyplot.legend", "matplotlib.pyplot.draw", "matplotlib.pyplot.grid", "matplotlib.pyplot.clf", "matplotlib.pyplot.title", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.ion", "matplotlib.pyplot.xlabel" ] ]
kimandsharp/bmb510
[ "5446cd168709dd7f5d6cee66f596e57d3632af3d" ]
[ "SciInf_utilities.py" ]
[ "\"\"\"\nsome useful defs for bayes programs\n\"\"\"\nimport numpy as np\n#-------\n# globals\nCREDIBLE_MIN = 2.5 # lower percentile for credible interval\nCREDIBLE_MAX = 97.5 # upper percentile for credible interval # covers 95%\n#CREDIBLE_MIN = 5. # lower percentile for credible interval\n#CREDIBLE_MAX = 95. # upper percentile for credible interval # covers 90%\nNPOINT = 2501\nprint('number of integration points: ',NPOINT)\nMAKEPLOT = True\n#-------\ndef read_n(n,filename):\n # read a list of integers from a file\n data_file = open(filename,\"r\")\n contents = data_file.readlines()\n for line in contents:\n if(line[0] == '#'):\n print('%s' % line[:-1])\n continue\n if(len(line) <= 1):\n continue\n field = line.split()\n n.append(int(field[0]))\n data_file.close()\n ndata = len(n)\n print ('# data points %d ' % (ndata))\n return ndata\n\ndef read_x(x,filename):\n # read a list of reals (floats) from a file\n data_file = open(filename,\"r\")\n contents = data_file.readlines()\n for line in contents:\n if(line[0] == '#'):\n print('%s' % line[:-1])\n continue\n if(len(line) <= 1):\n continue\n field = line.split()\n #print(field)\n x.append(float(field[0]))\n data_file.close()\n ndata = len(x)\n print ('# data points %d ' % (ndata))\n return ndata\n #print(x)\n\ndef read_xy(x,y,filename):\n # read pairs of reals (floats), one pair per line separated by whitespace \n data_file = open(filename,\"r\")\n contents = data_file.readlines()\n for line in contents:\n if(line[0] == '#'):\n print('%s' % line[:-1])\n continue\n if(len(line) <= 1):\n continue\n field = line.split()\n #vprint(field)\n x.append(float(field[0]))\n y.append(float(field[1]))\n data_file.close()\n ndata = len(x)\n print ('# data points %d ' % (ndata))\n return ndata\n #print(x)\n #print(y)\n\ndef average_x(x):\n # return average of list of floats\n avx = 0.\n for i in range(len(x)):\n avx += x[i]\n if(len(x)>0): avx = avx/len(x)\n return avx\n\ndef average_xy(x,y):\n # return average of product of two lists of floats\n avx = 0.\n length = min(len(x),len(y))\n if(len(x)!=len(y)):\n print ('warning different length lists- downsizing') \n for i in range(length):\n avx += x[i]*y[i]\n if(length>0): avx = avx/length\n return avx\n\n\ndef pdf_to_cdf(x_axis,pdf,norm=True,discrete=False):\n \"\"\"\n integrate probability distribution function to get cumulative distribution function\n using trapezoidal rule\n \"\"\"\n n = len(pdf)\n cdf = np.zeros(n)\n if(discrete):\n cdf[0] = pdf[0]\n for i in range(1,n):\n cdf[i] = cdf[i-1] + pdf[i]\n else:\n for i in range(1,n):\n cdf[i] = cdf[i-1] + 0.5*(pdf[i]+pdf[i-1])*(x_axis[i] - x_axis[i-1])\n if(norm):\n cmax = cdf[n-1]\n cdf = cdf/cmax\n return cdf\n\ndef quantile(x_axis,cdf,percent,reverse=False):\n \"\"\"\n get quantile by scanning thru cdf\n \"\"\"\n n = len(cdf)\n if(not reverse):\n cut = percent/100.\n else:\n cut = 1. 
- percent/100.\n i = 0\n while((cdf[i]<=cut)and(i<n)):\n i += 1\n if(i>0):\n return x_axis[i-1]\n else:\n return x_axis[i]\n\ndef pdf_to_mean(x_axis,pdf,discrete=False):\n \"\"\"\n return mean as <x> = int(x.p(x)) using trapezoidal rule\n do not assume that pdf is normalized\n \"\"\"\n n = len(pdf)\n x_mean = 0.\n pdf_sum = 0.\n if(discrete):\n pdf_max = -1.e6\n for i in range(n):\n pdf_sum += pdf[i]\n x_mean += x_axis[i]*pdf[i]\n if(pdf[i] > pdf_max):\n pdf_max = pdf[i]\n x_mode = x_axis[i]\n x_mean /= pdf_sum\n else:\n pdf_max = pdf[0]\n x_mode = x_axis[0]\n for i in range(1,n):\n pdf_sum += 0.5*(pdf[i]+pdf[i-1])*(x_axis[i] - x_axis[i-1])\n x_mean += 0.5*(pdf[i]+pdf[i-1])*(x_axis[i] - x_axis[i-1])*0.5*(x_axis[i] + x_axis[i-1])\n if(pdf[i] > pdf_max):\n pdf_max = pdf[i]\n x_mode = x_axis[i]\n x_mean /= pdf_sum\n # print(\" mean: {:12.5f} \".format(x_mean))\n # print(\" mode: \",x_mode)\n return x_mean,x_mode\n\ndef sort_1_by_2(x,y,rev=False):\n \"\"\"\n sort one list by elements in another list\n \"\"\"\n #print('reverse',rev)\n if(len(x) == len(y)):\n y_x = zip(y,x)\n y_x_sorted = sorted(y_x,reverse=rev)\n y = [z[0] for z in y_x_sorted]\n x = [z[1] for z in y_x_sorted]\n return x,y\n else:\n print('lists of different length- not sorting')\n# for i in range(len(x)):\n# print(x[i],y[i])\n#\ndef summarize(x_axis,pdf,cdf,discrete=False,title='parameter'):\n median = quantile(x_axis,cdf,50.)\n limit_min = quantile(x_axis,cdf,CREDIBLE_MIN)\n limit_max = quantile(x_axis,cdf,CREDIBLE_MAX)\n mean,mode = pdf_to_mean(x_axis,pdf,discrete)\n print('\\n===========================================================')\n print('SUMMARY of posterior distribution for {:s} '.format(title))\n print('===========================================================')\n print('mean: {: 12.5f} mode: {:12.5f} '.format(mean, mode))\n print('median: {:12.5f}'.format(median))\n print('{:6.1f}% to {:6.1f}% limits: ({:12.5f} to {:12.5f})'.format(CREDIBLE_MIN,CREDIBLE_MAX,limit_min,limit_max))\n print('===========================================================\\n')\n return limit_min,limit_max\n\ndef write_pdf_cdf(x_axis,pdf,cdf,title='x pdf cdf',filename='pdf_cdf.dat'):\n head1 = '#' + title + '\\n'\n head2 = '# x p(x) cdf(x) ' + '\\n'\n fileout = open(filename,'w')\n fileout.write(head1)\n fileout.write(head2)\n for i in range(len(x_axis)):\n strbuf = '{:15.5g} {:15.5g} {:15.5g} \\n'.format(x_axis[i],pdf[i],cdf[i])\n fileout.write(strbuf)\n fileout.close()\n" ]
[ [ "numpy.zeros" ] ]
pavlin-policar/graphml-tutorials
[ "72fb9244e8d392b0222b3cfc94b26eb8463ead75" ]
[ "03-graph-classification/preprocess.py" ]
[ "from rdkit import Chem\nimport numpy as np\nfrom pysmiles import read_smiles\nimport networkx as nx\nfrom molecule import Molecule\nimport pickle\nimport pandas as pd\n\n\n\nclass RegressionData():\n \"\"\"\n \t:param mols: list of nx.Graph molecules describing respective SMILES string\n :param labels: list of labels where each label is a list of three topological indices\n [wiener_idx, hyper_wiener_idx, zagreb_idx]\n \"\"\"\n def __init__(self, mols, labels):\n self.mols = mols\n self.labels = labels\n \n self.periodic_table = Chem.GetPeriodicTable()\n self.ams = [nx.to_numpy_matrix(mol, weight='order') for mol in self.mols]\n self.graphs = [nx.from_numpy_matrix(am) for am in self.ams]\n self.element_lists = [mol.nodes(data = 'element') for mol in self.mols]\n \n def create_molecule(self, element_list, label, am):\n \"\"\"\n :param element_list: list of integers of atomic number of the molecule \n :param label: list of three topological indices [wiener_idx, hyper_wiener_idx, zagreb_idx]\n :param am: adjacency matrix of the molecule \n :return: Molecule object with its attributes specified by above parameters\n \"\"\"\n nodes = np.array([Chem.rdchem.PeriodicTable.GetAtomicNumber(self.periodic_table, atom[1]) for atom in element_list])\n return Molecule(nodes, label, am)\n\nclass ClassificationData():\n \"\"\"\n \t:param file_name: string of file name to be used as property prediction task data\n \"\"\"\n def __init__(self, file_name):\n self.data = pd.read_csv(file_name)\n \n self.smiles = self.data['smiles']\n self.labels = self.data['activity']\n self.mols = [read_smiles(smile) for smile in self.smiles]\n \n self.periodic_table = Chem.GetPeriodicTable()\n self.ams = [nx.to_numpy_matrix(mol, weight='order') for mol in self.mols]\n self.graphs = [nx.from_numpy_matrix(am) for am in self.ams]\n self.element_lists = [mol.nodes(data = 'element') for mol in self.mols]\n \n \n def create_molecule(self, element_list, label, am):\n \"\"\"\n :param element_list: list of integers of atomic number of the molecule \n :param label: if active 1, else 0\n :return: Molecule object with its attributes specified by above parameters\n \"\"\"\n nodes = np.array([Chem.rdchem.PeriodicTable.GetAtomicNumber(self.periodic_table, atom[1]) for atom in element_list])\n return Molecule(nodes, label, am)\n \n def get_labels(self):\n \"\"\"\n :return: list of labels of {0,1}\n \"\"\"\n return self.labels\n\n \ndef get_smiles(file_name):\n file = open(file_name, 'r')\n smiles = []\n for i in range(5000):\n line = next(file).strip()\n _,_,smile = line.partition('\\t')\n smiles.append(smile)\n return smiles\n\ndef save_mols(file_name):\n smiles = get_smiles(file_name)\n mols = [read_smiles(smile) for smile in smiles]\n pickle_out = open(\"5000_mols.pickle\", \"wb\")\n pickle.dump(mols, pickle_out)\n pickle_out.close()\n \ndef get_data(data):\n molecules = []\n for i in range (len(data.element_lists)):\n e = data.element_lists[i]\n label = data.labels[i]\n am = data.ams[i]\n\n mol = data.create_molecule(e, label, am)\n molecules.append(mol)\n \n return molecules\n\ndef get_labels(wiener_idx, hyper_wiener_idx, zagreb_idx):\n \"\"\"\n :param wiener_idx: np.array of shape [-1, 1] containing wiener index of each molecule \n :param hyper_wiener_idx: np.array of shape [-1, 1] containing hyper wiener index of each molecule \n :param zagreb_idx: np.array of shape [-1, 1] containing hyper zagreb index of each molecule \n :return: np.array of shape [-1, 3] where [wiener_idx, hyper_wiener_idx, zagreb_idx] of each \n molecule is 
concatenated\n \"\"\"\n wiener_idx = np.reshape(wiener_idx, (len(wiener_idx), 1))\n hyper_wiener_idx = np.reshape(hyper_wiener_idx, (len(hyper_wiener_idx), 1))\n zagreb_idx = np.reshape(zagreb_idx, (len(zagreb_idx), 1))\n labels = np.hstack((wiener_idx, hyper_wiener_idx, zagreb_idx))\n labels = np.log10(labels)\n return labels\n\n" ]
[ [ "pandas.read_csv", "numpy.hstack", "numpy.log10" ] ]
mohdsherif/mne-python
[ "affc6854168e32e73a075a0104e7af8bddd7eefe" ]
[ "mne/forward/forward.py" ]
[ "# Authors: Alexandre Gramfort <[email protected]>\n# Matti Hamalainen <[email protected]>\n# Martin Luessi <[email protected]>\n#\n# License: BSD (3-clause)\n\nfrom time import time\nfrom copy import deepcopy\nimport re\n\nimport numpy as np\nfrom scipy import linalg, sparse\n\nimport shutil\nimport os\nfrom os import path as op\nimport tempfile\n\nfrom ..io import RawArray, Info\nfrom ..io.constants import FIFF\nfrom ..io.open import fiff_open\nfrom ..io.tree import dir_tree_find\nfrom ..io.tag import find_tag, read_tag\nfrom ..io.matrix import (_read_named_matrix, _transpose_named_matrix,\n write_named_matrix)\nfrom ..io.meas_info import read_bad_channels, write_info\nfrom ..io.pick import (pick_channels_forward, pick_info, pick_channels,\n pick_types)\nfrom ..io.write import (write_int, start_block, end_block,\n write_coord_trans, write_ch_info, write_name_list,\n write_string, start_file, end_file, write_id)\nfrom ..io.base import BaseRaw\nfrom ..evoked import Evoked, EvokedArray\nfrom ..epochs import BaseEpochs\nfrom ..source_space import (_read_source_spaces_from_tree,\n find_source_space_hemi, _set_source_space_vertices,\n _write_source_spaces_to_fid)\nfrom ..source_estimate import _BaseSourceEstimate\nfrom ..transforms import (transform_surface_to, invert_transform,\n write_trans)\nfrom ..utils import (_check_fname, get_subjects_dir, has_mne_c, warn,\n run_subprocess, check_fname, logger, verbose, fill_doc,\n _validate_type, _check_compensation_grade, _check_option)\nfrom ..label import Label\nfrom ..fixes import einsum\n\n\nclass Forward(dict):\n \"\"\"Forward class to represent info from forward solution.\"\"\"\n\n def copy(self):\n \"\"\"Copy the Forward instance.\"\"\"\n return Forward(deepcopy(self))\n\n def __repr__(self):\n \"\"\"Summarize forward info instead of printing all.\"\"\"\n entr = '<Forward'\n\n nchan = len(pick_types(self['info'], meg=True, eeg=False, exclude=[]))\n entr += ' | ' + 'MEG channels: %d' % nchan\n nchan = len(pick_types(self['info'], meg=False, eeg=True, exclude=[]))\n entr += ' | ' + 'EEG channels: %d' % nchan\n\n src_types = np.array([src['type'] for src in self['src']])\n if (src_types == 'surf').all():\n entr += (' | Source space: Surface with %d vertices'\n % self['nsource'])\n elif (src_types == 'vol').all():\n entr += (' | Source space: Volume with %d grid points'\n % self['nsource'])\n elif (src_types == 'discrete').all():\n entr += (' | Source space: Discrete with %d dipoles'\n % self['nsource'])\n else:\n count_string = ''\n if (src_types == 'surf').any():\n count_string += '%d surface, ' % (src_types == 'surf').sum()\n if (src_types == 'vol').any():\n count_string += '%d volume, ' % (src_types == 'vol').sum()\n if (src_types == 'discrete').any():\n count_string += '%d discrete, ' \\\n % (src_types == 'discrete').sum()\n count_string = count_string.rstrip(', ')\n entr += (' | Source space: Mixed (%s) with %d vertices'\n % (count_string, self['nsource']))\n\n if self['source_ori'] == FIFF.FIFFV_MNE_UNKNOWN_ORI:\n entr += (' | Source orientation: Unknown')\n elif self['source_ori'] == FIFF.FIFFV_MNE_FIXED_ORI:\n entr += (' | Source orientation: Fixed')\n elif self['source_ori'] == FIFF.FIFFV_MNE_FREE_ORI:\n entr += (' | Source orientation: Free')\n\n entr += '>'\n\n return entr\n\n\ndef _block_diag(A, n):\n \"\"\"Construct a block diagonal from a packed structure.\n\n You have to try it on a matrix to see what it's doing.\n\n If A is not sparse, then returns a sparse block diagonal \"bd\",\n diagonalized from the\n elements in \"A\".\n 
\"A\" is ma x na, comprising bdn=(na/\"n\") blocks of submatrices.\n Each submatrix is ma x \"n\", and these submatrices are\n placed down the diagonal of the matrix.\n\n If A is already sparse, then the operation is reversed, yielding\n a block\n row matrix, where each set of n columns corresponds to a block element\n from the block diagonal.\n\n Parameters\n ----------\n A : array\n The matrix\n n : int\n The block size\n Returns\n -------\n bd : sparse matrix\n The block diagonal matrix\n \"\"\"\n if sparse.issparse(A): # then make block sparse\n raise NotImplementedError('sparse reversal not implemented yet')\n ma, na = A.shape\n bdn = na // int(n) # number of submatrices\n\n if na % n > 0:\n raise ValueError('Width of matrix must be a multiple of n')\n\n tmp = np.arange(ma * bdn, dtype=np.int).reshape(bdn, ma)\n tmp = np.tile(tmp, (1, n))\n ii = tmp.ravel()\n\n jj = np.arange(na, dtype=np.int)[None, :]\n jj = jj * np.ones(ma, dtype=np.int)[:, None]\n jj = jj.T.ravel() # column indices foreach sparse bd\n\n bd = sparse.coo_matrix((A.T.ravel(), np.c_[ii, jj].T)).tocsc()\n\n return bd\n\n\ndef _inv_block_diag(A, n):\n \"\"\"Construct an inverse block diagonal from a packed structure.\n\n You have to try it on a matrix to see what it's doing.\n\n \"A\" is ma x na, comprising bdn=(na/\"n\") blocks of submatrices.\n Each submatrix is ma x \"n\", and the inverses of these submatrices\n are placed down the diagonal of the matrix.\n\n Parameters\n ----------\n A : array\n The matrix.\n n : int\n The block size.\n\n Returns\n -------\n bd : sparse matrix\n The block diagonal matrix.\n \"\"\"\n ma, na = A.shape\n bdn = na // int(n) # number of submatrices\n\n if na % n > 0:\n raise ValueError('Width of matrix must be a multiple of n')\n\n # modify A in-place to invert each sub-block\n A = A.copy()\n for start in range(0, na, 3):\n # this is a view\n A[:, start:start + 3] = linalg.inv(A[:, start:start + 3])\n\n tmp = np.arange(ma * bdn, dtype=np.int).reshape(bdn, ma)\n tmp = np.tile(tmp, (1, n))\n ii = tmp.ravel()\n\n jj = np.arange(na, dtype=np.int)[None, :]\n jj = jj * np.ones(ma, dtype=np.int)[:, None]\n jj = jj.T.ravel() # column indices foreach sparse bd\n\n bd = sparse.coo_matrix((A.T.ravel(), np.c_[ii, jj].T)).tocsc()\n\n return bd\n\n\ndef _get_tag_int(fid, node, name, id_):\n \"\"\"Check we have an appropriate tag.\"\"\"\n tag = find_tag(fid, node, id_)\n if tag is None:\n fid.close()\n raise ValueError(name + ' tag not found')\n return int(tag.data)\n\n\ndef _read_one(fid, node):\n \"\"\"Read all interesting stuff for one forward solution.\"\"\"\n # This function assumes the fid is open as a context manager\n if node is None:\n return None\n\n one = Forward()\n one['source_ori'] = _get_tag_int(fid, node, 'Source orientation',\n FIFF.FIFF_MNE_SOURCE_ORIENTATION)\n one['coord_frame'] = _get_tag_int(fid, node, 'Coordinate frame',\n FIFF.FIFF_MNE_COORD_FRAME)\n one['nsource'] = _get_tag_int(fid, node, 'Number of sources',\n FIFF.FIFF_MNE_SOURCE_SPACE_NPOINTS)\n one['nchan'] = _get_tag_int(fid, node, 'Number of channels',\n FIFF.FIFF_NCHAN)\n try:\n one['sol'] = _read_named_matrix(fid, node,\n FIFF.FIFF_MNE_FORWARD_SOLUTION,\n transpose=True)\n one['_orig_sol'] = one['sol']['data'].copy()\n except Exception:\n logger.error('Forward solution data not found')\n raise\n\n try:\n fwd_type = FIFF.FIFF_MNE_FORWARD_SOLUTION_GRAD\n one['sol_grad'] = _read_named_matrix(fid, node, fwd_type,\n transpose=True)\n one['_orig_sol_grad'] = one['sol_grad']['data'].copy()\n except Exception:\n one['sol_grad'] 
= None\n\n if one['sol']['data'].shape[0] != one['nchan'] or \\\n (one['sol']['data'].shape[1] != one['nsource'] and\n one['sol']['data'].shape[1] != 3 * one['nsource']):\n raise ValueError('Forward solution matrix has wrong dimensions')\n\n if one['sol_grad'] is not None:\n if one['sol_grad']['data'].shape[0] != one['nchan'] or \\\n (one['sol_grad']['data'].shape[1] != 3 * one['nsource'] and\n one['sol_grad']['data'].shape[1] != 3 * 3 * one['nsource']):\n raise ValueError('Forward solution gradient matrix has '\n 'wrong dimensions')\n\n return one\n\n\ndef _read_forward_meas_info(tree, fid):\n \"\"\"Read light measurement info from forward operator.\n\n Parameters\n ----------\n tree : tree\n FIF tree structure.\n fid : file id\n The file id.\n\n Returns\n -------\n info : instance of Info\n The measurement info.\n \"\"\"\n # This function assumes fid is being used as a context manager\n info = Info()\n\n # Information from the MRI file\n parent_mri = dir_tree_find(tree, FIFF.FIFFB_MNE_PARENT_MRI_FILE)\n if len(parent_mri) == 0:\n raise ValueError('No parent MEG information found in operator')\n parent_mri = parent_mri[0]\n\n tag = find_tag(fid, parent_mri, FIFF.FIFF_MNE_FILE_NAME)\n info['mri_file'] = tag.data if tag is not None else None\n tag = find_tag(fid, parent_mri, FIFF.FIFF_PARENT_FILE_ID)\n info['mri_id'] = tag.data if tag is not None else None\n\n # Information from the MEG file\n parent_meg = dir_tree_find(tree, FIFF.FIFFB_MNE_PARENT_MEAS_FILE)\n if len(parent_meg) == 0:\n raise ValueError('No parent MEG information found in operator')\n parent_meg = parent_meg[0]\n\n tag = find_tag(fid, parent_meg, FIFF.FIFF_MNE_FILE_NAME)\n info['meas_file'] = tag.data if tag is not None else None\n tag = find_tag(fid, parent_meg, FIFF.FIFF_PARENT_FILE_ID)\n info['meas_id'] = tag.data if tag is not None else None\n\n # Add channel information\n chs = list()\n for k in range(parent_meg['nent']):\n kind = parent_meg['directory'][k].kind\n pos = parent_meg['directory'][k].pos\n if kind == FIFF.FIFF_CH_INFO:\n tag = read_tag(fid, pos)\n chs.append(tag.data)\n info['chs'] = chs\n info._update_redundant()\n\n # Get the MRI <-> head coordinate transformation\n tag = find_tag(fid, parent_mri, FIFF.FIFF_COORD_TRANS)\n coord_head = FIFF.FIFFV_COORD_HEAD\n coord_mri = FIFF.FIFFV_COORD_MRI\n coord_device = FIFF.FIFFV_COORD_DEVICE\n coord_ctf_head = FIFF.FIFFV_MNE_COORD_CTF_HEAD\n if tag is None:\n raise ValueError('MRI/head coordinate transformation not found')\n cand = tag.data\n if cand['from'] == coord_mri and cand['to'] == coord_head:\n info['mri_head_t'] = cand\n else:\n raise ValueError('MRI/head coordinate transformation not found')\n\n # Get the MEG device <-> head coordinate transformation\n tag = find_tag(fid, parent_meg, FIFF.FIFF_COORD_TRANS)\n if tag is None:\n raise ValueError('MEG/head coordinate transformation not found')\n cand = tag.data\n if cand['from'] == coord_device and cand['to'] == coord_head:\n info['dev_head_t'] = cand\n elif cand['from'] == coord_ctf_head and cand['to'] == coord_head:\n info['ctf_head_t'] = cand\n else:\n raise ValueError('MEG/head coordinate transformation not found')\n\n info['bads'] = read_bad_channels(fid, parent_meg)\n # clean up our bad list, old versions could have non-existent bads\n info['bads'] = [bad for bad in info['bads'] if bad in info['ch_names']]\n\n # Check if a custom reference has been applied\n tag = find_tag(fid, parent_mri, FIFF.FIFF_MNE_CUSTOM_REF)\n if tag is None:\n tag = find_tag(fid, parent_mri, 236) # Constant 236 used before 
v0.11\n\n info['custom_ref_applied'] = bool(tag.data) if tag is not None else False\n info._check_consistency()\n return info\n\n\ndef _subject_from_forward(forward):\n \"\"\"Get subject id from inverse operator.\"\"\"\n return forward['src']._subject\n\n\n@verbose\ndef _merge_meg_eeg_fwds(megfwd, eegfwd, verbose=None):\n \"\"\"Merge loaded MEG and EEG forward dicts into one dict.\"\"\"\n if megfwd is not None and eegfwd is not None:\n if (megfwd['sol']['data'].shape[1] != eegfwd['sol']['data'].shape[1] or\n megfwd['source_ori'] != eegfwd['source_ori'] or\n megfwd['nsource'] != eegfwd['nsource'] or\n megfwd['coord_frame'] != eegfwd['coord_frame']):\n raise ValueError('The MEG and EEG forward solutions do not match')\n\n fwd = megfwd\n fwd['sol']['data'] = np.r_[fwd['sol']['data'], eegfwd['sol']['data']]\n fwd['_orig_sol'] = np.r_[fwd['_orig_sol'], eegfwd['_orig_sol']]\n fwd['sol']['nrow'] = fwd['sol']['nrow'] + eegfwd['sol']['nrow']\n\n fwd['sol']['row_names'] = (fwd['sol']['row_names'] +\n eegfwd['sol']['row_names'])\n if fwd['sol_grad'] is not None:\n fwd['sol_grad']['data'] = np.r_[fwd['sol_grad']['data'],\n eegfwd['sol_grad']['data']]\n fwd['_orig_sol_grad'] = np.r_[fwd['_orig_sol_grad'],\n eegfwd['_orig_sol_grad']]\n fwd['sol_grad']['nrow'] = (fwd['sol_grad']['nrow'] +\n eegfwd['sol_grad']['nrow'])\n fwd['sol_grad']['row_names'] = (fwd['sol_grad']['row_names'] +\n eegfwd['sol_grad']['row_names'])\n\n fwd['nchan'] = fwd['nchan'] + eegfwd['nchan']\n logger.info(' MEG and EEG forward solutions combined')\n elif megfwd is not None:\n fwd = megfwd\n else:\n fwd = eegfwd\n return fwd\n\n\n@verbose\ndef read_forward_solution(fname, include=(), exclude=(), verbose=None):\n \"\"\"Read a forward solution a.k.a. lead field.\n\n Parameters\n ----------\n fname : string\n The file name, which should end with -fwd.fif or -fwd.fif.gz.\n include : list, optional\n List of names of channels to include. If empty all channels\n are included.\n exclude : list, optional\n List of names of channels to exclude. If empty include all\n channels.\n %(verbose)s\n\n Returns\n -------\n fwd : instance of Forward\n The forward solution.\n\n See Also\n --------\n write_forward_solution, make_forward_solution\n\n Notes\n -----\n Forward solutions, which are derived from an original forward solution with\n free orientation, are always stored on disk as forward solution with free\n orientation in X/Y/Z RAS coordinates. To apply any transformation to the\n forward operator (surface orientation, fixed orientation) please apply\n :func:`convert_forward_solution` after reading the forward solution with\n :func:`read_forward_solution`.\n\n Forward solutions, which are derived from an original forward solution with\n fixed orientation, are stored on disk as forward solution with fixed\n surface-based orientations. Please note that the transformation to\n surface-based, fixed orientation cannot be reverted after loading the\n forward solution with :func:`read_forward_solution`.\n \"\"\"\n check_fname(fname, 'forward', ('-fwd.fif', '-fwd.fif.gz',\n '_fwd.fif', '_fwd.fif.gz'))\n\n # Open the file, create directory\n logger.info('Reading forward solution from %s...' 
% fname)\n f, tree, _ = fiff_open(fname)\n with f as fid:\n # Find all forward solutions\n fwds = dir_tree_find(tree, FIFF.FIFFB_MNE_FORWARD_SOLUTION)\n if len(fwds) == 0:\n raise ValueError('No forward solutions in %s' % fname)\n\n # Parent MRI data\n parent_mri = dir_tree_find(tree, FIFF.FIFFB_MNE_PARENT_MRI_FILE)\n if len(parent_mri) == 0:\n raise ValueError('No parent MRI information in %s' % fname)\n parent_mri = parent_mri[0]\n\n src = _read_source_spaces_from_tree(fid, tree, patch_stats=False)\n for s in src:\n s['id'] = find_source_space_hemi(s)\n\n fwd = None\n\n # Locate and read the forward solutions\n megnode = None\n eegnode = None\n for k in range(len(fwds)):\n tag = find_tag(fid, fwds[k], FIFF.FIFF_MNE_INCLUDED_METHODS)\n if tag is None:\n raise ValueError('Methods not listed for one of the forward '\n 'solutions')\n\n if tag.data == FIFF.FIFFV_MNE_MEG:\n megnode = fwds[k]\n elif tag.data == FIFF.FIFFV_MNE_EEG:\n eegnode = fwds[k]\n\n megfwd = _read_one(fid, megnode)\n if megfwd is not None:\n if is_fixed_orient(megfwd):\n ori = 'fixed'\n else:\n ori = 'free'\n logger.info(' Read MEG forward solution (%d sources, '\n '%d channels, %s orientations)'\n % (megfwd['nsource'], megfwd['nchan'], ori))\n\n eegfwd = _read_one(fid, eegnode)\n if eegfwd is not None:\n if is_fixed_orient(eegfwd):\n ori = 'fixed'\n else:\n ori = 'free'\n logger.info(' Read EEG forward solution (%d sources, '\n '%d channels, %s orientations)'\n % (eegfwd['nsource'], eegfwd['nchan'], ori))\n\n fwd = _merge_meg_eeg_fwds(megfwd, eegfwd)\n\n # Get the MRI <-> head coordinate transformation\n tag = find_tag(fid, parent_mri, FIFF.FIFF_COORD_TRANS)\n if tag is None:\n raise ValueError('MRI/head coordinate transformation not found')\n mri_head_t = tag.data\n if (mri_head_t['from'] != FIFF.FIFFV_COORD_MRI or\n mri_head_t['to'] != FIFF.FIFFV_COORD_HEAD):\n mri_head_t = invert_transform(mri_head_t)\n if (mri_head_t['from'] != FIFF.FIFFV_COORD_MRI or\n mri_head_t['to'] != FIFF.FIFFV_COORD_HEAD):\n fid.close()\n raise ValueError('MRI/head coordinate transformation not '\n 'found')\n fwd['mri_head_t'] = mri_head_t\n\n #\n # get parent MEG info\n #\n fwd['info'] = _read_forward_meas_info(tree, fid)\n\n # MNE environment\n parent_env = dir_tree_find(tree, FIFF.FIFFB_MNE_ENV)\n if len(parent_env) > 0:\n parent_env = parent_env[0]\n tag = find_tag(fid, parent_env, FIFF.FIFF_MNE_ENV_WORKING_DIR)\n if tag is not None:\n fwd['info']['working_dir'] = tag.data\n tag = find_tag(fid, parent_env, FIFF.FIFF_MNE_ENV_COMMAND_LINE)\n if tag is not None:\n fwd['info']['command_line'] = tag.data\n\n # Transform the source spaces to the correct coordinate frame\n # if necessary\n\n # Make sure forward solution is in either the MRI or HEAD coordinate frame\n if fwd['coord_frame'] not in (FIFF.FIFFV_COORD_MRI, FIFF.FIFFV_COORD_HEAD):\n raise ValueError('Only forward solutions computed in MRI or head '\n 'coordinates are acceptable')\n\n # Transform each source space to the HEAD or MRI coordinate frame,\n # depending on the coordinate frame of the forward solution\n # NOTE: the function transform_surface_to will also work on discrete and\n # volume sources\n nuse = 0\n for s in src:\n try:\n s = transform_surface_to(s, fwd['coord_frame'], mri_head_t)\n except Exception as inst:\n raise ValueError('Could not transform source space (%s)' % inst)\n\n nuse += s['nuse']\n\n # Make sure the number of sources match after transformation\n if nuse != fwd['nsource']:\n raise ValueError('Source spaces do not match the forward solution.')\n\n 
logger.info(' Source spaces transformed to the forward solution '\n 'coordinate frame')\n fwd['src'] = src\n\n # Handle the source locations and orientations\n fwd['source_rr'] = np.concatenate([ss['rr'][ss['vertno'], :]\n for ss in src], axis=0)\n\n # Store original source orientations\n fwd['_orig_source_ori'] = fwd['source_ori']\n\n # Deal with include and exclude\n pick_channels_forward(fwd, include=include, exclude=exclude, copy=False)\n\n if is_fixed_orient(fwd, orig=True):\n fwd['source_nn'] = np.concatenate([_src['nn'][_src['vertno'], :]\n for _src in fwd['src']], axis=0)\n fwd['source_ori'] = FIFF.FIFFV_MNE_FIXED_ORI\n fwd['surf_ori'] = True\n else:\n fwd['source_nn'] = np.kron(np.ones((fwd['nsource'], 1)), np.eye(3))\n fwd['source_ori'] = FIFF.FIFFV_MNE_FREE_ORI\n fwd['surf_ori'] = False\n return Forward(fwd)\n\n\n@verbose\ndef convert_forward_solution(fwd, surf_ori=False, force_fixed=False,\n copy=True, use_cps=True, verbose=None):\n \"\"\"Convert forward solution between different source orientations.\n\n Parameters\n ----------\n fwd : Forward\n The forward solution to modify.\n surf_ori : bool, optional (default False)\n Use surface-based source coordinate system? Note that force_fixed=True\n implies surf_ori=True.\n force_fixed : bool, optional (default False)\n Force fixed source orientation mode?\n copy : bool\n Whether to return a new instance or modify in place.\n use_cps : bool (default True)\n Whether to use cortical patch statistics to define normal\n orientations. Only used when surf_ori and/or force_fixed are True.\n %(verbose)s\n\n Returns\n -------\n fwd : Forward\n The modified forward solution.\n \"\"\"\n fwd = fwd.copy() if copy else fwd\n\n if force_fixed is True:\n surf_ori = True\n\n if any([src['type'] == 'vol' for src in fwd['src']]) and force_fixed:\n raise ValueError(\n 'Forward operator was generated with sources from a '\n 'volume source space. Conversion to fixed orientation is not '\n 'possible. Consider using a discrete source space if you have '\n 'meaningful normal orientations.')\n\n if surf_ori:\n if use_cps:\n if any(s.get('patch_inds') is not None for s in fwd['src']):\n use_ave_nn = True\n logger.info(' Average patch normals will be employed in '\n 'the rotation to the local surface coordinates..'\n '..')\n else:\n use_ave_nn = False\n logger.info(' No patch info available. The standard source '\n 'space normals will be employed in the rotation '\n 'to the local surface coordinates....')\n else:\n use_ave_nn = False\n\n # We need to change these entries (only):\n # 1. source_nn\n # 2. sol['data']\n # 3. sol['ncol']\n # 4. sol_grad['data']\n # 5. sol_grad['ncol']\n # 6. 
source_ori\n\n if is_fixed_orient(fwd, orig=True) or (force_fixed and not use_ave_nn):\n # Fixed\n fwd['source_nn'] = np.concatenate([s['nn'][s['vertno'], :]\n for s in fwd['src']], axis=0)\n if not is_fixed_orient(fwd, orig=True):\n logger.info(' Changing to fixed-orientation forward '\n 'solution with surface-based source orientations...')\n fix_rot = _block_diag(fwd['source_nn'].T, 1)\n # newer versions of numpy require explicit casting here, so *= no\n # longer works\n fwd['sol']['data'] = (fwd['_orig_sol'] *\n fix_rot).astype('float32')\n fwd['sol']['ncol'] = fwd['nsource']\n if fwd['sol_grad'] is not None:\n x = sparse.block_diag([fix_rot] * 3)\n fwd['sol_grad']['data'] = fwd['_orig_sol_grad'] * x # dot prod\n fwd['sol_grad']['ncol'] = 3 * fwd['nsource']\n fwd['source_ori'] = FIFF.FIFFV_MNE_FIXED_ORI\n fwd['surf_ori'] = True\n\n elif surf_ori: # Free, surf-oriented\n # Rotate the local source coordinate systems\n fwd['source_nn'] = np.kron(np.ones((fwd['nsource'], 1)), np.eye(3))\n logger.info(' Converting to surface-based source orientations...')\n # Actually determine the source orientations\n pp = 0\n for s in fwd['src']:\n if s['type'] in ['surf', 'discrete']:\n for p in range(s['nuse']):\n # Project out the surface normal and compute SVD\n if use_ave_nn and s.get('patch_inds') is not None:\n nn = s['nn'][s['pinfo'][s['patch_inds'][p]], :]\n nn = np.sum(nn, axis=0)[:, np.newaxis]\n nn /= linalg.norm(nn)\n else:\n nn = s['nn'][s['vertno'][p], :][:, np.newaxis]\n U, S, _ = linalg.svd(np.eye(3, 3) - nn * nn.T)\n # Make sure that ez is in the direction of nn\n if np.sum(nn.ravel() * U[:, 2].ravel()) < 0:\n U *= -1.0\n fwd['source_nn'][pp:pp + 3, :] = U.T\n pp += 3\n else:\n pp += 3 * s['nuse']\n\n # Rotate the solution components as well\n if force_fixed:\n fwd['source_nn'] = fwd['source_nn'][2::3, :]\n fix_rot = _block_diag(fwd['source_nn'].T, 1)\n # newer versions of numpy require explicit casting here, so *= no\n # longer works\n fwd['sol']['data'] = (fwd['_orig_sol'] *\n fix_rot).astype('float32')\n fwd['sol']['ncol'] = fwd['nsource']\n if fwd['sol_grad'] is not None:\n x = sparse.block_diag([fix_rot] * 3)\n fwd['sol_grad']['data'] = fwd['_orig_sol_grad'] * x # dot prod\n fwd['sol_grad']['ncol'] = 3 * fwd['nsource']\n fwd['source_ori'] = FIFF.FIFFV_MNE_FIXED_ORI\n fwd['surf_ori'] = True\n else:\n surf_rot = _block_diag(fwd['source_nn'].T, 3)\n fwd['sol']['data'] = fwd['_orig_sol'] * surf_rot\n fwd['sol']['ncol'] = 3 * fwd['nsource']\n if fwd['sol_grad'] is not None:\n x = sparse.block_diag([surf_rot] * 3)\n fwd['sol_grad']['data'] = fwd['_orig_sol_grad'] * x # dot prod\n fwd['sol_grad']['ncol'] = 9 * fwd['nsource']\n fwd['source_ori'] = FIFF.FIFFV_MNE_FREE_ORI\n fwd['surf_ori'] = True\n\n else: # Free, cartesian\n logger.info(' Cartesian source orientations...')\n fwd['source_nn'] = np.kron(np.ones((fwd['nsource'], 1)), np.eye(3))\n fwd['sol']['data'] = fwd['_orig_sol'].copy()\n fwd['sol']['ncol'] = 3 * fwd['nsource']\n if fwd['sol_grad'] is not None:\n fwd['sol_grad']['data'] = fwd['_orig_sol_grad'].copy()\n fwd['sol_grad']['ncol'] = 9 * fwd['nsource']\n fwd['source_ori'] = FIFF.FIFFV_MNE_FREE_ORI\n fwd['surf_ori'] = False\n\n logger.info(' [done]')\n\n return fwd\n\n\n@verbose\ndef write_forward_solution(fname, fwd, overwrite=False, verbose=None):\n \"\"\"Write forward solution to a file.\n\n Parameters\n ----------\n fname : str\n File name to save the forward solution to. 
It should end with -fwd.fif\n or -fwd.fif.gz.\n fwd : Forward\n Forward solution.\n overwrite : bool\n If True, overwrite destination file (if it exists).\n %(verbose)s\n\n See Also\n --------\n read_forward_solution\n\n Notes\n -----\n Forward solutions, which are derived from an original forward solution with\n free orientation, are always stored on disk as forward solution with free\n orientation in X/Y/Z RAS coordinates. Transformations (surface orientation,\n fixed orientation) will be reverted. To reapply any transformation to the\n forward operator please apply :func:`convert_forward_solution` after\n reading the forward solution with :func:`read_forward_solution`.\n\n Forward solutions, which are derived from an original forward solution with\n fixed orientation, are stored on disk as forward solution with fixed\n surface-based orientations. Please note that the transformation to\n surface-based, fixed orientation cannot be reverted after loading the\n forward solution with :func:`read_forward_solution`.\n \"\"\"\n check_fname(fname, 'forward', ('-fwd.fif', '-fwd.fif.gz',\n '_fwd.fif', '_fwd.fif.gz'))\n\n # check for file existence\n _check_fname(fname, overwrite)\n fid = start_file(fname)\n start_block(fid, FIFF.FIFFB_MNE)\n\n #\n # MNE env\n #\n start_block(fid, FIFF.FIFFB_MNE_ENV)\n write_id(fid, FIFF.FIFF_BLOCK_ID)\n data = fwd['info'].get('working_dir', None)\n if data is not None:\n write_string(fid, FIFF.FIFF_MNE_ENV_WORKING_DIR, data)\n data = fwd['info'].get('command_line', None)\n if data is not None:\n write_string(fid, FIFF.FIFF_MNE_ENV_COMMAND_LINE, data)\n end_block(fid, FIFF.FIFFB_MNE_ENV)\n\n #\n # Information from the MRI file\n #\n start_block(fid, FIFF.FIFFB_MNE_PARENT_MRI_FILE)\n write_string(fid, FIFF.FIFF_MNE_FILE_NAME, fwd['info']['mri_file'])\n if fwd['info']['mri_id'] is not None:\n write_id(fid, FIFF.FIFF_PARENT_FILE_ID, fwd['info']['mri_id'])\n # store the MRI to HEAD transform in MRI file\n write_coord_trans(fid, fwd['info']['mri_head_t'])\n end_block(fid, FIFF.FIFFB_MNE_PARENT_MRI_FILE)\n\n # write measurement info\n write_forward_meas_info(fid, fwd['info'])\n\n # invert our original source space transform\n src = list()\n for s in fwd['src']:\n s = deepcopy(s)\n try:\n # returns source space to original coordinate frame\n # usually MRI\n s = transform_surface_to(s, fwd['mri_head_t']['from'],\n fwd['mri_head_t'])\n except Exception as inst:\n raise ValueError('Could not transform source space (%s)' % inst)\n src.append(s)\n\n #\n # Write the source spaces (again)\n #\n _write_source_spaces_to_fid(fid, src)\n n_vert = sum([ss['nuse'] for ss in src])\n if fwd['_orig_source_ori'] == FIFF.FIFFV_MNE_FIXED_ORI:\n n_col = n_vert\n else:\n n_col = 3 * n_vert\n\n # Undo transformations\n sol = fwd['_orig_sol'].copy()\n if fwd['sol_grad'] is not None:\n sol_grad = fwd['_orig_sol_grad'].copy()\n else:\n sol_grad = None\n\n if fwd['surf_ori'] is True:\n if fwd['_orig_source_ori'] == FIFF.FIFFV_MNE_FIXED_ORI:\n warn('The forward solution, which is stored on disk now, is based '\n 'on a forward solution with fixed orientation. Please note '\n 'that the transformation to surface-based, fixed orientation '\n 'cannot be reverted after loading the forward solution with '\n 'read_forward_solution.', RuntimeWarning)\n else:\n warn('This forward solution is based on a forward solution with '\n 'free orientation. The original forward solution is stored '\n 'on disk in X/Y/Z RAS coordinates. 
Any transformation '\n '(surface orientation or fixed orientation) will be '\n 'reverted. To reapply any transformation to the forward '\n 'operator please apply convert_forward_solution after '\n 'reading the forward solution with read_forward_solution.',\n RuntimeWarning)\n\n #\n # MEG forward solution\n #\n picks_meg = pick_types(fwd['info'], meg=True, eeg=False, ref_meg=False,\n exclude=[])\n picks_eeg = pick_types(fwd['info'], meg=False, eeg=True, ref_meg=False,\n exclude=[])\n n_meg = len(picks_meg)\n n_eeg = len(picks_eeg)\n row_names_meg = [fwd['sol']['row_names'][p] for p in picks_meg]\n row_names_eeg = [fwd['sol']['row_names'][p] for p in picks_eeg]\n\n if n_meg > 0:\n meg_solution = dict(data=sol[picks_meg], nrow=n_meg, ncol=n_col,\n row_names=row_names_meg, col_names=[])\n _transpose_named_matrix(meg_solution)\n start_block(fid, FIFF.FIFFB_MNE_FORWARD_SOLUTION)\n write_int(fid, FIFF.FIFF_MNE_INCLUDED_METHODS, FIFF.FIFFV_MNE_MEG)\n write_int(fid, FIFF.FIFF_MNE_COORD_FRAME, fwd['coord_frame'])\n write_int(fid, FIFF.FIFF_MNE_SOURCE_ORIENTATION,\n fwd['_orig_source_ori'])\n write_int(fid, FIFF.FIFF_MNE_SOURCE_SPACE_NPOINTS, n_vert)\n write_int(fid, FIFF.FIFF_NCHAN, n_meg)\n write_named_matrix(fid, FIFF.FIFF_MNE_FORWARD_SOLUTION, meg_solution)\n if sol_grad is not None:\n meg_solution_grad = dict(data=sol_grad[picks_meg],\n nrow=n_meg, ncol=n_col * 3,\n row_names=row_names_meg, col_names=[])\n _transpose_named_matrix(meg_solution_grad)\n write_named_matrix(fid, FIFF.FIFF_MNE_FORWARD_SOLUTION_GRAD,\n meg_solution_grad)\n end_block(fid, FIFF.FIFFB_MNE_FORWARD_SOLUTION)\n\n #\n # EEG forward solution\n #\n if n_eeg > 0:\n eeg_solution = dict(data=sol[picks_eeg], nrow=n_eeg, ncol=n_col,\n row_names=row_names_eeg, col_names=[])\n _transpose_named_matrix(eeg_solution)\n start_block(fid, FIFF.FIFFB_MNE_FORWARD_SOLUTION)\n write_int(fid, FIFF.FIFF_MNE_INCLUDED_METHODS, FIFF.FIFFV_MNE_EEG)\n write_int(fid, FIFF.FIFF_MNE_COORD_FRAME, fwd['coord_frame'])\n write_int(fid, FIFF.FIFF_MNE_SOURCE_ORIENTATION,\n fwd['_orig_source_ori'])\n write_int(fid, FIFF.FIFF_NCHAN, n_eeg)\n write_int(fid, FIFF.FIFF_MNE_SOURCE_SPACE_NPOINTS, n_vert)\n write_named_matrix(fid, FIFF.FIFF_MNE_FORWARD_SOLUTION, eeg_solution)\n if sol_grad is not None:\n eeg_solution_grad = dict(data=sol_grad[picks_eeg],\n nrow=n_eeg, ncol=n_col * 3,\n row_names=row_names_eeg, col_names=[])\n _transpose_named_matrix(eeg_solution_grad)\n write_named_matrix(fid, FIFF.FIFF_MNE_FORWARD_SOLUTION_GRAD,\n eeg_solution_grad)\n end_block(fid, FIFF.FIFFB_MNE_FORWARD_SOLUTION)\n\n end_block(fid, FIFF.FIFFB_MNE)\n end_file(fid)\n\n\ndef is_fixed_orient(forward, orig=False):\n \"\"\"Check if the forward operator is fixed orientation.\n\n Parameters\n ----------\n forward : instance of Forward\n The forward.\n orig : bool\n If True, consider the original source orientation.\n If False (default), consider the current source orientation.\n\n Returns\n -------\n fixed_ori : bool\n Whether or not it is fixed orientation.\n \"\"\"\n if orig: # if we want to know about the original version\n fixed_ori = (forward['_orig_source_ori'] == FIFF.FIFFV_MNE_FIXED_ORI)\n else: # most of the time we want to know about the current version\n fixed_ori = (forward['source_ori'] == FIFF.FIFFV_MNE_FIXED_ORI)\n return fixed_ori\n\n\ndef write_forward_meas_info(fid, info):\n \"\"\"Write measurement info stored in forward solution.\n\n Parameters\n ----------\n fid : file id\n The file id\n info : instance of Info\n The measurement info.\n \"\"\"\n 
info._check_consistency()\n #\n # Information from the MEG file\n #\n start_block(fid, FIFF.FIFFB_MNE_PARENT_MEAS_FILE)\n write_string(fid, FIFF.FIFF_MNE_FILE_NAME, info['meas_file'])\n if info['meas_id'] is not None:\n write_id(fid, FIFF.FIFF_PARENT_BLOCK_ID, info['meas_id'])\n # get transformation from CTF and DEVICE to HEAD coordinate frame\n meg_head_t = info.get('dev_head_t', info.get('ctf_head_t'))\n if meg_head_t is None:\n fid.close()\n raise ValueError('Head<-->sensor transform not found')\n write_coord_trans(fid, meg_head_t)\n\n if 'chs' in info:\n # Channel information\n write_int(fid, FIFF.FIFF_NCHAN, len(info['chs']))\n for k, c in enumerate(info['chs']):\n # Scan numbers may have been messed up\n c = deepcopy(c)\n c['scanno'] = k + 1\n write_ch_info(fid, c)\n if 'bads' in info and len(info['bads']) > 0:\n # Bad channels\n start_block(fid, FIFF.FIFFB_MNE_BAD_CHANNELS)\n write_name_list(fid, FIFF.FIFF_MNE_CH_NAME_LIST, info['bads'])\n end_block(fid, FIFF.FIFFB_MNE_BAD_CHANNELS)\n\n end_block(fid, FIFF.FIFFB_MNE_PARENT_MEAS_FILE)\n\n\ndef _select_orient_forward(forward, info, noise_cov=None, copy=True):\n \"\"\"Prepare forward solution for inverse solvers.\"\"\"\n # fwd['sol']['row_names'] may be different order from fwd['info']['chs']\n fwd_sol_ch_names = forward['sol']['row_names']\n all_ch_names = set(fwd_sol_ch_names)\n all_bads = set(info['bads'])\n if noise_cov is not None:\n all_ch_names &= set(noise_cov['names'])\n all_bads |= set(noise_cov['bads'])\n else:\n noise_cov = dict(bads=info['bads'])\n ch_names = [c['ch_name'] for c in info['chs']\n if c['ch_name'] not in all_bads and\n c['ch_name'] in all_ch_names]\n\n if not len(info['bads']) == len(noise_cov['bads']) or \\\n not all(b in noise_cov['bads'] for b in info['bads']):\n logger.info('info[\"bads\"] and noise_cov[\"bads\"] do not match, '\n 'excluding bad channels from both')\n\n # check the compensation grade\n _check_compensation_grade(forward['info'], info, 'forward')\n\n n_chan = len(ch_names)\n logger.info(\"Computing inverse operator with %d channels.\" % n_chan)\n forward = pick_channels_forward(forward, ch_names, ordered=True,\n copy=copy)\n info_idx = [info['ch_names'].index(name) for name in ch_names]\n info_picked = pick_info(info, info_idx)\n forward['info']._check_consistency()\n info_picked._check_consistency()\n return forward, info_picked\n\n\n@verbose\ndef compute_orient_prior(forward, loose=0.2, verbose=None):\n \"\"\"Compute orientation prior.\n\n Parameters\n ----------\n forward : instance of Forward\n Forward operator.\n loose : float\n The loose orientation parameter (between 0 and 1).\n %(verbose)s\n\n Returns\n -------\n orient_prior : ndarray, shape (n_vertices,)\n Orientation priors.\n\n See Also\n --------\n compute_depth_prior\n \"\"\"\n is_fixed_ori = is_fixed_orient(forward)\n n_sources = forward['sol']['data'].shape[1]\n loose = float(loose)\n if not (0 <= loose <= 1):\n raise ValueError('loose value should be between 0 and 1, '\n 'got %s.' % (loose,))\n orient_prior = np.ones(n_sources, dtype=np.float)\n if loose > 0.:\n if is_fixed_ori:\n raise ValueError('loose must be 0. with forward operator '\n 'with fixed orientation, got %s' % (loose,))\n if loose < 1:\n if not forward['surf_ori']:\n raise ValueError('Forward operator is not oriented in surface '\n 'coordinates. loose parameter should be 1 '\n 'not %s.' % (loose,))\n logger.info('Applying loose dipole orientations. Loose value '\n 'of %s.' 
% loose)\n orient_prior[0::3] *= loose\n orient_prior[1::3] *= loose\n\n return orient_prior\n\n\ndef _restrict_gain_matrix(G, info):\n \"\"\"Restrict gain matrix entries for optimal depth weighting.\"\"\"\n # Figure out which ones have been used\n if len(info['chs']) != G.shape[0]:\n raise ValueError('G.shape[0] (%d) and length of info[\"chs\"] (%d) '\n 'do not match' % (G.shape[0], len(info['chs'])))\n for meg, eeg, kind in (\n ('grad', False, 'planar'),\n ('mag', False, 'magnetometer or axial gradiometer'),\n (False, True, 'EEG')):\n sel = pick_types(info, meg=meg, eeg=eeg, ref_meg=False, exclude=[])\n if len(sel) > 0:\n logger.info(' %d %s channels' % (len(sel), kind))\n break\n else:\n warn('Could not find MEG or EEG channels to limit depth channels')\n sel = slice(None)\n return G[sel]\n\n\n@verbose\ndef compute_depth_prior(forward, info, exp=0.8, limit=10.0,\n limit_depth_chs=False, combine_xyz='spectral',\n noise_cov=None, rank=None, verbose=None):\n \"\"\"Compute depth prior for depth weighting.\n\n Parameters\n ----------\n forward : instance of Forward\n The forward solution.\n info : instance of Info\n The measurement info.\n exp : float\n Exponent for the depth weighting, must be between 0 and 1.\n limit : float | None\n The upper bound on depth weighting.\n Can be None to be bounded by the largest finite prior.\n limit_depth_chs : bool | 'whiten'\n How to deal with multiple channel types in depth weighting.\n The default is True, which whitens based on the source sensitivity\n of the highest-SNR channel type. See Notes for details.\n\n .. versionchanged:: 0.18\n Added the \"whiten\" option.\n combine_xyz : 'spectral' | 'fro'\n When a loose (or free) orientation is used, how the depth weighting\n for each triplet should be calculated.\n If 'spectral', use the squared spectral norm of Gk.\n If 'fro', use the squared Frobenius norm of Gk.\n\n .. versionadded:: 0.18\n noise_cov : instance of Covariance | None\n The noise covariance to use to whiten the gain matrix when\n ``limit_depth_chs='whiten'``.\n\n .. versionadded:: 0.18\n %(rank_None)s\n\n .. versionadded:: 0.18\n %(verbose)s\n\n Returns\n -------\n depth_prior : ndarray, shape (n_vertices,)\n The depth prior.\n\n See Also\n --------\n compute_orient_prior\n\n Notes\n -----\n The defaults used by the minimum norm code and sparse solvers differ.\n In particular, the values for MNE are::\n\n compute_depth_prior(..., limit=10., limit_depth_chs=True,\n combine_xyz='spectral')\n\n In sparse solvers and LCMV, the values are::\n\n compute_depth_prior(..., limit=None, limit_depth_chs='whiten',\n combine_xyz='fro')\n\n The ``limit_depth_chs`` argument can take the following values:\n\n * :data:`python:True` (default)\n Use only grad channels in depth weighting (equivalent to MNE C\n minimum-norm code). If grad channels aren't present, only mag\n channels will be used (if no mag, then eeg). This makes the depth\n prior dependent only on the sensor geometry (and relationship\n to the sources).\n * ``'whiten'``\n Compute a whitener and apply it to the gain matirx before computing\n the depth prior. In this case ``noise_cov`` must not be None.\n Whitening the gain matrix makes the depth prior\n depend on both sensor geometry and the data of interest captured\n by the noise covariance (e.g., projections, SNR).\n\n .. versionadded:: 0.18\n * :data:`python:False`\n Use all channels. 
Not recommended since the depth weighting will be\n biased toward whichever channel type has the largest values in\n SI units (such as EEG being orders of magnitude larger than MEG).\n\n \"\"\"\n from ..cov import Covariance, compute_whitener\n _validate_type(forward, Forward, 'forward')\n patch_areas = forward.get('patch_areas', None)\n is_fixed_ori = is_fixed_orient(forward)\n G = forward['sol']['data']\n logger.info('Creating the depth weighting matrix...')\n _validate_type(noise_cov, (Covariance, None), 'noise_cov',\n 'Covariance or None')\n _validate_type(limit_depth_chs, (str, bool), 'limit_depth_chs')\n if isinstance(limit_depth_chs, str):\n if limit_depth_chs != 'whiten':\n raise ValueError('limit_depth_chs, if str, must be \"whiten\", got '\n '%s' % (limit_depth_chs,))\n if not isinstance(noise_cov, Covariance):\n raise ValueError('With limit_depth_chs=\"whiten\", noise_cov must be'\n ' a Covariance, got %s' % (type(noise_cov),))\n if combine_xyz is not False: # private / expert option\n _check_option('combine_xyz', combine_xyz, ('fro', 'spectral'))\n\n # If possible, pick best depth-weighting channels\n if limit_depth_chs is True:\n G = _restrict_gain_matrix(G, info)\n elif limit_depth_chs == 'whiten':\n whitener, _ = compute_whitener(noise_cov, info, pca=True, rank=rank,\n verbose=False)\n G = np.dot(whitener, G)\n\n # Compute the gain matrix\n if is_fixed_ori or combine_xyz in ('fro', False):\n d = np.sum(G ** 2, axis=0)\n if not (is_fixed_ori or combine_xyz is False):\n d = d.reshape(-1, 3).sum(axis=1)\n # Spherical leadfield can be zero at the center\n d[d == 0.] = np.min(d[d != 0.])\n else: # 'spectral'\n # n_pos = G.shape[1] // 3\n # The following is equivalent to this, but 4-10x faster\n # d = np.zeros(n_pos)\n # for k in range(n_pos):\n # Gk = G[:, 3 * k:3 * (k + 1)]\n # x = np.dot(Gk.T, Gk)\n # d[k] = linalg.svdvals(x)[0]\n G.shape = (G.shape[0], -1, 3)\n d = np.linalg.norm(einsum('svj,svk->vjk', G, G), # vector dot products\n ord=2, axis=(1, 2)) # ord=2 spectral (largest s.v.)\n G.shape = (G.shape[0], -1)\n\n # XXX Currently the fwd solns never have \"patch_areas\" defined\n if patch_areas is not None:\n if not is_fixed_ori and combine_xyz is False:\n patch_areas = np.repeat(patch_areas, 3)\n d /= patch_areas ** 2\n logger.info(' Patch areas taken into account in the depth '\n 'weighting')\n\n w = 1.0 / d\n if limit is not None:\n ws = np.sort(w)\n weight_limit = limit ** 2\n if limit_depth_chs is False:\n # match old mne-python behavor\n # we used to do ind = np.argmin(ws), but this is 0 by sort above\n n_limit = 0\n limit = ws[0] * weight_limit\n else:\n # match C code behavior\n limit = ws[-1]\n n_limit = len(d)\n if ws[-1] > weight_limit * ws[0]:\n ind = np.where(ws > weight_limit * ws[0])[0][0]\n limit = ws[ind]\n n_limit = ind\n\n logger.info(' limit = %d/%d = %f'\n % (n_limit + 1, len(d),\n np.sqrt(limit / ws[0])))\n scale = 1.0 / limit\n logger.info(' scale = %g exp = %g' % (scale, exp))\n w = np.minimum(w / limit, 1)\n depth_prior = w ** exp\n\n if not (is_fixed_ori or combine_xyz is False):\n depth_prior = np.repeat(depth_prior, 3)\n\n return depth_prior\n\n\ndef _stc_src_sel(src, stc, on_missing='raise',\n extra=', likely due to forward calculations'):\n \"\"\"Select the vertex indices of a source space using a source estimate.\"\"\"\n if isinstance(stc, list):\n vertices = stc\n else:\n assert isinstance(stc, _BaseSourceEstimate)\n vertices = stc._vertices_list\n del stc\n if not len(src) == len(vertices):\n raise RuntimeError('Mismatch between number of 
source spaces (%s) and '\n 'STC vertices (%s)' % (len(src), len(vertices)))\n src_sels, stc_sels, out_vertices = [], [], []\n src_offset = stc_offset = 0\n for s, v in zip(src, vertices):\n joint_sel = np.intersect1d(s['vertno'], v)\n src_sels.append(np.searchsorted(s['vertno'], joint_sel) + src_offset)\n src_offset += len(s['vertno'])\n idx = np.searchsorted(v, joint_sel)\n stc_sels.append(idx + stc_offset)\n stc_offset += len(v)\n out_vertices.append(np.array(v)[idx])\n src_sel = np.concatenate(src_sels)\n stc_sel = np.concatenate(stc_sels)\n assert len(src_sel) == len(stc_sel) == sum(len(v) for v in out_vertices)\n\n n_stc = sum(len(v) for v in vertices)\n n_joint = len(src_sel)\n if n_joint != n_stc:\n msg = ('Only %i of %i SourceEstimate %s found in '\n 'source space%s'\n % (n_joint, n_stc, 'vertex' if n_stc == 1 else 'vertices',\n extra))\n if on_missing == 'raise':\n raise RuntimeError(msg)\n elif on_missing == 'warn':\n warn(msg)\n else:\n assert on_missing == 'ignore'\n return src_sel, stc_sel, out_vertices\n\n\ndef _fill_measurement_info(info, fwd, sfreq):\n \"\"\"Fill the measurement info of a Raw or Evoked object.\"\"\"\n sel = pick_channels(info['ch_names'], fwd['sol']['row_names'])\n info = pick_info(info, sel)\n info['bads'] = []\n\n # this is probably correct based on what's done in meas_info.py...\n info['meas_id'] = fwd['info']['meas_id']\n info['file_id'] = info['meas_id']\n\n now = time()\n sec = np.floor(now)\n usec = 1e6 * (now - sec)\n\n info['meas_date'] = (int(sec), int(usec))\n info['highpass'] = 0.0\n info['lowpass'] = sfreq / 2.0\n info['sfreq'] = sfreq\n info['projs'] = []\n\n return info\n\n\n@verbose\ndef _apply_forward(fwd, stc, start=None, stop=None, on_missing='raise',\n verbose=None):\n \"\"\"Apply forward model and return data, times, ch_names.\"\"\"\n if not is_fixed_orient(fwd):\n raise ValueError('Only fixed-orientation forward operators are '\n 'supported.')\n\n if np.all(stc.data > 0):\n warn('Source estimate only contains currents with positive values. '\n 'Use pick_ori=\"normal\" when computing the inverse to compute '\n 'currents not current magnitudes.')\n\n max_cur = np.max(np.abs(stc.data))\n if max_cur > 1e-7: # 100 nAm threshold for warning\n warn('The maximum current magnitude is %0.1f nAm, which is very large.'\n ' Are you trying to apply the forward model to noise-normalized '\n '(dSPM, sLORETA, or eLORETA) values? The result will only be '\n 'correct if currents (in units of Am) are used.'\n % (1e9 * max_cur))\n\n src_sel, stc_sel, _ = _stc_src_sel(fwd['src'], stc, on_missing=on_missing)\n gain = fwd['sol']['data'][:, src_sel]\n # save some memory if possible\n stc_sel = slice(None) if len(stc_sel) == len(stc.data) else stc_sel\n\n logger.info('Projecting source estimate to sensor space...')\n data = np.dot(gain, stc.data[stc_sel, start:stop])\n logger.info('[done]')\n\n times = deepcopy(stc.times[start:stop])\n\n return data, times\n\n\n@verbose\ndef apply_forward(fwd, stc, info, start=None, stop=None, use_cps=True,\n on_missing='raise', verbose=None):\n \"\"\"Project source space currents to sensor space using a forward operator.\n\n The sensor space data is computed for all channels present in fwd. Use\n pick_channels_forward or pick_types_forward to restrict the solution to a\n subset of channels.\n\n The function returns an Evoked object, which is constructed from\n evoked_template. The evoked_template should be from the same MEG system on\n which the original data was acquired. 
An exception will be raised if the\n forward operator contains channels that are not present in the template.\n\n\n Parameters\n ----------\n fwd : Forward\n Forward operator to use.\n stc : SourceEstimate\n The source estimate from which the sensor space data is computed.\n info : instance of Info\n Measurement info to generate the evoked.\n start : int, optional\n Index of first time sample (index not time is seconds).\n stop : int, optional\n Index of first time sample not to include (index not time is seconds).\n use_cps : bool (default True)\n Whether to use cortical patch statistics to define normal\n orientations when converting to fixed orientation (if necessary).\n\n .. versionadded:: 0.15\n %(on_missing)s Default is \"raise\".\n\n .. versionadded:: 0.18\n %(verbose)s\n\n Returns\n -------\n evoked : Evoked\n Evoked object with computed sensor space data.\n\n See Also\n --------\n apply_forward_raw: Compute sensor space data and return a Raw object.\n \"\"\"\n # make sure evoked_template contains all channels in fwd\n for ch_name in fwd['sol']['row_names']:\n if ch_name not in info['ch_names']:\n raise ValueError('Channel %s of forward operator not present in '\n 'evoked_template.' % ch_name)\n\n # project the source estimate to the sensor space\n if not is_fixed_orient(fwd):\n fwd = convert_forward_solution(fwd, force_fixed=True, use_cps=use_cps)\n data, times = _apply_forward(fwd, stc, start, stop, on_missing=on_missing)\n\n # fill the measurement info\n sfreq = float(1.0 / stc.tstep)\n info_out = _fill_measurement_info(info, fwd, sfreq)\n\n evoked = EvokedArray(data, info_out, times[0], nave=1)\n\n evoked.times = times\n evoked.first = int(np.round(evoked.times[0] * sfreq))\n evoked.last = evoked.first + evoked.data.shape[1] - 1\n\n return evoked\n\n\n@verbose\ndef apply_forward_raw(fwd, stc, info, start=None, stop=None,\n on_missing='raise', verbose=None):\n \"\"\"Project source space currents to sensor space using a forward operator.\n\n The sensor space data is computed for all channels present in fwd. Use\n pick_channels_forward or pick_types_forward to restrict the solution to a\n subset of channels.\n\n The function returns a Raw object, which is constructed using provided\n info. The info object should be from the same MEG system on which the\n original data was acquired. An exception will be raised if the forward\n operator contains channels that are not present in the info.\n\n Parameters\n ----------\n fwd : Forward\n Forward operator to use. Has to be fixed-orientation.\n stc : SourceEstimate\n The source estimate from which the sensor space data is computed.\n info : instance of Info\n The measurement info.\n start : int, optional\n Index of first time sample (index not time is seconds).\n stop : int, optional\n Index of first time sample not to include (index not time is seconds).\n %(on_missing)s Default is \"raise\".\n\n .. versionadded:: 0.18\n %(verbose)s\n\n Returns\n -------\n raw : Raw object\n Raw object with computed sensor space data.\n\n See Also\n --------\n apply_forward: Compute sensor space data and return an Evoked object.\n \"\"\"\n # make sure info contains all channels in fwd\n for ch_name in fwd['sol']['row_names']:\n if ch_name not in info['ch_names']:\n raise ValueError('Channel %s of forward operator not present in '\n 'info.' 
% ch_name)\n\n # project the source estimate to the sensor space\n data, times = _apply_forward(fwd, stc, start, stop, on_missing=on_missing)\n\n sfreq = 1.0 / stc.tstep\n info = _fill_measurement_info(info, fwd, sfreq)\n info['projs'] = []\n # store sensor data in Raw object using the info\n raw = RawArray(data, info)\n raw.preload = True\n\n raw._first_samps = np.array([int(np.round(times[0] * sfreq))])\n raw._last_samps = np.array([raw.first_samp + raw._data.shape[1] - 1])\n raw._projector = None\n raw._update_times()\n return raw\n\n\n@fill_doc\ndef restrict_forward_to_stc(fwd, stc, on_missing='ignore'):\n \"\"\"Restrict forward operator to active sources in a source estimate.\n\n Parameters\n ----------\n fwd : instance of Forward\n Forward operator.\n stc : instance of SourceEstimate\n Source estimate.\n %(on_missing)s Default is \"ignore\".\n\n .. versionadded:: 0.18\n\n Returns\n -------\n fwd_out : instance of Forward\n Restricted forward operator.\n\n See Also\n --------\n restrict_forward_to_label\n \"\"\"\n _validate_type(on_missing, str, 'on_missing')\n _check_option('on_missing', on_missing, ('ignore', 'warn', 'raise'))\n src_sel, _, vertices = _stc_src_sel(fwd['src'], stc, on_missing=on_missing)\n del stc\n return _restrict_forward_to_src_sel(fwd, src_sel)\n\n\ndef _restrict_forward_to_src_sel(fwd, src_sel):\n fwd_out = deepcopy(fwd)\n # figure out the vertno we are keeping\n idx_sel = np.concatenate([[[si] * len(s['vertno']), s['vertno']]\n for si, s in enumerate(fwd['src'])], axis=-1)\n assert idx_sel.ndim == 2 and idx_sel.shape[0] == 2\n assert idx_sel.shape[1] == fwd['nsource']\n idx_sel = idx_sel[:, src_sel]\n\n fwd_out['source_rr'] = fwd['source_rr'][src_sel]\n fwd_out['nsource'] = len(src_sel)\n\n if is_fixed_orient(fwd):\n idx = src_sel\n if fwd['sol_grad'] is not None:\n idx_grad = (3 * src_sel[:, None] + np.arange(3)).ravel()\n else:\n idx = (3 * src_sel[:, None] + np.arange(3)).ravel()\n if fwd['sol_grad'] is not None:\n idx_grad = (9 * src_sel[:, None] + np.arange(9)).ravel()\n\n fwd_out['source_nn'] = fwd['source_nn'][idx]\n fwd_out['sol']['data'] = fwd['sol']['data'][:, idx]\n if fwd['sol_grad'] is not None:\n fwd_out['sol_grad']['data'] = fwd['sol_grad']['data'][:, idx_grad]\n fwd_out['sol']['ncol'] = len(idx)\n\n if is_fixed_orient(fwd, orig=True):\n idx = src_sel\n if fwd['sol_grad'] is not None:\n idx_grad = (3 * src_sel[:, None] + np.arange(3)).ravel()\n else:\n idx = (3 * src_sel[:, None] + np.arange(3)).ravel()\n if fwd['sol_grad'] is not None:\n idx_grad = (9 * src_sel[:, None] + np.arange(9)).ravel()\n\n fwd_out['_orig_sol'] = fwd['_orig_sol'][:, idx]\n if fwd['sol_grad'] is not None:\n fwd_out['_orig_sol_grad'] = fwd['_orig_sol_grad'][:, idx_grad]\n\n vertices = [idx_sel[1][idx_sel[0] == si]\n for si in range(len(fwd_out['src']))]\n _set_source_space_vertices(fwd_out['src'], vertices)\n\n return fwd_out\n\n\ndef restrict_forward_to_label(fwd, labels):\n \"\"\"Restrict forward operator to labels.\n\n Parameters\n ----------\n fwd : Forward\n Forward operator.\n labels : instance of Label | list\n Label object or list of label objects.\n\n Returns\n -------\n fwd_out : dict\n Restricted forward operator.\n\n See Also\n --------\n restrict_forward_to_stc\n \"\"\"\n vertices = [np.array([], int), np.array([], int)]\n\n if not isinstance(labels, list):\n labels = [labels]\n\n # Get vertices separately of each hemisphere from all label\n for label in labels:\n _validate_type(label, Label, \"label\", \"Label or list\")\n i = 0 if label.hemi == 'lh' else 1\n 
vertices[i] = np.append(vertices[i], label.vertices)\n # Remove duplicates and sort\n vertices = [np.unique(vert_hemi) for vert_hemi in vertices]\n\n fwd_out = deepcopy(fwd)\n fwd_out['source_rr'] = np.zeros((0, 3))\n fwd_out['nsource'] = 0\n fwd_out['source_nn'] = np.zeros((0, 3))\n fwd_out['sol']['data'] = np.zeros((fwd['sol']['data'].shape[0], 0))\n fwd_out['_orig_sol'] = np.zeros((fwd['_orig_sol'].shape[0], 0))\n if fwd['sol_grad'] is not None:\n fwd_out['sol_grad']['data'] = np.zeros(\n (fwd['sol_grad']['data'].shape[0], 0))\n fwd_out['_orig_sol_grad'] = np.zeros(\n (fwd['_orig_sol_grad'].shape[0], 0))\n fwd_out['sol']['ncol'] = 0\n nuse_lh = fwd['src'][0]['nuse']\n\n for i in range(2):\n fwd_out['src'][i]['vertno'] = np.array([], int)\n fwd_out['src'][i]['nuse'] = 0\n fwd_out['src'][i]['inuse'] = fwd['src'][i]['inuse'].copy()\n fwd_out['src'][i]['inuse'].fill(0)\n fwd_out['src'][i]['use_tris'] = np.array([[]], int)\n fwd_out['src'][i]['nuse_tri'] = np.array([0])\n\n # src_sel is idx to cols in fwd that are in any label per hemi\n src_sel = np.intersect1d(fwd['src'][i]['vertno'], vertices[i])\n src_sel = np.searchsorted(fwd['src'][i]['vertno'], src_sel)\n\n # Reconstruct each src\n vertno = fwd['src'][i]['vertno'][src_sel]\n fwd_out['src'][i]['inuse'][vertno] = 1\n fwd_out['src'][i]['nuse'] += len(vertno)\n fwd_out['src'][i]['vertno'] = np.where(fwd_out['src'][i]['inuse'])[0]\n\n # Reconstruct part of fwd that is not sol data\n src_sel += i * nuse_lh # Add column shift to right hemi\n fwd_out['source_rr'] = np.vstack([fwd_out['source_rr'],\n fwd['source_rr'][src_sel]])\n fwd_out['nsource'] += len(src_sel)\n\n if is_fixed_orient(fwd):\n idx = src_sel\n if fwd['sol_grad'] is not None:\n idx_grad = (3 * src_sel[:, None] + np.arange(3)).ravel()\n else:\n idx = (3 * src_sel[:, None] + np.arange(3)).ravel()\n if fwd['sol_grad'] is not None:\n idx_grad = (9 * src_sel[:, None] + np.arange(9)).ravel()\n\n fwd_out['source_nn'] = np.vstack(\n [fwd_out['source_nn'], fwd['source_nn'][idx]])\n fwd_out['sol']['data'] = np.hstack(\n [fwd_out['sol']['data'], fwd['sol']['data'][:, idx]])\n if fwd['sol_grad'] is not None:\n fwd_out['sol_grad']['data'] = np.hstack(\n [fwd_out['sol_grad']['data'],\n fwd['sol_rad']['data'][:, idx_grad]])\n fwd_out['sol']['ncol'] += len(idx)\n\n if is_fixed_orient(fwd, orig=True):\n idx = src_sel\n if fwd['sol_grad'] is not None:\n idx_grad = (3 * src_sel[:, None] + np.arange(3)).ravel()\n else:\n idx = (3 * src_sel[:, None] + np.arange(3)).ravel()\n if fwd['sol_grad'] is not None:\n idx_grad = (9 * src_sel[:, None] + np.arange(9)).ravel()\n\n fwd_out['_orig_sol'] = np.hstack(\n [fwd_out['_orig_sol'], fwd['_orig_sol'][:, idx]])\n if fwd['sol_grad'] is not None:\n fwd_out['_orig_sol_grad'] = np.hstack(\n [fwd_out['_orig_sol_grad'],\n fwd['_orig_sol_grad'][:, idx_grad]])\n\n return fwd_out\n\n\ndef _do_forward_solution(subject, meas, fname=None, src=None, spacing=None,\n mindist=None, bem=None, mri=None, trans=None,\n eeg=True, meg=True, fixed=False, grad=False,\n mricoord=False, overwrite=False, subjects_dir=None,\n verbose=None):\n \"\"\"Calculate a forward solution for a subject using MNE-C routines.\n\n This is kept around for testing purposes.\n\n This function wraps to mne_do_forward_solution, so the mne\n command-line tools must be installed and accessible from Python.\n\n Parameters\n ----------\n subject : str\n Name of the subject.\n meas : Raw | Epochs | Evoked | str\n If Raw or Epochs, a temporary evoked file will be created and\n saved to a temporary directory. 
If str, then it should be a\n filename to a file with measurement information the mne\n command-line tools can understand (i.e., raw or evoked).\n fname : str | None\n Destination forward solution filename. If None, the solution\n will be created in a temporary directory, loaded, and deleted.\n src : str | None\n Source space name. If None, the MNE default is used.\n spacing : str\n The spacing to use. Can be ``'#'`` for spacing in mm, ``'ico#'`` for a\n recursively subdivided icosahedron, or ``'oct#'`` for a recursively\n subdivided octahedron (e.g., ``spacing='ico4'``). Default is 7 mm.\n mindist : float | str | None\n Minimum distance of sources from inner skull surface (in mm).\n If None, the MNE default value is used. If string, 'all'\n indicates to include all points.\n bem : str | None\n Name of the BEM to use (e.g., \"sample-5120-5120-5120\"). If None\n (Default), the MNE default will be used.\n mri : str | None\n The name of the trans file in FIF format.\n If None, trans must not be None.\n trans : dict | str | None\n File name of the trans file in text format.\n If None, mri must not be None.\n eeg : bool\n If True (Default), include EEG computations.\n meg : bool\n If True (Default), include MEG computations.\n fixed : bool\n If True, make a fixed-orientation forward solution (Default:\n False). Note that fixed-orientation inverses can still be\n created from free-orientation forward solutions.\n grad : bool\n If True, compute the gradient of the field with respect to the\n dipole coordinates as well (Default: False).\n mricoord : bool\n If True, calculate in MRI coordinates (Default: False).\n overwrite : bool\n If True, the destination file (if it exists) will be overwritten.\n If False (default), an error will be raised if the file exists.\n subjects_dir : None | str\n Override the SUBJECTS_DIR environment variable.\n %(verbose)s\n\n See Also\n --------\n make_forward_solution\n\n Returns\n -------\n fwd : Forward\n The generated forward solution.\n \"\"\"\n if not has_mne_c():\n raise RuntimeError('mne command line tools could not be found')\n\n # check for file existence\n temp_dir = tempfile.mkdtemp()\n if fname is None:\n fname = op.join(temp_dir, 'temp-fwd.fif')\n _check_fname(fname, overwrite)\n _validate_type(subject, \"str\", \"subject\")\n\n # check for meas to exist as string, or try to make evoked\n if isinstance(meas, str):\n if not op.isfile(meas):\n raise IOError('measurement file \"%s\" could not be found' % meas)\n elif isinstance(meas, (BaseRaw, BaseEpochs, Evoked)):\n meas_file = op.join(temp_dir, 'info.fif')\n write_info(meas_file, meas.info)\n meas = meas_file\n else:\n raise ValueError('meas must be string, Raw, Epochs, or Evoked')\n\n # deal with trans/mri\n if mri is not None and trans is not None:\n raise ValueError('trans and mri cannot both be specified')\n if mri is None and trans is None:\n # MNE allows this to default to a trans/mri in the subject's dir,\n # but let's be safe here and force the user to pass us a trans/mri\n raise ValueError('Either trans or mri must be specified')\n\n if trans is not None:\n _validate_type(trans, \"str\", \"trans\")\n if not op.isfile(trans):\n raise IOError('trans file \"%s\" not found' % trans)\n if mri is not None:\n # deal with trans\n if not isinstance(mri, str):\n if isinstance(mri, dict):\n mri_data = deepcopy(mri)\n mri = op.join(temp_dir, 'mri-trans.fif')\n try:\n write_trans(mri, mri_data)\n except Exception:\n raise IOError('mri was a dict, but could not be '\n 'written to disk as a transform file')\n 
else:\n raise ValueError('trans must be a string or dict (trans)')\n if not op.isfile(mri):\n raise IOError('trans file \"%s\" could not be found' % trans)\n\n # deal with meg/eeg\n if not meg and not eeg:\n raise ValueError('meg or eeg (or both) must be True')\n\n path, fname = op.split(fname)\n if not op.splitext(fname)[1] == '.fif':\n raise ValueError('Forward name does not end with .fif')\n path = op.abspath(path)\n\n # deal with mindist\n if mindist is not None:\n if isinstance(mindist, str):\n if not mindist.lower() == 'all':\n raise ValueError('mindist, if string, must be \"all\"')\n mindist = ['--all']\n else:\n mindist = ['--mindist', '%g' % mindist]\n\n # src, spacing, bem\n for element, name, kind in zip((src, spacing, bem),\n (\"src\", \"spacing\", \"bem\"),\n ('path-like', 'str', 'path-like')):\n if element is not None:\n _validate_type(element, kind, name, \"%s or None\" % kind)\n\n # put together the actual call\n cmd = ['mne_do_forward_solution',\n '--subject', subject,\n '--meas', meas,\n '--fwd', fname,\n '--destdir', path]\n if src is not None:\n cmd += ['--src', src]\n if spacing is not None:\n if spacing.isdigit():\n pass # spacing in mm\n else:\n # allow both \"ico4\" and \"ico-4\" style values\n match = re.match(r\"(oct|ico)-?(\\d+)$\", spacing)\n if match is None:\n raise ValueError(\"Invalid spacing parameter: %r\" % spacing)\n spacing = '-'.join(match.groups())\n cmd += ['--spacing', spacing]\n if mindist is not None:\n cmd += mindist\n if bem is not None:\n cmd += ['--bem', bem]\n if mri is not None:\n cmd += ['--mri', '%s' % mri]\n if trans is not None:\n cmd += ['--trans', '%s' % trans]\n if not meg:\n cmd.append('--eegonly')\n if not eeg:\n cmd.append('--megonly')\n if fixed:\n cmd.append('--fixed')\n if grad:\n cmd.append('--grad')\n if mricoord:\n cmd.append('--mricoord')\n if overwrite:\n cmd.append('--overwrite')\n\n env = os.environ.copy()\n subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)\n env['SUBJECTS_DIR'] = subjects_dir\n\n try:\n logger.info('Running forward solution generation command with '\n 'subjects_dir %s' % subjects_dir)\n run_subprocess(cmd, env=env)\n except Exception:\n raise\n else:\n fwd = read_forward_solution(op.join(path, fname), verbose=False)\n finally:\n shutil.rmtree(temp_dir, ignore_errors=True)\n return fwd\n\n\n@verbose\ndef average_forward_solutions(fwds, weights=None):\n \"\"\"Average forward solutions.\n\n Parameters\n ----------\n fwds : list of Forward\n Forward solutions to average. Each entry (dict) should be a\n forward solution.\n weights : array | None\n Weights to apply to each forward solution in averaging. If None,\n forward solutions will be equally weighted. 
Weights must be\n non-negative, and will be adjusted to sum to one.\n\n Returns\n -------\n fwd : Forward\n The averaged forward solution.\n \"\"\"\n # check for fwds being a list\n _validate_type(fwds, list, \"fwds\")\n if not len(fwds) > 0:\n raise ValueError('fwds must not be empty')\n\n # check weights\n if weights is None:\n weights = np.ones(len(fwds))\n weights = np.asanyarray(weights) # in case it's a list, convert it\n if not np.all(weights >= 0):\n raise ValueError('weights must be non-negative')\n if not len(weights) == len(fwds):\n raise ValueError('weights must be None or the same length as fwds')\n w_sum = np.sum(weights)\n if not w_sum > 0:\n raise ValueError('weights cannot all be zero')\n weights /= w_sum\n\n # check our forward solutions\n for fwd in fwds:\n # check to make sure it's a forward solution\n _validate_type(fwd, dict, \"each entry in fwds\", \"dict\")\n # check to make sure the dict is actually a fwd\n check_keys = ['info', 'sol_grad', 'nchan', 'src', 'source_nn', 'sol',\n 'source_rr', 'source_ori', 'surf_ori', 'coord_frame',\n 'mri_head_t', 'nsource']\n if not all(key in fwd for key in check_keys):\n raise KeyError('forward solution dict does not have all standard '\n 'entries, cannot compute average.')\n\n # check forward solution compatibility\n if any(fwd['sol'][k] != fwds[0]['sol'][k]\n for fwd in fwds[1:] for k in ['nrow', 'ncol']):\n raise ValueError('Forward solutions have incompatible dimensions')\n if any(fwd[k] != fwds[0][k] for fwd in fwds[1:]\n for k in ['source_ori', 'surf_ori', 'coord_frame']):\n raise ValueError('Forward solutions have incompatible orientations')\n\n # actually average them (solutions and gradients)\n fwd_ave = deepcopy(fwds[0])\n fwd_ave['sol']['data'] *= weights[0]\n fwd_ave['_orig_sol'] *= weights[0]\n for fwd, w in zip(fwds[1:], weights[1:]):\n fwd_ave['sol']['data'] += w * fwd['sol']['data']\n fwd_ave['_orig_sol'] += w * fwd['_orig_sol']\n if fwd_ave['sol_grad'] is not None:\n fwd_ave['sol_grad']['data'] *= weights[0]\n fwd_ave['_orig_sol_grad'] *= weights[0]\n for fwd, w in zip(fwds[1:], weights[1:]):\n fwd_ave['sol_grad']['data'] += w * fwd['sol_grad']['data']\n fwd_ave['_orig_sol_grad'] += w * fwd['_orig_sol_grad']\n return fwd_ave\n" ]
[ [ "numpy.ones", "numpy.sum", "scipy.sparse.block_diag", "numpy.intersect1d", "numpy.vstack", "numpy.append", "scipy.linalg.norm", "numpy.concatenate", "numpy.abs", "numpy.where", "numpy.round", "numpy.unique", "numpy.minimum", "numpy.sqrt", "numpy.tile", "numpy.eye", "numpy.zeros", "numpy.searchsorted", "numpy.repeat", "numpy.asanyarray", "scipy.linalg.inv", "numpy.arange", "numpy.hstack", "numpy.all", "numpy.min", "numpy.sort", "scipy.sparse.issparse", "numpy.floor", "numpy.array", "numpy.dot" ] ]
zhangbo2008/facenet
[ "4dfabcb5cf14f99622dbe5f9f12f0539821c169c" ]
[ "etc/tf_tutorial/Tensorflow-101-master/logistic_regression_customdata.py" ]
[ "#!/usr/bin/env python\n# coding: utf-8\n\n# # LOGISTIC REGRESSION WITH CUSTOM DATA\n\n# In[1]:\n\n\nimport os\nimport numpy as np\nimport tensorflow as tf\nimport matplotlib.pyplot as plt\nprint (\"Packages loaded\")\n\n\n# # Load data\n\n# In[2]:\n\n\n# Load them!\ncwd = os.getcwd()\nloadpath = cwd + \"/data/custom_data.npz\"\nl = np.load(loadpath)\n\n# See what's in here\nprint (l.files)\n\n# Parse data\ntrainimg = l['trainimg']\ntrainlabel = l['trainlabel']\ntestimg = l['testimg']\ntestlabel = l['testlabel']\nuse_gray = l['use_gray']\nntrain = trainimg.shape[0]\nnclass = trainlabel.shape[1]\ndim = trainimg.shape[1]\nntest = testimg.shape[0]\nprint (\"%d train images loaded\" % (ntrain))\nprint (\"%d test images loaded\" % (ntest))\nprint (\"%d dimensional input\" % (dim))\nprint (\"%d classes\" % (nclass))\n\n\n# # Define network\n\n# In[3]:\n\n\ntf.set_random_seed(0)\n# Parameters of Logistic Regression\nlearning_rate = 0.001\ntraining_epochs = 1000\nbatch_size = 10\ndisplay_step = 100\n\n# Create Graph for Logistic Regression\nx = tf.placeholder(\"float\", [None, dim]) \ny = tf.placeholder(\"float\", [None, nclass]) \nW = tf.Variable(tf.zeros([dim, nclass]), name = 'weights')\nb = tf.Variable(tf.zeros([nclass]))\n\n\n# # Define functions\n\n# In[4]:\n\n\nWEIGHT_DECAY_FACTOR = 1 # 0.000001\nl2_loss = tf.add_n([tf.nn.l2_loss(v) \n for v in tf.trainable_variables()])\n_pred = tf.nn.softmax(tf.matmul(x, W) + b) \ncost = tf.reduce_mean(-tf.reduce_sum(y*tf.log(_pred)\n , reduction_indices=1)) \ncost = cost + WEIGHT_DECAY_FACTOR*l2_loss\noptm = tf.train.GradientDescentOptimizer(\n learning_rate).minimize(cost) \n_corr = tf.equal(tf.argmax(_pred, 1), tf.argmax(y, 1)) \naccr = tf.reduce_mean(tf.cast(_corr, tf.float32))\ninit = tf.initialize_all_variables()\nprint (\"Functions ready\")\n\n\n# # Optimize\n\n# In[5]:\n\n\n# Launch the graph\nsess = tf.Session()\nsess.run(init)\n# Training cycle\nfor epoch in range(training_epochs):\n avg_cost = 0.\n num_batch = int(ntrain/batch_size)\n # Loop over all batches\n for i in range(num_batch): \n randidx = np.random.randint(ntrain, size=batch_size)\n batch_xs = trainimg[randidx, :]\n batch_ys = trainlabel[randidx, :] \n # Fit training using batch data\n sess.run(optm, feed_dict={x: batch_xs, y: batch_ys})\n # Compute average loss\n avg_cost += sess.run(cost\n , feed_dict={x: batch_xs, y: batch_ys})/num_batch\n\n # Display logs per epoch step\n if epoch % display_step == 0:\n print (\"Epoch: %03d/%03d cost: %.9f\" % \n (epoch, training_epochs, avg_cost))\n train_acc = sess.run(accr, feed_dict={x: batch_xs, y: batch_ys})\n print (\" Training accuracy: %.3f\" % (train_acc))\n test_acc = sess.run(accr, feed_dict={x: testimg, y: testlabel})\n print (\" Test accuracy: %.3f\" % (test_acc))\nprint (\"Optimization Finished!\")\n\n\n# # CLOSE SESSION\n\n# In[6]:\n\n\nsess.close()\nprint (\"Session closed.\")\n\n" ]
[ [ "tensorflow.initialize_all_variables", "numpy.load", "tensorflow.placeholder", "tensorflow.zeros", "tensorflow.nn.l2_loss", "tensorflow.matmul", "tensorflow.trainable_variables", "tensorflow.cast", "tensorflow.set_random_seed", "tensorflow.train.GradientDescentOptimizer", "tensorflow.Session", "tensorflow.argmax", "tensorflow.log", "numpy.random.randint" ] ]