repo_name (string, lengths 6–130) | hexsha (sequence) | file_path (sequence) | code (sequence) | apis (sequence) | possible_versions (list) |
---|---|---|---|---|---|
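Each row below pairs a repository with parallel sequences of commit hashes, file paths, file contents, and the API calls extracted from each file, plus a list of candidate library versions. As a rough, hypothetical illustration of that layout (the dict shape is assumed from the column types above, not taken from any official loader; values are copied from the first row, with the file contents elided):

```python
# Minimal sketch of one row, assuming the schema in the header above.
# The dict shape is an assumption for illustration, not an official loader format.
row = {
    "repo_name": "cambridgeltl/cancer-hallmark-cnn",
    "hexsha": ["a1aba55ba425aa0deac4f80c97572a146e4097bb"],
    "file_path": ["keras/ltlib/evaluation.py"],
    "code": ["import numpy as np\n# ... full file contents elided ..."],
    "apis": [["sklearn.metrics.roc_auc_score", "numpy.argmax", "numpy.sum"]],
    "possible_versions": [
        {"matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": []}
    ],
}

# Walk the parallel sequences and report which APIs each file uses.
for sha, path, api_calls in zip(row["hexsha"], row["file_path"], row["apis"]):
    print(f"{row['repo_name']}@{sha[:7]} {path}: {', '.join(api_calls)}")
```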
cambridgeltl/cancer-hallmark-cnn | [
"a1aba55ba425aa0deac4f80c97572a146e4097bb"
] | [
"keras/ltlib/evaluation.py"
] | [
"import numpy as np # TODO remove dependency\n\nfrom collections import namedtuple\nfrom itertools import chain\n\nfrom sklearn import metrics as skmetrics\n\nfrom util import unique\n\nfrom logging import warn\n\nBinaryClassificationCounts = namedtuple('BinaryClassificationCounts',\n 'tp tn fp fn')\nBinaryClassificationMetrics = namedtuple('BinaryClassificationMetrics',\n 'tp tn fp fn acc prec rec fscore')\nPrCurvePoint = namedtuple('PrCurvePoint', 'prec rec fscore threshold')\n\ndef accuracy(gold, pred):\n if len(gold) != len(pred):\n raise ValueError('count mismatch')\n correct = sum(int(g == p) for g, p in zip(gold, pred))\n return 1.*correct/len(gold)\n\ndef tp_tn_fp_fn(gold, pred):\n \"\"\"Return (TP, FN, FP, FN) counts for gold and prediced values.\n\n Assumes that 0 is negative and all others positive.\n \"\"\"\n tp, tn, fp, fn = 0, 0, 0, 0\n for g, p in zip(gold, pred):\n if g == p:\n if g == 0:\n tn += 1\n else:\n tp += 1\n else:\n if g == 0:\n fp += 1\n else:\n fn += 1\n return BinaryClassificationCounts(tp, tn, fp, fn)\n\ndef precision_recall_fscore(tp, fp, fn):\n \"\"\"Return (precision, recall, f-score) for given counts.\"\"\"\n prec = 0.0 if tp + fp == 0 else 1.*tp / (tp + fp)\n rec = 0.0 if tp + fn == 0 else 1.*tp / (tp + fn)\n f = 0.0 if prec + rec == 0.0 else 2 * prec * rec / (prec + rec)\n return prec, rec, f\n\ndef evaluate_binary_classification(gold, pred, positive):\n \"\"\"Evaluate binary classification performance.\n\n Map labels in positive to 1 and others to 0.\n\n Return BinaryClassificationMetrics.\n \"\"\"\n if len(gold) != len(pred):\n raise ValueError('count mismatch')\n\n gold = _binarize(gold, positive)\n pred = _binarize(pred, positive)\n\n if not any(i for i in gold):\n warn('no positive gold labels for %s' % str(positive))\n\n acc = accuracy(gold, pred)\n tp, tn, fp, fn = tp_tn_fp_fn(gold, pred)\n prec, rec, f = precision_recall_fscore(tp, fp, fn)\n\n return BinaryClassificationMetrics(tp, tn, fp, fn, acc, prec, rec, f)\n\ndef _binarize(a, positive):\n \"\"\"Return values mapped to 1 or 0.\n\n Map values in positive to 1 and others to 0.\n \"\"\"\n return [1 if i in positive else 0 for i in a]\n\ndef average_precision_recall_fscore(results, micro=True):\n \"\"\"Return average precision, recall and f-score for list of\n BinaryClassificationMetrics.\n \"\"\"\n if micro:\n total = BinaryClassificationMetrics(*tuple(np.sum(results, axis=0)))\n return precision_recall_fscore(total.tp, total.fp, total.fn)\n else:\n avg = BinaryClassificationMetrics(*tuple(np.average(results, axis=0)))\n return avg.prec, avg.rec, avg.fscore\n\ndef _positive_label(labels):\n \"\"\"Return label representing the positive class or None if ambiguous.\"\"\"\n if set(labels) == set(['positive', 'negative']):\n return 'positive'\n elif set(labels) == set(['pos', 'neg']):\n return 'pos'\n else:\n return None # TODO other alternatives\n\ndef is_binary_labeling(labels):\n \"\"\"Return True iff given labels represent binary classification.\"\"\"\n return len(labels) == 2 and _positive_label(labels) is not None\n\ndef _binary_labels(dataitems):\n gold = dataitems.target_strs\n pred = dataitems.prediction_strs\n labels = unique(chain(gold, pred))\n return is_binary_labeling(labels)\n\ndef f1_score(prec, rec):\n from math import isnan\n if isnan(prec) or isnan(rec) or prec+rec == 0.0:\n return float('nan')\n else:\n return 2*prec*rec/(prec+rec)\n\ndef max_f_point(dataitems):\n \"\"\"Return PrCurvePoint with maximal f1 score.\"\"\"\n import logging\n from sklearn.metrics import 
precision_recall_curve\n y_true = np.argmax(dataitems.targets, axis=-1)\n prob_neg = dataitems.predictions[:,0] # 1st column\n prob_pos = dataitems.predictions[:,1] # 2nd column\n pos_score = prob_pos - prob_neg\n precs, recs, tholds = precision_recall_curve(y_true, pos_score)\n max_f, max_point = float('-inf'), PrCurvePoint(None, None, None, None)\n for p, r, t in zip(precs, recs, tholds):\n f = f1_score(p, r)\n if f > max_f:\n max_f, max_point = f, PrCurvePoint(p, r, f, t)\n return max_point\n\ndef evaluate_binary_labeling(dataitems):\n gold = dataitems.target_strs\n pred = dataitems.prediction_strs\n labels = unique(chain(gold, pred))\n pos = _positive_label(labels)\n res = {}\n res['acc'] = accuracy(gold, pred)\n bcm = evaluate_binary_classification(gold, pred, pos)\n res.update(bcm._asdict())\n res['auc'] = skmetrics.roc_auc_score(dataitems.targets,\n dataitems.predictions)\n res['ap'] = skmetrics.average_precision_score(dataitems.targets,\n dataitems.predictions)\n maxfp = max_f_point(dataitems)\n res.update({ 'maxf-{}'.format(k): v for k, v in maxfp._asdict().items() })\n return res\n\ndef summarize_classification(results):\n return (\n 'acc: {acc:.2%} auc: {auc:.2%} ap: {ap:.2%} ' +\n 'f: {fscore:.2%} (p:{prec:.1%} r:{rec:.1%} ' +\n 'tp:{tp} fp:{fp} fn:{fn}) ' +\n 'maxf: {maxf-fscore:.2%} (p:{maxf-prec:.1%} r:{maxf-rec:.1%} ' +\n 'th:{maxf-threshold:.2})'\n ).format(**results)\n\ndef evaluate_classification(dataitems):\n if _binary_labels(dataitems):\n return evaluate_binary_labeling(dataitems)\n else:\n raise NotImplementedError()\n"
] | [
[
"sklearn.metrics.roc_auc_score",
"sklearn.metrics.precision_recall_curve",
"numpy.argmax",
"sklearn.metrics.average_precision_score",
"numpy.average",
"numpy.sum"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
Jallet/keras-jl-ac-mean | [
"2bbc1596192fb8c3aefc4a8126482a5283574a59"
] | [
"keras/utils/np_utils.py"
] | [
"from __future__ import absolute_import\nimport numpy as np\nimport scipy as sp\nfrom six.moves import range\nfrom six.moves import zip\n\n\ndef to_categorical(y, nb_classes=None):\n '''Convert class vector (integers from 0 to nb_classes)\n to binary class matrix, for use with categorical_crossentropy.\n '''\n if not nb_classes:\n nb_classes = np.max(y)+1\n Y = np.zeros((len(y), nb_classes))\n for i in range(len(y)):\n Y[i, y[i]] = 1.\n return Y\n\n\ndef normalize(a, axis=-1, order=2):\n l2 = np.atleast_1d(np.linalg.norm(a, order, axis))\n l2[l2 == 0] = 1\n return a / np.expand_dims(l2, axis)\n\n\ndef binary_logloss(p, y):\n epsilon = 1e-15\n p = sp.maximum(epsilon, p)\n p = sp.minimum(1-epsilon, p)\n res = sum(y * sp.log(p) + sp.subtract(1, y) * sp.log(sp.subtract(1, p)))\n res *= -1.0/len(y)\n return res\n\n\ndef multiclass_logloss(P, Y):\n npreds = [P[i][Y[i]-1] for i in range(len(Y))]\n score = -(1. / len(Y)) * np.sum(np.log(npreds))\n return score\n\n\ndef accuracy(p, y):\n return np.mean([a == b for a, b in zip(p, y)])\n\n\ndef probas_to_classes(y_pred):\n if len(y_pred.shape) > 1 and y_pred.shape[1] > 1:\n return categorical_probas_to_classes(y_pred)\n return np.array([1 if p > 0.5 else 0 for p in y_pred])\n\n\ndef categorical_probas_to_classes(p):\n return np.argmax(p, axis=1)\n\n\ndef convert_kernel(kernel, dim_ordering='th'):\n '''Converts a kernel matrix (numpy array)\n from Theano format to TensorFlow format\n (or reciprocally, since the transformation\n is its own inverse).\n '''\n new_kernel = np.copy(kernel)\n if dim_ordering == 'th':\n w = kernel.shape[2]\n h = kernel.shape[3]\n for i in range(w):\n for j in range(h):\n new_kernel[:, :, i, j] = kernel[:, :, w - i - 1, h - j - 1]\n elif dim_ordering == 'tf':\n w = kernel.shape[0]\n h = kernel.shape[1]\n for i in range(w):\n for j in range(h):\n new_kernel[i, j, :, :] = kernel[w - i - 1, h - j - 1, :, :]\n else:\n raise Exception('Invalid dim_ordering: ' + str(dim_ordering))\n return new_kernel\n"
] | [
[
"numpy.log",
"numpy.expand_dims",
"scipy.log",
"scipy.minimum",
"scipy.maximum",
"numpy.linalg.norm",
"numpy.max",
"numpy.copy",
"numpy.argmax",
"numpy.array",
"scipy.subtract"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
abraia/abraia-python | [
"e49e3869b2ee7e6b1bcb41e0cc1ae126ac39e202"
] | [
"abraia/hsi.py"
] | [
"import os\nimport wget\nimport tempfile\nimport numpy as np\nimport scipy.io as sio\nimport scipy.ndimage as nd\n\nfrom PIL import Image\nfrom sklearn.svm import SVC\nfrom sklearn.utils import resample\nfrom sklearn.decomposition import PCA\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import classification_report, confusion_matrix, accuracy_score\n\nfrom tensorflow import keras\nfrom keras.utils import np_utils\nfrom keras.models import Model, load_model\nfrom keras.layers import Input, Conv2D, Conv3D, Flatten, Dense, Reshape, Dropout\n\nfrom .plot import plot_image, plot_images, plot_train_history\n\ntempdir = tempfile.gettempdir()\n\n\ndef download(url):\n basename = os.path.basename(url)\n dest = os.path.join(tempdir, basename)\n if not os.path.exists(dest):\n wget.download(url, dest)\n return dest\n\n\ndef load_dataset(dataset):\n \"\"\"Load one of the available hyperspectral datasets (IP, PU, SA, KSC).\"\"\"\n if dataset == 'IP':\n data_hsi = sio.loadmat(download(\n 'http://www.ehu.eus/ccwintco/uploads/6/67/Indian_pines_corrected.mat'))['indian_pines_corrected']\n gt_hsi = sio.loadmat(download(\n 'http://www.ehu.eus/ccwintco/uploads/c/c4/Indian_pines_gt.mat'))['indian_pines_gt']\n class_names = ['', 'Alfalfa', 'Corn-notill', 'Corn-mintill', 'Corn', 'Grass-pasture',\n 'Grass-trees', 'Grass-pasture-mowed', 'Hay-windrowed', 'Oats', 'Soybean-notill',\n 'Soybean-mintill', 'Soybean-clean', 'Wheat', 'Woods', 'Buildings Grass Trees Drives',\n 'Stone Steel Towers']\n return data_hsi, gt_hsi, class_names\n if dataset == 'PU':\n data_hsi = sio.loadmat(download(\n 'http://www.ehu.eus/ccwintco/uploads/e/ee/PaviaU.mat'))['paviaU']\n gt_hsi = sio.loadmat(download(\n 'http://www.ehu.eus/ccwintco/uploads/5/50/PaviaU_gt.mat'))['paviaU_gt']\n class_names = ['', 'Asphalt', 'Meadows', 'Gravel', 'Trees', 'Painted metal sheets',\n 'Bare Soil', 'Bitumen', 'Self-Blocking Bricks', 'Shadows']\n return data_hsi, gt_hsi, class_names\n if dataset == 'SA':\n data_hsi = sio.loadmat(download(\n 'http://www.ehu.eus/ccwintco/uploads/a/a3/Salinas_corrected.mat'))['salinas_corrected']\n gt_hsi = sio.loadmat(download(\n 'http://www.ehu.eus/ccwintco/uploads/f/fa/Salinas_gt.mat'))['salinas_gt']\n class_names = ['', 'Brocoli_green_weeds_1', 'Brocoli_green_weeds_2', 'Fallow', 'Fallow_rough_plow',\n 'Fallow_smooth', 'Stubble', 'Celery', 'Grapes_untrained', 'Soil_vinyard_develop',\n 'Corn_senesced_green_weeds', 'Lettuce_romaine_4wk', 'Lettuce_romaine_5wk',\n 'Lettuce_romaine_6wk', 'Lettuce_romaine_7wk', 'Vinyard_untrained', 'Vinyard_vertical_trellis']\n return data_hsi, gt_hsi, class_names\n if dataset == 'KSC':\n data_hsi = sio.loadmat(download(\n 'http://www.ehu.es/ccwintco/uploads/2/26/KSC.mat'))['KSC']\n gt_hsi = sio.loadmat(download(\n 'http://www.ehu.es/ccwintco/uploads/a/a6/KSC_gt.mat'))['KSC_gt']\n return data_hsi, gt_hsi\n\n\ndef random(img, n_bands=6, indexes=False):\n \"\"\"Returns a list of random bands\"\"\"\n bands = []\n indexes = []\n for i in range(n_bands):\n q = np.random.randint(img.shape[2])\n indexes.append(q)\n bands.append(img[:, :, q])\n if indexes:\n return bands, indexes\n return bands\n\n\ndef rgb(img, bands=None):\n \"\"\"Returns the RGB image from the selected bands (R, G, B)\"\"\"\n from spectral import get_rgb\n return get_rgb(img, bands=bands)\n\n\ndef ndvi(img, red_band, nir_band):\n \"\"\"Returns the NDVI image from the specified read and nir bands\"\"\"\n from spectral import ndvi\n return ndvi(img, red_band, nir_band)\n\n\ndef resample(img, n_samples=32):\n 
\"\"\"Resamples the number of spectral bands (n_samples)\"\"\"\n h, w, d = img.shape\n X = img.reshape((h * w), d)\n r = resample(np.transpose(X), n_samples=n_samples)\n return np.transpose(r).reshape(h, w, n_samples)\n\n\ndef resize(img, size):\n \"\"\"Resize the image to the given size (w, h)\"\"\"\n return np.array(Image.fromarray(img).resize(size, resample=Image.LANCZOS))\n\n\ndef normalize(img):\n \"\"\"Normalize the image to the range [0, 1]\"\"\"\n min, max = np.amin(img), np.amax(img)\n return (img - min) / (max - min)\n\n\ndef saliency(img):\n \"\"\"Calculate saliency map of the image\"\"\"\n smaps = []\n for n in range(img.shape[2]):\n band = img[:, :, n]\n h, w = band.shape\n fft = np.fft.fft2(resize(band, (64, 64)))\n log_amplitude, phase = np.log(np.absolute(fft)), np.angle(fft)\n spectral_residual = log_amplitude - nd.uniform_filter(log_amplitude, size=3, mode='nearest')\n smap = np.absolute(np.fft.ifft2(np.exp(spectral_residual + 1.j * phase)))\n smap = nd.gaussian_filter(smap, sigma=3)\n smaps.append(normalize(resize(smap, (w, h))))\n return np.sum(np.dstack(smaps), axis=2)\n\n\ndef spectrum(img, point=None):\n \"\"\"Get the spectrum at a given point (x, y)\n\n When a point is not specified the spectrum of the most salient point is returned.\n \"\"\"\n if point is None:\n sal = saliency(img)\n idx = np.unravel_index(np.argmax(sal), sal.shape)\n point = (idx[1], idx[0])\n return img[point[1], point[0], :]\n\n\ndef split_train_test(X, y, train_ratio=0.7):\n \"\"\"Split data for training and test\"\"\"\n X_train, X_test, y_train, y_test = train_test_split(X, y, train_size=train_ratio, stratify=y)\n return X_train, X_test, y_train, y_test\n\n\ndef principal_components(img, n_components=3, spectrum=False):\n \"\"\"Calculate principal components of the image\"\"\"\n h, w, d = img.shape\n X = img.reshape((h * w), d)\n pca = PCA(n_components=n_components, whiten=True)\n bands = pca.fit_transform(X).reshape(h, w, n_components)\n if spectrum:\n bands, pca.components_\n return bands\n\n\ndef pad_with_zeros(X, margin=2):\n newX = np.zeros((X.shape[0] + 2 * margin, X.shape[1] + 2* margin, X.shape[2]))\n newX[margin:X.shape[0] + margin, margin:X.shape[1] + margin, :] = X\n return newX\n\n\ndef create_patch(data, height_index, width_index, patch_size):\n height_slice = slice(height_index, height_index + patch_size)\n width_slice = slice(width_index, width_index + patch_size)\n return data[height_slice, width_slice, :]\n\n\n# TODO: Convert create patches to generator with batch_size parameter\ndef create_patches(X, patch_size):\n patches = []\n width, height = X.shape[1], X.shape[0]\n X = pad_with_zeros(X, patch_size // 2)\n for i in range(height):\n for j in range(width):\n image_patch = create_patch(X, i, j, patch_size)\n patches.append(image_patch.reshape(image_patch.shape + (1,)).astype('float32'))\n return np.array(patches)\n\n\ndef create_image_cubes(X, y, patch_size):\n width, height = X.shape[1], X.shape[0]\n patchesData = create_patches(X, patch_size)\n labels = []\n for i in range(height):\n for j in range(width):\n labels.append(y[i, j])\n patchesLabels = np.array(labels)\n return patchesData, patchesLabels\n\n\ndef generate_training_data(X, y, patch_size, train_ratio=0.7):\n X, y = create_image_cubes(X, y, patch_size)\n X_train, X_test, y_train, y_test = split_train_test(X, y, train_ratio)\n X_train = X_train.reshape(-1, patch_size, patch_size, X.shape[-1], 1)\n X_test = X_test.reshape(-1, patch_size, patch_size, X.shape[-1], 1)\n return X_train, X_test, y_train, 
y_test\n\n\ndef create_hsn_model(input_shape, n_classes):\n input_layer = Input((*input_shape, 1))\n ## convolutional layers\n conv_layer1 = Conv3D(filters=8, kernel_size=(3, 3, 7), activation='relu')(input_layer)\n conv_layer2 = Conv3D(filters=16, kernel_size=(3, 3, 5), activation='relu')(conv_layer1)\n conv_layer3 = Conv3D(filters=32, kernel_size=(3, 3, 3), activation='relu')(conv_layer2)\n conv_layer3 = Reshape((conv_layer3.shape[1], conv_layer3.shape[2], conv_layer3.shape[3] * conv_layer3.shape[4]))(conv_layer3)\n conv_layer4 = Conv2D(filters=64, kernel_size=(3,3), activation='relu')(conv_layer3)\n flatten_layer = Flatten()(conv_layer4)\n ## fully connected layers\n dense_layer1 = Dense(units=256, activation='relu')(flatten_layer)\n dense_layer1 = Dropout(0.4)(dense_layer1)\n dense_layer2 = Dense(units=128, activation='relu')(dense_layer1)\n dense_layer2 = Dropout(0.4)(dense_layer2)\n output_layer = Dense(units=n_classes, activation='softmax')(dense_layer2)\n # define and compile the model with input layer and output layer\n model = Model(inputs=input_layer, outputs=output_layer)\n adam = keras.optimizers.Adam(learning_rate=0.001, decay=1e-06)\n model.compile(optimizer=adam, loss='categorical_crossentropy', metrics=['accuracy'])\n return model\n\n\ndef predict_hsn_model(model, X, patch_size):\n width, height = X.shape[1], X.shape[0]\n X_pred = create_patches(X, patch_size)\n y_pred = np.argmax(model.predict(X_pred), axis=1)\n return y_pred.reshape(height, width).astype(int)\n\n\nclass HyperspectralModel:\n def __init__(self, name, *args):\n self.name = name\n if self.name == 'svm':\n self.model = SVC(C=150, kernel='rbf')\n elif self.name == 'hsn':\n self.input_shape, self.n_classes = args\n self.model = create_hsn_model(self.input_shape, self.n_classes) # Hybrid Spectral Net\n\n def train(self, X, y, train_ratio=0.7, epochs=50):\n if self.name == 'svm':\n X_train, X_test, y_train, y_test = train_test_split(X.reshape(-1, X.shape[-1]), y, train_size=train_ratio, stratify=y)\n self.model.fit(X_train, y_train)\n return y_test, self.model.predict(X_test)\n elif self.name == 'hsn':\n X = principal_components(X, n_components=self.input_shape[2])\n X_train, X_test, y_train, y_test = generate_training_data(X, y, self.input_shape[0], train_ratio)\n self.history = self.model.fit(x=X_train, y=np_utils.to_categorical(y_train), batch_size=256, epochs=epochs)\n return y_test, np.argmax(self.model.predict(X_test), axis=1)\n\n def predict(self, X):\n if self.name == 'svm':\n return self.model.predict(X.reshape(-1, X.shape[2])).reshape(X.shape[0], X.shape[1])\n elif self.name == 'hsn':\n X = principal_components(X, n_components=self.input_shape[2])\n return predict_hsn_model(self.model, X, self.input_shape[0])\n \n def plot_history():\n if self.history:\n plot_train_history(self.history)\n \n def save(self, filename='model.h5'):\n self.model.save(filename)\n\n def load(self, filename='model.h5'):\n self.model = load_model(filename)\n\n\ndef create_model(name, *args):\n \"\"\"Create a new model: svm or hsn\"\"\"\n return HyperspectralModel(name, *args)\n"
] | [
[
"numpy.amax",
"numpy.absolute",
"scipy.ndimage.gaussian_filter",
"numpy.amin",
"sklearn.model_selection.train_test_split",
"numpy.dstack",
"scipy.ndimage.uniform_filter",
"tensorflow.keras.optimizers.Adam",
"numpy.argmax",
"sklearn.svm.SVC",
"numpy.transpose",
"numpy.exp",
"numpy.angle",
"numpy.array",
"numpy.zeros",
"sklearn.decomposition.PCA",
"numpy.random.randint"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.7",
"1.0",
"0.10",
"1.2",
"0.14",
"0.19",
"1.5",
"0.12",
"0.17",
"0.13",
"1.6",
"1.4",
"1.9",
"1.3",
"1.10",
"0.15",
"0.18",
"0.16",
"1.8"
],
"tensorflow": [
"1.10",
"2.7",
"2.2",
"2.3",
"2.4",
"2.5",
"2.6"
]
}
] |
rcelebi/android-elfali | [
"314d9cd9b607460f8bfea80fc828b1521ca18443",
"4ea14a58a18356ef9e16aba2e7dae84c02afba12",
"4ea14a58a18356ef9e16aba2e7dae84c02afba12",
"4ea14a58a18356ef9e16aba2e7dae84c02afba12",
"314d9cd9b607460f8bfea80fc828b1521ca18443",
"314d9cd9b607460f8bfea80fc828b1521ca18443",
"4ea14a58a18356ef9e16aba2e7dae84c02afba12"
] | [
"jni-build/jni/include/tensorflow/python/training/server_lib_test.py",
"jni-build/jni/include/tensorflow/contrib/learn/python/learn/estimators/dnn_linear_combined.py",
"jni-build/jni/include/tensorflow/contrib/distributions/python/ops/bijector.py",
"jni-build/jni/include/tensorflow/contrib/metrics/python/ops/metric_ops.py",
"jni-build/jni/include/tensorflow/contrib/slim/python/slim/data/parallel_reader.py",
"jni-build/jni/include/tensorflow/contrib/learn/python/learn/tests/io_test.py",
"jni-build/jni/include/tensorflow/tensorboard/backend/handler.py"
] | [
"# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for tf.GrpcServer.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport time\n\nimport numpy as np\nimport tensorflow as tf\n\n\nclass GrpcServerTest(tf.test.TestCase):\n\n def testRunStep(self):\n server = tf.train.Server.create_local_server()\n\n with tf.Session(server.target) as sess:\n c = tf.constant([[2, 1]])\n d = tf.constant([[1], [2]])\n e = tf.matmul(c, d)\n self.assertAllEqual([[4]], sess.run(e))\n # TODO(mrry): Add `server.stop()` and `server.join()` when these work.\n\n def testMultipleSessions(self):\n server = tf.train.Server.create_local_server()\n\n c = tf.constant([[2, 1]])\n d = tf.constant([[1], [2]])\n e = tf.matmul(c, d)\n\n sess_1 = tf.Session(server.target)\n sess_2 = tf.Session(server.target)\n\n self.assertAllEqual([[4]], sess_1.run(e))\n self.assertAllEqual([[4]], sess_2.run(e))\n\n sess_1.close()\n sess_2.close()\n # TODO(mrry): Add `server.stop()` and `server.join()` when these work.\n\n # Verifies behavior of multiple variables with multiple sessions connecting to\n # the same server.\n def testSameVariablesNoClear(self):\n server = tf.train.Server.create_local_server()\n\n with tf.Session(server.target) as sess_1:\n v0 = tf.Variable([[2, 1]], name=\"v0\")\n v1 = tf.Variable([[1], [2]], name=\"v1\")\n v2 = tf.matmul(v0, v1)\n sess_1.run([v0.initializer, v1.initializer])\n self.assertAllEqual([[4]], sess_1.run(v2))\n\n with tf.Session(server.target) as sess_2:\n new_v0 = tf.get_default_graph().get_tensor_by_name(\"v0:0\")\n new_v1 = tf.get_default_graph().get_tensor_by_name(\"v1:0\")\n new_v2 = tf.matmul(new_v0, new_v1)\n self.assertAllEqual([[4]], sess_2.run(new_v2))\n\n # Verifies behavior of tf.Session.reset().\n def testSameVariablesClear(self):\n server = tf.train.Server.create_local_server()\n\n # Creates a graph with 2 variables.\n v0 = tf.Variable([[2, 1]], name=\"v0\")\n v1 = tf.Variable([[1], [2]], name=\"v1\")\n v2 = tf.matmul(v0, v1)\n\n # Verifies that both sessions connecting to the same target return\n # the same results.\n sess_1 = tf.Session(server.target)\n sess_2 = tf.Session(server.target)\n sess_1.run(tf.initialize_all_variables())\n self.assertAllEqual([[4]], sess_1.run(v2))\n self.assertAllEqual([[4]], sess_2.run(v2))\n\n # Resets target. sessions abort. Use sess_2 to verify.\n tf.Session.reset(server.target)\n with self.assertRaises(tf.errors.AbortedError):\n self.assertAllEqual([[4]], sess_2.run(v2))\n\n # Connects to the same target. 
Device memory for the variables would have\n # been released, so they will be unitialized.\n sess_2 = tf.Session(server.target)\n with self.assertRaises(tf.errors.FailedPreconditionError):\n sess_2.run(v2)\n # Reinitialzes the variables.\n sess_2.run(tf.initialize_all_variables())\n self.assertAllEqual([[4]], sess_2.run(v2))\n sess_2.close()\n\n # Verifies behavior of tf.Session.reset() with multiple containers using\n # default container names as defined by the target name.\n def testSameVariablesClearContainer(self):\n # Starts two servers with different names so they map to different\n # resource \"containers\".\n server0 = tf.train.Server({\"local0\": [\"localhost:0\"]}, protocol=\"grpc\",\n start=True)\n server1 = tf.train.Server({\"local1\": [\"localhost:0\"]}, protocol=\"grpc\",\n start=True)\n\n # Creates a graph with 2 variables.\n v0 = tf.Variable(1.0, name=\"v0\")\n v1 = tf.Variable(2.0, name=\"v0\")\n\n # Initializes the variables. Verifies that the values are correct.\n sess_0 = tf.Session(server0.target)\n sess_1 = tf.Session(server1.target)\n sess_0.run(v0.initializer)\n sess_1.run(v1.initializer)\n self.assertAllEqual(1.0, sess_0.run(v0))\n self.assertAllEqual(2.0, sess_1.run(v1))\n\n # Resets container \"local0\". Verifies that v0 is no longer initialized.\n tf.Session.reset(server0.target, [\"local0\"])\n sess = tf.Session(server0.target)\n with self.assertRaises(tf.errors.FailedPreconditionError):\n sess.run(v0)\n # Reinitializes v0 for the following test.\n sess.run(v0.initializer)\n\n # Verifies that v1 is still valid.\n self.assertAllEqual(2.0, sess_1.run(v1))\n\n # Resets container \"local1\". Verifies that v1 is no longer initialized.\n tf.Session.reset(server1.target, [\"local1\"])\n sess = tf.Session(server1.target)\n with self.assertRaises(tf.errors.FailedPreconditionError):\n sess.run(v1)\n # Verifies that v0 is still valid.\n sess = tf.Session(server0.target)\n self.assertAllEqual(1.0, sess.run(v0))\n\n # Verifies behavior of tf.Session.reset() with multiple containers using\n # tf.container.\n def testMultipleContainers(self):\n with tf.container(\"test0\"):\n v0 = tf.Variable(1.0, name=\"v0\")\n with tf.container(\"test1\"):\n v1 = tf.Variable(2.0, name=\"v0\")\n server = tf.train.Server.create_local_server()\n sess = tf.Session(server.target)\n sess.run(tf.initialize_all_variables())\n self.assertAllEqual(1.0, sess.run(v0))\n self.assertAllEqual(2.0, sess.run(v1))\n\n # Resets container. Session aborts.\n tf.Session.reset(server.target, [\"test0\"])\n with self.assertRaises(tf.errors.AbortedError):\n sess.run(v1)\n\n # Connects to the same target. Device memory for the v0 would have\n # been released, so it will be unitialized. 
But v1 should still\n # be valid.\n sess = tf.Session(server.target)\n with self.assertRaises(tf.errors.FailedPreconditionError):\n sess.run(v0)\n self.assertAllEqual(2.0, sess.run(v1))\n\n # Verifies various reset failures.\n def testResetFails(self):\n # Creates variable with container name.\n with tf.container(\"test0\"):\n v0 = tf.Variable(1.0, name=\"v0\")\n # Creates variable with default container.\n v1 = tf.Variable(2.0, name=\"v1\")\n # Verifies resetting the non-existent target returns error.\n with self.assertRaises(tf.errors.NotFoundError):\n tf.Session.reset(\"nonexistent\", [\"test0\"])\n\n # Verifies resetting with config.\n # Verifies that resetting target with no server times out.\n with self.assertRaises(tf.errors.DeadlineExceededError):\n tf.Session.reset(\"grpc://localhost:0\", [\"test0\"],\n config=tf.ConfigProto(operation_timeout_in_ms=5))\n\n # Verifies no containers are reset with non-existent container.\n server = tf.train.Server.create_local_server()\n sess = tf.Session(server.target)\n sess.run(tf.initialize_all_variables())\n self.assertAllEqual(1.0, sess.run(v0))\n self.assertAllEqual(2.0, sess.run(v1))\n # No container is reset, but the server is reset.\n tf.Session.reset(server.target, [\"test1\"])\n # Verifies that both variables are still valid.\n sess = tf.Session(server.target)\n self.assertAllEqual(1.0, sess.run(v0))\n self.assertAllEqual(2.0, sess.run(v1))\n\n def testLargeConstant(self):\n server = tf.train.Server.create_local_server()\n with tf.Session(server.target) as sess:\n const_val = np.empty([10000, 3000], dtype=np.float32)\n const_val.fill(0.5)\n c = tf.constant(const_val)\n shape_t = tf.shape(c)\n self.assertAllEqual([10000, 3000], sess.run(shape_t))\n\n def testLargeFetch(self):\n server = tf.train.Server.create_local_server()\n with tf.Session(server.target) as sess:\n c = tf.fill([10000, 3000], 0.5)\n expected_val = np.empty([10000, 3000], dtype=np.float32)\n expected_val.fill(0.5)\n self.assertAllEqual(expected_val, sess.run(c))\n\n def testLargeFeed(self):\n server = tf.train.Server.create_local_server()\n with tf.Session(server.target) as sess:\n feed_val = np.empty([10000, 3000], dtype=np.float32)\n feed_val.fill(0.5)\n p = tf.placeholder(tf.float32, shape=[10000, 3000])\n min_t = tf.reduce_min(p)\n max_t = tf.reduce_max(p)\n min_val, max_val = sess.run([min_t, max_t], feed_dict={p: feed_val})\n self.assertEqual(0.5, min_val)\n self.assertEqual(0.5, max_val)\n\n def testCloseCancelsBlockingOperation(self):\n server = tf.train.Server.create_local_server()\n sess = tf.Session(server.target)\n\n q = tf.FIFOQueue(10, [tf.float32])\n enqueue_op = q.enqueue(37.0)\n dequeue_t = q.dequeue()\n\n sess.run(enqueue_op)\n sess.run(dequeue_t)\n\n def blocking_dequeue():\n with self.assertRaises(tf.errors.CancelledError):\n sess.run(dequeue_t)\n\n blocking_thread = self.checkedThread(blocking_dequeue)\n blocking_thread.start()\n time.sleep(0.5)\n sess.close()\n blocking_thread.join()\n\n def testSetConfiguration(self):\n config = tf.ConfigProto(\n gpu_options=tf.GPUOptions(per_process_gpu_memory_fraction=0.1))\n\n # Configure a server using the default local server options.\n server = tf.train.Server.create_local_server(config=config, start=False)\n self.assertEqual(\n 0.1,\n server.server_def.default_session_config\n .gpu_options.per_process_gpu_memory_fraction)\n\n # Configure a server using an explicit ServerDefd with an\n # overridden config.\n cluster_def = tf.train.ClusterSpec(\n {\"localhost\": [\"localhost:0\"]}).as_cluster_def()\n server_def 
= tf.train.ServerDef(\n cluster=cluster_def, job_name=\"localhost\", task_index=0,\n protocol=\"grpc\")\n server = tf.train.Server(server_def, config=config, start=False)\n self.assertEqual(\n 0.1,\n server.server_def.default_session_config\n .gpu_options.per_process_gpu_memory_fraction)\n\n def testInvalidHostname(self):\n with self.assertRaisesRegexp(tf.errors.InvalidArgumentError, \"port\"):\n _ = tf.train.Server({\"local\": [\"localhost\"]},\n job_name=\"local\",\n task_index=0)\n\n def testInteractiveSession(self):\n server = tf.train.Server.create_local_server()\n # TODO(b/29900832): Remove this assertion when the bug is fixed.\n a = tf.constant(1.0)\n with self.assertRaisesRegexp(tf.errors.UnimplementedError, \"pruned\"):\n sess = tf.InteractiveSession(target=server.target)\n sess.run(a)\n\n # TODO(b/29900832): The following code fails (without the unimplemented\n # check in `tensorflow::MasterSession`):\n # a = tf.constant(1.0)\n # b = tf.constant(2.0)\n # self.assertEqual(1.0, sess.run(a))\n # self.assertEqual(2.0, sess.run(b))\n\n\nclass ServerDefTest(tf.test.TestCase):\n\n def testLocalServer(self):\n cluster_def = tf.train.ClusterSpec(\n {\"local\": [\"localhost:2222\"]}).as_cluster_def()\n server_def = tf.train.ServerDef(\n cluster=cluster_def, job_name=\"local\", task_index=0, protocol=\"grpc\")\n\n self.assertProtoEquals(\"\"\"\n cluster {\n job { name: 'local' tasks { key: 0 value: 'localhost:2222' } }\n }\n job_name: 'local' task_index: 0 protocol: 'grpc'\n \"\"\", server_def)\n\n # Verifies round trip from Proto->Spec->Proto is correct.\n cluster_spec = tf.train.ClusterSpec(cluster_def)\n self.assertProtoEquals(cluster_def, cluster_spec.as_cluster_def())\n\n def testTwoProcesses(self):\n cluster_def = tf.train.ClusterSpec(\n {\"local\": [\"localhost:2222\", \"localhost:2223\"]}).as_cluster_def()\n server_def = tf.train.ServerDef(\n cluster=cluster_def, job_name=\"local\", task_index=1, protocol=\"grpc\")\n\n self.assertProtoEquals(\"\"\"\n cluster {\n job { name: 'local' tasks { key: 0 value: 'localhost:2222' }\n tasks { key: 1 value: 'localhost:2223' } }\n }\n job_name: 'local' task_index: 1 protocol: 'grpc'\n \"\"\", server_def)\n\n # Verifies round trip from Proto->Spec->Proto is correct.\n cluster_spec = tf.train.ClusterSpec(cluster_def)\n self.assertProtoEquals(cluster_def, cluster_spec.as_cluster_def())\n\n def testTwoJobs(self):\n cluster_def = tf.train.ClusterSpec(\n {\"ps\": [\"ps0:2222\", \"ps1:2222\"],\n \"worker\": [\"worker0:2222\", \"worker1:2222\", \"worker2:2222\"]}\n ).as_cluster_def()\n server_def = tf.train.ServerDef(\n cluster=cluster_def, job_name=\"worker\", task_index=2, protocol=\"grpc\")\n\n self.assertProtoEquals(\"\"\"\n cluster {\n job { name: 'ps' tasks { key: 0 value: 'ps0:2222' }\n tasks { key: 1 value: 'ps1:2222' } }\n job { name: 'worker' tasks { key: 0 value: 'worker0:2222' }\n tasks { key: 1 value: 'worker1:2222' }\n tasks { key: 2 value: 'worker2:2222' } }\n }\n job_name: 'worker' task_index: 2 protocol: 'grpc'\n \"\"\", server_def)\n\n # Verifies round trip from Proto->Spec->Proto is correct.\n cluster_spec = tf.train.ClusterSpec(cluster_def)\n self.assertProtoEquals(cluster_def, cluster_spec.as_cluster_def())\n\n def testClusterSpec(self):\n cluster_spec = tf.train.ClusterSpec(\n {\"ps\": [\"ps0:2222\", \"ps1:2222\"],\n \"worker\": [\"worker0:2222\", \"worker1:2222\", \"worker2:2222\"]})\n\n expected_proto = \"\"\"\n job { name: 'ps' tasks { key: 0 value: 'ps0:2222' }\n tasks { key: 1 value: 'ps1:2222' } }\n job { name: 'worker' tasks 
{ key: 0 value: 'worker0:2222' }\n tasks { key: 1 value: 'worker1:2222' }\n tasks { key: 2 value: 'worker2:2222' } }\n \"\"\"\n\n self.assertProtoEquals(expected_proto, cluster_spec.as_cluster_def())\n self.assertProtoEquals(\n expected_proto, tf.train.ClusterSpec(cluster_spec).as_cluster_def())\n self.assertProtoEquals(\n expected_proto,\n tf.train.ClusterSpec(cluster_spec.as_cluster_def()).as_cluster_def())\n self.assertProtoEquals(\n expected_proto,\n tf.train.ClusterSpec(cluster_spec.as_dict()).as_cluster_def())\n\n\nif __name__ == \"__main__\":\n tf.test.main()\n",
"# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\"\"\"TensorFlow estimators for Linear and DNN joined training models.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\n\nfrom tensorflow.contrib import layers\nfrom tensorflow.contrib.framework.python.ops import variables as contrib_variables\nfrom tensorflow.contrib.layers.python.layers import feature_column_ops\nfrom tensorflow.contrib.learn.python.learn.estimators import composable_model\nfrom tensorflow.contrib.learn.python.learn.estimators import estimator\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import logging_ops\nfrom tensorflow.python.ops import nn\nfrom tensorflow.python.ops import parsing_ops\nfrom tensorflow.python.ops import state_ops\nfrom tensorflow.python.ops import variables\nfrom tensorflow.python.training import training\n\n\n# TODO(ispir): Increase test coverage\nclass _DNNLinearCombinedBaseEstimator(estimator.BaseEstimator):\n \"\"\"An estimator for TensorFlow Linear and DNN joined training models.\n\n Input of `fit`, `train`, and `evaluate` should have following features,\n otherwise there will be a `KeyError`:\n if `weight_column_name` is not `None`, a feature with\n `key=weight_column_name` whose value is a `Tensor`.\n for each `column` in `dnn_feature_columns` + `linear_feature_columns`:\n - if `column` is a `SparseColumn`, a feature with `key=column.name`\n whose `value` is a `SparseTensor`.\n - if `column` is a `WeightedSparseColumn`, two features: the first with\n `key` the id column name, the second with `key` the weight column\n name. Both features' `value` must be a `SparseTensor`.\n - if `column` is a `RealValuedColumn, a feature with `key=column.name`\n whose `value` is a `Tensor`.\n \"\"\"\n\n def __init__(self,\n target_column,\n model_dir=None,\n linear_feature_columns=None,\n linear_optimizer=None,\n dnn_feature_columns=None,\n dnn_optimizer=None,\n dnn_hidden_units=None,\n dnn_activation_fn=nn.relu,\n dnn_dropout=None,\n gradient_clip_norm=None,\n enable_centered_bias=True,\n config=None):\n \"\"\"Initializes a _DNNLinearCombinedBaseEstimator instance.\n\n Args:\n target_column: A _TargetColumn object.\n model_dir: Directory to save model parameters, graph and etc. This can\n also be used to load checkpoints from the directory into a estimator\n to continue training a previously saved model.\n linear_feature_columns: An iterable containing all the feature columns\n used by linear part of the model. All items in the set should be\n instances of classes derived from `FeatureColumn`.\n linear_optimizer: An instance of `tf.Optimizer` used to apply gradients to\n the linear part of the model. 
If `None`, will use a FTRL optimizer.\n dnn_feature_columns: An iterable containing all the feature columns used\n by deep part of the model. All items in the set should be instances of\n classes derived from `FeatureColumn`.\n dnn_optimizer: An instance of `tf.Optimizer` used to apply gradients to\n the deep part of the model. If `None`, will use an Adagrad optimizer.\n dnn_hidden_units: List of hidden units per layer. All layers are fully\n connected.\n dnn_activation_fn: Activation function applied to each layer. If `None`,\n will use `tf.nn.relu`.\n dnn_dropout: When not None, the probability we will drop out\n a given coordinate.\n gradient_clip_norm: A float > 0. If provided, gradients are clipped\n to their global norm with this clipping ratio. See\n tf.clip_by_global_norm for more details.\n enable_centered_bias: A bool. If True, estimator will learn a centered\n bias variable for each class. Rest of the model structure learns the\n residual after centered bias.\n config: RunConfig object to configure the runtime settings.\n\n Raises:\n ValueError: If both linear_feature_columns and dnn_features_columns are\n empty at the same time.\n \"\"\"\n super(_DNNLinearCombinedBaseEstimator, self).__init__(\n model_dir=model_dir, config=config)\n\n num_ps_replicas = config.num_ps_replicas if config else 0\n\n self._linear_model = composable_model.LinearComposableModel(\n num_label_columns=target_column.num_label_columns,\n optimizer=linear_optimizer,\n gradient_clip_norm=gradient_clip_norm,\n num_ps_replicas=num_ps_replicas)\n\n self._dnn_model = composable_model.DNNComposableModel(\n num_label_columns=target_column.num_label_columns,\n hidden_units=dnn_hidden_units,\n optimizer=dnn_optimizer,\n activation_fn=dnn_activation_fn,\n dropout=dnn_dropout,\n gradient_clip_norm=gradient_clip_norm,\n num_ps_replicas=num_ps_replicas) if dnn_hidden_units else None\n\n self._linear_feature_columns = linear_feature_columns\n self._linear_optimizer = linear_optimizer\n self._dnn_feature_columns = dnn_feature_columns\n self._dnn_hidden_units = dnn_hidden_units\n self._centered_bias_weight_collection = \"centered_bias\"\n self._enable_centered_bias = enable_centered_bias\n self._target_column = target_column\n\n @property\n def linear_weights_(self):\n \"\"\"Returns weights per feature of the linear part.\"\"\"\n return self._linear_model.get_weights(model_dir=self._model_dir)\n\n @property\n def linear_bias_(self):\n \"\"\"Returns bias of the linear part.\"\"\"\n return (self._linear_model.get_bias(model_dir=self._model_dir) +\n self.get_variable_value(\"centered_bias_weight\"))\n\n @property\n def dnn_weights_(self):\n \"\"\"Returns weights of deep neural network part.\"\"\"\n return self._dnn_model.get_weights(model_dir=self._model_dir)\n\n @property\n def dnn_bias_(self):\n \"\"\"Returns bias of deep neural network part.\"\"\"\n return (self._dnn_model.get_bias(model_dir=self._model_dir) +\n [self.get_variable_value(\"centered_bias_weight\")])\n\n def _get_feature_dict(self, features):\n if isinstance(features, dict):\n return features\n return {\"\": features}\n\n def _get_train_ops(self, features, targets):\n \"\"\"See base class.\"\"\"\n global_step = contrib_variables.get_global_step()\n assert global_step\n\n features = self._get_feature_dict(features)\n logits = self._logits(features, is_training=True)\n if self._enable_centered_bias:\n centered_bias_step = [self._centered_bias_step(targets, features)]\n else:\n centered_bias_step = []\n with ops.control_dependencies(centered_bias_step):\n loss = 
self._target_column.loss(logits, targets, features)\n logging_ops.scalar_summary(\"loss\", loss)\n\n linear_train_step = self._linear_model.get_train_step(loss)\n dnn_train_step = (self._dnn_model.get_train_step(loss)\n if self._dnn_model else [])\n\n with ops.control_dependencies(linear_train_step + dnn_train_step):\n with ops.get_default_graph().colocate_with(global_step):\n return state_ops.assign_add(global_step, 1).op, loss\n\n def _get_eval_ops(self, features, targets, metrics=None):\n raise NotImplementedError\n\n def _get_predict_ops(self, features):\n \"\"\"See base class.\"\"\"\n features = self._get_feature_dict(features)\n logits = self._logits(features)\n return self._target_column.logits_to_predictions(logits, proba=True)\n\n def _get_feature_ops_from_example(self, examples_batch):\n column_types = layers.create_feature_spec_for_parsing((\n self._get_linear_feature_columns() or []) + (\n self._get_dnn_feature_columns() or []))\n features = parsing_ops.parse_example(examples_batch, column_types)\n return features\n\n def _get_linear_feature_columns(self):\n if not self._linear_feature_columns:\n return None\n feature_column_ops.check_feature_columns(self._linear_feature_columns)\n return sorted(set(self._linear_feature_columns), key=lambda x: x.key)\n\n def _get_dnn_feature_columns(self):\n if not self._dnn_feature_columns:\n return None\n feature_column_ops.check_feature_columns(self._dnn_feature_columns)\n return sorted(set(self._dnn_feature_columns), key=lambda x: x.key)\n\n def _dnn_logits(self, features, is_training):\n return self._dnn_model.build_model(\n features, self._dnn_feature_columns, is_training)\n\n def _linear_logits(self, features, is_training):\n return self._linear_model.build_model(\n features, self._linear_feature_columns, is_training)\n\n def _centered_bias(self):\n centered_bias = variables.Variable(\n array_ops.zeros([self._target_column.num_label_columns]),\n collections=[self._centered_bias_weight_collection,\n ops.GraphKeys.VARIABLES],\n name=\"centered_bias_weight\")\n logging_ops.scalar_summary(\n [\"centered_bias_%d\" % cb for cb in range(\n self._target_column.num_label_columns)],\n array_ops.reshape(centered_bias, [-1]))\n return centered_bias\n\n def _centered_bias_step(self, targets, features):\n centered_bias = ops.get_collection(self._centered_bias_weight_collection)\n batch_size = array_ops.shape(targets)[0]\n logits = array_ops.reshape(\n array_ops.tile(centered_bias[0], [batch_size]),\n [batch_size, self._target_column.num_label_columns])\n loss = self._target_column.loss(logits, targets, features)\n # Learn central bias by an optimizer. 
0.1 is a convervative lr for a single\n # variable.\n return training.AdagradOptimizer(0.1).minimize(loss, var_list=centered_bias)\n\n def _logits(self, features, is_training=False):\n linear_feature_columns = self._get_linear_feature_columns()\n dnn_feature_columns = self._get_dnn_feature_columns()\n if not (linear_feature_columns or dnn_feature_columns):\n raise ValueError(\"Either linear_feature_columns or dnn_feature_columns \"\n \"should be defined.\")\n\n if linear_feature_columns and dnn_feature_columns:\n logits = (self._linear_logits(features, is_training) +\n self._dnn_logits(features, is_training))\n elif dnn_feature_columns:\n logits = self._dnn_logits(features, is_training)\n else:\n logits = self._linear_logits(features, is_training)\n\n if self._enable_centered_bias:\n return nn.bias_add(logits, self._centered_bias())\n else:\n return logits\n\n\nclass DNNLinearCombinedClassifier(_DNNLinearCombinedBaseEstimator):\n \"\"\"A classifier for TensorFlow Linear and DNN joined training models.\n\n Example:\n\n ```python\n education = sparse_column_with_hash_bucket(column_name=\"education\",\n hash_bucket_size=1000)\n occupation = sparse_column_with_hash_bucket(column_name=\"occupation\",\n hash_bucket_size=1000)\n\n education_x_occupation = crossed_column(columns=[education, occupation],\n hash_bucket_size=10000)\n education_emb = embedding_column(sparse_id_column=education, dimension=16,\n combiner=\"sum\")\n occupation_emb = embedding_column(sparse_id_column=occupation, dimension=16,\n combiner=\"sum\")\n\n estimator = DNNLinearCombinedClassifier(\n # common settings\n n_classes=n_classes,\n weight_column_name=weight_column_name,\n # wide settings\n linear_feature_columns=[education_x_occupation],\n linear_optimizer=tf.train.FtrlOptimizer(...),\n # deep settings\n dnn_feature_columns=[education_emb, occupation_emb],\n dnn_hidden_units=[1000, 500, 100],\n dnn_optimizer=tf.train.AdagradOptimizer(...))\n\n # Input builders\n def input_fn_train: # returns x, y\n ...\n def input_fn_eval: # returns x, y\n ...\n estimator.fit(input_fn=input_fn_train)\n estimator.evaluate(input_fn=input_fn_eval)\n estimator.predict(x=x)\n ```\n\n Input of `fit` and `evaluate` should have following features,\n otherwise there will be a `KeyError`:\n if `weight_column_name` is not `None`, a feature with\n `key=weight_column_name` whose value is a `Tensor`.\n for each `column` in `dnn_feature_columns` + `linear_feature_columns`:\n - if `column` is a `SparseColumn`, a feature with `key=column.name`\n whose `value` is a `SparseTensor`.\n - if `column` is a `WeightedSparseColumn`, two features: the first with\n `key` the id column name, the second with `key` the weight column name.\n Both features' `value` must be a `SparseTensor`.\n - if `column` is a `RealValuedColumn, a feature with `key=column.name`\n whose `value` is a `Tensor`.\n \"\"\"\n\n def __init__(self,\n model_dir=None,\n n_classes=2,\n weight_column_name=None,\n linear_feature_columns=None,\n linear_optimizer=None,\n dnn_feature_columns=None,\n dnn_optimizer=None,\n dnn_hidden_units=None,\n dnn_activation_fn=nn.relu,\n dnn_dropout=None,\n gradient_clip_norm=None,\n enable_centered_bias=True,\n config=None):\n \"\"\"Constructs a DNNLinearCombinedClassifier instance.\n\n Args:\n model_dir: Directory to save model parameters, graph and etc. This can\n also be used to load checkpoints from the directory into a estimator\n to continue training a previously saved model.\n n_classes: number of target classes. 
Default is binary classification.\n weight_column_name: A string defining feature column name representing\n weights. It is used to down weight or boost examples during training.\n It will be multiplied by the loss of the example.\n linear_feature_columns: An iterable containing all the feature columns\n used by linear part of the model. All items in the set must be\n instances of classes derived from `FeatureColumn`.\n linear_optimizer: An instance of `tf.Optimizer` used to apply gradients to\n the linear part of the model. If `None`, will use a FTRL optimizer.\n dnn_feature_columns: An iterable containing all the feature columns used\n by deep part of the model. All items in the set must be instances of\n classes derived from `FeatureColumn`.\n dnn_optimizer: An instance of `tf.Optimizer` used to apply gradients to\n the deep part of the model. If `None`, will use an Adagrad optimizer.\n dnn_hidden_units: List of hidden units per layer. All layers are fully\n connected.\n dnn_activation_fn: Activation function applied to each layer. If `None`,\n will use `tf.nn.relu`.\n dnn_dropout: When not None, the probability we will drop out\n a given coordinate.\n gradient_clip_norm: A float > 0. If provided, gradients are clipped\n to their global norm with this clipping ratio. See\n tf.clip_by_global_norm for more details.\n enable_centered_bias: A bool. If True, estimator will learn a centered\n bias variable for each class. Rest of the model structure learns the\n residual after centered bias.\n config: RunConfig object to configure the runtime settings.\n\n Raises:\n ValueError: If `n_classes` < 2.\n ValueError: If both `linear_feature_columns` and `dnn_features_columns`\n are empty at the same time.\n \"\"\"\n\n if n_classes < 2:\n raise ValueError(\"n_classes should be greater than 1. Given: {}\".format(\n n_classes))\n target_column = layers.multi_class_target(\n n_classes=n_classes,\n weight_column_name=weight_column_name)\n super(DNNLinearCombinedClassifier, self).__init__(\n model_dir=model_dir,\n linear_feature_columns=linear_feature_columns,\n linear_optimizer=linear_optimizer,\n dnn_feature_columns=dnn_feature_columns,\n dnn_optimizer=dnn_optimizer,\n dnn_hidden_units=dnn_hidden_units,\n dnn_activation_fn=dnn_activation_fn,\n dnn_dropout=dnn_dropout,\n gradient_clip_norm=gradient_clip_norm,\n enable_centered_bias=enable_centered_bias,\n target_column=target_column,\n config=config)\n\n def predict(self, x=None, input_fn=None, batch_size=None, as_iterable=False):\n \"\"\"Returns predicted classes for given features.\n\n Args:\n x: features.\n input_fn: Input function. If set, x must be None.\n batch_size: Override default batch size.\n as_iterable: If True, return an iterable which keeps yielding predictions\n for each example until inputs are exhausted. Note: The inputs must\n terminate if you want the iterable to terminate (e.g. be sure to pass\n num_epochs=1 if you are using something like read_batch_features).\n\n Returns:\n Numpy array of predicted classes (or an iterable of predicted classes if\n as_iterable is True).\n \"\"\"\n predictions = self.predict_proba(\n x=x, input_fn=input_fn, batch_size=batch_size, as_iterable=as_iterable)\n if as_iterable:\n return (np.argmax(p, axis=0) for p in predictions)\n else:\n return np.argmax(predictions, axis=1)\n\n def predict_proba(\n self, x=None, input_fn=None, batch_size=None, as_iterable=False):\n \"\"\"Returns prediction probabilities for given features.\n\n Args:\n x: features.\n input_fn: Input function. 
If set, x and y must be None.\n batch_size: Override default batch size.\n as_iterable: If True, return an iterable which keeps yielding predictions\n for each example until inputs are exhausted. Note: The inputs must\n terminate if you want the iterable to terminate (e.g. be sure to pass\n num_epochs=1 if you are using something like read_batch_features).\n\n Returns:\n Numpy array of predicted probabilities (or an iterable of predicted\n probabilities if as_iterable is True).\n \"\"\"\n return super(DNNLinearCombinedClassifier, self).predict(\n x=x, input_fn=input_fn, batch_size=batch_size, as_iterable=as_iterable)\n\n def _get_eval_ops(self, features, targets, metrics=None):\n \"\"\"See base class.\"\"\"\n features = self._get_feature_dict(features)\n logits = self._logits(features)\n return self._target_column.get_eval_ops(features, logits, targets, metrics)\n\n\nclass DNNLinearCombinedRegressor(_DNNLinearCombinedBaseEstimator):\n \"\"\"A regressor for TensorFlow Linear and DNN joined training models.\n\n Example:\n\n ```python\n education = sparse_column_with_hash_bucket(column_name=\"education\",\n hash_bucket_size=1000)\n occupation = sparse_column_with_hash_bucket(column_name=\"occupation\",\n hash_bucket_size=1000)\n\n education_x_occupation = crossed_column(columns=[education, occupation],\n hash_bucket_size=10000)\n education_emb = embedding_column(sparse_id_column=education, dimension=16,\n combiner=\"sum\")\n occupation_emb = embedding_column(sparse_id_column=occupation, dimension=16,\n combiner=\"sum\")\n\n estimator = DNNLinearCombinedClassifier(\n # common settings\n n_classes=n_classes,\n weight_column_name=weight_column_name,\n # wide settings\n linear_feature_columns=[education_x_occupation],\n linear_optimizer=tf.train.FtrlOptimizer(...),\n # deep settings\n dnn_feature_columns=[education_emb, occupation_emb],\n dnn_hidden_units=[1000, 500, 100],\n dnn_optimizer=tf.train.ProximalAdagradOptimizer(...))\n\n # To apply L1 and L2 regularization, you can set optimizers as follows:\n tf.train.ProximalAdagradOptimizer(\n learning_rate=0.1,\n l1_regularization_strength=0.001,\n l2_regularization_strength=0.001)\n # It is same for FtrlOptimizer.\n\n # Input builders\n def input_fn_train: # returns x, y\n ...\n def input_fn_eval: # returns x, y\n ...\n estimator.train(input_fn_train)\n estimator.evaluate(input_fn_eval)\n estimator.predict(x)\n ```\n\n Input of `fit`, `train`, and `evaluate` should have following features,\n otherwise there will be a `KeyError`:\n if `weight_column_name` is not `None`, a feature with\n `key=weight_column_name` whose value is a `Tensor`.\n for each `column` in `dnn_feature_columns` + `linear_feature_columns`:\n - if `column` is a `SparseColumn`, a feature with `key=column.name`\n whose `value` is a `SparseTensor`.\n - if `column` is a `WeightedSparseColumn`, two features: the first with\n `key` the id column name, the second with `key` the weight column name.\n Both features' `value` must be a `SparseTensor`.\n - if `column` is a `RealValuedColumn, a feature with `key=column.name`\n whose `value` is a `Tensor`.\n \"\"\"\n\n def __init__(self,\n model_dir=None,\n weight_column_name=None,\n linear_feature_columns=None,\n linear_optimizer=None,\n dnn_feature_columns=None,\n dnn_optimizer=None,\n dnn_hidden_units=None,\n dnn_activation_fn=nn.relu,\n dnn_dropout=None,\n gradient_clip_norm=None,\n enable_centered_bias=True,\n target_dimension=1,\n config=None):\n \"\"\"Initializes a DNNLinearCombinedRegressor instance.\n\n Args:\n model_dir: Directory to 
save model parameters, graph and etc. This can\n also be used to load checkpoints from the directory into a estimator\n to continue training a previously saved model.\n weight_column_name: A string defining feature column name representing\n weights. It is used to down weight or boost examples during training. It\n will be multiplied by the loss of the example.\n linear_feature_columns: An iterable containing all the feature columns\n used by linear part of the model. All items in the set must be\n instances of classes derived from `FeatureColumn`.\n linear_optimizer: An instance of `tf.Optimizer` used to apply gradients to\n the linear part of the model. If `None`, will use a FTRL optimizer.\n dnn_feature_columns: An iterable containing all the feature columns used\n by deep part of the model. All items in the set must be instances of\n classes derived from `FeatureColumn`.\n dnn_optimizer: An instance of `tf.Optimizer` used to apply gradients to\n the deep part of the model. If `None`, will use an Adagrad optimizer.\n dnn_hidden_units: List of hidden units per layer. All layers are fully\n connected.\n dnn_activation_fn: Activation function applied to each layer. If None,\n will use `tf.nn.relu`.\n dnn_dropout: When not None, the probability we will drop out\n a given coordinate.\n gradient_clip_norm: A float > 0. If provided, gradients are clipped\n to their global norm with this clipping ratio. See\n tf.clip_by_global_norm for more details.\n enable_centered_bias: A bool. If True, estimator will learn a centered\n bias variable for each class. Rest of the model structure learns the\n residual after centered bias.\n target_dimension: TODO(zakaria): dimension of the target for multilabels.\n config: RunConfig object to configure the runtime settings.\n\n Raises:\n ValueError: If both linear_feature_columns and dnn_features_columns are\n empty at the same time.\n \"\"\"\n target_column = layers.regression_target(\n weight_column_name=weight_column_name,\n target_dimension=target_dimension)\n super(DNNLinearCombinedRegressor, self).__init__(\n model_dir=model_dir,\n linear_feature_columns=linear_feature_columns,\n linear_optimizer=linear_optimizer,\n dnn_feature_columns=dnn_feature_columns,\n dnn_optimizer=dnn_optimizer,\n dnn_hidden_units=dnn_hidden_units,\n dnn_activation_fn=dnn_activation_fn,\n dnn_dropout=dnn_dropout,\n gradient_clip_norm=gradient_clip_norm,\n enable_centered_bias=enable_centered_bias,\n target_column=target_column,\n config=config)\n\n def _get_eval_ops(self, features, targets, metrics=None):\n \"\"\"See base class.\"\"\"\n features = self._get_feature_dict(features)\n logits = self._logits(features)\n return self._target_column.get_eval_ops(features, logits, targets, metrics)\n\n\n",
"# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"An API for reversible (bijective) transformations of random variables.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import math_ops\n\n\nclass _Bijector(object):\n \"\"\"An interface for transforming random variable(s).\n\n A bijector is characterized by three operations:\n\n 1) Forward Evaluation\n Useful for turning one random outcome into another random outcome from a\n different distribution.\n\n 2) Inverse Evaluation\n Useful for \"reversing\" a transformation to compute one probability in terms\n of another.\n\n 3) (log o det o Jacobian o inverse)(x)\n \"The log of the determinant of the matrix of all first-order partial\n derivatives of the inverse function.\"\n Useful for inverting a transformation to compute one probability in terms\n of another. Geometrically, the det(Jacobian) is the volume of the\n transformation and is used to scale the probability.\n\n By convention, transformations of random variables are named in terms of the\n forward transformation. The forward transformation creates samples, the\n inverse is useful for computing probabilities.\n\n Example transformations:\n \"Exponential\"\n\n ```\n Y = g(X) = exp(X)\n X ~ Normal(0, 1) # Univariate.\n ```\n\n Implies:\n\n ```\n g^{-1}(Y) = log(Y)\n |Jacobian(g^{-1})(y)| = 1 / y\n Y ~ LogNormal(0, 1), i.e.,\n prob(Y=y) = |Jacobian(g^{-1})(y)| * prob(X=g^{-1}(y))\n = (1 / y) Normal(log(y); 0, 1)\n ```\n\n \"ShiftAndScale\"\n\n ```\n Y = g(X) = sqrtSigma * X + mu\n X ~ MultivariateNormal(0, I_d)\n ```\n\n Implies:\n\n ```\n g^{-1}(Y) = inv(sqrtSigma) * (Y - mu)\n |Jacobian(g^{-1})(y)| = det(inv(sqrtSigma))\n Y ~ MultivariateNormal(mu, sqrtSigma) , i.e.,\n prob(Y=y) = |Jacobian(g^{-1})(y)| * prob(X=g^{-1}(y))\n = det(sqrtSigma)^(-d) *\n MultivariateNormal(inv(sqrtSigma) * (y - mu); 0, I_d)\n ```\n\n Example use:\n Basic properties:\n\n ```python\n x = ... # A tensor.\n # Evaluate forward transformation.\n fwd_x = my_bijector.forward(x)\n x != my_bijector.forward(fwd_x) # Not equal because g(x) != g(g(x)).\n x == my_bijector.inverse(fwd_x)\n ```\n\n Computing a log-likelihood:\n\n ```python\n def transformed_log_pdf(bijector, log_pdf, x):\n return (bijector.inverse_log_det_jacobian(x) +\n log_pdf(bijector.inverse(x)))\n ```\n\n Transforming a random outcome:\n\n ```python\n def transformed_sample(bijector, x):\n return bijector.forward(x)\n ```\n\n \"\"\"\n\n # TODO(b/30476956): Try to remove constructor dependence on shape util.\n def __init__(self, shaper=None, name=None):\n \"\"\"Constructs Bijector.\n\n A bijector transforms random variables into new random variables. 
Managing\n shape is typically an important piece of this so a Bijector is usually\n composed of ShapeUtil. The ShapeUtil object handles input shape checks as\n well as reshaping/transposing for easier linear algebra operations.\n\n Example:\n ```python\n # Create the Y = g(X) = X transform which operates on 4-Tensors of vectors.\n identity = Identity(ShapeUtil(batch_ndims=4, event_ndims=1))\n\n # Create the Y = g(X) = exp(X) transform which operates on matrices.\n exp = Exp(ShapeUtil(batch_ndims=0, event_ndims=2))\n ```\n\n See Bijector subclass doc for more details and examples.\n\n Args:\n shaper: object used for managing and manipulating shape, typically an\n instance of ShapeUtil.\n name: The name to give Ops created by the initializer.\n \"\"\"\n self._shaper = shaper\n self._name = name or type(self).__name__\n\n @property\n def shaper(self):\n \"\"\"Returns shape object used to manage shape constraints.\"\"\"\n return self._shaper\n\n @property\n def name(self):\n \"\"\"Returns the string name of this bijector.\"\"\"\n return self._name\n\n def forward(self, x, name='forward'):\n \"\"\"Returns the forward bijector evaluation, i.e., X = g(Y).\n\n Args:\n x: `Tensor`. The input to the \"forward\" evaluation.\n name: The name to give this op.\n\n Returns:\n `Tensor`.\n \"\"\"\n with ops.name_scope(self.name):\n with ops.op_scope([x], name):\n x = ops.convert_to_tensor(x)\n return self._forward(x)\n\n def inverse(self, x, name='inverse'):\n \"\"\"Returns the inverse bijector evaluation, i.e., X = g^{-1}(Y).\n\n Args:\n x: `Tensor`. The input to the \"inverse\" evaluation.\n name: The name to give this op.\n\n Returns:\n `Tensor`.\n \"\"\"\n with ops.name_scope(self.name):\n with ops.op_scope([x], name):\n x = ops.convert_to_tensor(x)\n try:\n return self._inverse(x)\n except NotImplementedError:\n return self._inverse_and_inverse_log_det_jacobian(x)[0]\n\n def inverse_log_det_jacobian(self, x, name='inverse_log_det_jacobian'):\n \"\"\"Returns the (log o det o Jacobian o inverse)(x).\n\n Mathematically, returns: log(det(dY/dX g^{-1}))(Y).\n\n Args:\n x: `Tensor`. The input to the \"inverse\" Jacobian evaluation.\n name: The name to give this op.\n\n Returns:\n `Tensor`.\n \"\"\"\n with ops.name_scope(self.name):\n with ops.op_scope([x], name):\n x = ops.convert_to_tensor(x)\n try:\n return self._inverse_log_det_jacobian(x)\n except NotImplementedError:\n return self._inverse_and_inverse_log_det_jacobian(x)[1]\n\n def inverse_and_inverse_log_det_jacobian(\n self, x, name='inverse_and_inverse_log_det_jacobian'):\n \"\"\"Returns both the inverse evaluation and inverse_log_det_jacobian.\n\n Enables possibly more efficient calculation when both inverse and\n corresponding Jacobian are needed.\n\n See `inverse()`, `inverse_log_det_jacobian()` for more details.\n\n Args:\n x: `Tensor`. The input to the \"inverse\" Jacobian evaluation.\n name: The name to give this op.\n\n Returns:\n `Tensor`.\n \"\"\"\n with ops.name_scope(self.name):\n with ops.op_scope([x], name):\n x = ops.convert_to_tensor(x)\n try:\n return self._inverse_and_inverse_log_det_jacobian(x)\n except NotImplementedError:\n return self._inverse(x), self._inverse_log_det_jacobian(x)\n\n # Subclass interface.\n def _forward(self, x):\n \"\"\"Subclass implementation of forward().\n\n Args:\n x: `Tensor`. 
The input to the \"forward\" evaluation.\n\n Raises:\n `NotImplementedError`: if subclass implementation not provided\n\n Returns:\n `Tensor`.\n \"\"\"\n raise NotImplementedError('_forward not implemented')\n\n def _inverse(self, x):\n \"\"\"Subclass implementation of inverse().\n\n Args:\n x: `Tensor`. The input to the \"inverse\" evaluation.\n\n Raises:\n `NotImplementedError`: if subclass implementation not provided\n\n Returns:\n `Tensor`.\n \"\"\"\n raise NotImplementedError('_inverse not implemented')\n\n def _inverse_log_det_jacobian(self, x):\n \"\"\"Subclass implementation of inverse_log_det_jacobian().\n\n Args:\n x: `Tensor`. The input to the \"inverse\" Jacobian evaluation.\n\n Raises:\n `NotImplementedError`: if subclass implementation not provided\n\n Returns:\n `Tensor`.\n \"\"\"\n raise NotImplementedError('_inverse_log_det_jacobian not implemented')\n\n def _inverse_and_inverse_log_det_jacobian(self, x):\n \"\"\"Subclass implementation of inverse_and_inverse_log_det_jacobian().\n\n Args:\n x: `Tensor`. The input to the \"inverse\" evaluation.\n\n Returns:\n List of two `Tensor` items, inverse and inverse_log_det_jacobian.\n \"\"\"\n raise NotImplementedError(\n '_inverse_and_inverse_log_det_jacobian not implemented')\n\n\nclass _Identity(_Bijector):\n \"\"\"Bijector which computes Y = g(X) = X.\n\n Example Use:\n ```python\n # Create the Y=g(X)=X transform which works only on Tensors with 1 batch\n # ndims and 1 event ndim (i.e., vector of vectors).\n identity = Identity(ShapeUtil(batch_ndims=1, event_ndims=1))\n x = [[1., 2],\n [3, 4]]\n x == identity.forward(x) == identity.inverse(x)\n ```\n\n \"\"\"\n\n # TODO(b/30476956): Try to remove constructor dependence on shape util.\n def __init__(self, shaper=None, name='Identity'):\n super(_Identity, self).__init__(shaper, name)\n\n def _forward(self, x):\n return x\n\n def _inverse(self, x):\n return x\n\n def _inverse_log_det_jacobian(self, x):\n result_shape = self.shaper.get_shape(\n x, sample=True, batch=True, event=False)\n return array_ops.zeros(result_shape, dtype=x.dtype)\n\n\nclass _Exp(_Bijector):\n \"\"\"Bijector which computes Y = g(X) = exp(X).\n\n Example Use:\n ```python\n # Create the Y=g(X)=exp(X) transform which works only on Tensors with 1\n # batch ndims and 2 event ndim (i.e., vector of matrices).\n exp = Exp(ShapeUtil(batch_ndims=1, event_ndims=2))\n x = [[[1., 2],\n [3, 4]],\n [[5, 6],\n [7, 8]]]\n exp(x) == exp.forward(x)\n log(x) == exp.inverse(x)\n ```\n\n \"\"\"\n\n # TODO(b/30476956): Try to remove constructor dependence on shape util.\n def __init__(self, shaper=None, name='Exp'):\n super(_Exp, self).__init__(shaper, name)\n\n def _forward(self, x):\n return math_ops.exp(x)\n\n def _inverse(self, x):\n return math_ops.log(x)\n\n def _inverse_log_det_jacobian(self, x):\n d = self.shaper.get_event_dims(x)\n return -math_ops.reduce_sum(math_ops.log(x), d)\n\n def _inverse_and_inverse_log_det_jacobian(self, x):\n y = math_ops.log(x)\n d = self.shaper.get_event_dims(x)\n return y, -math_ops.reduce_sum(y, d)\n",
"# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Contains metric-computing operations on streamed tensors.\n\nModule documentation, including \"@@\" callouts, should be put in\nthird_party/tensorflow/contrib/metrics/__init__.py\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom tensorflow.contrib.framework.python.ops import variables as contrib_variables\n\nfrom tensorflow.contrib.metrics.python.ops import confusion_matrix_ops\nfrom tensorflow.contrib.metrics.python.ops import metric_ops_util\nfrom tensorflow.contrib.metrics.python.ops import set_ops\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import check_ops\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops import nn\nfrom tensorflow.python.ops import sparse_ops\nfrom tensorflow.python.ops import state_ops\nfrom tensorflow.python.ops import variable_scope\nfrom tensorflow.python.ops import variables\nfrom tensorflow.python.util.all_util import make_all\n\n\ndef _mask_to_weights(mask=None):\n \"\"\"Converts a binary mask to a set of weights.\n\n Args:\n mask: A binary `Tensor`.\n\n Returns:\n The corresponding set of weights if `mask` is not `None`, otherwise `None`.\n \"\"\"\n if mask is not None:\n check_ops.assert_type(mask, dtypes.bool)\n weights = math_ops.logical_not(mask)\n else:\n weights = None\n return weights\n\n\ndef _create_local(name, shape=None, collections=None, dtype=dtypes.float32):\n \"\"\"Creates a new local variable.\n\n Args:\n name: The name of the new or existing variable.\n shape: Shape of the new or existing variable.\n collections: A list of collection names to which the Variable will be added.\n dtype: Data type of the variables.\n\n Returns:\n The created variable.\n \"\"\"\n # Make sure local variables are added to tf.GraphKeys.LOCAL_VARIABLES\n collections = list(collections or [])\n collections += [ops.GraphKeys.LOCAL_VARIABLES]\n return variables.Variable(\n initial_value=array_ops.zeros(shape, dtype=dtype),\n name=name,\n trainable=False,\n collections=collections)\n\n\ndef _count_condition(values, ignore_mask=None, metrics_collections=None,\n updates_collections=None):\n \"\"\"Computes the total number of cases where the given values are True.\n\n Args:\n values: A binary `Tensor` of arbitrary size.\n ignore_mask: An optional, binary tensor whose size matches 'values'.\n metrics_collections: An optional list of collections that the metric\n value variable should be added to.\n updates_collections: An optional list of collections that the metric update\n ops should be added to.\n\n Returns:\n value_tensor: A tensor representing the current value of the metric.\n update_op: An operation that accumulates the error from a batch of data.\n\n 
Raises:\n ValueError: If either `metrics_collections` or `updates_collections` are not\n a list or tuple.\n \"\"\"\n check_ops.assert_type(values, dtypes.bool)\n count = _create_local('count', shape=[])\n\n if ignore_mask is not None:\n values.get_shape().assert_is_compatible_with(ignore_mask.get_shape())\n check_ops.assert_type(ignore_mask, dtypes.bool)\n values = math_ops.select(\n ignore_mask,\n array_ops.zeros_like(values),\n values)\n values = math_ops.to_float(values)\n\n value_tensor = array_ops.identity(count)\n update_op = state_ops.assign_add(count, math_ops.reduce_sum(values))\n\n if metrics_collections:\n ops.add_to_collections(metrics_collections, value_tensor)\n\n if updates_collections:\n ops.add_to_collections(updates_collections, update_op)\n\n return value_tensor, update_op\n\n\ndef _streaming_true_positives(predictions, labels, ignore_mask=None,\n metrics_collections=None,\n updates_collections=None,\n name=None):\n \"\"\"Computes the total number of true_positives.\n\n Args:\n predictions: The predicted values, a binary `Tensor` of arbitrary\n dimensions.\n labels: The ground truth values, a binary `Tensor` whose dimensions must\n match `predictions`.\n ignore_mask: An optional, binary tensor whose size matches 'predictions'.\n metrics_collections: An optional list of collections that the metric\n value variable should be added to.\n updates_collections: An optional list of collections that the metric update\n ops should be added to.\n name: An optional variable_op_scope name.\n\n Returns:\n value_tensor: A tensor representing the current value of the metric.\n update_op: An operation that accumulates the error from a batch of data.\n\n Raises:\n ValueError: If either `metrics_collections` or `updates_collections` are not\n a list or tuple.\n \"\"\"\n with variable_scope.variable_op_scope(\n [predictions, labels], name, 'true_positives'):\n\n predictions.get_shape().assert_is_compatible_with(labels.get_shape())\n is_true_positive = math_ops.logical_and(math_ops.equal(labels, 1),\n math_ops.equal(predictions, 1))\n return _count_condition(is_true_positive, ignore_mask, metrics_collections,\n updates_collections)\n\n\ndef _streaming_false_positives(predictions, labels, ignore_mask=None,\n metrics_collections=None,\n updates_collections=None,\n name=None):\n \"\"\"Computes the total number of false positives.\n\n Args:\n predictions: The predicted values, a binary `Tensor` of arbitrary\n dimensions.\n labels: The ground truth values, a binary `Tensor` whose dimensions must\n match `predictions`.\n ignore_mask: An optional, binary tensor whose size matches 'predictions'.\n metrics_collections: An optional list of collections that the metric\n value variable should be added to.\n updates_collections: An optional list of collections that the metric update\n ops should be added to.\n name: An optional variable_op_scope name.\n\n Returns:\n value_tensor: A tensor representing the current value of the metric.\n update_op: An operation that accumulates the error from a batch of data.\n\n Raises:\n ValueError: If either `metrics_collections` or `updates_collections` are not\n a list or tuple.\n \"\"\"\n with variable_scope.variable_op_scope(\n [predictions, labels], name, 'false_positives'):\n\n predictions.get_shape().assert_is_compatible_with(labels.get_shape())\n is_false_positive = math_ops.logical_and(math_ops.equal(labels, 0),\n math_ops.equal(predictions, 1))\n return _count_condition(is_false_positive, ignore_mask,\n metrics_collections, updates_collections)\n\n\ndef 
_streaming_false_negatives(predictions, labels, ignore_mask=None,\n metrics_collections=None,\n updates_collections=None,\n name=None):\n \"\"\"Computes the total number of false negatives.\n\n Args:\n predictions: The predicted values, a binary `Tensor` of arbitrary\n dimensions.\n labels: The ground truth values, a binary `Tensor` whose dimensions must\n match `predictions`.\n ignore_mask: An optional, binary tensor whose size matches 'predictions'.\n metrics_collections: An optional list of collections that the metric\n value variable should be added to.\n updates_collections: An optional list of collections that the metric update\n ops should be added to.\n name: An optional variable_op_scope name.\n\n Returns:\n value_tensor: A tensor representing the current value of the metric.\n update_op: An operation that accumulates the error from a batch of data.\n\n Raises:\n ValueError: If either `metrics_collections` or `updates_collections` are not\n a list or tuple.\n \"\"\"\n with variable_scope.variable_op_scope(\n [predictions, labels], name, 'false_negatives'):\n\n predictions.get_shape().assert_is_compatible_with(labels.get_shape())\n is_false_negative = math_ops.logical_and(math_ops.equal(labels, 1),\n math_ops.equal(predictions, 0))\n return _count_condition(is_false_negative, ignore_mask,\n metrics_collections, updates_collections)\n\n\ndef streaming_mean(values, weights=None, metrics_collections=None,\n updates_collections=None, name=None):\n \"\"\"Computes the (weighted) mean of the given values.\n\n The `streaming_mean` function creates two local variables, `total` and `count`\n that are used to compute the average of `values`. This average is ultimately\n returned as `mean` which is an idempotent operation that simply divides\n `total` by `count`. To facilitate the estimation of a mean over a stream\n of data, the function creates an `update_op` operation whose behavior is\n dependent on the value of `weights`. If `weights` is None, then `update_op`\n increments `total` with the reduced sum of `values` and increments `count`\n with the number of elements in `values`. If `weights` is not `None`, then\n `update_op` increments `total` with the reduced sum of the product of `values`\n and `weights` and increments `count` with the reduced sum of weights.\n In addition to performing the updates, `update_op` also returns the\n `mean`.\n\n Args:\n values: A `Tensor` of arbitrary dimensions.\n weights: An optional set of weights of the same shape as `values`. 
If\n `weights` is not None, the function computes a weighted mean.\n metrics_collections: An optional list of collections that `mean`\n should be added to.\n updates_collections: An optional list of collections that `update_op`\n should be added to.\n name: An optional variable_op_scope name.\n\n Returns:\n mean: A tensor representing the current mean, the value of `total` divided\n by `count`.\n update_op: An operation that increments the `total` and `count` variables\n appropriately and whose value matches `mean_value`.\n\n Raises:\n ValueError: If `weights` is not `None` and its shape doesn't match `values`\n or if either `metrics_collections` or `updates_collections` are not a list\n or tuple.\n \"\"\"\n with variable_scope.variable_op_scope([values, weights], name, 'mean'):\n values = math_ops.to_float(values)\n\n total = _create_local('total', shape=[])\n count = _create_local('count', shape=[])\n\n if weights is not None:\n values.get_shape().assert_is_compatible_with(weights.get_shape())\n weights = math_ops.to_float(weights)\n values = math_ops.mul(values, weights)\n num_values = math_ops.reduce_sum(weights)\n else:\n num_values = math_ops.to_float(array_ops.size(values))\n\n total_compute_op = state_ops.assign_add(total, math_ops.reduce_sum(values))\n count_compute_op = state_ops.assign_add(count, num_values)\n\n def compute_mean(total, count, name):\n return math_ops.select(math_ops.greater(count, 0),\n math_ops.div(total, count),\n 0, name)\n\n mean = compute_mean(total, count, 'value')\n with ops.control_dependencies([total_compute_op, count_compute_op]):\n update_op = compute_mean(total, count, 'update_op')\n\n if metrics_collections:\n ops.add_to_collections(metrics_collections, mean)\n\n if updates_collections:\n ops.add_to_collections(updates_collections, update_op)\n\n return mean, update_op\n\n\ndef streaming_mean_tensor(values, weights=None, metrics_collections=None,\n updates_collections=None, name=None):\n \"\"\"Computes the element-wise (weighted) mean of the given tensors.\n\n In contrast to the `streaming_mean` function which returns a scalar with the\n mean, this function returns an average tensor with the same shape as the\n input tensors.\n\n The `streaming_mean_tensor` function creates two local variables,\n `total_tensor` and `count_tensor` that are used to compute the average of\n `values`. This average is ultimately returned as `mean` which is an idempotent\n operation that simply divides `total` by `count`. To facilitate the estimation\n of a mean over a stream of data, the function creates an `update_op` operation\n whose behavior is dependent on the value of `weights`. If `weights` is None,\n then `update_op` increments `total` with the reduced sum of `values` and\n increments `count` with the number of elements in `values`. If `weights` is\n not `None`, then `update_op` increments `total` with the reduced sum of the\n product of `values` and `weights` and increments `count` with the reduced sum\n of weights. In addition to performing the updates, `update_op` also returns\n the `mean`.\n\n Args:\n values: A `Tensor` of arbitrary dimensions.\n weights: An optional set of weights of the same shape as `values`. 
If\n `weights` is not None, the function computes a weighted mean.\n metrics_collections: An optional list of collections that `mean`\n should be added to.\n updates_collections: An optional list of collections that `update_op`\n should be added to.\n name: An optional variable_op_scope name.\n\n Returns:\n mean: A float tensor representing the current mean, the value of `total`\n divided by `count`.\n update_op: An operation that increments the `total` and `count` variables\n appropriately and whose value matches `mean_value`.\n\n Raises:\n ValueError: If `weights` is not `None` and its shape doesn't match `values`\n or if either `metrics_collections` or `updates_collections` are not a list\n or tuple.\n \"\"\"\n with variable_scope.variable_op_scope([values, weights], name, 'mean'):\n total = _create_local('total_tensor', shape=values.get_shape())\n count = _create_local('count_tensor', shape=values.get_shape())\n\n if weights is not None:\n values.get_shape().assert_is_compatible_with(weights.get_shape())\n weights = math_ops.to_float(weights)\n values = math_ops.mul(values, weights)\n num_values = weights\n else:\n num_values = array_ops.ones_like(values)\n\n total_compute_op = state_ops.assign_add(total, values)\n count_compute_op = state_ops.assign_add(count, num_values)\n\n def compute_mean(total, count, name):\n non_zero_count = math_ops.maximum(count,\n array_ops.ones_like(count),\n name=name)\n return math_ops.truediv(total, non_zero_count, name=name)\n\n mean = compute_mean(total, count, 'value')\n with ops.control_dependencies([total_compute_op, count_compute_op]):\n update_op = compute_mean(total, count, 'update_op')\n\n if metrics_collections:\n ops.add_to_collections(metrics_collections, mean)\n\n if updates_collections:\n ops.add_to_collections(updates_collections, update_op)\n\n return mean, update_op\n\n\ndef streaming_accuracy(predictions, labels, weights=None,\n metrics_collections=None, updates_collections=None,\n name=None):\n \"\"\"Calculates how often `predictions` matches `labels`.\n\n The `streaming_accuracy` function creates two local variables, `total` and\n `count` that are used to compute the frequency with which `predictions`\n matches `labels`. This frequency is ultimately returned as `accuracy`: an\n idempotent operation that simply divides `total` by `count`.\n To facilitate the estimation of the accuracy over a stream of data, the\n function utilizes two operations. First, an `is_correct` operation that\n computes a tensor whose shape matches `predictions` and whose elements are\n set to 1.0 when the corresponding values of `predictions` and `labels match\n and 0.0 otherwise. Second, an `update_op` operation whose behavior is\n dependent on the value of `weights`. If `weights` is None, then `update_op`\n increments `total` with the number of elements of `predictions` that match\n `labels` and increments `count` with the number of elements in `values`. If\n `weights` is not `None`, then `update_op` increments `total` with the reduced\n sum of the product of `weights` and `is_correct` and increments `count` with\n the reduced sum of `weights`. 
In addition to performing the updates,\n `update_op` also returns the `accuracy` value.\n\n Args:\n predictions: The predicted values, a `Tensor` of any shape.\n labels: The ground truth values, a `Tensor` whose shape matches\n `predictions`.\n weights: An optional set of weights whose shape matches `predictions`\n which, when not `None`, produces a weighted mean accuracy.\n metrics_collections: An optional list of collections that `accuracy` should\n be added to.\n updates_collections: An optional list of collections that `update_op` should\n be added to.\n name: An optional variable_op_scope name.\n\n Returns:\n accuracy: A tensor representing the accuracy, the value of `total` divided\n by `count`.\n update_op: An operation that increments the `total` and `count` variables\n appropriately and whose value matches `accuracy`.\n\n Raises:\n ValueError: If the dimensions of `predictions` and `labels` don't match or\n if `weight` is not `None` and its shape doesn't match `predictions` or\n if either `metrics_collections` or `updates_collections` are not\n a list or tuple.\n \"\"\"\n predictions, labels = metric_ops_util.remove_squeezable_dimensions(\n predictions, labels)\n predictions.get_shape().assert_is_compatible_with(labels.get_shape())\n is_correct = math_ops.to_float(math_ops.equal(predictions, labels))\n return streaming_mean(is_correct, weights, metrics_collections,\n updates_collections, name or 'accuracy')\n\n\ndef streaming_precision(predictions, labels, ignore_mask=None,\n metrics_collections=None, updates_collections=None,\n name=None):\n \"\"\"Computes the precision of the predictions with respect to the labels.\n\n The `streaming_precision` function creates two local variables,\n `true_positives` and `false_positives`, that are used to compute the\n precision. This value is ultimately returned as `precision`, an idempotent\n operation that simply divides `true_positives` by the sum of `true_positives`\n and `false_positives`. To facilitate the calculation of the precision over a\n stream of data, the function creates an `update_op` operation whose behavior\n is dependent on the value of `ignore_mask`. If `ignore_mask` is None, then\n `update_op` increments `true_positives` with the number of elements of\n `predictions` and `labels` that are both `True` and increments\n `false_positives` with the number of elements of `predictions` that are `True`\n whose corresponding `labels` element is `False`. If `ignore_mask` is not\n `None`, then the increments for `true_positives` and `false_positives` are\n only computed using elements of `predictions` and `labels` whose corresponding\n values in `ignore_mask` are `False`. 
In addition to performing the updates,\n `update_op` also returns the value of `precision`.\n\n Args:\n predictions: The predicted values, a binary `Tensor` of arbitrary shape.\n labels: The ground truth values, a binary `Tensor` whose dimensions must\n match `predictions`.\n ignore_mask: An optional, binary tensor whose size matches `predictions`.\n metrics_collections: An optional list of collections that `precision` should\n be added to.\n updates_collections: An optional list of collections that `update_op` should\n be added to.\n name: An optional variable_op_scope name.\n\n Returns:\n precision: Scalar float `Tensor` with the value of `true_positives`\n divided by the sum of `true_positives` and `false_positives`.\n update_op: `Operation` that increments `true_positives` and\n `false_positives` variables appropriately and whose value matches\n `precision`.\n\n Raises:\n ValueError: If the dimensions of `predictions` and `labels` don't match or\n if `ignore_mask` is not `None` and its shape doesn't match `predictions`\n or if either `metrics_collections` or `updates_collections` are not a list\n or tuple.\n \"\"\"\n with variable_scope.variable_op_scope(\n [predictions, labels], name, 'precision'):\n\n predictions, labels = metric_ops_util.remove_squeezable_dimensions(\n predictions, labels)\n predictions.get_shape().assert_is_compatible_with(labels.get_shape())\n\n true_positives, true_positives_update_op = _streaming_true_positives(\n predictions, labels, ignore_mask, metrics_collections=None,\n updates_collections=None, name=None)\n false_positives, false_positives_update_op = _streaming_false_positives(\n predictions, labels, ignore_mask, metrics_collections=None,\n updates_collections=None, name=None)\n\n def compute_precision(name):\n return math_ops.select(\n math_ops.greater(true_positives + false_positives, 0),\n math_ops.div(true_positives, true_positives + false_positives),\n 0,\n name)\n\n precision = compute_precision('value')\n with ops.control_dependencies([true_positives_update_op,\n false_positives_update_op]):\n update_op = compute_precision('update_op')\n\n if metrics_collections:\n ops.add_to_collections(metrics_collections, precision)\n\n if updates_collections:\n ops.add_to_collections(updates_collections, update_op)\n\n return precision, update_op\n\n\ndef streaming_recall(predictions, labels, ignore_mask=None,\n metrics_collections=None, updates_collections=None,\n name=None):\n \"\"\"Computes the recall of the predictions with respect to the labels.\n\n The `streaming_recall` function creates two local variables,\n `true_positives` and `false_negatives`, that are used to compute the\n recall. This value is ultimately returned as `recall`, an idempotent\n operation that simply divides `true_positives` by the sum of `true_positives`\n and `false_negatives`. To facilitate the calculation of the recall over a\n stream of data, the function creates an `update_op` operation whose behavior\n is dependent on the value of `ignore_mask`. If `ignore_mask` is None, then\n `update_op` increments `true_positives` with the number of elements of\n `predictions` and `labels` that are both `True` and increments\n `false_negatives` with the number of elements of `predictions` that are\n `False` whose corresponding `labels` element is `False`. If `ignore_mask` is\n not `None`, then the increments for `true_positives` and `false_negatives` are\n only computed using elements of `predictions` and `labels` whose corresponding\n values in `ignore_mask` are `False`. 
In addition to performing the updates,\n `update_op` also returns the value of `recall`.\n\n Args:\n predictions: The predicted values, a binary `Tensor` of arbitrary shape.\n labels: The ground truth values, a binary `Tensor` whose dimensions must\n match `predictions`.\n ignore_mask: An optional, binary tensor whose size matches `predictions`.\n metrics_collections: An optional list of collections that `recall` should\n be added to.\n updates_collections: An optional list of collections that `update_op` should\n be added to.\n name: An optional variable_op_scope name.\n\n Returns:\n recall: Scalar float `Tensor` with the value of `true_positives` divided\n by the sum of `true_positives` and `false_negatives`.\n update_op: `Operation` that increments `true_positives` and\n `false_negatives` variables appropriately and whose value matches\n `recall`.\n\n Raises:\n ValueError: If the dimensions of `predictions` and `labels` don't match or\n if `ignore_mask` is not `None` and its shape doesn't match `predictions`\n or if either `metrics_collections` or `updates_collections` are not a list\n or tuple.\n \"\"\"\n with variable_scope.variable_op_scope([predictions, labels], name, 'recall'):\n predictions, labels = metric_ops_util.remove_squeezable_dimensions(\n predictions, labels)\n predictions.get_shape().assert_is_compatible_with(labels.get_shape())\n\n true_positives, true_positives_update_op = _streaming_true_positives(\n predictions, labels, ignore_mask, metrics_collections=None,\n updates_collections=None, name=None)\n false_negatives, false_negatives_update_op = _streaming_false_negatives(\n predictions, labels, ignore_mask, metrics_collections=None,\n updates_collections=None, name=None)\n\n def compute_recall(true_positives, false_negatives, name):\n return math_ops.select(\n math_ops.greater(true_positives + false_negatives, 0),\n math_ops.div(true_positives, true_positives + false_negatives),\n 0,\n name)\n\n recall = compute_recall(true_positives, false_negatives, 'value')\n with ops.control_dependencies([true_positives_update_op,\n false_negatives_update_op]):\n update_op = compute_recall(true_positives, false_negatives, 'update_op')\n\n if metrics_collections:\n ops.add_to_collections(metrics_collections, recall)\n\n if updates_collections:\n ops.add_to_collections(updates_collections, update_op)\n\n return recall, update_op\n\n\ndef _tp_fn_tn_fp(predictions, labels, thresholds, ignore_mask=None):\n \"\"\"Computes true_positives, false_negatives, true_negatives, false_positives.\n\n The `_tp_fn_tn_fp` function creates four local variables, `true_positives`,\n `true_negatives`, `false_positives` and `false_negatives`.\n `true_positive[i]` is defined` as the number of values in `predictions` above\n `thresholds[i]` whose corresponding entry in `labels` is `True`.\n `false_negatives[i]` is defined` as the number of values in `predictions` at\n most `thresholds[i]` whose corresponding entry in `labels` is `True`.\n `true_negatives[i]` is defined` as the number of values in `predictions` at\n most `thresholds[i]` whose corresponding entry in `labels` is `False`.\n `false_positives[i]` is defined` as the number of values in `predictions`\n above `thresholds[i]` whose corresponding entry in `labels` is `False`.\n\n These four variables are updated through the `update_op`.\n The streaming behavior is that the values of the variables after a few\n `update_op`s is the same as if the inputs had been concatenated and a single\n `update_op` had been performed.\n\n If `ignore_mask` is not 
`None`, then the increment of the variables is\n performed using only the elements of `predictions` and `labels` whose\n corresponding value in `ignore_mask` is `False`.\n\n Args:\n predictions: A floating point `Tensor` of arbitrary shape and whose values\n are in the range `[0, 1]`.\n labels: A binary `Tensor` whose shape matches `predictions`.\n thresholds: A python list or tuple of float thresholds in `[0, 1]`.\n ignore_mask: An optional, binary tensor whose size matches `predictions`.\n\n Returns:\n true_positive: A variable of shape [len(thresholds)].\n false_negative: A variable of shape [len(thresholds)].\n true_negatives: A variable of shape [len(thresholds)].\n false_positives: A variable of shape [len(thresholds)].\n true_positives_update_op: An operation that increments the `true_positives`.\n false_negative_update_op: An operation that increments the `false_negative`.\n true_negatives_update_op: An operation that increments the `true_negatives`.\n false_positives_update_op: An operation that increments the\n `false_positives`.\n\n Raises:\n ValueError: If the shape of `predictions` and `labels` do not match or if\n `ignore_mask` is not `None` and its shape doesn't match `predictions`\n or if either `metrics_collections` or `updates_collections` are not a list\n or tuple.\n \"\"\"\n predictions, labels = metric_ops_util.remove_squeezable_dimensions(\n predictions, labels)\n predictions.get_shape().assert_is_compatible_with(labels.get_shape())\n\n num_thresholds = len(thresholds)\n\n # Reshape predictions and labels to be column vectors\n predictions = array_ops.reshape(predictions, [-1, 1])\n labels = array_ops.reshape(labels, [-1, 1])\n\n # Use static shape if known.\n num_predictions = predictions.get_shape().as_list()[0]\n\n # Otherwise use dynamic shape.\n if num_predictions is None:\n num_predictions = array_ops.shape(predictions)[0]\n thresh_tiled = array_ops.tile(\n array_ops.expand_dims(array_ops.constant(thresholds), [1]),\n array_ops.pack([1, num_predictions]))\n\n # Tile the predictions after thresholding them across different thresholds.\n pred_tiled = math_ops.cast(\n math_ops.greater(\n array_ops.tile(\n array_ops.transpose(predictions), [num_thresholds, 1]),\n thresh_tiled),\n dtype=dtypes.int32)\n # Tile labels by number of thresholds\n labels_tiled = array_ops.tile(array_ops.transpose(labels),\n [num_thresholds, 1])\n\n true_positives = _create_local('true_positives', shape=[num_thresholds])\n false_negatives = _create_local('false_negatives', shape=[num_thresholds])\n true_negatives = _create_local('true_negatives', shape=[num_thresholds])\n false_positives = _create_local('false_positives', shape=[num_thresholds])\n\n is_true_positive = math_ops.to_float(\n math_ops.logical_and(\n math_ops.equal(labels_tiled, 1), math_ops.equal(pred_tiled, 1)))\n is_false_negative = math_ops.to_float(\n math_ops.logical_and(\n math_ops.equal(labels_tiled, 1), math_ops.equal(pred_tiled, 0)))\n is_false_positive = math_ops.to_float(\n math_ops.logical_and(\n math_ops.equal(labels_tiled, 0), math_ops.equal(pred_tiled, 1)))\n is_true_negative = math_ops.to_float(\n math_ops.logical_and(\n math_ops.equal(labels_tiled, 0), math_ops.equal(pred_tiled, 0)))\n\n if ignore_mask is not None:\n ignore_mask = array_ops.reshape(ignore_mask, [-1, 1])\n mask_tiled = array_ops.tile(array_ops.transpose(ignore_mask),\n [num_thresholds, 1])\n\n labels_tiled.get_shape().assert_is_compatible_with(mask_tiled.get_shape())\n check_ops.assert_type(mask_tiled, dtypes.bool)\n is_true_positive = 
math_ops.select(\n mask_tiled,\n array_ops.zeros_like(labels_tiled, dtype=dtypes.float32),\n is_true_positive)\n is_false_negative = math_ops.select(\n mask_tiled,\n array_ops.zeros_like(labels_tiled, dtype=dtypes.float32),\n is_false_negative)\n is_false_positive = math_ops.select(\n mask_tiled,\n array_ops.zeros_like(labels_tiled, dtype=dtypes.float32),\n is_false_positive)\n is_true_negative = math_ops.select(\n mask_tiled,\n array_ops.zeros_like(labels_tiled, dtype=dtypes.float32),\n is_true_negative)\n\n true_positives_update_op = state_ops.assign_add(\n true_positives, math_ops.reduce_sum(is_true_positive, 1))\n false_negatives_update_op = state_ops.assign_add(\n false_negatives, math_ops.reduce_sum(is_false_negative, 1))\n true_negatives_update_op = state_ops.assign_add(\n true_negatives, math_ops.reduce_sum(is_true_negative, 1))\n false_positives_update_op = state_ops.assign_add(\n false_positives, math_ops.reduce_sum(is_false_positive, 1))\n\n return (true_positives, false_negatives, true_negatives, false_positives,\n true_positives_update_op, false_negatives_update_op,\n true_negatives_update_op, false_positives_update_op)\n\n\ndef streaming_auc(predictions, labels, ignore_mask=None, num_thresholds=200,\n metrics_collections=None, updates_collections=None,\n curve='ROC', name=None):\n \"\"\"Computes the approximate AUC via a Riemann sum.\n\n The `streaming_auc` function creates four local variables, `true_positives`,\n `true_negatives`, `false_positives` and `false_negatives` that are used to\n compute the AUC. To discretize the AUC curve, a linearly spaced set of\n thresholds is used to compute pairs of recall and precision values. The area\n under the ROC-curve is therefore computed using the height of the recall\n values by the false positive rate, while the area under the PR-curve is the\n computed using the height of the precision values by the recall.\n\n This value is ultimately returned as `auc`, an idempotent\n operation the computes the area under a discretized curve of precision versus\n recall values (computed using the afformentioned variables). The\n `num_thresholds` variable controls the degree of discretization with larger\n numbers of thresholds more closely approximating the true AUC.\n\n To faciliate the estimation of the AUC over a stream of data, the function\n creates an `update_op` operation whose behavior is dependent on the value of\n `ignore_mask`. If `ignore_mask` is None, then `update_op` increments the\n `true_positives`, `true_negatives`, `false_positives` and `false_negatives`\n counts with the number of each found in the current `predictions` and `labels`\n `Tensors`. If `ignore_mask` is not `None`, then the increment is performed\n using only the elements of `predictions` and `labels` whose corresponding\n value in `ignore_mask` is `False`. 
In addition to performing the updates,\n `update_op` also returns the `auc`.\n\n Args:\n predictions: A floating point `Tensor` of arbitrary shape and whose values\n are in the range `[0, 1]`.\n labels: A binary `Tensor` whose shape matches `predictions`.\n ignore_mask: An optional, binary tensor whose size matches `predictions`.\n num_thresholds: The number of thresholds to use when discretizing the roc\n curve.\n metrics_collections: An optional list of collections that `auc` should be\n added to.\n updates_collections: An optional list of collections that `update_op` should\n be added to.\n curve: Specifies the name of the curve to be computed, 'ROC' [default] or\n 'PR' for the Precision-Recall-curve.\n name: An optional variable_op_scope name.\n\n Returns:\n auc: A scalar tensor representing the current area-under-curve.\n update_op: An operation that increments the `true_positives`,\n `true_negatives`, `false_positives` and `false_negatives` variables\n appropriately and whose value matches `auc`.\n\n Raises:\n ValueError: If the shape of `predictions` and `labels` do not match or if\n `ignore_mask` is not `None` and its shape doesn't match `predictions` or\n if either `metrics_collections` or `updates_collections` are not a list or\n tuple.\n \"\"\"\n with variable_scope.variable_op_scope([predictions, labels], name, 'auc'):\n if curve != 'ROC' and curve != 'PR':\n raise ValueError('curve must be either ROC or PR, %s unknown' %\n (curve))\n kepsilon = 1e-7 # to account for floating point imprecisions\n thresholds = [(i + 1) * 1.0 / (num_thresholds - 1)\n for i in range(num_thresholds-2)]\n thresholds = [0.0 - kepsilon] + thresholds + [1.0 + kepsilon]\n\n (tp, fn, tn, fp, tp_update_op, fn_update_op, tn_update_op,\n fp_update_op) = _tp_fn_tn_fp(predictions, labels, thresholds, ignore_mask)\n\n # Add epsilons to avoid dividing by 0.\n epsilon = 1.0e-6\n assert array_ops.squeeze(fp).get_shape().as_list()[0] == num_thresholds\n\n def compute_auc(tp, fn, tn, fp, name):\n \"\"\"Computes the roc-auc or pr-auc based on confusion counts.\"\"\"\n recall = math_ops.div(tp + epsilon, tp + fn + epsilon)\n if curve == 'ROC':\n fp_rate = math_ops.div(fp, fp + tn + epsilon)\n x = fp_rate\n y = recall\n else: # curve == 'PR'.\n precision = math_ops.div(tp + epsilon, tp + fp + epsilon)\n x = recall\n y = precision\n return math_ops.reduce_sum(math_ops.mul(\n x[:num_thresholds - 1] - x[1:],\n (y[:num_thresholds - 1] + y[1:]) / 2.), name=name)\n\n # sum up the areas of all the trapeziums\n auc = compute_auc(tp, fn, tn, fp, 'value')\n update_op = compute_auc(\n tp_update_op, fn_update_op, tn_update_op, fp_update_op, 'update_op')\n\n if metrics_collections:\n ops.add_to_collections(metrics_collections, auc)\n\n if updates_collections:\n ops.add_to_collections(updates_collections, update_op)\n\n return auc, update_op\n\n\ndef streaming_precision_at_thresholds(predictions, labels, thresholds,\n ignore_mask=None,\n metrics_collections=None,\n updates_collections=None, name=None):\n \"\"\"Computes precision values for different `thresholds` on `predictions`.\n\n The `streaming_precision_at_thresholds` function creates four local variables,\n `true_positives`, `true_negatives`, `false_positives` and `false_negatives`\n for various values of thresholds.\n `precision[i]` is defined as the number of values in `predictions` above\n `thresholds[i]` whose corresponding entry in `labels` is `True`\n (`true_positives[i]`) divided by the number of values in `predictions`\n above `thresholds[i]` (`true_positives[i] + 
false_positives[i]`).\n\n If `ignore_mask` is not None then only values whose corresponding value in\n `ignore_mask` is `False` are considered.\n\n `precision` is returned along with an `update_op` whose value equals that of\n `precision`.\n\n Args:\n predictions: A floating point `Tensor` of arbitrary shape and whose values\n are in the range `[0, 1]`.\n labels: A binary `Tensor` whose shape matches `predictions`.\n thresholds: A python list or tuple of float thresholds in `[0, 1]`.\n ignore_mask: An optional, binary tensor whose size matches `predictions`.\n metrics_collections: An optional list of collections that `auc` should be\n added to.\n updates_collections: An optional list of collections that `update_op` should\n be added to.\n name: An optional variable_op_scope name.\n\n Returns:\n precision: A float tensor of shape [len(thresholds)].\n update_op: An operation that increments the `true_positives`,\n `true_negatives`, `false_positives` and `false_negatives` variables that\n are used in the computation of `precision`.\n\n Raises:\n ValueError: If the shape of `predictions` and `labels` do not match or if\n `ignore_mask` is not `None` and its shape doesn't match `predictions`\n or if either `metrics_collections` or `updates_collections` are not a list\n or tuple.\n \"\"\"\n with variable_scope.variable_op_scope([predictions, labels], name,\n 'precision_at_thresholds'):\n (true_positives, _, _, false_positives, true_positives_compute_op, _, _,\n false_positives_compute_op,) = _tp_fn_tn_fp(\n predictions, labels, thresholds, ignore_mask)\n\n # avoid division by zero\n epsilon = 1e-7\n def compute_precision(name):\n precision = math_ops.div(true_positives,\n epsilon + true_positives + false_positives,\n name='precision_' + name)\n return precision\n\n precision = compute_precision('value')\n with ops.control_dependencies([true_positives_compute_op,\n false_positives_compute_op]):\n update_op = compute_precision('update_op')\n\n if metrics_collections:\n ops.add_to_collections(metrics_collections, precision)\n\n if updates_collections:\n ops.add_to_collections(updates_collections, update_op)\n\n return precision, update_op\n\n\ndef streaming_recall_at_thresholds(predictions, labels, thresholds,\n ignore_mask=None, metrics_collections=None,\n updates_collections=None, name=None):\n \"\"\"Computes various recall values for different `thresholds` on `predictions`.\n\n The `streaming_recall_at_thresholds` function creates four local variables,\n `true_positives`, `true_negatives`, `false_positives` and `false_negatives`\n for various values of thresholds.\n `recall[i]` is defined as the number of values in `predictions` above\n `thresholds[i]` whose corresponding entry in `labels` is `True`\n (`true_positives[i]`) divided by the number of True values in `labels`\n (`true_positives[i] + false_negatives[i]`).\n\n If `ignore_mask` is not None then only values whose corresponding value in\n `ignore_mask` is `False` are considered.\n\n `recall` are returned along with an `update_op` whose value equals that of\n `recall`.\n\n Args:\n predictions: A floating point `Tensor` of arbitrary shape and whose values\n are in the range `[0, 1]`.\n labels: A binary `Tensor` whose shape matches `predictions`.\n thresholds: A python list or tuple of float thresholds in `[0, 1]`.\n ignore_mask: An optional, binary tensor whose size matches `predictions`.\n metrics_collections: An optional list of collections that `auc` should be\n added to.\n updates_collections: An optional list of collections that 
`update_op` should\n be added to.\n name: An optional variable_op_scope name.\n\n Returns:\n recall: A float tensor of shape [len(thresholds)].\n update_op: An operation that increments the `true_positives`,\n `true_negatives`, `false_positives` and `false_negatives` variables that\n are used in the computation of `recall`.\n\n Raises:\n ValueError: If the shape of `predictions` and `labels` do not match or if\n `ignore_mask` is not `None` and its shape doesn't match `predictions`\n or if either `metrics_collections` or `updates_collections` are not a list\n or tuple.\n \"\"\"\n with variable_scope.variable_op_scope([predictions, labels], name,\n 'recall_at_thresholds'):\n (true_positives, false_negatives, _, _, true_positives_compute_op,\n false_negatives_compute_op, _, _,) = _tp_fn_tn_fp(\n predictions, labels, thresholds, ignore_mask)\n\n # avoid division by zero\n epsilon = 1e-7\n def compute_recall(name):\n recall = math_ops.div(true_positives,\n epsilon + true_positives + false_negatives,\n name='recall_' + name)\n return recall\n\n recall = compute_recall('value')\n with ops.control_dependencies([true_positives_compute_op,\n false_negatives_compute_op]):\n update_op = compute_recall('update_op')\n\n if metrics_collections:\n ops.add_to_collections(metrics_collections, recall)\n\n if updates_collections:\n ops.add_to_collections(updates_collections, update_op)\n\n return recall, update_op\n\n\ndef streaming_recall_at_k(predictions, labels, k, ignore_mask=None,\n metrics_collections=None, updates_collections=None,\n name=None):\n \"\"\"Computes the recall@k of the predictions with respect to dense labels.\n\n The `streaming_recall_at_k` function creates two local variables, `total` and\n `count`, that are used to compute the recall@k frequency. This frequency is\n ultimately returned as `recall_at_<k>`: an idempotent operation that simply\n divides `total` by `count`. To facilitate the estimation of recall@k over a\n stream of data, the function utilizes two operations. First, an `in_top_k`\n operation computes a tensor with shape [batch_size] whose elements indicate\n whether or not the corresponding label is in the top `k` predictions of the\n `predictions` `Tensor`. Second, an `update_op` operation whose behavior is\n dependent on the value of `ignore_mask`. If `ignore_mask` is None, then\n `update_op` increments `total` with the number of elements of `in_top_k` that\n are set to `True` and increments `count` with the batch size. If `ignore_mask`\n is not `None`, then `update_op` increments `total` with the number of elements\n in `in_top_k` that are `True` whose corresponding element in `ignore_mask` is\n `False`. In addition to performing the updates, `update_op` also returns the\n recall value.\n\n Args:\n predictions: A floating point tensor of dimension [batch_size, num_classes]\n labels: A tensor of dimension [batch_size] whose type is in `int32`,\n `int64`.\n k: The number of top elements to look at for computing recall.\n ignore_mask: An optional, binary tensor whose size matches `labels`. If an\n element of `ignore_mask` is True, the corresponding prediction and label\n pair is used to compute the metrics. 
Otherwise, the pair is ignored.\n metrics_collections: An optional list of collections that `recall_at_k`\n should be added to.\n updates_collections: An optional list of collections `update_op` should be\n added to.\n name: An optional variable_op_scope name.\n\n Returns:\n recall_at_k: A tensor representing the recall@k, the fraction of labels\n which fall into the top `k` predictions.\n update_op: An operation that increments the `total` and `count` variables\n appropriately and whose value matches `recall_at_k`.\n\n Raises:\n ValueError: If the dimensions of `predictions` and `labels` don't match or\n if `ignore_mask` is not `None` and its shape doesn't match `predictions`\n or if either `metrics_collections` or `updates_collections` are not a list\n or tuple.\n \"\"\"\n in_top_k = math_ops.to_float(nn.in_top_k(predictions, labels, k))\n return streaming_mean(in_top_k, _mask_to_weights(ignore_mask),\n metrics_collections,\n updates_collections,\n name or ('recall_at_%d' % k))\n\n\n# TODO(ptucker): Validate range of values in labels?\ndef streaming_sparse_recall_at_k(predictions,\n labels,\n k,\n class_id=None,\n ignore_mask=None,\n metrics_collections=None,\n updates_collections=None,\n name=None):\n \"\"\"Computes recall@k of the predictions with respect to sparse labels.\n\n If `class_id` is specified, we calculate recall by considering only the\n entries in the batch for which `class_id` is in the label, and computing\n the fraction of them for which `class_id` is in the top-k `predictions`.\n If `class_id` is not specified, we'll calculate recall as how often on\n average a class among the labels of a batch entry is in the top-k\n `predictions`.\n\n `streaming_sparse_recall_at_k` creates two local variables,\n `true_positive_at_<k>` and `false_negative_at_<k>`, that are used to compute\n the recall_at_k frequency. This frequency is ultimately returned as\n `recall_at_<k>`: an idempotent operation that simply divides\n `true_positive_at_<k>` by total (`true_positive_at_<k>` + `recall_at_<k>`). To\n facilitate the estimation of recall@k over a stream of data, the function\n utilizes three steps.\n * A `top_k` operation computes a tensor whose elements indicate the top `k`\n predictions of the `predictions` `Tensor`.\n * Set operations are applied to `top_k` and `labels` to calculate true\n positives and false negatives.\n * An `update_op` operation increments `true_positive_at_<k>` and\n `false_negative_at_<k>`. It also returns the recall value.\n\n Args:\n predictions: Float `Tensor` with shape [D1, ... DN, num_classes] where\n N >= 1. Commonly, N=1 and predictions has shape [batch size, num_classes].\n The final dimension contains the logit values for each class. [D1, ... DN]\n must match `labels`.\n labels: `int64` `Tensor` or `SparseTensor` with shape\n [D1, ... DN, num_labels], where N >= 1 and num_labels is the number of\n target classes for the associated prediction. Commonly, N=1 and `labels`\n has shape [batch_size, num_labels]. [D1, ... DN] must match `labels`.\n Values should be in range [0, num_classes], where num_classes is the last\n dimension of `predictions`.\n k: Integer, k for @k metric.\n class_id: Integer class ID for which we want binary metrics. This should be\n in range [0, num_classes], where num_classes is the last dimension of\n `predictions`.\n ignore_mask: An optional, binary tensor whose shape is broadcastable to the\n the first [D1, ... 
DN] dimensions of `predictions_idx` and `labels`.\n metrics_collections: An optional list of collections that values should\n be added to.\n updates_collections: An optional list of collections that updates should\n be added to.\n name: Name of new update operation, and namespace for other dependant ops.\n\n Returns:\n recall: Scalar `float64` `Tensor` with the value of `true_positives` divided\n by the sum of `true_positives` and `false_negatives`.\n update_op: `Operation` that increments `true_positives` and\n `false_negatives` variables appropriately, and whose value matches\n `recall`.\n \"\"\"\n default_name = 'recall_at_%d' % k\n if class_id is not None:\n default_name = '%s_class%d' % (default_name, class_id)\n\n with ops.op_scope([predictions, labels], name, default_name) as scope:\n _, top_k_idx = nn.top_k(predictions, k)\n top_k_idx = math_ops.to_int64(top_k_idx)\n tp, tp_update = _streaming_sparse_true_positive_at_k(\n predictions_idx=top_k_idx, labels=labels, k=k, class_id=class_id,\n ignore_mask=ignore_mask)\n fn, fn_update = _streaming_sparse_false_negative_at_k(\n predictions_idx=top_k_idx, labels=labels, k=k, class_id=class_id,\n ignore_mask=ignore_mask)\n\n metric = math_ops.div(tp, math_ops.add(tp, fn), name=scope)\n update = math_ops.div(\n tp_update, math_ops.add(tp_update, fn_update), name='update')\n if metrics_collections:\n ops.add_to_collections(metrics_collections, metric)\n if updates_collections:\n ops.add_to_collections(updates_collections, update)\n return metric, update\n\n\n# TODO(ptucker): Validate range of values in labels?\ndef streaming_sparse_precision_at_k(predictions,\n labels,\n k,\n class_id=None,\n ignore_mask=None,\n metrics_collections=None,\n updates_collections=None,\n name=None):\n \"\"\"Computes precision@k of the predictions with respect to sparse labels.\n\n If `class_id` is specified, we calculate precision by considering only the\n entries in the batch for which `class_id` is in the top-k highest\n `predictions`, and computing the fraction of them for which `class_id` is\n indeed a correct label.\n If `class_id` is not specified, we'll calculate precision as how often on\n average a class among the top-k classes with the highest predicted values\n of a batch entry is correct and can be found in the label for that entry.\n\n `streaming_sparse_precision_at_k` creates two local variables,\n `true_positive_at_<k>` and `false_positive_at_<k>`, that are used to compute\n the precision@k frequency. This frequency is ultimately returned as\n `precision_at_<k>`: an idempotent operation that simply divides\n `true_positive_at_<k>` by total (`true_positive_at_<k>` +\n `false_positive_at_<k>`). To facilitate the estimation of\n precision@k over a stream of data, the function utilizes three\n steps.\n * A `top_k` operation computes a tensor whose elements indicate the top `k`\n predictions of the `predictions` `Tensor`.\n * Set operations are applied to `top_k` and `labels` to calculate true\n positives and false positives.\n * An `update_op` operation increments `true_positive_at_<k>` and\n `false_positive_at_<k>`. It also returns the precision value.\n\n Args:\n predictions: Float `Tensor` with shape [D1, ... DN, num_classes] where\n N >= 1. Commonly, N=1 and predictions has shape [batch size, num_classes].\n The final dimension contains the logit values for each class. [D1, ... DN]\n must match `labels`.\n labels: `int64` `Tensor` or `SparseTensor` with shape\n [D1, ... 
DN, num_labels], where N >= 1 and num_labels is the number of\n target classes for the associated prediction. Commonly, N=1 and `labels`\n has shape [batch_size, num_labels]. [D1, ... DN] must match\n `predictions_idx`. Values should be in range [0, num_classes], where\n num_classes is the last dimension of `predictions`.\n k: Integer, k for @k metric.\n class_id: Integer class ID for which we want binary metrics. This should be\n in range [0, num_classes], where num_classes is the last dimension of\n `predictions`.\n ignore_mask: An optional, binary tensor whose shape is broadcastable to the\n the first [D1, ... DN] dimensions of `predictions_idx` and `labels`.\n metrics_collections: An optional list of collections that values should\n be added to.\n updates_collections: An optional list of collections that updates should\n be added to.\n name: Name of new update operation, and namespace for other dependant ops.\n\n Returns:\n precision: Scalar `float64` `Tensor` with the value of `true_positives`\n divided by the sum of `true_positives` and `false_positives`.\n update_op: `Operation` that increments `true_positives` and\n `false_positives` variables appropriately, and whose value matches\n `precision`.\n \"\"\"\n default_name = 'precision_at_%d' % k\n if class_id is not None:\n default_name = '%s_class%d' % (default_name, class_id)\n with ops.op_scope([predictions, labels], name, default_name) as scope:\n _, top_k_idx = nn.top_k(predictions, k)\n top_k_idx = math_ops.to_int64(top_k_idx)\n tp, tp_update = _streaming_sparse_true_positive_at_k(\n predictions_idx=top_k_idx, labels=labels, k=k, class_id=class_id,\n ignore_mask=ignore_mask)\n fp, fp_update = _streaming_sparse_false_positive_at_k(\n predictions_idx=top_k_idx, labels=labels, k=k, class_id=class_id,\n ignore_mask=ignore_mask)\n\n metric = math_ops.div(tp, math_ops.add(tp, fp), name=scope)\n update = math_ops.div(\n tp_update, math_ops.add(tp_update, fp_update), name='update')\n if metrics_collections:\n ops.add_to_collections(metrics_collections, metric)\n if updates_collections:\n ops.add_to_collections(updates_collections, update)\n return metric, update\n\n\ndef _select_class_id(ids, selected_id):\n \"\"\"Filter all but `selected_id` out of `ids`.\n\n Args:\n ids: `int64` `Tensor` or `SparseTensor` of IDs.\n selected_id: Int id to select.\n\n Returns:\n `SparseTensor` of same dimensions as `ids`, except for the last dimension,\n which might be smaller. This contains only the entries equal to\n `selected_id`.\n \"\"\"\n if isinstance(ids, ops.SparseTensor):\n return sparse_ops.sparse_retain(\n ids, math_ops.equal(ids.values, selected_id))\n\n # TODO(ptucker): Make this more efficient, maybe add a sparse version of\n # tf.equal and tf.reduce_any?\n\n # Shape of filled IDs is the same as `ids` with the last dim collapsed to 1.\n ids_shape = array_ops.shape(ids)\n ids_last_dim = array_ops.size(ids_shape) - 1\n filled_selected_id_shape = math_ops.reduced_shape(\n ids_shape, array_ops.reshape(ids_last_dim, [1]))\n\n # Intersect `ids` with the selected ID.\n filled_selected_id = array_ops.fill(\n filled_selected_id_shape, math_ops.to_int64(selected_id))\n return set_ops.set_intersection(filled_selected_id, ids)\n\n\ndef _maybe_select_class_id(labels, predictions_idx, selected_id=None):\n \"\"\"If class ID is specified, filter all other classes.\n\n Args:\n labels: `int64` `Tensor` or `SparseTensor` with shape\n [D1, ... DN, num_labels], where N >= 1 and num_labels is the number of\n target classes for the associated prediction. 
Commonly, N=1 and `labels`\n has shape [batch_size, num_labels]. [D1, ... DN] must match\n `predictions_idx`.\n predictions_idx: `int64` `Tensor` of class IDs, with shape [D1, ... DN, k]\n where N >= 1. Commonly, N=1 and predictions has shape [batch size, k].\n selected_id: Int id to select.\n\n Returns:\n Tuple of `labels` and `predictions_idx`, possibly with classes removed.\n \"\"\"\n if selected_id is None:\n return labels, predictions_idx\n return (_select_class_id(labels, selected_id),\n _select_class_id(predictions_idx, selected_id))\n\n\ndef _streaming_sparse_true_positive_at_k(predictions_idx,\n labels,\n k,\n class_id=None,\n ignore_mask=None,\n name=None):\n \"\"\"Calculates per step true positives for recall@k and precision@k.\n\n If `class_id` is specified, calculate binary true positives for `class_id`\n only.\n If `class_id` is not specified, calculate metrics for `k` predicted vs\n `n` label classes, where `n` is the 2nd dimension of `labels_sparse`.\n\n Args:\n predictions_idx: 1-D or higher `int64` `Tensor` with last dimension `k`,\n top `k` predicted classes. For rank `n`, the first `n-1` dimensions must\n match `labels`.\n labels: `int64` `Tensor` or `SparseTensor` with shape\n [D1, ... DN, num_labels], where N >= 1 and num_labels is the number of\n target classes for the associated prediction. Commonly, N=1 and `labels`\n has shape [batch_size, num_labels]. [D1, ... DN] must match\n `predictions_idx`.\n k: Integer, k for @k metric. This is only used for default op name.\n class_id: Class for which we want binary metrics.\n ignore_mask: An optional, binary tensor whose shape is broadcastable to the\n the first [D1, ... DN] dimensions of `predictions_idx` and `labels`.\n name: Name of new variable, and namespace for other dependant ops.\n\n Returns:\n A tuple of `Variable` and update `Operation`.\n \"\"\"\n default_name = 'true_positive_at_%d' % k\n if class_id is not None:\n default_name = '%s_class%d' % (default_name, class_id)\n with ops.op_scope([predictions_idx, labels], name, default_name) as scope:\n labels, predictions_idx = _maybe_select_class_id(labels,\n predictions_idx,\n class_id)\n tp = set_ops.set_size(set_ops.set_intersection(predictions_idx, labels))\n if ignore_mask is not None:\n tp = math_ops.select(ignore_mask, array_ops.zeros_like(tp), tp)\n batch_total_tp = math_ops.cast(\n math_ops.reduce_sum(tp), dtype=dtypes.float64)\n\n var = contrib_variables.local_variable(\n array_ops.zeros([], dtype=dtypes.float64), name=scope)\n return var, state_ops.assign_add(var, batch_total_tp, name='update')\n\n\ndef _streaming_sparse_false_positive_at_k(predictions_idx,\n labels,\n k,\n class_id=None,\n ignore_mask=None,\n name=None):\n \"\"\"Calculates per step false positives for precision@k.\n\n If `class_id` is specified, calculate binary true positives for `class_id`\n only.\n If `class_id` is not specified, calculate metrics for `k` predicted vs\n `n` label classes, where `n` is the 2nd dimension of `labels_sparse`.\n\n Args:\n predictions_idx: 1-D or higher `int64` `Tensor` with last dimension `k`,\n top `k` predicted classes. For rank `n`, the first `n-1` dimensions must\n match `labels`.\n labels: `int64` `Tensor` or `SparseTensor` with shape\n [D1, ... DN, num_labels], where N >= 1 and num_labels is the number of\n target classes for the associated prediction. Commonly, N=1 and `labels`\n has shape [batch_size, num_labels]. [D1, ... DN] must match\n `predictions_idx`.\n k: Integer, k for @k metric. 
This is only used for default op name.\n class_id: Class for which we want binary metrics.\n ignore_mask: An optional, binary tensor whose shape is broadcastable to the\n the first [D1, ... DN] dimensions of `predictions_idx` and `labels`.\n name: Name of new variable, and namespace for other dependant ops.\n\n Returns:\n A tuple of `Variable` and update `Operation`.\n \"\"\"\n default_name = 'false_positive_at_%d' % k\n if class_id is not None:\n default_name = '%s_class%d' % (default_name, class_id)\n with ops.op_scope([predictions_idx, labels], name, default_name) as scope:\n labels, predictions_idx = _maybe_select_class_id(labels,\n predictions_idx,\n class_id)\n fp = set_ops.set_size(set_ops.set_difference(predictions_idx,\n labels,\n aminusb=True))\n if ignore_mask is not None:\n fp = math_ops.select(ignore_mask, array_ops.zeros_like(fp), fp)\n batch_total_fp = math_ops.cast(\n math_ops.reduce_sum(fp), dtype=dtypes.float64)\n\n var = contrib_variables.local_variable(\n array_ops.zeros([], dtype=dtypes.float64), name=scope)\n return var, state_ops.assign_add(var, batch_total_fp, name='update')\n\n\ndef _streaming_sparse_false_negative_at_k(predictions_idx,\n labels,\n k,\n class_id=None,\n ignore_mask=None,\n name=None):\n \"\"\"Calculates per step false negatives for recall@k.\n\n If `class_id` is specified, calculate binary true positives for `class_id`\n only.\n If `class_id` is not specified, calculate metrics for `k` predicted vs\n `n` label classes, where `n` is the 2nd dimension of `labels_sparse`.\n\n Args:\n predictions_idx: 1-D or higher `int64` `Tensor` with last dimension `k`,\n top `k` predicted classes. For rank `n`, the first `n-1` dimensions must\n match `labels`.\n labels: `int64` `Tensor` or `SparseTensor` with shape\n [D1, ... DN, num_labels], where N >= 1 and num_labels is the number of\n target classes for the associated prediction. Commonly, N=1 and `labels`\n has shape [batch_size, num_labels]. [D1, ... DN] must match\n `predictions_idx`.\n k: Integer, k for @k metric. This is only used for default op name.\n class_id: Class for which we want binary metrics.\n ignore_mask: An optional, binary tensor whose shape is broadcastable to the\n the first [D1, ... DN] dimensions of `predictions_idx` and `labels`.\n name: Name of new variable, and namespace for other dependant ops.\n\n Returns:\n A tuple of `Variable` and update `Operation`.\n \"\"\"\n default_name = 'false_negative_at_%d' % k\n if class_id is not None:\n default_name = '%s_class%d' % (default_name, class_id)\n with ops.op_scope([predictions_idx, labels], name, default_name) as scope:\n labels, predictions_idx = _maybe_select_class_id(labels,\n predictions_idx,\n class_id)\n fn = set_ops.set_size(set_ops.set_difference(predictions_idx,\n labels,\n aminusb=False))\n if ignore_mask is not None:\n fn = math_ops.select(ignore_mask, array_ops.zeros_like(fn), fn)\n batch_total_fn = math_ops.cast(\n math_ops.reduce_sum(fn), dtype=dtypes.float64)\n\n var = contrib_variables.local_variable(\n array_ops.zeros([], dtype=dtypes.float64), name=scope)\n return var, state_ops.assign_add(var, batch_total_fn, name='update')\n\n\ndef streaming_mean_absolute_error(predictions, labels, weights=None,\n metrics_collections=None,\n updates_collections=None,\n name=None):\n \"\"\"Computes the mean absolute error between the labels and predictions.\n\n The `streaming_mean_absolute_error` function creates two local variables,\n `total` and `count` that are used to compute the mean absolute error. 
This\n average is ultimately returned as `mean_absolute_error`: an idempotent\n operation that simply divides `total` by `count`. To facilitate the estimation\n of the mean absolute error over a stream of data, the function utilizes two\n operations. First, an `absolute_errors` operation computes the absolute value\n of the differences between `predictions` and `labels`. Second, an `update_op`\n operation whose behavior is dependent on the value of `weights`. If `weights`\n is None, then `update_op` increments `total` with the reduced sum of\n `absolute_errors` and increments `count` with the number of elements in\n `absolute_errors`. If `weights` is not `None`, then `update_op` increments\n `total` with the reduced sum of the product of `weights` and `absolute_errors`\n and increments `count` with the reduced sum of `weights`. In addition to\n performing the updates, `update_op` also returns the `mean_absolute_error`\n value.\n\n Args:\n predictions: A `Tensor` of arbitrary shape.\n labels: A `Tensor` of the same shape as `predictions`.\n weights: An optional set of weights of the same shape as `predictions`. If\n `weights` is not None, the function computes a weighted mean.\n metrics_collections: An optional list of collections that\n `mean_absolute_error` should be added to.\n updates_collections: An optional list of collections that `update_op` should\n be added to.\n name: An optional variable_op_scope name.\n\n Returns:\n mean_absolute_error: A tensor representing the current mean, the value of\n `total` divided by `count`.\n update_op: An operation that increments the `total` and `count` variables\n appropriately and whose value matches `mean_absolute_error`.\n\n Raises:\n ValueError: If `weights` is not `None` and its shape doesn't match\n `predictions` or if either `metrics_collections` or `updates_collections`\n are not a list or tuple.\n \"\"\"\n predictions, labels = metric_ops_util.remove_squeezable_dimensions(\n predictions, labels)\n predictions.get_shape().assert_is_compatible_with(labels.get_shape())\n absolute_errors = math_ops.abs(predictions - labels)\n return streaming_mean(absolute_errors, weights, metrics_collections,\n updates_collections, name or 'mean_absolute_error')\n\n\ndef streaming_mean_relative_error(predictions, labels, normalizer, weights=None,\n metrics_collections=None,\n updates_collections=None,\n name=None):\n \"\"\"Computes the mean relative error by normalizing with the given values.\n\n The `streaming_mean_relative_error` function creates two local variables,\n `total` and `count` that are used to compute the mean relative absolute error.\n This average is ultimately returned as `mean_relative_error`: an idempotent\n operation that simply divides `total` by `count`. To facilitate the estimation\n of the mean relative error over a stream of data, the function utilizes two\n operations. First, a `relative_errors` operation divides the absolute value\n of the differences between `predictions` and `labels` by the `normalizer`.\n Second, an `update_op` operation whose behavior is dependent on the value of\n `weights`. If `weights` is None, then `update_op` increments `total` with the\n reduced sum of `relative_errors` and increments `count` with the number of\n elements in `relative_errors`. If `weights` is not `None`, then `update_op`\n increments `total` with the reduced sum of the product of `weights` and\n `relative_errors` and increments `count` with the reduced sum of `weights`. 
In\n addition to performing the updates, `update_op` also returns the\n `mean_relative_error` value.\n\n Args:\n predictions: A `Tensor` of arbitrary shape.\n labels: A `Tensor` of the same shape as `predictions`.\n normalizer: A `Tensor` of the same shape as `predictions`.\n weights: An optional set of weights of the same shape as `predictions`. If\n `weights` is not None, the function computes a weighted mean.\n metrics_collections: An optional list of collections that\n `mean_relative_error` should be added to.\n updates_collections: An optional list of collections that `update_op` should\n be added to.\n name: An optional variable_op_scope name.\n\n Returns:\n mean_relative_error: A tensor representing the current mean, the value of\n `total` divided by `count`.\n update_op: An operation that increments the `total` and `count` variables\n appropriately and whose value matches `mean_relative_error`.\n\n Raises:\n ValueError: If `weights` is not `None` and its shape doesn't match\n `predictions` or if either `metrics_collections` or `updates_collections`\n are not a list or tuple.\n \"\"\"\n predictions, labels = metric_ops_util.remove_squeezable_dimensions(\n predictions, labels)\n predictions.get_shape().assert_is_compatible_with(labels.get_shape())\n\n predictions, normalizer = metric_ops_util.remove_squeezable_dimensions(\n predictions, normalizer)\n predictions.get_shape().assert_is_compatible_with(normalizer.get_shape())\n relative_errors = math_ops.select(\n math_ops.equal(normalizer, 0.0),\n array_ops.zeros_like(labels),\n math_ops.div(math_ops.abs(labels - predictions), normalizer))\n return streaming_mean(relative_errors, weights, metrics_collections,\n updates_collections, name or 'mean_relative_error')\n\n\ndef streaming_mean_squared_error(predictions, labels, weights=None,\n metrics_collections=None,\n updates_collections=None,\n name=None):\n \"\"\"Computes the mean squared error between the labels and predictions.\n\n The `streaming_mean_squared_error` function creates two local variables,\n `total` and `count` that are used to compute the mean squared error.\n This average is ultimately returned as `mean_squared_error`: an idempotent\n operation that simply divides `total` by `count`. To facilitate the estimation\n of the mean squared error over a stream of data, the function utilizes two\n operations. First, a `squared_error` operation computes the element-wise\n square of the difference between `predictions` and `labels`. Second, an\n `update_op` operation whose behavior is dependent on the value of `weights`.\n If `weights` is None, then `update_op` increments `total` with the\n reduced sum of `squared_error` and increments `count` with the number of\n elements in `squared_error`. If `weights` is not `None`, then `update_op`\n increments `total` with the reduced sum of the product of `weights` and\n `squared_error` and increments `count` with the reduced sum of `weights`. In\n addition to performing the updates, `update_op` also returns the\n `mean_squared_error` value.\n\n Args:\n predictions: A `Tensor` of arbitrary shape.\n labels: A `Tensor` of the same shape as `predictions`.\n weights: An optional set of weights of the same shape as `predictions`. 
If\n `weights` is not None, the function computes a weighted mean.\n metrics_collections: An optional list of collections that\n `mean_squared_error` should be added to.\n updates_collections: An optional list of collections that `update_op` should\n be added to.\n name: An optional variable_op_scope name.\n\n Returns:\n mean_squared_error: A tensor representing the current mean, the value of\n `total` divided by `count`.\n update_op: An operation that increments the `total` and `count` variables\n appropriately and whose value matches `mean_squared_error`.\n\n Raises:\n ValueError: If `weights` is not `None` and its shape doesn't match\n `predictions` or if either `metrics_collections` or `updates_collections`\n are not a list or tuple.\n \"\"\"\n predictions, labels = metric_ops_util.remove_squeezable_dimensions(\n predictions, labels)\n predictions.get_shape().assert_is_compatible_with(labels.get_shape())\n squared_error = math_ops.square(labels - predictions)\n return streaming_mean(squared_error, weights, metrics_collections,\n updates_collections, name or 'mean_squared_error')\n\n\ndef streaming_root_mean_squared_error(predictions, labels, weights=None,\n metrics_collections=None,\n updates_collections=None,\n name=None):\n \"\"\"Computes the root mean squared error between the labels and predictions.\n\n The `streaming_root_mean_squared_error` function creates two local variables,\n `total` and `count` that are used to compute the root mean squared error.\n This average is ultimately returned as `root_mean_squared_error`: an\n idempotent operation that takes the square root of the division of `total`\n by `count`. To facilitate the estimation of the root mean squared error over a\n stream of data, the function utilizes two operations. First, a `squared_error`\n operation computes the element-wise square of the difference between\n `predictions` and `labels`. Second, an `update_op` operation whose behavior is\n dependent on the value of `weights`. If `weights` is None, then `update_op`\n increments `total` with the reduced sum of `squared_error` and increments\n `count` with the number of elements in `squared_error`. If `weights` is not\n `None`, then `update_op` increments `total` with the reduced sum of the\n product of `weights` and `squared_error` and increments `count` with the\n reduced sum of `weights`. In addition to performing the updates, `update_op`\n also returns the `root_mean_squared_error` value.\n\n Args:\n predictions: A `Tensor` of arbitrary shape.\n labels: A `Tensor` of the same shape as `predictions`.\n weights: An optional set of weights of the same shape as `predictions`. 
If\n `weights` is not None, the function computes a weighted mean.\n metrics_collections: An optional list of collections that\n `root_mean_squared_error` should be added to.\n updates_collections: An optional list of collections that `update_op` should\n be added to.\n name: An optional variable_op_scope name.\n\n Returns:\n root_mean_squared_error: A tensor representing the current mean, the value\n of `total` divided by `count`.\n update_op: An operation that increments the `total` and `count` variables\n appropriately and whose value matches `root_mean_squared_error`.\n\n Raises:\n ValueError: If `weights` is not `None` and its shape doesn't match\n `predictions` or if either `metrics_collections` or `updates_collections`\n are not a list or tuple.\n \"\"\"\n predictions, labels = metric_ops_util.remove_squeezable_dimensions(\n predictions, labels)\n predictions.get_shape().assert_is_compatible_with(labels.get_shape())\n value_tensor, update_op = streaming_mean_squared_error(\n predictions, labels, weights, None, None,\n name or 'root_mean_squared_error')\n\n root_mean_squared_error = math_ops.sqrt(value_tensor)\n with ops.control_dependencies([update_op]):\n update_op = math_ops.sqrt(update_op)\n\n if metrics_collections:\n ops.add_to_collections(metrics_collections, root_mean_squared_error)\n\n if updates_collections:\n ops.add_to_collections(updates_collections, update_op)\n\n return root_mean_squared_error, update_op\n\n\n# TODO(nsilberman): add a 'normalized' flag so that the user can request\n# normalization if the inputs are not normalized.\ndef streaming_mean_cosine_distance(predictions, labels, dim, weights=None,\n metrics_collections=None,\n updates_collections=None,\n name=None):\n \"\"\"Computes the cosine distance between the labels and predictions.\n\n The `streaming_mean_cosine_distance` function creates two local variables,\n `total` and `count` that are used to compute the average cosine distance\n between `predictions` and `labels`. This average is ultimately returned as\n `mean_distance` which is an idempotent operation that simply divides `total`\n by `count. To facilitate the estimation of a mean over multiple batches\n of data, the function creates an `update_op` operation whose behavior is\n dependent on the value of `weights`. If `weights` is None, then `update_op`\n increments `total` with the reduced sum of `values and increments `count` with\n the number of elements in `values`. If `weights` is not `None`, then\n `update_op` increments `total` with the reduced sum of the product of `values`\n and `weights` and increments `count` with the reduced sum of weights.\n\n Args:\n predictions: A tensor of the same size as labels.\n labels: A tensor of arbitrary size.\n dim: The dimension along which the cosine distance is computed.\n weights: An optional set of weights which indicates which predictions to\n ignore during metric computation. Its size matches that of labels except\n for the value of 'dim' which should be 1. 
For example if labels has\n dimensions [32, 100, 200, 3], then `weights` should have dimensions\n [32, 100, 200, 1].\n metrics_collections: An optional list of collections that the metric\n value variable should be added to.\n updates_collections: An optional list of collections that the metric update\n ops should be added to.\n name: An optional variable_op_scope name.\n\n Returns:\n mean_distance: A tensor representing the current mean, the value of `total`\n divided by `count`.\n update_op: An operation that increments the `total` and `count` variables\n appropriately.\n\n Raises:\n ValueError: If labels and predictions are of different sizes or if the\n ignore_mask is of the wrong size or if either `metrics_collections` or\n `updates_collections` are not a list or tuple.\n \"\"\"\n predictions, labels = metric_ops_util.remove_squeezable_dimensions(\n predictions, labels)\n predictions.get_shape().assert_is_compatible_with(labels.get_shape())\n radial_diffs = math_ops.mul(predictions, labels)\n radial_diffs = math_ops.reduce_sum(radial_diffs,\n reduction_indices=[dim,],\n keep_dims=True)\n mean_distance, update_op = streaming_mean(radial_diffs, weights,\n None,\n None,\n name or 'mean_cosine_distance')\n mean_distance = math_ops.sub(1.0, mean_distance)\n update_op = math_ops.sub(1.0, update_op)\n\n if metrics_collections:\n ops.add_to_collections(metrics_collections, mean_distance)\n\n if updates_collections:\n ops.add_to_collections(updates_collections, update_op)\n\n return mean_distance, update_op\n\n\ndef streaming_percentage_less(values, threshold, ignore_mask=None,\n metrics_collections=None,\n updates_collections=None,\n name=None):\n \"\"\"Computes the percentage of values less than the given threshold.\n\n The `streaming_percentage_less` function creates two local variables,\n `total` and `count` that are used to compute the percentage of `values` that\n fall below `threshold`. This rate is ultimately returned as `percentage`\n which is an idempotent operation that simply divides `total` by `count.\n To facilitate the estimation of the percentage of values that fall under\n `threshold` over multiple batches of data, the function creates an\n `update_op` operation whose behavior is dependent on the value of\n `ignore_mask`. If `ignore_mask` is None, then `update_op`\n increments `total` with the number of elements of `values` that are less\n than `threshold` and `count` with the number of elements in `values`. 
If\n `ignore_mask` is not `None`, then `update_op` increments `total` with the\n number of elements of `values` that are less than `threshold` and whose\n corresponding entries in `ignore_mask` are False, and `count` is incremented\n with the number of elements of `ignore_mask` that are False.\n\n Args:\n values: A numeric `Tensor` of arbitrary size.\n threshold: A scalar threshold.\n ignore_mask: An optional mask of the same shape as 'values' which indicates\n which elements to ignore during metric computation.\n metrics_collections: An optional list of collections that the metric\n value variable should be added to.\n updates_collections: An optional list of collections that the metric update\n ops should be added to.\n name: An optional variable_op_scope name.\n\n Returns:\n percentage: A tensor representing the current mean, the value of `total`\n divided by `count`.\n update_op: An operation that increments the `total` and `count` variables\n appropriately.\n\n Raises:\n ValueError: If `ignore_mask` is not None and its shape doesn't match `values\n or if either `metrics_collections` or `updates_collections` are supplied\n but are not a list or tuple.\n \"\"\"\n is_below_threshold = math_ops.to_float(math_ops.less(values, threshold))\n return streaming_mean(is_below_threshold, _mask_to_weights(ignore_mask),\n metrics_collections, updates_collections,\n name or 'percentage_below_threshold')\n\n\ndef streaming_mean_iou(predictions,\n labels,\n num_classes,\n ignore_mask=None,\n metrics_collections=None,\n updates_collections=None,\n name=None):\n \"\"\"Calculate per-step mean Intersection-Over-Union (mIOU).\n\n Mean Intersection-Over-Union is a common evaluation metric for\n semantic image segmentation, which first computes the IOU for each\n semantic class and then computes the average over classes.\n IOU is defined as follows:\n IOU = true_positive / (true_positive + false_positive + false_negative).\n The predictions are accumulated in a confusion matrix, and mIOU is then\n calculated from it.\n\n Args:\n predictions: A tensor of prediction results for semantic labels, whose\n shape is [batch size] and type `int32` or `int64`. The tensor will be\n flattened, if its rank > 1.\n labels: A tensor of ground truth labels with shape [batch size] and of\n type `int32` or `int64`. The tensor will be flattened, if its rank > 1.\n num_classes: The possible number of labels the prediction task can\n have. This value must be provided, since a confusion matrix of\n dimension = [num_classes, num_classes] will be allocated.\n ignore_mask: An optional, boolean tensor whose size matches `labels`. If an\n element of `ignore_mask` is True, the corresponding prediction and label\n pair is NOT used to compute the metrics. 
Otherwise, the pair is included.\n metrics_collections: An optional list of collections that `mean_iou`\n should be added to.\n updates_collections: An optional list of collections `update_op` should be\n added to.\n name: An optional variable_op_scope name.\n\n Returns:\n mean_iou: A tensor representing the mean intersection-over-union.\n update_op: An operation that increments the confusion matrix.\n\n Raises:\n ValueError: If the dimensions of `predictions` and `labels` don't match or\n if `ignore_mask` is not `None` and its shape doesn't match `labels`\n or if either `metrics_collections` or `updates_collections` are not a list\n or tuple.\n \"\"\"\n with variable_scope.variable_op_scope(\n [predictions, labels], name, 'mean_iou'):\n # Check if shape is compatible.\n predictions.get_shape().assert_is_compatible_with(labels.get_shape())\n if ignore_mask is not None:\n labels.get_shape().assert_is_compatible_with(ignore_mask.get_shape())\n\n # Local variable to accumulate the predictions in the confusion matrix.\n total_cm = _create_local('total_confusion_matrix',\n shape=[num_classes, num_classes],\n dtype=dtypes.int64)\n\n # Cast the type to int64 required by confusion_matrix_ops.\n predictions = math_ops.to_int64(predictions)\n labels = math_ops.to_int64(labels)\n num_classes = math_ops.to_int64(num_classes)\n\n # Flatten the input if its rank > 1.\n predictions_rank = predictions.get_shape().ndims\n if predictions_rank > 1:\n predictions = array_ops.reshape(predictions, [-1])\n\n labels_rank = labels.get_shape().ndims\n if labels_rank > 1:\n labels = array_ops.reshape(labels, [-1])\n\n if ignore_mask is not None:\n ignore_mask_rank = ignore_mask.get_shape().ndims\n if ignore_mask_rank > 1:\n ignore_mask = array_ops.reshape(ignore_mask, [-1])\n\n check_ops.assert_type(ignore_mask, dtypes.bool)\n not_ignore_mask = math_ops.logical_not(ignore_mask)\n predictions = array_ops.boolean_mask(predictions, not_ignore_mask)\n labels = array_ops.boolean_mask(labels, not_ignore_mask)\n\n # Accumulate the prediction to current confusion matrix.\n current_cm = confusion_matrix_ops.confusion_matrix(\n predictions, labels, num_classes, dtype=dtypes.int64)\n update_op = state_ops.assign_add(total_cm, current_cm)\n\n def compute_mean_iou(name):\n \"\"\"Compute the mean intersection-over-union via the confusion matrix.\"\"\"\n sum_over_row = math_ops.to_float(math_ops.reduce_sum(total_cm, 0))\n sum_over_col = math_ops.to_float(math_ops.reduce_sum(total_cm, 1))\n cm_diag = math_ops.to_float(array_ops.diag_part(total_cm))\n denominator = sum_over_row + sum_over_col - cm_diag\n\n # If the value of the denominator is 0, set it to 1 to avoid\n # zero division.\n denominator = math_ops.select(\n math_ops.greater(denominator, 0),\n denominator,\n array_ops.ones_like(denominator))\n iou = math_ops.div(cm_diag, denominator)\n return math_ops.reduce_mean(iou, name=name)\n\n mean_iou = compute_mean_iou('mean_iou')\n\n if metrics_collections:\n ops.add_to_collections(metrics_collections, mean_iou)\n\n if updates_collections:\n ops.add_to_collections(updates_collections, update_op)\n\n return mean_iou, update_op\n\n\ndef aggregate_metrics(*value_update_tuples):\n \"\"\"Aggregates the metric value tensors and update ops into two lists.\n\n Args:\n *value_update_tuples: a variable number of tuples, each of which contain the\n pair of (value_tensor, update_op) from a streaming metric.\n\n Returns:\n a list of value tensors and a list of update ops.\n\n Raises:\n ValueError: if `value_update_tuples` is empty.\n \"\"\"\n 
if not value_update_tuples:\n raise ValueError('Expected at least one value_tensor/update_op pair')\n value_ops, update_ops = zip(*value_update_tuples)\n return list(value_ops), list(update_ops)\n\n\ndef aggregate_metric_map(names_to_tuples):\n \"\"\"Aggregates the metric names to tuple dictionary.\n\n This function is useful for pairing metric names with their associated value\n and update ops when the list of metrics is long. For example:\n\n metrics_to_values, metrics_to_updates = slim.metrics.aggregate_metric_map({\n 'Mean Absolute Error': new_slim.metrics.streaming_mean_absolute_error(\n predictions, labels, weights),\n 'Mean Relative Error': new_slim.metrics.streaming_mean_relative_error(\n predictions, labels, labels, weights),\n 'RMSE Linear': new_slim.metrics.streaming_root_mean_squared_error(\n predictions, labels, weights),\n 'RMSE Log': new_slim.metrics.streaming_root_mean_squared_error(\n predictions, labels, weights),\n })\n\n Args:\n names_to_tuples: a map of metric names to tuples, each of which contain the\n pair of (value_tensor, update_op) from a streaming metric.\n\n Returns:\n A dictionary from metric names to value ops and a dictionary from metric\n names to update ops.\n \"\"\"\n metric_names = names_to_tuples.keys()\n value_ops, update_ops = zip(*names_to_tuples.values())\n return dict(zip(metric_names, value_ops)), dict(zip(metric_names, update_ops))\n\n\n__all__ = make_all(__name__)\n",
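The metric definitions in the file above all follow the same two-op contract: each streaming_* function returns a value tensor (an idempotent read of the accumulated total/count or tp/fp/fn local variables) plus an update_op to run once per batch. The sketch below illustrates that pattern; it is a minimal example assuming the tf.contrib.metrics import path of this TensorFlow generation, hypothetical placeholders, and a caller-supplied `batches` iterable, not canonical usage taken from the file itself.

import tensorflow as tf

# Hypothetical model outputs and targets (assumed shapes).
predictions = tf.placeholder(tf.float32, [None])
labels = tf.placeholder(tf.float32, [None])

# Each streaming metric returns (value_tensor, update_op).
mae, mae_update = tf.contrib.metrics.streaming_mean_absolute_error(
    predictions, labels)
rmse, rmse_update = tf.contrib.metrics.streaming_root_mean_squared_error(
    predictions, labels)

with tf.Session() as sess:
  # The accumulators are *local* variables; very old releases spell this
  # tf.initialize_local_variables() instead.
  sess.run(tf.local_variables_initializer())
  for batch_preds, batch_labels in batches:  # `batches` assumed to exist
    sess.run([mae_update, rmse_update],
             feed_dict={predictions: batch_preds, labels: batch_labels})
  # Reading the value tensors after the loop yields the streamed means.
  print(sess.run([mae, rmse]))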
"# Copyright 2016 Google Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Implements a parallel data reader with queues and optional shuffling.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom tensorflow.python.framework import dtypes as tf_dtypes\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.ops import data_flow_ops\nfrom tensorflow.python.ops import io_ops\nfrom tensorflow.python.ops import logging_ops\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.platform import gfile\nfrom tensorflow.python.training import input as tf_input\nfrom tensorflow.python.training import queue_runner\n\n\nclass ParallelReader(io_ops.ReaderBase):\n \"\"\"Reader class that uses multiple readers in parallel to improve speed.\n\n See ReaderBase for supported methods.\n \"\"\"\n\n def __init__(self,\n reader_class,\n common_queue,\n num_readers=4,\n reader_kwargs=None):\n \"\"\"ParallelReader creates num_readers instances of the reader_class.\n\n Each instance is created by calling the `reader_class` function passing\n the arguments specified in `reader_kwargs` as in:\n reader_class(**read_kwargs)\n\n When you read from a ParallelReader, with its `read()` method,\n you just dequeue examples from the `common_queue`.\n\n The readers will read different files in parallel, asynchronously enqueueing\n their output into `common_queue`. The `common_queue.dtypes` must be\n [tf.string, tf.string]\n\n Because each reader can read from a different file, the examples in the\n `common_queue` could be from different files. Due to the asynchronous\n reading there is no guarantee that all the readers will read the same\n number of examples.\n\n If the `common_queue` is a shuffling queue, then the examples are shuffled.\n\n Usage:\n common_queue = tf.RandomShuffleQueue(\n capacity=256,\n min_after_dequeue=128,\n dtypes=[tf.string, tf.string])\n p_reader = ParallelReader(tf.TFRecordReader, common_queue)\n\n common_queue = tf.FIFOQueue(\n capacity=256,\n dtypes=[tf.string, tf.string])\n p_reader = ParallelReader(readers, common_queue, num_readers=2)\n\n\n Args:\n reader_class: one of the io_ops.ReaderBase subclasses ex: TFRecordReader\n common_queue: a Queue to hold (key, value pairs) with `dtypes` equal to\n [tf.string, tf.string]. Must be one of the data_flow_ops.Queues\n instances, ex. 
`tf.FIFOQueue()`, `tf.RandomShuffleQueue()`, ...\n num_readers: a integer, number of instances of reader_class to create.\n reader_kwargs: an optional dict of kwargs to create the readers.\n\n Raises:\n TypeError: if `common_queue.dtypes` is not [tf.string, tf.string].\n \"\"\"\n if len(common_queue.dtypes) != 2:\n raise TypeError('common_queue.dtypes must be [tf.string, tf.string]')\n for dtype in common_queue.dtypes:\n if not dtype.is_compatible_with(tf_dtypes.string):\n raise TypeError('common_queue.dtypes must be [tf.string, tf.string]')\n\n reader_kwargs = reader_kwargs or {}\n self._readers = [reader_class(**reader_kwargs) for _ in range(num_readers)]\n self._common_queue = common_queue\n\n @property\n def num_readers(self):\n return len(self._readers)\n\n @property\n def common_queue(self):\n return self._common_queue\n\n def read(self, queue, name=None):\n \"\"\"Returns the next record (key, value pair) produced by the reader.\n\n The multiple reader instances are all configured to `read()` from the\n filenames listed in `queue` and enqueue their output into the `common_queue`\n passed to the constructor, and this method returns the next record dequeued\n from that `common_queue`.\n\n\n Readers dequeue a work unit from `queue` if necessary (e.g. when a\n reader needs to start reading from a new file since it has finished with\n the previous file).\n\n A queue runner for enqueing in the `common_queue` is automatically added to\n the TF QueueRunners collection.\n\n Args:\n queue: A Queue or a mutable string Tensor representing a handle\n to a Queue, with string work items.\n name: A name for the operation (optional).\n\n Returns:\n The next record (i.e. (key, value pair)) from the common_queue.\n \"\"\"\n\n enqueue_ops = []\n for reader in self._readers:\n enqueue_ops.append(self._common_queue.enqueue(reader.read(queue)))\n\n queue_runner.add_queue_runner(queue_runner.QueueRunner(\n self._common_queue, enqueue_ops))\n\n return self._common_queue.dequeue(name=name)\n\n def num_records_produced(self, name=None):\n \"\"\"Returns the number of records this reader has produced.\n\n Args:\n name: A name for the operation (optional).\n\n Returns:\n An int64 Tensor.\n\n \"\"\"\n num_records = [r.num_records_produced() for r in self._readers]\n return math_ops.add_n(num_records, name=name)\n\n def num_work_units_completed(self, name=None):\n \"\"\"Returns the number of work units this reader has finished processing.\n\n Args:\n name: A name for the operation (optional).\n\n Returns:\n An int64 Tensor.\n \"\"\"\n num_work_units = [r.num_work_units_completed() for r in self._readers]\n return math_ops.add_n(num_work_units, name=name)\n\n\ndef parallel_read(data_sources,\n reader_class,\n num_epochs=None,\n num_readers=4,\n reader_kwargs=None,\n shuffle=True,\n dtypes=None,\n capacity=256,\n min_after_dequeue=128):\n \"\"\"Reads multiple records in parallel from data_sources using n readers.\n\n It uses a ParallelReader to read from multiple files in parallel using\n multiple readers created using `reader_class` with `reader_kwargs'.\n\n If shuffle is True the common_queue would be a RandomShuffleQueue otherwise\n it would be a FIFOQueue.\n\n Usage:\n data_sources = ['path_to/train*']\n key, value = parallel_read(data_sources, tf.CSVReader, num_readers=4)\n\n Args:\n data_sources: a list/tuple of files or the location of the data, i.e.\n /cns/../train@128, /cns/.../train* or /tmp/.../train*\n reader_class: one of the io_ops.ReaderBase subclasses ex: TFRecordReader\n num_epochs: The number of 
times each data source is read. If left as None,\n the data will be cycled through indefinitely.\n num_readers: a integer, number of Readers to create.\n reader_kwargs: an optional dict, of kwargs for the reader.\n shuffle: boolean, wether should shuffle the files and the records by using\n RandomShuffleQueue as common_queue.\n dtypes: A list of types. The length of dtypes must equal the number\n of elements in each record. If it is None it will default to\n [tf.string, tf.string] for (key, value).\n capacity: integer, capacity of the common_queue.\n min_after_dequeue: integer, minimum number of records in the common_queue\n after dequeue. Needed for a good shuffle.\n\n Returns:\n key, value: a tuple of keys and values from the data_source.\n \"\"\"\n data_files = get_data_files(data_sources)\n with ops.name_scope('parallel_read'):\n filename_queue = tf_input.string_input_producer(\n data_files, num_epochs=num_epochs, shuffle=shuffle)\n dtypes = dtypes or [tf_dtypes.string, tf_dtypes.string]\n if shuffle:\n common_queue = data_flow_ops.RandomShuffleQueue(\n capacity=capacity,\n min_after_dequeue=min_after_dequeue,\n dtypes=dtypes)\n else:\n common_queue = data_flow_ops.FIFOQueue(capacity=capacity, dtypes=dtypes)\n\n logging_ops.scalar_summary('queue/%s/fraction_of_%d_full' %\n (common_queue.name, capacity),\n math_ops.to_float(common_queue.size()) *\n (1. / capacity))\n\n return ParallelReader(reader_class,\n common_queue,\n num_readers=num_readers,\n reader_kwargs=reader_kwargs).read(filename_queue)\n\n\ndef single_pass_read(data_sources,\n reader_class,\n reader_kwargs=None):\n \"\"\"Reads sequentially the data_sources using the reader, doing a single pass.\n\n Args:\n data_sources: a list/tuple of files or the location of the data, i.e.\n /cns/../train@128, /cns/.../train* or /tmp/.../train*\n reader_class: one of the io_ops.ReaderBase subclasses ex: TFRecordReader.\n reader_kwargs: an optional dict, of kwargs for the reader.\n\n Returns:\n key, value: a tuple of keys and values from the data_source.\n \"\"\"\n data_files = get_data_files(data_sources)\n with ops.name_scope('single_pass_read'):\n filename_queue = tf_input.string_input_producer(data_files,\n num_epochs=1,\n shuffle=False,\n capacity=1)\n reader_kwargs = reader_kwargs or {}\n return reader_class(**reader_kwargs).read(filename_queue)\n\n\ndef get_data_files(data_sources):\n \"\"\"Get data_files from data_sources.\n\n Args:\n data_sources: a list/tuple of files or the location of the data, i.e.\n /cns/../train@128, /cns/.../train* or /tmp/.../train*\n\n Returns:\n a list of data_files.\n\n Raises:\n ValueError: if not data files are not found\n\n \"\"\"\n if isinstance(data_sources, (list, tuple)):\n data_files = []\n for source in data_sources:\n data_files += get_data_files(source)\n else:\n if '*' in data_sources or '?' in data_sources or '[' in data_sources:\n data_files = gfile.Glob(data_sources)\n else:\n data_files = [data_sources]\n if not data_files:\n raise ValueError('No data files found in %s', data_sources)\n return data_files\n",
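For orientation, a usage sketch of parallel_read follows. The import path is an assumption (the module moved within contrib/slim across releases) and the TFRecord glob is a placeholder; the call itself mirrors the signature defined above.

import tensorflow as tf
# Assumed import path; adjust to wherever parallel_reader lives in your release.
from tensorflow.contrib.slim.python.slim.data import parallel_reader

# Four TFRecordReader instances asynchronously feed one RandomShuffleQueue of
# (key, value) string pairs; the returned tensors dequeue from that queue.
key, value = parallel_reader.parallel_read(
    data_sources=['/tmp/data/train*.tfrecord'],  # placeholder pattern
    reader_class=tf.TFRecordReader,
    num_readers=4,
    shuffle=True,
    capacity=256,
    min_after_dequeue=128)

with tf.Session() as sess:
  coord = tf.train.Coordinator()
  threads = tf.train.start_queue_runners(sess=sess, coord=coord)
  print(sess.run(key))  # key of one dequeued record
  coord.request_stop()
  coord.join(threads)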
"# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"tf.learn IO operation tests.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport random\n\nimport tensorflow as tf\n\nfrom tensorflow.contrib.learn.python import learn\nfrom tensorflow.contrib.learn.python.learn import datasets\nfrom tensorflow.contrib.learn.python.learn.estimators._sklearn import accuracy_score\n# pylint: disable=wildcard-import\nfrom tensorflow.contrib.learn.python.learn.learn_io import *\n# pylint: enable=wildcard-import\n\n\nclass IOTest(tf.test.TestCase):\n # pylint: disable=undefined-variable\n \"\"\"tf.learn IO operation tests.\"\"\"\n\n def test_pandas_dataframe(self):\n if HAS_PANDAS:\n import pandas as pd # pylint: disable=g-import-not-at-top\n random.seed(42)\n iris = datasets.load_iris()\n data = pd.DataFrame(iris.data)\n labels = pd.DataFrame(iris.target)\n classifier = learn.TensorFlowLinearClassifier(\n feature_columns=learn.infer_real_valued_columns_from_input(data),\n n_classes=3)\n classifier.fit(data, labels)\n score = accuracy_score(labels[0], classifier.predict(data))\n self.assertGreater(score, 0.5, \"Failed with score = {0}\".format(score))\n else:\n print(\"No pandas installed. 
pandas-related tests are skipped.\")\n\n def test_pandas_series(self):\n if HAS_PANDAS:\n import pandas as pd # pylint: disable=g-import-not-at-top\n random.seed(42)\n iris = datasets.load_iris()\n data = pd.DataFrame(iris.data)\n labels = pd.Series(iris.target)\n classifier = learn.TensorFlowLinearClassifier(\n feature_columns=learn.infer_real_valued_columns_from_input(data),\n n_classes=3)\n classifier.fit(data, labels)\n score = accuracy_score(labels, classifier.predict(data))\n self.assertGreater(score, 0.5, \"Failed with score = {0}\".format(score))\n\n def test_string_data_formats(self):\n if HAS_PANDAS:\n import pandas as pd # pylint: disable=g-import-not-at-top\n with self.assertRaises(ValueError):\n learn.io.extract_pandas_data(pd.DataFrame({\"Test\": [\"A\", \"B\"]}))\n with self.assertRaises(ValueError):\n learn.io.extract_pandas_labels(pd.DataFrame({\"Test\": [\"A\", \"B\"]}))\n\n def test_dask_io(self):\n if HAS_DASK and HAS_PANDAS:\n import pandas as pd # pylint: disable=g-import-not-at-top\n import dask.dataframe as dd # pylint: disable=g-import-not-at-top\n # test dask.dataframe\n df = pd.DataFrame(\n dict(a=list(\"aabbcc\"), b=list(range(6))),\n index=pd.date_range(start=\"20100101\", periods=6))\n ddf = dd.from_pandas(df, npartitions=3)\n extracted_ddf = extract_dask_data(ddf)\n self.assertEqual(\n extracted_ddf.divisions, (0, 2, 4, 6),\n \"Failed with divisions = {0}\".format(extracted_ddf.divisions))\n self.assertEqual(\n extracted_ddf.columns.tolist(), [\"a\", \"b\"],\n \"Failed with columns = {0}\".format(extracted_ddf.columns))\n # test dask.series\n labels = ddf[\"a\"]\n extracted_labels = extract_dask_labels(labels)\n self.assertEqual(\n extracted_labels.divisions, (0, 2, 4, 6),\n \"Failed with divisions = {0}\".format(extracted_labels.divisions))\n # labels should only have one column\n with self.assertRaises(ValueError):\n extract_dask_labels(ddf)\n else:\n print(\"No dask installed. dask-related tests are skipped.\")\n\n def test_dask_iris_classification(self):\n if HAS_DASK and HAS_PANDAS:\n import pandas as pd # pylint: disable=g-import-not-at-top\n import dask.dataframe as dd # pylint: disable=g-import-not-at-top\n random.seed(42)\n iris = datasets.load_iris()\n data = pd.DataFrame(iris.data)\n data = dd.from_pandas(data, npartitions=2)\n labels = pd.DataFrame(iris.target)\n labels = dd.from_pandas(labels, npartitions=2)\n classifier = learn.TensorFlowLinearClassifier(\n feature_columns=learn.infer_real_valued_columns_from_input(data),\n n_classes=3)\n classifier.fit(data, labels)\n predictions = data.map_partitions(classifier.predict).compute()\n score = accuracy_score(labels.compute(), predictions)\n self.assertGreater(score, 0.5, \"Failed with score = {0}\".format(score))\n\n\nif __name__ == \"__main__\":\n tf.test.main()\n",
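The tests above exercise the pandas and dask input paths end to end; outside a test harness the pandas pattern looks roughly like the sketch below (contrib.learn of this era; TensorFlowLinearClassifier was later superseded, so treat this as illustrative rather than current API).

import pandas as pd
from tensorflow.contrib.learn.python import learn
from tensorflow.contrib.learn.python.learn import datasets

iris = datasets.load_iris()
data = pd.DataFrame(iris.data)    # features as a DataFrame
labels = pd.Series(iris.target)   # targets as a Series

classifier = learn.TensorFlowLinearClassifier(
    feature_columns=learn.infer_real_valued_columns_from_input(data),
    n_classes=3)
classifier.fit(data, labels)
predictions = classifier.predict(data)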
"# Copyright 2015 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"TensorBoard server handler logic.\n\nTensorboardHandler contains all the logic for serving static files off of disk\nand for handling the API calls to endpoints like /tags that require information\nabout loaded events.\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport csv\nimport gzip\nimport imghdr\nimport json\nimport mimetypes\nimport os\nimport re\n\nfrom six import BytesIO\nfrom six import StringIO\nfrom six.moves import BaseHTTPServer\nfrom six.moves import urllib\nfrom six.moves import xrange # pylint: disable=redefined-builtin\nfrom six.moves.urllib import parse as urlparse\n\nfrom tensorflow.python.platform import resource_loader\nfrom tensorflow.python.platform import tf_logging as logging\nfrom tensorflow.python.summary import event_accumulator\nfrom tensorflow.python.util import compat\nfrom tensorflow.tensorboard.backend import process_graph\nfrom tensorflow.tensorboard.lib.python import json_util\n\n\nDATA_PREFIX = '/data'\nRUNS_ROUTE = '/runs'\nSCALARS_ROUTE = '/' + event_accumulator.SCALARS\nIMAGES_ROUTE = '/' + event_accumulator.IMAGES\nAUDIO_ROUTE = '/' + event_accumulator.AUDIO\nHISTOGRAMS_ROUTE = '/' + event_accumulator.HISTOGRAMS\nCOMPRESSED_HISTOGRAMS_ROUTE = '/' + event_accumulator.COMPRESSED_HISTOGRAMS\nINDIVIDUAL_IMAGE_ROUTE = '/individualImage'\nINDIVIDUAL_AUDIO_ROUTE = '/individualAudio'\nGRAPH_ROUTE = '/' + event_accumulator.GRAPH\nRUN_METADATA_ROUTE = '/' + event_accumulator.RUN_METADATA\nTAB_ROUTES = ['', '/events', '/images', '/audio', '/graphs', '/histograms']\n\n_IMGHDR_TO_MIMETYPE = {\n 'bmp': 'image/bmp',\n 'gif': 'image/gif',\n 'jpeg': 'image/jpeg',\n 'png': 'image/png'\n}\n_DEFAULT_IMAGE_MIMETYPE = 'application/octet-stream'\n\n# Allows *, gzip or x-gzip, but forbid gzip;q=0\n# https://tools.ietf.org/html/rfc7231#section-5.3.4\n_ALLOWS_GZIP_PATTERN = re.compile(\n r'(?:^|,|\\s)(?:(?:x-)?gzip|\\*)(?!;q=0)(?:\\s|,|$)')\n\n\ndef _content_type_for_image(encoded_image_string):\n image_type = imghdr.what(None, encoded_image_string)\n return _IMGHDR_TO_MIMETYPE.get(image_type, _DEFAULT_IMAGE_MIMETYPE)\n\n\nclass _OutputFormat(object):\n \"\"\"An enum used to list the valid output formats for API calls.\n\n Not all API calls support all formats (for example, only scalars and\n compressed histograms support CSV).\n \"\"\"\n JSON = 'json'\n CSV = 'csv'\n\n\nclass TensorboardHandler(BaseHTTPServer.BaseHTTPRequestHandler):\n \"\"\"Handler class for use with BaseHTTPServer.HTTPServer.\n\n This is essentially a thin wrapper around calls to an EventMultiplexer object\n as well as serving files off disk.\n \"\"\"\n\n # How many samples to include in sampling API calls by default.\n DEFAULT_SAMPLE_COUNT = 10\n\n # NOTE TO MAINTAINERS: An accurate Content-Length MUST be specified on all\n # 
responses using send_header.\n protocol_version = 'HTTP/1.1'\n\n def __init__(self, multiplexer, *args):\n self._multiplexer = multiplexer\n BaseHTTPServer.BaseHTTPRequestHandler.__init__(self, *args)\n\n # We use underscore_names for consistency with inherited methods.\n\n def _image_response_for_run(self, run_images, run, tag):\n \"\"\"Builds a JSON-serializable object with information about run_images.\n\n Args:\n run_images: A list of event_accumulator.ImageValueEvent objects.\n run: The name of the run.\n tag: The name of the tag the images all belong to.\n\n Returns:\n A list of dictionaries containing the wall time, step, URL, width, and\n height for each image.\n \"\"\"\n response = []\n for index, run_image in enumerate(run_images):\n response.append({\n 'wall_time': run_image.wall_time,\n 'step': run_image.step,\n # We include the size so that the frontend can add that to the <img>\n # tag so that the page layout doesn't change when the image loads.\n 'width': run_image.width,\n 'height': run_image.height,\n 'query': self._query_for_individual_image(run, tag, index)\n })\n return response\n\n def _audio_response_for_run(self, run_audio, run, tag):\n \"\"\"Builds a JSON-serializable object with information about run_audio.\n\n Args:\n run_audio: A list of event_accumulator.AudioValueEvent objects.\n run: The name of the run.\n tag: The name of the tag the images all belong to.\n\n Returns:\n A list of dictionaries containing the wall time, step, URL, and\n content_type for each audio clip.\n \"\"\"\n response = []\n for index, run_audio_clip in enumerate(run_audio):\n response.append({\n 'wall_time': run_audio_clip.wall_time,\n 'step': run_audio_clip.step,\n 'content_type': run_audio_clip.content_type,\n 'query': self._query_for_individual_audio(run, tag, index)\n })\n return response\n\n def _path_is_safe(self, path):\n \"\"\"Check path is safe (stays within current directory).\n\n This is for preventing directory-traversal attacks.\n\n Args:\n path: The path to check for safety.\n\n Returns:\n True if the given path stays within the current directory, and false\n if it would escape to a higher directory. E.g. 
_path_is_safe('index.html')\n returns true, but _path_is_safe('../../../etc/password') returns false.\n \"\"\"\n base = os.path.abspath(os.curdir)\n absolute_path = os.path.abspath(path)\n prefix = os.path.commonprefix([base, absolute_path])\n return prefix == base\n\n def _respond(self, content, content_type, code=200, encoding=None):\n \"\"\"Sends HTTP response.\n\n All text responses are assumed to be utf-8 unless specified otherwise.\n\n Args:\n content: The content to respond with, which is converted to bytes.\n content_type: The mime type of the content.\n code: The numeric HTTP status code to use.\n encoding: The encoding if any (not sanity checked.)\n \"\"\"\n content = compat.as_bytes(content)\n self.send_response(code)\n if content_type.startswith(('text/', 'application/json')):\n if 'charset=' not in content_type:\n content_type += '; charset=utf-8'\n self.send_header('Content-Type', content_type)\n self.send_header('Content-Length', len(content))\n if encoding:\n self.send_header('Content-Encoding', encoding)\n self.end_headers()\n self.wfile.write(content)\n\n def _is_gzip_accepted(self):\n \"\"\"Returns true if Accept-Encoding contains gzip.\"\"\"\n accept_encoding = self.headers.get('Accept-Encoding', '')\n return _ALLOWS_GZIP_PATTERN.search(accept_encoding) is not None\n\n def _send_gzip_response(self, content, content_type, code=200):\n \"\"\"Writes the given content as gzip response using the given content type.\n\n If the HTTP client does not accept gzip encoding, then the response will be\n sent uncompressed.\n\n Args:\n content: The content to respond with.\n content_type: The mime type of the content.\n code: The numeric HTTP status code to use.\n \"\"\"\n encoding = None\n if self._is_gzip_accepted():\n out = BytesIO()\n f = gzip.GzipFile(fileobj=out, mode='wb', compresslevel=3)\n f.write(compat.as_bytes(content))\n f.close()\n content = out.getvalue()\n encoding = 'gzip'\n self._respond(content, content_type, code, encoding)\n\n def _send_json_response(self, obj, code=200):\n \"\"\"Writes out the given object as JSON using the given HTTP status code.\n\n This also replaces special float values with stringified versions.\n\n Args:\n obj: The object to respond with.\n code: The numeric HTTP status code to use.\n \"\"\"\n content = json.dumps(json_util.WrapSpecialFloats(obj))\n self._respond(content, 'application/json', code)\n\n def _send_csv_response(self, serialized_csv, code=200):\n \"\"\"Writes out the given string, which represents CSV data.\n\n Unlike _send_json_response, this does *not* perform the CSV serialization\n for you. 
It only sets the proper headers.\n\n Args:\n serialized_csv: A string containing some CSV data.\n code: The numeric HTTP status code to use.\n \"\"\"\n self._respond(serialized_csv, 'text/csv', code)\n\n def _serve_scalars(self, query_params):\n \"\"\"Given a tag and single run, return array of ScalarEvents.\n\n Alternately, if both the tag and the run are omitted, returns JSON object\n where obj[run][tag] contains sample values for the given tag in the given\n run.\n\n Args:\n query_params: The query parameters as a dict.\n \"\"\"\n # TODO(cassandrax): return HTTP status code for malformed requests\n tag = query_params.get('tag')\n run = query_params.get('run')\n if tag is None and run is None:\n if query_params.get('format') == _OutputFormat.CSV:\n self.send_error(400, 'Scalar sample values only supports JSON output')\n return\n\n sample_count = int(query_params.get('sample_count',\n self.DEFAULT_SAMPLE_COUNT))\n values = {}\n for run_name, tags in self._multiplexer.Runs().items():\n values[run_name] = {\n tag: _uniform_sample(\n self._multiplexer.Scalars(run_name, tag), sample_count)\n for tag in tags['scalars']\n }\n else:\n values = self._multiplexer.Scalars(run, tag)\n\n if query_params.get('format') == _OutputFormat.CSV:\n string_io = StringIO()\n writer = csv.writer(string_io)\n writer.writerow(['Wall time', 'Step', 'Value'])\n writer.writerows(values)\n self._send_csv_response(string_io.getvalue())\n else:\n self._send_json_response(values)\n\n def _serve_graph(self, query_params):\n \"\"\"Given a single run, return the graph definition in json format.\"\"\"\n run = query_params.get('run', None)\n if run is None:\n self.send_error(400, 'query parameter \"run\" is required')\n return\n\n try:\n graph = self._multiplexer.Graph(run)\n except ValueError:\n self.send_response(404)\n return\n\n limit_attr_size = query_params.get('limit_attr_size', None)\n if limit_attr_size is not None:\n try:\n limit_attr_size = int(limit_attr_size)\n except ValueError:\n self.send_error(400, 'The query param `limit_attr_size` must be'\n 'an integer')\n return\n\n large_attrs_key = query_params.get('large_attrs_key', None)\n try:\n process_graph.prepare_graph_for_ui(graph, limit_attr_size,\n large_attrs_key)\n except ValueError as e:\n self.send_error(400, e.message)\n return\n\n # Serialize the graph to pbtxt format.\n graph_pbtxt = str(graph)\n # Gzip it and send it to the user.\n self._send_gzip_response(graph_pbtxt, 'text/plain')\n\n def _serve_run_metadata(self, query_params):\n \"\"\"Given a tag and a TensorFlow run, return the session.run() metadata.\"\"\"\n tag = query_params.get('tag', None)\n run = query_params.get('run', None)\n if tag is None:\n self.send_error(400, 'query parameter \"tag\" is required')\n return\n if run is None:\n self.send_error(400, 'query parameter \"run\" is required')\n return\n\n try:\n run_metadata = self._multiplexer.RunMetadata(run, tag)\n except ValueError:\n self.send_response(404)\n return\n # Serialize to pbtxt format.\n run_metadata_pbtxt = str(run_metadata)\n # Gzip it and send it to the user.\n self._send_gzip_response(run_metadata_pbtxt, 'text/plain')\n\n def _serve_histograms(self, query_params):\n \"\"\"Given a tag and single run, return an array of histogram values.\"\"\"\n tag = query_params.get('tag')\n run = query_params.get('run')\n values = self._multiplexer.Histograms(run, tag)\n self._send_json_response(values)\n\n def _serve_compressed_histograms(self, query_params):\n \"\"\"Given a tag and single run, return an array of compressed 
histograms.\"\"\"\n tag = query_params.get('tag')\n run = query_params.get('run')\n compressed_histograms = self._multiplexer.CompressedHistograms(run, tag)\n if query_params.get('format') == _OutputFormat.CSV:\n string_io = StringIO()\n writer = csv.writer(string_io)\n\n # Build the headers; we have two columns for timing and two columns for\n # each compressed histogram bucket.\n headers = ['Wall time', 'Step']\n if compressed_histograms:\n bucket_count = len(compressed_histograms[0].compressed_histogram_values)\n for i in xrange(bucket_count):\n headers += ['Edge %d basis points' % i, 'Edge %d value' % i]\n writer.writerow(headers)\n\n for compressed_histogram in compressed_histograms:\n row = [compressed_histogram.wall_time, compressed_histogram.step]\n for value in compressed_histogram.compressed_histogram_values:\n row += [value.rank_in_bps, value.value]\n writer.writerow(row)\n self._send_csv_response(string_io.getvalue())\n else:\n self._send_json_response(compressed_histograms)\n\n def _serve_images(self, query_params):\n \"\"\"Given a tag and list of runs, serve a list of images.\n\n Note that the images themselves are not sent; instead, we respond with URLs\n to the images. The frontend should treat these URLs as opaque and should not\n try to parse information about them or generate them itself, as the format\n may change.\n\n Args:\n query_params: The query parameters as a dict.\n \"\"\"\n tag = query_params.get('tag')\n run = query_params.get('run')\n\n images = self._multiplexer.Images(run, tag)\n response = self._image_response_for_run(images, run, tag)\n self._send_json_response(response)\n\n def _serve_image(self, query_params):\n \"\"\"Serves an individual image.\"\"\"\n tag = query_params.get('tag')\n run = query_params.get('run')\n index = int(query_params.get('index'))\n image = self._multiplexer.Images(run, tag)[index]\n encoded_image_string = image.encoded_image_string\n content_type = _content_type_for_image(encoded_image_string)\n self._respond(encoded_image_string, content_type)\n\n def _query_for_individual_image(self, run, tag, index):\n \"\"\"Builds a URL for accessing the specified image.\n\n This should be kept in sync with _serve_image. Note that the URL is *not*\n guaranteed to always return the same image, since images may be unloaded\n from the reservoir as new images come in.\n\n Args:\n run: The name of the run.\n tag: The tag.\n index: The index of the image. Negative values are OK.\n\n Returns:\n A string representation of a URL that will load the index-th\n sampled image in the given run with the given tag.\n \"\"\"\n query_string = urllib.parse.urlencode({\n 'run': run,\n 'tag': tag,\n 'index': index\n })\n return query_string\n\n def _serve_audio(self, query_params):\n \"\"\"Given a tag and list of runs, serve a list of audio.\n\n Note that the audio clips themselves are not sent; instead, we respond with\n URLs to the audio. 
The frontend should treat these URLs as opaque and should\n not try to parse information about them or generate them itself, as the\n format may change.\n\n Args:\n query_params: The query parameters as a dict.\n\n \"\"\"\n tag = query_params.get('tag')\n run = query_params.get('run')\n\n audio_list = self._multiplexer.Audio(run, tag)\n response = self._audio_response_for_run(audio_list, run, tag)\n self._send_json_response(response)\n\n def _serve_individual_audio(self, query_params):\n \"\"\"Serves an individual audio clip.\"\"\"\n tag = query_params.get('tag')\n run = query_params.get('run')\n index = int(query_params.get('index'))\n audio = self._multiplexer.Audio(run, tag)[index]\n encoded_audio_string = audio.encoded_audio_string\n content_type = audio.content_type\n self._respond(encoded_audio_string, content_type)\n\n def _query_for_individual_audio(self, run, tag, index):\n \"\"\"Builds a URL for accessing the specified audio.\n\n This should be kept in sync with _serve_individual_audio. Note that the URL\n is *not* guaranteed to always return the same audio, since audio may be\n unloaded from the reservoir as new audio comes in.\n\n Args:\n run: The name of the run.\n tag: The tag.\n index: The index of the audio. Negative values are OK.\n\n Returns:\n A string representation of a URL that will load the index-th\n sampled audio in the given run with the given tag.\n \"\"\"\n query_string = urllib.parse.urlencode({\n 'run': run,\n 'tag': tag,\n 'index': index\n })\n return query_string\n\n def _serve_runs(self, unused_query_params):\n \"\"\"Return a JSON object about runs and tags.\n\n Returns a mapping from runs to tagType to list of tags for that run.\n\n Returns:\n {runName: {images: [tag1, tag2, tag3],\n audio: [tag4, tag5, tag6],\n scalars: [tagA, tagB, tagC],\n histograms: [tagX, tagY, tagZ],\n firstEventTimestamp: 123456.789}}\n \"\"\"\n runs = self._multiplexer.Runs()\n for run_name, run_data in runs.items():\n try:\n run_data['firstEventTimestamp'] = self._multiplexer.FirstEventTimestamp(\n run_name)\n except ValueError:\n logging.warning('Unable to get first event timestamp for run %s',\n run_name)\n run_data['firstEventTimestamp'] = None\n self._send_json_response(runs)\n\n def _serve_index(self, unused_query_params):\n \"\"\"Serves the index page (i.e., the tensorboard app itself).\"\"\"\n self._serve_static_file('/dist/index.html')\n\n def _serve_js(self, unused_query_params):\n \"\"\"Serves the JavaScript for the index page.\"\"\"\n self._serve_static_file('/dist/app.js')\n\n def _serve_static_file(self, path):\n \"\"\"Serves the static file located at the given path.\n\n Args:\n path: The path of the static file, relative to the tensorboard/ directory.\n \"\"\"\n # Strip off the leading forward slash.\n path = path.lstrip('/')\n if not self._path_is_safe(path):\n logging.info('path %s not safe, sending 404', path)\n # Traversal attack, so 404.\n self.send_error(404)\n return\n\n if path.startswith('external'):\n # For compatibility with latest version of Bazel, we renamed bower\n # packages to use '_' rather than '-' in their package name.\n # This means that the directory structure is changed too.\n # So that all our recursive imports work, we need to modify incoming\n # requests to map onto the new directory structure.\n components = path.split('/')\n components[1] = components[1].replace('-', '_')\n path = ('/').join(components)\n path = os.path.join('../', path)\n else:\n path = os.path.join('tensorboard', path)\n # Open the file and read it.\n try:\n contents = 
resource_loader.load_resource(path)\n except IOError:\n logging.info('path %s not found, sending 404', path)\n self.send_error(404)\n return\n mimetype, encoding = mimetypes.guess_type(path)\n mimetype = mimetype or 'application/octet-stream'\n self._respond(contents, mimetype, encoding=encoding)\n\n def do_GET(self): # pylint: disable=invalid-name\n \"\"\"Handler for all get requests.\"\"\"\n parsed_url = urlparse.urlparse(self.path)\n\n # Remove a trailing slash, if present.\n clean_path = parsed_url.path\n if clean_path.endswith('/'):\n clean_path = clean_path[:-1]\n\n data_handlers = {\n DATA_PREFIX + SCALARS_ROUTE: self._serve_scalars,\n DATA_PREFIX + GRAPH_ROUTE: self._serve_graph,\n DATA_PREFIX + RUN_METADATA_ROUTE: self._serve_run_metadata,\n DATA_PREFIX + HISTOGRAMS_ROUTE: self._serve_histograms,\n DATA_PREFIX + COMPRESSED_HISTOGRAMS_ROUTE:\n self._serve_compressed_histograms,\n DATA_PREFIX + IMAGES_ROUTE: self._serve_images,\n DATA_PREFIX + INDIVIDUAL_IMAGE_ROUTE: self._serve_image,\n DATA_PREFIX + AUDIO_ROUTE: self._serve_audio,\n DATA_PREFIX + INDIVIDUAL_AUDIO_ROUTE: self._serve_individual_audio,\n DATA_PREFIX + RUNS_ROUTE: self._serve_runs,\n '/app.js': self._serve_js\n }\n\n query_params = urlparse.parse_qs(parsed_url.query)\n # parse_qs returns a list of values for each key; we're only interested in\n # the first.\n for key in query_params:\n value_count = len(query_params[key])\n if value_count != 1:\n self.send_error(\n 400, 'query parameter %s should have exactly one value, had %d' %\n (key, value_count))\n return\n query_params[key] = query_params[key][0]\n\n if clean_path in data_handlers:\n data_handlers[clean_path](query_params)\n elif clean_path in TAB_ROUTES:\n self._serve_index(query_params)\n else:\n self._serve_static_file(clean_path)\n\n\ndef _uniform_sample(values, count):\n \"\"\"Samples `count` values uniformly from `values`.\n\n Args:\n values: The values to sample from.\n count: The number of values to sample. Must be at least 2.\n\n Raises:\n ValueError: If `count` is not at least 2.\n TypeError: If `type(count) != int`.\n\n Returns:\n A list of values from `values`. The first and the last element will always\n be included. If `count > len(values)`, then all values will be returned.\n \"\"\"\n\n if count < 2:\n raise ValueError('Must sample at least 2 elements, %d requested' % count)\n\n if count >= len(values):\n # Copy the list in case the caller mutates it.\n return list(values)\n\n return [\n # We divide by count - 1 to make sure we always get the first and the last\n # element.\n values[(len(values) - 1) * i // (count - 1)] for i in xrange(count)\n ]\n"
] | [
[
"tensorflow.GPUOptions",
"tensorflow.get_default_graph",
"tensorflow.Variable",
"tensorflow.test.main",
"tensorflow.ConfigProto",
"tensorflow.initialize_all_variables",
"tensorflow.Session",
"tensorflow.train.ServerDef",
"tensorflow.matmul",
"tensorflow.fill",
"tensorflow.train.Server",
"tensorflow.FIFOQueue",
"tensorflow.InteractiveSession",
"tensorflow.shape",
"tensorflow.placeholder",
"tensorflow.container",
"tensorflow.train.Server.create_local_server",
"tensorflow.reduce_max",
"tensorflow.constant",
"tensorflow.train.ClusterSpec",
"tensorflow.reduce_min",
"numpy.empty",
"tensorflow.Session.reset"
],
[
"tensorflow.contrib.framework.python.ops.variables.get_global_step",
"tensorflow.python.ops.array_ops.shape",
"tensorflow.python.ops.array_ops.tile",
"tensorflow.python.framework.ops.get_collection",
"tensorflow.contrib.layers.python.layers.feature_column_ops.check_feature_columns",
"tensorflow.python.ops.state_ops.assign_add",
"tensorflow.contrib.learn.python.learn.estimators.composable_model.LinearComposableModel",
"tensorflow.contrib.learn.python.learn.estimators.composable_model.DNNComposableModel",
"tensorflow.python.training.training.AdagradOptimizer",
"tensorflow.python.framework.ops.get_default_graph",
"tensorflow.python.ops.array_ops.reshape",
"tensorflow.python.ops.logging_ops.scalar_summary",
"tensorflow.contrib.layers.multi_class_target",
"tensorflow.contrib.layers.regression_target",
"tensorflow.python.ops.array_ops.zeros",
"numpy.argmax",
"tensorflow.python.ops.parsing_ops.parse_example",
"tensorflow.python.framework.ops.control_dependencies"
],
[
"tensorflow.python.ops.math_ops.log",
"tensorflow.python.framework.ops.op_scope",
"tensorflow.python.ops.math_ops.exp",
"tensorflow.python.framework.ops.name_scope",
"tensorflow.python.ops.array_ops.zeros",
"tensorflow.python.framework.ops.convert_to_tensor",
"tensorflow.python.ops.math_ops.reduce_sum"
],
[
"tensorflow.python.ops.array_ops.constant",
"tensorflow.python.ops.array_ops.shape",
"tensorflow.python.ops.nn.in_top_k",
"tensorflow.python.framework.ops.op_scope",
"tensorflow.python.ops.state_ops.assign_add",
"tensorflow.python.ops.math_ops.greater",
"tensorflow.python.ops.array_ops.squeeze",
"tensorflow.python.ops.array_ops.zeros",
"tensorflow.python.ops.math_ops.sqrt",
"tensorflow.python.ops.math_ops.to_float",
"tensorflow.contrib.metrics.python.ops.metric_ops_util.remove_squeezable_dimensions",
"tensorflow.python.ops.array_ops.diag_part",
"tensorflow.python.ops.array_ops.identity",
"tensorflow.python.ops.math_ops.sub",
"tensorflow.python.ops.math_ops.logical_not",
"tensorflow.contrib.metrics.python.ops.set_ops.set_intersection",
"tensorflow.python.ops.array_ops.transpose",
"tensorflow.python.ops.math_ops.to_int64",
"tensorflow.python.ops.math_ops.abs",
"tensorflow.python.ops.math_ops.less",
"tensorflow.contrib.metrics.python.ops.set_ops.set_difference",
"tensorflow.python.ops.math_ops.add",
"tensorflow.python.ops.array_ops.size",
"tensorflow.python.framework.ops.control_dependencies",
"tensorflow.python.ops.array_ops.pack",
"tensorflow.contrib.metrics.python.ops.confusion_matrix_ops.confusion_matrix",
"tensorflow.python.ops.math_ops.square",
"tensorflow.python.ops.array_ops.zeros_like",
"tensorflow.python.ops.math_ops.equal",
"tensorflow.python.ops.math_ops.truediv",
"tensorflow.python.ops.math_ops.div",
"tensorflow.python.ops.math_ops.mul",
"tensorflow.python.ops.math_ops.reduce_mean",
"tensorflow.python.ops.nn.top_k",
"tensorflow.python.ops.check_ops.assert_type",
"tensorflow.python.framework.ops.add_to_collections",
"tensorflow.python.ops.array_ops.ones_like",
"tensorflow.python.ops.array_ops.reshape",
"tensorflow.python.ops.variable_scope.variable_op_scope",
"tensorflow.python.ops.array_ops.boolean_mask",
"tensorflow.python.ops.math_ops.reduce_sum",
"tensorflow.python.util.all_util.make_all"
],
[
"tensorflow.python.ops.math_ops.add_n",
"tensorflow.python.ops.data_flow_ops.RandomShuffleQueue",
"tensorflow.python.training.queue_runner.QueueRunner",
"tensorflow.python.platform.gfile.Glob",
"tensorflow.python.training.input.string_input_producer",
"tensorflow.python.framework.ops.name_scope",
"tensorflow.python.ops.data_flow_ops.FIFOQueue"
],
[
"pandas.Series",
"pandas.date_range",
"tensorflow.contrib.learn.python.learn.datasets.load_iris",
"tensorflow.test.main",
"pandas.DataFrame",
"tensorflow.contrib.learn.python.learn.infer_real_valued_columns_from_input"
],
[
"tensorflow.python.platform.tf_logging.warning",
"tensorflow.python.platform.resource_loader.load_resource",
"tensorflow.tensorboard.backend.process_graph.prepare_graph_for_ui",
"tensorflow.python.platform.tf_logging.info",
"tensorflow.python.util.compat.as_bytes",
"tensorflow.tensorboard.lib.python.json_util.WrapSpecialFloats"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"1.12",
"1.4",
"1.13",
"1.5",
"1.7",
"0.12",
"1.0",
"1.2"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.8",
"1.10",
"1.12",
"2.7",
"2.6",
"1.4",
"1.13",
"2.3",
"2.4",
"2.9",
"1.5",
"1.7",
"2.5",
"0.12",
"1.0",
"2.2",
"1.2",
"2.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"0.12"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.8",
"1.10",
"1.12",
"2.7",
"2.6",
"1.4",
"1.13",
"2.3",
"2.4",
"2.9",
"1.5",
"1.7",
"2.5",
"0.12",
"1.0",
"2.2",
"1.2",
"2.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"0.19",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": [
"1.10",
"1.12",
"1.4",
"1.13",
"1.5",
"1.7",
"0.12",
"1.0",
"1.2"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
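The TensorBoard handler in the record above ends with a `_uniform_sample` helper that picks `count` evenly spaced values while always keeping the first and last element. A minimal standalone sketch of the same index arithmetic, using Python 3's `range` in place of the record's `xrange`:

    def uniform_sample(values, count):
        # Must keep at least the two endpoints.
        if count < 2:
            raise ValueError('Must sample at least 2 elements, %d requested' % count)
        # When the request exceeds the data, return a copy of everything.
        if count >= len(values):
            return list(values)
        # Dividing by count - 1 maps i = 0 to index 0 and i = count - 1 to the
        # last index, so both endpoints are always included.
        return [values[(len(values) - 1) * i // (count - 1)] for i in range(count)]

    print(uniform_sample(list(range(10)), 4))  # -> [0, 3, 6, 9]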
Huite/timml | [
"5eb52066be094326343fe26b46555253fef44dc9"
] | [
"timml/model.py"
] | [
"\"\"\"\nModel classes\n\n\"\"\"\n\nimport numpy as np\nimport sys\nimport inspect # Used for storing the input\nfrom .aquifer import Aquifer\nfrom .aquifer_parameters import param_maq, param_3d\nfrom .constant import ConstantStar\nfrom .util import PlotTim\nimport multiprocessing as mp\n\n__all__ = ['Model', 'ModelMaq', 'Model3D']\n\nclass Model(PlotTim):\n \"\"\"\n Model Class to create a model object consisting of an arbitrary\n sequence of aquifer layers and leaky layers.\n Use ModelMaq for regular sequence of aquifers and leaky layers.\n Use Model3D for multi-layer model of a single aquifer\n \n Parameters\n ----------\n kaq : array\n hydraulic conductivity of each aquifer from the top down\n z : array\n elevation tops and bottoms of all layers\n layers may have zero thickness\n c : array\n resistance between two consecutive aquifer layers\n if ltype[0]='a': length is number of aquifers - 1\n if ltype[0]='l': length is number of aquifers\n npor : array\n porosity of all layers from the top down\n ltype : array of characters\n array indicating for each layer whether it is\n 'a' aquifer layer\n 'l' leaky layer\n \n \"\"\"\n \n def __init__(self, kaq, c, z, npor, ltype, f2py=False):\n # All input variables are numpy arrays\n # That should be checked outside this function\n self.elementlist = []\n self.elementdict = {} # only elements that have a label\n self.aq = Aquifer(self, kaq, c, z, npor, ltype)\n self.modelname = 'ml' # Used for writing out input\n self.f2py = False\n if f2py:\n try:\n from .src import besselaesnew\n self.f2py = True\n except:\n print('FORTRAN extension not found while f2py=True')\n print('Using Numba instead')\n\n def initialize(self):\n # remove inhomogeneity elements (they are added again)\n self.elementlist = [e for e in self.elementlist if not e.inhomelement]\n self.aq.initialize()\n for e in self.elementlist:\n e.initialize()\n\n def add_element(self, e):\n self.elementlist.append(e)\n if e.label is not None: self.elementdict[e.label] = e\n\n def remove_element(self, e):\n \"\"\"Remove element `e` from model\n \"\"\"\n \n if e.label is not None: self.elementdict.pop(e.label)\n self.elementlist.remove(e)\n\n def storeinput(self, frame):\n self.inputargs, _, _, self.inputvalues = inspect.getargvalues(frame)\n\n def potential(self, x, y, aq=None):\n if aq is None: aq = self.aq.find_aquifer_data(x, y)\n pot = np.zeros(aq.naq)\n for e in aq.elementlist:\n pot += e.potential(x, y, aq)\n rv = np.sum(pot * aq.eigvec, 1)\n if aq.ltype[0] == 'l':\n # potential for head above leaky layer\n rv += aq.constantstar.potstar\n return rv\n\n def disvec(self, x, y, aq=None):\n \"\"\"Discharge vector at `x`, `y`\n \n Returns\n -------\n \n qxqy : array size (2, naq)\n first row is Qx in each aquifer layer, second row is Qy\n \"\"\"\n \n if aq is None: aq = self.aq.find_aquifer_data(x, y)\n rv = np.zeros((2, aq.naq))\n for e in aq.elementlist:\n rv += e.disvec(x, y, aq)\n rv = np.sum(rv[:, np.newaxis, :] * aq.eigvec, 2)\n return rv\n \n def qztop(self, x, y, aq=None):\n if aq is None: aq = self.aq.find_aquifer_data(x, y)\n rv = 0.0\n if aq.ltype[0] == 'a': # otherwise recharge cannot be added\n for e in aq.elementlist:\n rv += e.qztop(x, y)\n return rv\n\n def head(self, x, y, layers=None, aq=None):\n \"\"\"Head at `x`, `y`\n \n Returns\n -------\n \n h : array length `naq` or `len(layers)`\n head in all `layers` (if not `None`), \n or all layers of aquifer (otherwise)\n \"\"\"\n \n if aq is None: aq = self.aq.find_aquifer_data(x, y)\n rv = self.potential(x, y, aq) / aq.T\n if 
layers is None:\n return rv\n else:\n return rv[layers]\n\n def headgrid(self, xg, yg, layers=None, printrow=False):\n \"\"\"Grid of heads\n \n Parameters\n ----------\n xg : array\n x values of grid\n yg : array\n y values of grid\n layers : integer, list or array, optional\n layers for which grid is returned\n printrow : boolean, optional\n prints dot to screen for each row of grid if set to `True`\n \n Returns\n -------\n h : array size `nlayers, ny, nx`\n \n See also\n --------\n \n :func:`~timml.model.Model.headgrid2`\n\n \"\"\"\n \n nx, ny = len(xg), len(yg)\n if layers is None:\n Nlayers = self.aq.find_aquifer_data(xg[0], yg[0]).naq\n else:\n Nlayers = len(np.atleast_1d(layers))\n h = np.empty((Nlayers, ny, nx))\n for j in range(ny):\n if printrow:\n print('.', end='', flush=True)\n for i in range(nx):\n h[:, j, i] = self.head(xg[i], yg[j], layers)\n if printrow:\n print('', flush=True)\n return h\n\n def headgrid2(self, x1, x2, nx, y1, y2, ny, layers=None, printrow=False):\n \"\"\"Grid of heads\n \n Parameters\n ----------\n x1, x2, nx : \n x values are generated as linspace(x1, x2, nx)\n y1, y2, ny : \n y values are generated as linspace(y1, y2, ny)\n layers : integer, list or array, optional\n layers for which grid is returned\n printrow : boolean, optional\n prints dot to screen for each row of grid if set to `True`\n \n Returns\n -------\n h : array size `nlayers, ny, nx`\n \n See also\n --------\n \n :func:`~timml.model.Model.headgrid`\n \n \"\"\"\n \n xg, yg = np.linspace(x1, x2, nx), np.linspace(y1, y2, ny)\n return self.headgrid(xg, yg, layers=layers, printrow=printrow)\n\n def headalongline(self, x, y, layers=None):\n \"\"\"Head along line or curve\n \n Parameters\n ----------\n x : array\n x values of line\n y : array\n y values of line\n layers : integer, list or array, optional\n layers for which grid is returned\n \n Returns\n -------\n h : array size `nlayers, nx`\n\n \"\"\"\n \n xg, yg = np.atleast_1d(x), np.atleast_1d(y)\n if layers is None:\n Nlayers = self.aq.find_aquifer_data(xg[0], yg[0]).naq\n else:\n Nlayers = len(np.atleast_1d(layers))\n nx = len(xg)\n if len(yg) == 1:\n yg = yg * np.ones(nx)\n h = np.zeros((Nlayers, nx))\n for i in range(nx):\n h[:, i] = self.head(xg[i], yg[i], layers)\n return h\n \n def disvecalongline(self, x, y, layers=None):\n '''Returns Qx[Nlayers,len(x)], Qy[Nlayers,len(x)]\n Assumes same number of layers for each x and y\n layers may be None or list of layers for which head is computed'''\n xg, yg = np.atleast_1d(x), np.atleast_1d(y)\n if layers is None:\n nlayers = self.aq.find_aquifer_data(xg[0], yg[0]).naq\n else:\n nlayers = len(np.atleast_1d(layers))\n nx = len(xg)\n if len(yg) == 1:\n yg = yg * np.ones(nx)\n Qx = np.zeros((nlayers, nx))\n Qy = np.zeros((nlayers, nx))\n for i in range(nx):\n Qx[:, i], Qy[:, 1] = self.disvec(xg[i], yg[i], layers)\n return Qx, Qy\n \n# def disvec_direction(self, s, x1, y1, cdirection):\n# pass\n# \n# def discharge_across_line(self, x1, y1, x2, y2, layers=None):\n# if layers is None:\n# nlayers = self.aq.find_aquifer_data(x1, y1).naq\n# else:\n# nlayers = len(np.atleast_1d(layers))\n# z1 = x1 + y1 * 1j\n# z2 = x2 + y2 * 1j\n# normvec = (z2 - z1) / np.abs(z2 - z1) * np.exp(-np.pi * 1j / 2)\n# disvec = self.disvec(xg[i], yg[i], layers)\n \n def velocity(self, x, y, z):\n return self.velocomp(x, y, z)\n \n def velocomp(self, x, y, z, aq=None, layer_ltype=None):\n if aq is None: aq = self.aq.find_aquifer_data(x, y)\n assert z <= aq.z[0] and z >= aq.z[-1], \"z value not inside aquifer\"\n if layer_ltype 
is None:\n layer, ltype, dummy = aq.findlayer(z)\n else:\n layer, ltype = layer_ltype\n h = self.head(x, y, aq=aq)\n # qz between aquifer layers\n qzlayer = np.zeros(aq.naq + 1)\n qzlayer[1:-1] = (h[1:] - h[:-1]) / aq.c[1:]\n if aq.ltype[0] == 'l':\n qzlayer[0] = (h[0] - aq.hstar) / aq.c[0]\n if ltype == 'l':\n vz = qzlayer[layer] / aq.nporll[layer]\n vx = 0\n vy = 0\n else:\n qzbot = qzlayer[layer + 1]\n qztop = qzlayer[layer]\n if layer == 0:\n qztop += self.qztop(x, y) \n vz = (qzbot + (z - aq.zaqbot[layer]) / aq.Haq[layer] * \\\n (qztop - qzbot)) / aq.nporaq[layer] \n qx, qy = self.disvec(x, y, aq=aq)\n vx = qx[layer] / (aq.Haq[layer] * aq.nporaq[layer])\n vy = qy[layer] / (aq.Haq[layer] * aq.nporaq[layer])\n return np.array([vx, vy, vz])\n \n def solve(self, printmat=0, sendback=0, silent=False):\n '''Compute solution'''\n # Initialize elements\n self.initialize()\n # Compute number of equations\n self.neq = np.sum([e.nunknowns for e in self.elementlist])\n if self.neq == 0: return\n if silent is False:\n print('Number of elements, Number of equations:', len(\n self.elementlist), ',', self.neq)\n if self.neq == 0:\n if silent is False: print('No unknowns. Solution complete')\n return\n mat = np.empty((self.neq, self.neq))\n rhs = np.empty(self.neq)\n ieq = 0\n for e in self.elementlist:\n if e.nunknowns > 0:\n mat[ieq:ieq + e.nunknowns, :], rhs[ieq:ieq + e.nunknowns] = \\\n e.equation()\n ieq += e.nunknowns\n if silent is False:\n print('.', end='', flush=True)\n if printmat:\n return mat, rhs\n sol = np.linalg.solve(mat, rhs)\n icount = 0\n for e in self.elementlist:\n if e.nunknowns > 0:\n e.setparams(sol[icount:icount + e.nunknowns])\n icount += e.nunknowns\n if silent is False:\n print() # needed cause the dots are printed\n print('solution complete')\n elif (silent == 'dot') or (silent == '.'):\n print('.', end='', flush=True)\n if sendback:\n return sol\n return\n\n def solve_mp(self, nproc=4, printmat=0, sendback=0, silent=False):\n '''Compute solution, multiprocessing implementation.\n Note: estimated speedup approximately by factor of\n number of physical cores. Virtual cores do not improve\n calculation time.'''\n # Initialize elements\n self.initialize()\n # Compute number of equations\n self.neq = np.sum([e.nunknowns for e in self.elementlist])\n if self.neq == 0: return\n if silent is False:\n print('Number of elements, Number of equations:', len(\n self.elementlist), ',', self.neq)\n if self.neq == 0:\n if silent is False: print('No unknowns. Solution complete')\n return\n mat = np.empty((self.neq, self.neq))\n rhs = np.empty(self.neq)\n\n # start multiprocessing\n if nproc is None:\n nproc = mp.cpu_count() - 1 # make no. of processes equal to 1 less than no. of cores\n elif nproc > mp.cpu_count():\n print(\"Given 'nproc' larger than no. of cores on machine. 
Setting 'nproc' to {}.\".format(mp.cpu_count()))\n nproc = mp.cpu_count()\n\n pool = mp.Pool(processes=nproc)\n results = []\n for e in self.elementlist:\n if e.nunknowns > 0:\n results.append(pool.apply_async(e.equation))\n if silent is False:\n print('.', end='', flush=True)\n\n pool.close()\n pool.join()\n\n mat = np.empty((self.neq, self.neq))\n rhs = np.zeros(self.neq)\n\n ieq = 0\n\n for p in results:\n imat, irhs = p.get()\n mat[ieq:ieq + imat.shape[0], :] = imat\n rhs[ieq:ieq + irhs.shape[0]] = irhs\n ieq += imat.shape[0]\n\n # end multiprocessing\n\n if printmat:\n return mat, rhs\n sol = np.linalg.solve(mat, rhs)\n icount = 0\n for e in self.elementlist:\n if e.nunknowns > 0:\n e.setparams(sol[icount:icount + e.nunknowns])\n icount += e.nunknowns\n if silent is False:\n print() # needed cause the dots are printed\n print('solution complete')\n elif (silent == 'dot') or (silent == '.'):\n print('.', end='', flush=True)\n if sendback:\n return sol\n return\n \n def write(self):\n rv = self.modelname + ' = ' + self.name + '(\\n'\n for key in self.inputargs[1:]: # The first argument (self) is ignored\n if isinstance(self.inputvalues[key], np.ndarray):\n rv += key + ' = ' + np.array2string(self.inputvalues[key], \n separator=',') + ',\\n'\n elif isinstance(self.inputvalues[key],str): \n rv += key + \" = '\" + self.inputvalues[key] + \"',\\n\"\n else:\n rv += key + ' = ' + str(self.inputvalues[key]) + ',\\n'\n rv += ')\\n'\n return rv\n \n def writemodel(self, fname):\n self.initialize() # So that the model can be written without solving first\n f = open(fname, 'w')\n f.write('from timml import *\\n')\n f.write(self.write())\n for e in self.elementlist:\n f.write(e.write())\n f.close()\n \nclass ModelMaq(Model):\n \"\"\"\n Create a Model object by specifying a mult-aquifer sequence of\n aquifer-leakylayer-aquifer-leakylayer-aquifer etc\n \n Parameters\n ----------\n kaq : float, array or list\n Hydraulic conductivity of each aquifer from the top down.\n If float, hydraulic conductivity is the same in all aquifers.\n z : array or list\n Elevation of tops and bottoms of the aquifers from the top down.\n Leaky layers may have zero thickness.\n * if topboundary='conf': length is 2 * number of aquifers\n * if topboundary='semi': length is 2 * number of aquifers + 1 \n as top of leaky layer on top of systems needs to be specified\n c : float, array or list\n Resistance of leaky layers from the top down.\n * if float, resistance is the same for all leaky layers\n * if topboundary='conf': length is number of aquifers - 1\n * if topboundary='semi': length is number of aquifers\n npor : float, array or list\n Porosity of all aquifers and leaky layers from the top down.\n * if float, porosity is the same for all layers\n * if topboundary='conf': length is 2 * number of aquifers - 1\n * if topboundary='semi': length is 2 * number of aquifers\n topboundary : string, 'conf' or 'semi' (default is 'conf')\n Indicates whether the topboundary is confined ('conf') or\n semi-confined ('semi').\n hstar : float or None (default is None)\n Head value above semi-confining top, only read if topboundary='semi'.\n\n Examples\n --------\n >>> ml = ModelMaq(kaq=[10, 20], z=[20, 12, 10, 0], c=1000)\n \n \"\"\"\n \n def __init__(self, kaq=1, z=[1, 0], c=[], npor=0.3, topboundary='conf',\n hstar=None, f2py=False):\n self.storeinput(inspect.currentframe())\n kaq, c, npor, ltype = param_maq(kaq, z, c, npor, topboundary)\n Model.__init__(self, kaq, c, z, npor, ltype, f2py)\n self.name = 'ModelMaq'\n if self.aq.ltype[0] 
== 'l':\n ConstantStar(self, hstar, aq=self.aq)\n \nclass Model3D(Model):\n \"\"\"\n Model3D Class to create a multi-layer model object consisting of\n many aquifer layers. The resistance between the layers is computed\n from the vertical hydraulic conductivity of the layers.\n \n Parameters\n ----------\n kaq : float, array or list\n hydraulic conductivity of each layer from the top down\n if float, hydraulic conductivity is the same in all aquifers\n z : array or list\n elevation of top of system followed by bottoms of all layers\n from the top down\n bottom of layer is automatically equal to top of layer below it\n length is number of aquifer layers + 1\n kzoverkh : float\n vertical anisotropy ratio vertical k divided by horizontal k\n if float, value is the same for all layers\n length is number of layers\n npor : float, array or list\n porosity of all aquifer layers\n from the top down\n if float, porosity is the same for all layers\n if topboundary='conf': length is number of layers\n if topboundary='semi': length is number of layers + 1\n topboundary : string, 'conf' or 'semi' (default is 'conf')\n indicating whether the top is confined ('conf') or\n semi-confined ('semi')\n topres : float\n resistance of top semi-confining layer (read if topboundary='semi')\n topthick: float\n thickness of top semi-confining layer (read if topboundary='semi')\n hstar : float or None (default is None)\n head value above semi-confining top (read if topboundary='semi')\n\n Examples\n --------\n >>> ml = Model3D(kaq=10, z=np.arange(20, -1, -2), kzoverkh=0.1)\n \n \"\"\"\n \n def __init__(self, kaq=1, z=[1, 0], kzoverkh=1, npor=0.3,\n topboundary='conf', topres=0, topthick=0, hstar=0,\n f2py=False):\n '''Model3D\n for semi-confined aquifers, set top equal to 'semi' and provide\n topres: resistance of top\n tophick: thickness of top\n hstar: head above top'''\n self.storeinput(inspect.currentframe())\n kaq, c, npor, ltype = param_3d(kaq, z, kzoverkh, npor, topboundary,\n topres)\n if topboundary == 'semi':\n z = np.hstack((z[0] + topthick, z))\n Model.__init__(self, kaq, c, z, npor, ltype, f2py)\n self.name = 'Model3D'\n if self.aq.ltype[0] == 'l':\n ConstantStar(self, hstar, aq=self.aq)\n\n"
] | [
[
"numpy.hstack",
"numpy.linalg.solve",
"numpy.linspace",
"numpy.ones",
"numpy.atleast_1d",
"numpy.array2string",
"numpy.array",
"numpy.zeros",
"numpy.sum",
"numpy.empty"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
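`Model.solve` in the timml record above stacks the rows returned by each element's `equation()` into one dense matrix and right-hand side and solves it with `numpy.linalg.solve` (one of the APIs listed for the record). A toy sketch of that assemble-then-solve pattern, with hypothetical element blocks standing in for real timml elements:

    import numpy as np

    # Hypothetical element contributions: (coefficient rows, rhs values),
    # standing in for what e.equation() returns per element in timml.
    elements = [
        (np.array([[2.0, 1.0, 0.0],
                   [0.0, 3.0, 1.0]]), np.array([5.0, 10.0])),
        (np.array([[1.0, 0.0, 4.0]]), np.array([9.0])),
    ]

    neq = sum(erhs.size for _, erhs in elements)
    mat = np.empty((neq, neq))
    rhs = np.empty(neq)

    ieq = 0
    for emat, erhs in elements:
        n = erhs.size
        mat[ieq:ieq + n, :] = emat   # each element fills its own block of rows
        rhs[ieq:ieq + n] = erhs
        ieq += n

    sol = np.linalg.solve(mat, rhs)  # timml then hands slices of sol back via setparams()
    print(sol)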
pearlfranz20/AL_Core | [
"6592079330c7ec3ca264b86f8414970ddab06c0e"
] | [
"apprentice/learners/when_learners/actor_critic.py"
] | [
"import torch\nimport torch.nn as nn\n\n\nclass ValueNet(nn.Module):\n \"\"\"\n The part of the actor critic network that computes the state value. Also,\n returns the hidden layer before state valuation, for use in action network.\n \"\"\"\n\n def __init__(self, n_inputs: int, n_hidden: int = None):\n \"\"\"\n Specify the number of inputs. Also, specify the number of nodes in each\n hidden layer. If no value is provided for the number of hidden, then\n it is set to half the number of inputs.\n \"\"\"\n super(ValueNet, self).__init__()\n\n if n_hidden is None:\n n_hidden = (n_inputs + 2) // 2\n\n self.n_hidden = n_hidden\n\n self.hidden = nn.Sequential(\n nn.Linear(n_inputs, n_hidden),\n nn.ReLU()\n )\n\n self.value = nn.Linear(n_hidden, 1)\n\n def forward(self, x):\n \"\"\"\n Returns the value of the state and the hidden layer values.\n \"\"\"\n x = self.hidden(x)\n return self.value(x), x\n\n\nclass ActionNet(nn.Module):\n \"\"\"\n The part of the actor critic network that computes the action value.\n \"\"\"\n\n def __init__(self, n_action_inputs: int, n_value_hidden: int,\n n_action_hidden: int = None):\n \"\"\"\n Takes as input the action features and the hidden values from the value\n net. Returns a value for the action.\n \"\"\"\n super(ActionNet, self).__init__()\n\n if n_action_hidden is None:\n n_action_hidden = (n_action_inputs + n_value_hidden + 2) // 2\n\n self.hidden = nn.Sequential(\n nn.Linear(n_action_inputs + n_value_hidden, n_action_hidden),\n nn.ReLU()\n )\n\n self.action_value = nn.Linear(n_action_hidden, 1)\n\n def forward(self, action_x, value_hidden):\n \"\"\"\n Returns the value of the state and the hidden layer values.\n \"\"\"\n x = self.hidden(torch.cat((action_x, value_hidden), 1))\n return self.action_value(x)\n"
] | [
[
"torch.nn.Linear",
"torch.nn.ReLU",
"torch.cat"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
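The actor-critic pair in the AL_Core record above first scores a state with `ValueNet`, then scores each candidate action with `ActionNet` conditioned on the value network's hidden layer. A usage sketch, assuming the two classes from the record are in scope; the batch and feature sizes are made up:

    import torch

    # ValueNet / ActionNet as defined in the record above; sizes here are illustrative.
    state_feats = torch.randn(8, 20)     # 8 states, 20 features each
    action_feats = torch.randn(8, 12)    # one candidate action per state, 12 features

    value_net = ValueNet(n_inputs=20)    # hidden width defaults to (20 + 2) // 2 = 11
    action_net = ActionNet(n_action_inputs=12, n_value_hidden=value_net.n_hidden)

    state_value, hidden = value_net(state_feats)      # shapes (8, 1) and (8, 11)
    action_value = action_net(action_feats, hidden)   # shape (8, 1)
    print(state_value.shape, action_value.shape)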
hakanhp/chanel | [
"6825b60e86c46daabb18f40f1e45d3de2ff8e983",
"6825b60e86c46daabb18f40f1e45d3de2ff8e983"
] | [
"tensorflow_model_analysis/eval_saved_model/testutil.py",
"tensorflow_model_analysis/eval_saved_model/export.py"
] | [
"# Copyright 2018 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Utilities for writing tests.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\n\nfrom __future__ import print_function\n\nimport math\nimport tempfile\nimport tensorflow as tf\nfrom tensorflow_model_analysis.types_compat import Dict, Iterable, Union, Sequence, Tuple\n\nfrom tensorflow.core.example import example_pb2\n\n\nclass TensorflowModelAnalysisTest(tf.test.TestCase):\n \"\"\"Test class that extends tf.test.TestCase with extra functionality.\"\"\"\n\n def setUp(self):\n self.longMessage = True # pylint: disable=invalid-name\n\n def _getTempDir(self):\n return tempfile.mkdtemp()\n\n def _makeExample(self, **kwargs):\n \"\"\"Make a TensorFlow Example with the given fields.\n\n The arguments can be singleton values, or a list of values, e.g.\n _makeExample(age=3.0, fruits=['apples', 'pears', 'oranges']).\n Empty lists are not allowed, since we won't be able to deduce the type.\n\n Args:\n **kwargs: Each key=value pair defines a field in the example to be\n constructed. The name of the field will be key, and the value will be\n value. The type will be deduced from the type of the value.\n\n Returns:\n TensorFlow.Example with the corresponding fields set to the corresponding\n values.\n\n Raises:\n ValueError: One of the arguments was an empty list.\n TypeError: One of the elements (or one of the elements in a list) had an\n unsupported type.\n \"\"\"\n result = example_pb2.Example()\n for key, value in kwargs.items():\n if isinstance(value, float) or isinstance(value, int):\n result.features.feature[key].float_list.value[:] = [value]\n elif isinstance(value, str):\n result.features.feature[key].bytes_list.value[:] = [value]\n elif isinstance(value, list):\n if len(value) == 0: # pylint: disable=g-explicit-length-test\n raise ValueError('empty lists not allowed, but field %s was an empty '\n 'list' % key)\n if isinstance(value[0], float) or isinstance(value[0], int):\n result.features.feature[key].float_list.value[:] = value\n elif isinstance(value[0], str):\n result.features.feature[key].bytes_list.value[:] = value\n else:\n raise TypeError('field %s was a list, but the first element had '\n 'unknown type %s' % key, type(value[0]))\n else:\n raise TypeError('unrecognised type for field %s: type %s' %\n (key, type(value)))\n return result\n\n def assertHasKeyWithValueAlmostEqual(self,\n d,\n key,\n value,\n places = 5):\n self.assertIn(key, d)\n self.assertAlmostEqual(d[key], value, places=places, msg='key %s' % key)\n\n def assertDictElementsAlmostEqual(self,\n got_values_dict,\n expected_values_dict,\n places = 5):\n for key, expected_value in expected_values_dict.items():\n self.assertHasKeyWithValueAlmostEqual(got_values_dict, key,\n expected_value, places)\n\n def assertDictMatrixRowsAlmostEqual(\n self,\n got_values_dict,\n expected_values_dict,\n places = 5):\n \"\"\"Fails if got_values_dict does not match values in expected_values_dict.\n\n For each entry, 
expected_values_dict provides the row index and the values\n of that row to be compared to the bucketing result in got_values_dict. For\n example:\n got_values_dict={'key', [[1,2,3],[4,5,6],[7,8,9]]}\n you can check the first and last row of got_values_dict[key] by setting\n expected_values_dict={'key', [(0,[1,2,3]), (2,[7,8,9])]}\n\n Args:\n got_values_dict: The dict got, where each value represents a full\n bucketing result.\n expected_values_dict: The expected dict. It may contain a subset of keys\n in got_values_dict. The value is of type \"Iterable[Tuple[int,\n Iterable[scalar]]]\", where each Tuple contains the index of a row to be\n checked and the expected values of that row.\n places: The number of decimal places to compare.\n \"\"\"\n for key, expected_value in expected_values_dict.items():\n self.assertIn(key, got_values_dict)\n for (row, values) in expected_value:\n self.assertSequenceAlmostEqual(\n got_values_dict[key][row],\n values,\n places=places,\n msg_prefix='for key %s, row %d: ' % (key, row))\n\n def assertSequenceAlmostEqual(self,\n got_seq,\n expected_seq,\n places = 5,\n msg_prefix=''):\n got = list(got_seq)\n expected = list(expected_seq)\n self.assertEqual(\n len(got), len(expected), msg=msg_prefix + 'lengths do not match')\n for index, (a, b) in enumerate(zip(got, expected)):\n msg = msg_prefix + 'at index %d. sequences were: %s and %s' % (index, got,\n expected),\n if math.isnan(a) or math.isnan(b):\n self.assertEqual(math.isnan(a), math.isnan(b), msg=msg)\n else:\n self.assertAlmostEqual(a, b, msg=msg, places=places)\n",
"# Copyright 2018 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Library for exporting the EvalSavedModel.\"\"\"\n\n\nfrom __future__ import absolute_import\nfrom __future__ import division\n\nfrom __future__ import print_function\n\nimport os\nimport time\n\n\nimport tensorflow as tf\nfrom tensorflow_model_analysis import types\nfrom tensorflow_model_analysis.eval_saved_model import encoding\nfrom tensorflow_model_analysis.eval_saved_model import util\nfrom tensorflow_model_analysis.types_compat import Callable, Optional, NamedTuple # pytype: disable=not-supported-yet\n\nfrom tensorflow.core.protobuf import config_pb2\nfrom tensorflow.python.platform import gfile\nfrom tensorflow.python.util import compat\n\n\nclass EvalInputReceiver(\n NamedTuple('EvalInputReceiver',\n [('features', types.TensorTypeMaybeDict),\n ('receiver_tensors', types.TensorTypeMaybeDict),\n ('labels', types.TensorTypeMaybeDict)])):\n \"\"\"A return type for eval_input_receiver_fn.\n\n The expected return values are:\n features: A `Tensor`, `SparseTensor`, or dict of string to `Tensor` or\n `SparseTensor`, specifying the features to be passed to the model.\n receiver_tensors: A `Tensor`, or dict of string to `Tensor`, specifying\n input nodes where this receiver expects to be fed by default. Typically\n this is a single placeholder expecting serialized `tf.Example` protos.\n labels: A `Tensor`, `SparseTensor`, or dict of string to `Tensor` or\n `SparseTensor`, specifying the labels to be passed to the model.\n \"\"\"\n\n # When we create a timestamped directory, there is a small chance that the\n\n\n# directory already exists because another worker is also writing exports.\n# In this case we just wait one second to get a new timestamp and try again.\n# If this fails several times in a row, then something is seriously wrong.\nMAX_DIRECTORY_CREATION_ATTEMPTS = 10\n\n\ndef _get_timestamped_export_dir(export_dir_base):\n \"\"\"Builds a path to a new subdirectory within the base directory.\n\n Each export is written into a new subdirectory named using the\n current time. 
This guarantees monotonically increasing version\n numbers even across multiple runs of the pipeline.\n The timestamp used is the number of seconds since epoch UTC.\n\n Args:\n export_dir_base: A string containing a directory to write the exported\n graph and checkpoints.\n Returns:\n The full path of the new subdirectory (which is not actually created yet).\n\n Raises:\n RuntimeError: if repeated attempts fail to obtain a unique timestamped\n directory name.\n \"\"\"\n attempts = 0\n while attempts < MAX_DIRECTORY_CREATION_ATTEMPTS:\n export_timestamp = int(time.time())\n\n export_dir = os.path.join(\n compat.as_bytes(export_dir_base), compat.as_bytes(\n str(export_timestamp)))\n if not gfile.Exists(export_dir):\n # Collisions are still possible (though extremely unlikely): this\n # directory is not actually created yet, but it will be almost\n # instantly on return from this function.\n return export_dir\n time.sleep(1)\n attempts += 1\n tf.logging.warn(\n 'Export directory {} already exists; retrying (attempt {}/{})'.format(\n export_dir, attempts, MAX_DIRECTORY_CREATION_ATTEMPTS))\n raise RuntimeError('Failed to obtain a unique export directory name after '\n '{} attempts.'.format(MAX_DIRECTORY_CREATION_ATTEMPTS))\n\n\ndef _get_temp_export_dir(timestamped_export_dir):\n \"\"\"Builds a directory name based on the argument but starting with 'temp-'.\n\n This relies on the fact that TensorFlow Serving ignores subdirectories of\n the base directory that can't be parsed as integers.\n\n Args:\n timestamped_export_dir: the name of the eventual export directory, e.g.\n /foo/bar/<timestamp>\n\n Returns:\n A sister directory prefixed with 'temp-', e.g. /foo/bar/temp-<timestamp>.\n \"\"\"\n (dirname, basename) = os.path.split(timestamped_export_dir)\n temp_export_dir = os.path.join(\n compat.as_bytes(dirname), compat.as_bytes('temp-{}'.format(basename)))\n return temp_export_dir\n\n\ndef _encode_and_add_to_node_collection(collection_prefix,\n key,\n node):\n tf.add_to_collection('%s/%s' % (collection_prefix, encoding.KEY_SUFFIX),\n encoding.encode_key(key))\n tf.add_to_collection('%s/%s' % (collection_prefix, encoding.NODE_SUFFIX),\n encoding.encode_tensor_node(node))\n\n\ndef export_eval_savedmodel(\n estimator,\n export_dir_base,\n eval_input_receiver_fn,\n checkpoint_path = None):\n \"\"\"Export a EvalSavedModel for the given estimator.\n\n Args:\n estimator: Estimator to export the graph for.\n export_dir_base: Base path for export. Graph will be exported into a\n subdirectory of this base path.\n eval_input_receiver_fn: Eval input receiver function.\n checkpoint_path: Path to a specific checkpoint to export. If set to None,\n exports the latest checkpoint.\n\n Returns:\n Path to the directory where the eval graph was exported.\n\n Raises:\n ValueError: Could not find a checkpoint to export.\n \"\"\"\n with tf.Graph().as_default() as g:\n eval_input_receiver = eval_input_receiver_fn()\n tf.train.create_global_step(g)\n tf.set_random_seed(estimator.config.tf_random_seed)\n\n # Workaround for TensorFlow issue #17568. 
Note that we pass the\n # identity-wrapped features and labels to model_fn, but we have to feed\n # the non-identity wrapped Tensors during evaluation.\n #\n # Also note that we can't wrap predictions, so metrics that have control\n # dependencies on predictions will cause the predictions to be recomputed\n # during their evaluation.\n wrapped_features = util.wrap_tensor_or_dict_of_tensors_in_identity(\n eval_input_receiver.features)\n wrapped_labels = util.wrap_tensor_or_dict_of_tensors_in_identity(\n eval_input_receiver.labels)\n\n if isinstance(estimator, tf.estimator.Estimator):\n # This is a core estimator\n estimator_spec = estimator.model_fn(\n features=wrapped_features,\n labels=wrapped_labels,\n mode=tf.estimator.ModeKeys.EVAL,\n config=estimator.config)\n else:\n # This is a contrib estimator\n model_fn_ops = estimator._call_model_fn( # pylint: disable=protected-access\n features=wrapped_features,\n labels=wrapped_labels,\n mode=tf.estimator.ModeKeys.EVAL)\n estimator_spec = lambda x: None\n estimator_spec.predictions = model_fn_ops.predictions\n estimator_spec.eval_metric_ops = model_fn_ops.eval_metric_ops\n estimator_spec.scaffold = model_fn_ops.scaffold\n\n # Save metric using eval_metric_ops.\n for user_metric_key, (value_op, update_op) in (\n estimator_spec.eval_metric_ops.items()):\n tf.add_to_collection('%s/%s' % (encoding.METRICS_COLLECTION,\n encoding.KEY_SUFFIX),\n encoding.encode_key(user_metric_key))\n tf.add_to_collection('%s/%s' % (encoding.METRICS_COLLECTION,\n encoding.VALUE_OP_SUFFIX),\n encoding.encode_tensor_node(value_op))\n tf.add_to_collection('%s/%s' % (encoding.METRICS_COLLECTION,\n encoding.UPDATE_OP_SUFFIX),\n encoding.encode_tensor_node(update_op))\n\n # Save all prediction nodes.\n # Predictions can either be a Tensor, or a dict of Tensors.\n predictions = estimator_spec.predictions\n if not isinstance(predictions, dict):\n predictions = {encoding.DEFAULT_PREDICTIONS_DICT_KEY: predictions}\n\n for prediction_key, prediction_node in predictions.items():\n _encode_and_add_to_node_collection(encoding.PREDICTIONS_COLLECTION,\n prediction_key, prediction_node)\n\n ############################################################\n ## Features, label (and weight) graph\n\n # Placeholder for input example to label graph.\n tf.add_to_collection(encoding.INPUT_EXAMPLE_COLLECTION,\n encoding.encode_tensor_node(\n eval_input_receiver.receiver_tensors['examples']))\n\n # Save all label nodes.\n # Labels can either be a Tensor, or a dict of Tensors.\n labels = eval_input_receiver.labels\n if not isinstance(labels, dict):\n labels = {encoding.DEFAULT_LABELS_DICT_KEY: labels}\n\n for label_key, label_node in labels.items():\n _encode_and_add_to_node_collection(encoding.LABELS_COLLECTION, label_key,\n label_node)\n\n # Save features.\n for feature_name, feature_node in eval_input_receiver.features.items():\n _encode_and_add_to_node_collection(encoding.FEATURES_COLLECTION,\n feature_name, feature_node)\n\n ############################################################\n ## Export as normal\n\n if not checkpoint_path:\n checkpoint_path = tf.train.latest_checkpoint(estimator.model_dir)\n if not checkpoint_path:\n raise ValueError(\n 'Could not find trained model at %s.' 
% estimator.model_dir)\n\n export_dir = _get_timestamped_export_dir(export_dir_base)\n temp_export_dir = _get_temp_export_dir(export_dir)\n\n if estimator.config.session_config is None:\n session_config = config_pb2.ConfigProto(allow_soft_placement=True)\n else:\n session_config = estimator.config.session_config\n\n with tf.Session(config=session_config) as session:\n if estimator_spec.scaffold and estimator_spec.scaffold.saver:\n saver_for_restore = estimator_spec.scaffold.saver\n else:\n saver_for_restore = tf.train.Saver(sharded=True)\n saver_for_restore.restore(session, checkpoint_path)\n\n if estimator_spec.scaffold and estimator_spec.scaffold.local_init_op:\n local_init_op = estimator_spec.scaffold.local_init_op\n else:\n local_init_op = tf.train.Scaffold._default_local_init_op()\n # pylint: enable=protected-access\n\n # Perform the export\n builder = tf.saved_model.builder.SavedModelBuilder(temp_export_dir)\n builder.add_meta_graph_and_variables(\n session,\n [tf.saved_model.tag_constants.SERVING],\n # Don't export any signatures, since this graph is not actually\n # meant for serving.\n signature_def_map=None,\n assets_collection=tf.get_collection(tf.GraphKeys.ASSET_FILEPATHS),\n legacy_init_op=local_init_op)\n builder.save(False)\n\n gfile.Rename(temp_export_dir, export_dir)\n return export_dir\n\n\ndef make_export_strategy(\n eval_input_receiver_fn,\n exports_to_keep = 5):\n \"\"\"Create an ExportStrategy for EvalSavedModel.\n\n Note: The strip_default_attrs is not used for EvalSavedModel export. And\n writing the EvalSavedModel proto in text format is not supported for now.\n\n Args:\n eval_input_receiver_fn: Eval input receiver function.\n exports_to_keep: Number of exports to keep. Older exports will be\n garbage-collected. Defaults to 5. Set to None to disable garbage\n collection.\n\n Returns:\n An ExportStrategy for EvalSavedModel that can be passed to the\n tf.contrib.learn.Experiment constructor.\n \"\"\"\n\n def export_fn(estimator,\n export_dir_base,\n checkpoint_path=None,\n strip_default_attrs=False):\n del strip_default_attrs\n export_dir = export_eval_savedmodel(\n estimator=estimator,\n export_dir_base=export_dir_base,\n eval_input_receiver_fn=eval_input_receiver_fn,\n checkpoint_path=checkpoint_path)\n tf.contrib.learn.utils.saved_model_export_utils.garbage_collect_exports(\n export_dir_base, exports_to_keep)\n return export_dir\n\n return tf.contrib.export_strategy.ExportStrategy('TFMA', export_fn)\n"
] | [
[
"tensorflow.core.example.example_pb2.Example"
],
[
"tensorflow.contrib.learn.utils.saved_model_export_utils.garbage_collect_exports",
"tensorflow.Graph",
"tensorflow.train.latest_checkpoint",
"tensorflow.get_collection",
"tensorflow.python.platform.gfile.Rename",
"tensorflow.saved_model.builder.SavedModelBuilder",
"tensorflow.python.util.compat.as_bytes",
"tensorflow.contrib.export_strategy.ExportStrategy",
"tensorflow.python.platform.gfile.Exists",
"tensorflow.Session",
"tensorflow.set_random_seed",
"tensorflow.train.create_global_step",
"tensorflow.train.Saver",
"tensorflow.core.protobuf.config_pb2.ConfigProto",
"tensorflow.train.Scaffold._default_local_init_op"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
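The export helpers in the tensorflow_model_analysis record above write each EvalSavedModel into a `temp-<timestamp>` sibling directory and rename it to the bare `<timestamp>` name only once the write finishes, so readers that expect integer-named subdirectories never see a partial export. A framework-free sketch of just that naming scheme; the base path is made up:

    import os
    import time

    def timestamped_export_dir(export_dir_base):
        # Seconds since epoch gives monotonically increasing version names.
        return os.path.join(export_dir_base, str(int(time.time())))

    def temp_export_dir(timestamped_dir):
        # A 'temp-' prefixed sibling is ignored by readers expecting integer names.
        dirname, basename = os.path.split(timestamped_dir)
        return os.path.join(dirname, 'temp-{}'.format(basename))

    final_dir = timestamped_export_dir('/tmp/eval_export')
    staging_dir = temp_export_dir(final_dir)
    print(staging_dir, '->', final_dir)
    # real code would write into staging_dir, then rename it to final_dir when done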
MDoid10111/EMNLP2020 | [
"97e4da06abc72873a4830cfa53c035a27eb3975b",
"97e4da06abc72873a4830cfa53c035a27eb3975b"
] | [
"torch_utils.py",
"matchzoo/utils/parse.py"
] | [
"import numpy as np\nimport torch, os\nimport torch.nn.utils.rnn as rnn_utils\nfrom typing import Tuple\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom PIL import Image\nimport torchvision\nfrom torchvision import transforms\n\n\ndef flatten(x):\n '''\n flatten high dimensional tensor x into an array\n :param x: shape (B, D1, D2, ...)\n :return: 1 dimensional tensor\n '''\n dims = x.size()[1:] #remove the first dimension as it is batch dimension\n num_features = 1\n for s in dims: num_features *= s\n return x.contiguous().view(-1, num_features)\n\n\ndef gpu(tensor, gpu=False):\n\n if gpu: return tensor.cuda()\n else: return tensor\n\n\ndef cpu(tensor):\n if tensor.is_cuda: return tensor.cpu()\n else: return tensor\n\n\ndef minibatch(*tensors, **kwargs):\n\n batch_size = kwargs['batch_size']\n\n if len(tensors) == 1:\n tensor = tensors[0]\n for i in range(0, len(tensor), batch_size):\n yield tensor[i:i + batch_size]\n else:\n for i in range(0, len(tensors[0]), batch_size):\n yield tuple(x[i:i + batch_size] for x in tensors)\n\n\ndef shuffle(*arrays, **kwargs):\n \"\"\"This is not an inplace operation. Therefore, you can shuffle without worrying changing data.\"\"\"\n if len(set(len(x) for x in arrays)) != 1:\n raise ValueError('All inputs to shuffle must have '\n 'the same length.')\n\n shuffle_indices = np.arange(len(arrays[0]))\n np.random.shuffle(shuffle_indices) # fix this for reproducible\n\n if len(arrays) == 1:\n return arrays[0][shuffle_indices]\n else:\n return tuple(x[shuffle_indices] for x in arrays)\n\n\ndef assert_no_grad(variable):\n\n if variable.requires_grad:\n raise ValueError(\n \"nn criterions don't compute the gradient w.r.t. targets - please \"\n \"mark these variables as volatile or not requiring gradients\"\n )\n\n\ndef numpy2tensor(x, dtype):\n # torch.tensor(torch.from_numpy(var), dtype = torch.int, torch.long)\n return torch.tensor(torch.from_numpy(x), dtype = dtype)\n\n\ndef tensor2numpy(x):\n # return x.numpy()\n return cpu(x).numpy()\n\n\ndef set_seed(seed, cuda=False):\n\n torch.manual_seed(seed)\n if cuda: torch.cuda.manual_seed(seed)\n\n\ndef create_mask_tensor(query: torch.Tensor, doc: torch.Tensor, threshold: int = 0):\n \"\"\"\n Creating masking of two tensor. These two tensors are integer tensor\n Parameters\n\n ----------\n query: (B, L)\n doc: (B, R)\n threshold: when it is 0, means we ignore padding tokens. when it is 1, it means we ignore <unk> or oov words\n Returns\n -------\n\n \"\"\"\n assert query.size(0) == doc.size(0)\n assert len(query.size()) == 2 and len(doc.size()) == 2\n query_mask = query > threshold\n doc_mask = doc > threshold\n query_mask = query_mask.unsqueeze(2) # (B, L, 1)\n doc_mask = doc_mask.unsqueeze(2) # (B, R, 1)\n doc_mask = doc_mask.permute(0, 2, 1) # (B, 1, R)\n\n mask_tensor = torch.bmm(query_mask.float(), doc_mask.float()) # (B, L, R)\n return mask_tensor # , torch.sum(query_mask, dim = 1).squeeze(), torch.sum(doc_mask, dim = 1).squeeze()\n\n\ndef create_mask_tensor_image(left_indices: torch.Tensor, right_indices: torch.Tensor, threshold: int = 0):\n \"\"\"\n Creating masking of two tensor. These two tensors are integer tensor\n Parameters\n\n ----------\n left_indices: (B1, n1, M1)\n right_indices: (B, n, M2)\n threshold: when it is 0, means we ignore padding tokens. 
when it is 1, it means we ignore <unk> or oov words\n Returns\n -------\n\n \"\"\"\n B1, n1, M1 = left_indices.size()\n B, n, M2 = right_indices.size()\n assert n1 == 1\n left_mask = left_indices > 0\n right_mask = right_indices > 0\n left_mask = left_mask.view(B1, M1, 1)\n if B1 == 1: left_mask = left_mask.expand(B, M1, 1) # during testing\n right_mask = right_mask.view(B, n * M2, 1)\n ans = torch.bmm(left_mask.float(), right_mask.permute(0, 2, 1).float())\n ans = ans.view(B, M1, n, M2).permute(0, 2, 1, 3) # (B, n, M1, M2)\n return ans\n\n\ndef count_parameters(model: nn.Module):\n return sum(p.numel() for p in model.parameters() if p.requires_grad)\n\n\ndef get_sorted_index_and_reverse_index(base_array: np.ndarray):\n \"\"\"\n We use sorted_index = np.argsort(-base_array) to find the indices to short the array decreasingly.\n We also need to find the indices to restoring the original order of elements of base_array\n after apply sorted_index.\n This method is important because we need to input the tensor to GRU/LSTM with packed sequence.\n Parameters\n ----------\n base_array: (B, )\n\n Returns\n -------\n\n \"\"\"\n assert type(base_array) == np.ndarray\n batch_size = base_array.shape[0]\n assert base_array.shape == (batch_size,)\n new_indices = np.argsort(-base_array)\n old_indices = np.arange(batch_size)\n r = np.stack([new_indices, old_indices], axis = 1)\n r = r[np.argsort(r[:, 0])]\n restoring_indices = r[:, 1] # the retoring indices. This method is tested very carefully.\n return new_indices, restoring_indices\n\n\ndef packing_sequence(seq: torch.Tensor, seq_lens: np.ndarray, new_index) -> torch.Tensor:\n \"\"\"\n Prepare a packed sequence to input to an RNN. It is required that the length of sequences in `seq` must be sorted.\n After\n\n Parameters\n ----------\n seq: (B, L, D) where L is length of sequence\n seq_lens: (B, )\n new_index: (B, ) this index is used to make sequence lengths sorted\n old_index: (B, ) this index is used to restore the sequence lengths\n Returns\n -------\n\n \"\"\"\n return rnn_utils.pack_padded_sequence(seq[new_index], seq_lens[new_index], batch_first = True)\n\n\ndef torch_repeat_dim0(A: torch.tensor, n: int):\n \"\"\"\n Repeat tensor across a dimension\n Parameters\n ----------\n A\n axis\n\n Returns\n -------\n\n \"\"\"\n assert len(A.size()) == 3\n d1, d2, d3 = A.size()\n A = A.unsqueeze(0).transpose(0, 1).repeat(1, n, 1, 1).view(-1, d2, d3)\n assert A.size() == (n * d1, d2, d3)\n return A\n\n\ndef boolean_mask(target: torch.Tensor, mask: torch.Tensor):\n \"\"\"\n Mimick tf.boolean_mask\n Copied from https://discuss.pytorch.org/t/slicing-tensor-using-boolean-list/7354/3\n Parameters\n ----------\n target\n mask\n\n Returns\n -------\n\n \"\"\"\n x = mask == True\n # y=torch.arange(0,3)\n # x=torch.Tensor([True,False,True])==True\n # print(y[x])\n return target[x]\n\ndef torch_argsort(input, dim=None, descending=False):\n \"\"\"Returns the indices that sort a tensor along a given dimension in ascending\n order by value.\n This is the second value returned by :meth:`torch.sort`. 
See its documentation\n for the exact semantics of this method.\n Args:\n input (Tensor): the input tensor\n dim (int, optional): the dimension to sort along\n descending (bool, optional): controls the sorting order (ascending or descending)\n Example::\n >>> a = torch.randn(4, 4)\n >>> a\n tensor([[ 0.0785, 1.5267, -0.8521, 0.4065],\n [ 0.1598, 0.0788, -0.0745, -1.2700],\n [ 1.2208, 1.0722, -0.7064, 1.2564],\n [ 0.0669, -0.2318, -0.8229, -0.9280]])\n >>> torch.argsort(a, dim=1)\n tensor([[2, 0, 3, 1],\n [3, 2, 1, 0],\n [2, 1, 0, 3],\n [3, 2, 1, 0]])\n \"\"\"\n # copy from https://github.com/pytorch/pytorch/pull/9600/files\n if dim is None:\n return torch.sort(input, -1, descending)[1]\n return torch.sort(input, dim, descending)[1]\n\n\ndef _predict_process_ids(user_ids, item_ids, num_items, use_cuda):\n \"\"\"\n\n Parameters\n ----------\n user_ids\n item_ids\n num_items\n use_cuda\n\n Returns\n -------\n\n \"\"\"\n if item_ids is None:\n item_ids = np.arange(num_items, dtype=np.int64)\n\n if np.isscalar(user_ids):\n user_ids = np.array(user_ids, dtype=np.int64)\n\n user_ids = torch.from_numpy(user_ids.reshape(-1, 1).astype(np.int64))\n item_ids = torch.from_numpy(item_ids.reshape(-1, 1).astype(np.int64))\n if item_ids.size()[0] != user_ids.size(0):\n user_ids = user_ids.expand(item_ids.size())\n\n user_var = gpu(user_ids, use_cuda)\n item_var = gpu(item_ids, use_cuda)\n\n return user_var.squeeze(), item_var.squeeze()\n\n\ndef idf(total_docs: int, term_freq: int) -> float:\n \"\"\"compute inverse doc frequency. If a term appears at all docs, then, its value is low for discrimination.\n If a term does not show in any doc, then, we simply use set denominator to 1 => largest idf value \"\"\"\n assert term_freq <= total_docs, \"The number of documents that contain a term must be smaller than total_docs\"\n return np.log((1.0 + total_docs) / float(term_freq + 1.0)) + 1.0\n\n\ndef moving_average(input_tensor: torch.Tensor, window_size: int, dimension: int):\n \"\"\"\n\n Parameters\n ----------\n input_tensor: torch.Tensor of shape (B, L, D)\n window_size: sliding windows size\n dimension: dimension we want to apply sliding window\n\n Returns\n -------\n\n \"\"\"\n ret = torch.cumsum(input_tensor, dim = dimension)\n # print(\"Here:\", ret, ret.shape)\n ret[:, window_size:] = ret[:, window_size:] - ret[:, :-window_size]\n return ret[:, window_size - 1:] / window_size\n\n\ndef cosine_distance(a: torch.Tensor, b: torch.Tensor):\n \"\"\"\n Compute the cosine distance between two tensors. This implementation saves a lot of memory since\n memory complexity is O(B x L x R)\n Parameters\n ----------\n a: `torch.Tensor` shape (B, L, D)\n b: `torch.Tensor` shape (B, R, D)\n\n Returns\n -------\n\n \"\"\"\n assert len(a.size()) == len(b.size()) == 3\n A_square = (a * a).sum(dim = - 1) # B, L\n B_square = (b * b).sum(dim = -1) # B, R\n dot = torch.bmm(a, b.permute(0, 2, 1)) # B, L, R\n # added abs in case of negative, added 1e-10 to avoid nan gradient of sqrt\n return torch.sqrt(torch.abs(A_square.unsqueeze(-1) - 2 * dot + B_square.unsqueeze(1)) + 1e-10)\n\n\ndef l1_distance(a: torch.Tensor, b: torch.Tensor):\n \"\"\"\n Compute the l1 distance between two tensors. This implementation consumes a lot of memory since\n mem complexity is O(B x L x R x D) due to x - y. 
I tried many ways but this is the best thing I can do\n Parameters\n ----------\n a: `torch.Tensor` shape (B, L, D)\n b: `torch.Tensor` shape (B, R, D)\n\n Returns\n -------\n\n \"\"\"\n assert len(a.size()) == len(b.size()) == 3\n x = a.unsqueeze(2) # (B, L, 1, D)\n y = b.unsqueeze(1) # (B, 1, R, D)\n return torch.norm(x - y, p = 1, dim = -1)\n\n\ndef _get_doc_context_copacrr(doc: torch.Tensor, doc_mask: torch.Tensor, context_window_size: int) -> torch.Tensor:\n \"\"\"\n\n Parameters\n ----------\n doc: with shape (B, R, D)\n doc_mask: binary tensor that differentiate real tokens from padding tokens (B, R)\n\n Returns\n -------\n a tensor of shape (B, R, D) which indicates the context representation of each token in doc.\n We also reset padding tokens to zero since they have no context\n \"\"\"\n\n def moving_average(a: torch.Tensor, window_size: int, dimension: int):\n ret = torch.cumsum(a, dim = dimension)\n # print(\"Here:\", ret, ret.shape)\n ret[:, window_size:] = ret[:, window_size:] - ret[:, :-window_size]\n return ret[:, window_size - 1:] / window_size\n\n left = context_window_size // 2\n right = context_window_size - left - 1 # in case context windows is an even number then left=x//2, right=x-x//2\n y = F.pad(doc, (0, 0, left, right)) # (B, c/2 + R + c/2, D)\n document_context = moving_average(y, window_size = context_window_size, dimension = 1)\n document_context = document_context * doc_mask.unsqueeze(-1).float()\n return document_context\n\n\ndef init_weights(m):\n \"\"\"\n Copied from https://discuss.pytorch.org/t/how-are-layer-weights-and-biases-initialized-by-default/13073/3\n Examples:\n >>> w = nn.Linear(3, 4)\n >>> w.apply(init_weights)\n \"\"\"\n if type(m) == nn.Linear:\n nn.init.xavier_uniform_(m.weight)\n if hasattr(m.bias, \"data\"): m.bias.data.fill_(0)\n if isinstance(m, nn.Conv2d):\n torch.nn.init.xavier_uniform_(m.weight)\n if m.bias:\n torch.nn.init.xavier_uniform_(m.bias)\n\n\ndef auto_rnn(rnn_cell: nn.RNN, input_feats: torch.Tensor,\n lens: torch.Tensor, new_indices: torch.Tensor, restoring_indices: torch.Tensor, max_len: int):\n \"\"\"\n\n Parameters\n ----------\n rnn_cell : a rnn cell\n input_feats: `torch.Tensor` (B, L, D)\n lens: `torch.Tensor` (B, )\n new_indices: `torch.Tensor` (B, )\n restoring_indices: `torch.Tensor` (B, )\n max_len: int\n Returns\n -------\n\n \"\"\"\n return rnn_cell((input_feats, lens, new_indices, restoring_indices), max_len=max_len, return_h=False)[0]\n\n\ndef rnn_last_h(rnn_cell: nn.RNN, input_feats: torch.Tensor,\n lens: torch.Tensor, new_indices: torch.Tensor, restoring_indices: torch.Tensor, max_len: int):\n \"\"\"\n return the last hidden vectors of an RNN\n Parameters\n ----------\n rnn_cell : a rnn cell\n input_feats: `torch.Tensor` (B, L, D)\n lens: `torch.Tensor` (B, )\n new_indices: `torch.Tensor` (B, )\n restoring_indices: `torch.Tensor` (B, )\n max_len: int\n Returns\n -------\n\n \"\"\"\n return rnn_cell((input_feats, lens, new_indices, restoring_indices), max_len=max_len, return_h=True)[1]\n\n\ndef retrieve_elements_from_indices(tensor: torch.Tensor, indices: torch.Tensor):\n \"\"\"\n Copied from https://discuss.pytorch.org/t/pooling-using-idices-from-another-max-pooling/37209/4\n How does this work? 
(Checked\n Parameters\n ----------\n tensor: torch.Tensor shape B, C, L, R\n indices: torch.Tensor shape (B, C, L, R) the values are indices where the last two dimensions are flattened\n\n Returns\n -------\n\n \"\"\"\n flattened_tensor = tensor.flatten(start_dim=2)\n output = flattened_tensor.gather(dim=2, index=indices.flatten(start_dim=2)).view_as(indices)\n return output\n\n\ndata_transforms = transforms.Compose([\n transforms.Resize((224, 224)),\n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\n])\n\n\ndef load_images(infile):\n im = Image.open(infile).convert('RGB')\n return data_transforms(im)\n",
"import typing\n\nimport torch\nfrom torch import nn\nfrom torch import optim\n\nimport matchzoo\nfrom matchzoo.engine.base_metric import (\n BaseMetric, #RankingMetric, ClassificationMetric\n)\n\nactivation = nn.ModuleDict([\n ['relu', nn.ReLU()],\n ['hardtanh', nn.Hardtanh()],\n ['relu6', nn.ReLU6()],\n ['sigmoid', nn.Sigmoid()],\n ['tanh', nn.Tanh()],\n ['softmax', nn.Softmax()],\n ['softmax2d', nn.Softmax2d()],\n ['logsoftmax', nn.LogSoftmax()],\n ['elu', nn.ELU()],\n ['selu', nn.SELU()],\n # ['celu', nn.CELU()],\n ['hardshrink', nn.Hardshrink()],\n ['leakyrelu', nn.LeakyReLU()],\n ['logsigmoid', nn.LogSigmoid()],\n ['softplus', nn.Softplus()],\n ['softshrink', nn.Softshrink()],\n ['prelu', nn.PReLU()],\n ['softsign', nn.Softsign()],\n ['softmin', nn.Softmin()],\n ['tanhshrink', nn.Tanhshrink()],\n ['rrelu', nn.RReLU()],\n ['glu', nn.GLU()],\n])\n\nloss = nn.ModuleDict([\n ['l1', nn.L1Loss()],\n ['nll', nn.NLLLoss()],\n ['kldiv', nn.KLDivLoss()],\n ['mse', nn.MSELoss()],\n ['bce', nn.BCELoss()],\n ['bce_with_logits', nn.BCEWithLogitsLoss()],\n ['cosine_embedding', nn.CosineEmbeddingLoss()],\n # ['ctc', nn.CTCLoss()],\n ['hinge_embedding', nn.HingeEmbeddingLoss()],\n ['margin_ranking', nn.MarginRankingLoss()],\n ['multi_label_margin', nn.MultiLabelMarginLoss()],\n ['multi_label_soft_margin', nn.MultiLabelSoftMarginLoss()],\n ['multi_margin', nn.MultiMarginLoss()],\n ['smooth_l1', nn.SmoothL1Loss()],\n ['soft_margin', nn.SoftMarginLoss()],\n ['cross_entropy', nn.CrossEntropyLoss()],\n ['triplet_margin', nn.TripletMarginLoss()],\n ['poisson_nll', nn.PoissonNLLLoss()]\n])\n\noptimizer = dict({\n 'adadelta': optim.Adadelta,\n 'adagrad': optim.Adagrad,\n 'adam': optim.Adam,\n 'sparse_adam': optim.SparseAdam,\n 'adamax': optim.Adamax,\n 'asgd': optim.ASGD,\n 'lbfgs': optim.LBFGS,\n 'rmsprop': optim.RMSprop,\n 'rprop': optim.Rprop,\n 'sgd': optim.SGD\n})\n\n\ndef _parse(\n identifier: typing.Union[str, typing.Type[nn.Module], nn.Module],\n dictionary: nn.ModuleDict,\n target: str\n) -> nn.Module:\n \"\"\"\n Parse loss and activation.\n\n :param identifier: activation identifier, one of\n - String: name of a activation\n - Torch Modele subclass\n - Torch Module instance (it will be returned unchanged).\n :param dictionary: nn.ModuleDict instance. 
Map string identifier to\n nn.Module instance.\n :return: A :class:`nn.Module` instance\n \"\"\"\n if isinstance(identifier, str):\n if identifier in dictionary:\n return dictionary[identifier]\n else:\n raise ValueError(\n 'Could not interpret %s identifier: ' % target + str(identifier)\n )\n elif isinstance(identifier, nn.Module):\n return identifier\n elif issubclass(identifier, nn.Module):\n return identifier()\n else:\n raise ValueError(\n 'Could not interpret %s identifier: ' % (target) + str(identifier)\n )\n\n\ndef parse_activation(\n identifier: typing.Union[str, typing.Type[nn.Module], nn.Module]\n) -> nn.Module:\n \"\"\"\n Retrieves a torch Module instance.\n\n :param identifier: activation identifier, one of\n - String: name of a activation\n - Torch Modele subclass\n - Torch Module instance (it will be returned unchanged).\n :return: A :class:`nn.Module` instance\n\n Examples::\n >>> from torch import nn\n >>> from matchzoo.utils import parse_activation\n\n Use `str` as activation:\n >>> activation = parse_activation('relu')\n >>> type(activation)\n <class 'torch.nn.modules.activation.ReLU'>\n\n Use :class:`torch.nn.Module` subclasses as activation:\n >>> type(parse_activation(nn.ReLU))\n <class 'torch.nn.modules.activation.ReLU'>\n\n Use :class:`torch.nn.Module` instances as activation:\n >>> type(parse_activation(nn.ReLU()))\n <class 'torch.nn.modules.activation.ReLU'>\n\n \"\"\"\n\n return _parse(identifier, activation, 'activation')\n\n\ndef parse_loss(\n identifier: typing.Union[str, typing.Type[nn.Module], nn.Module],\n task: typing.Optional[str] = None\n) -> nn.Module:\n \"\"\"\n Retrieves a torch Module instance.\n\n :param identifier: loss identifier, one of\n - String: name of a loss\n - Torch Module subclass\n - Torch Module instance (it will be returned unchanged).\n :param task: Task type for determining specific loss.\n :return: A :class:`nn.Module` instance\n\n Examples::\n >>> from torch import nn\n >>> from matchzoo.utils import parse_loss\n\n Use `str` as loss:\n >>> loss = parse_loss('mse')\n >>> type(loss)\n <class 'torch.nn.modules.loss.MSELoss'>\n\n Use :class:`torch.nn.Module` subclasses as loss:\n >>> type(parse_loss(nn.MSELoss))\n <class 'torch.nn.modules.loss.MSELoss'>\n\n Use :class:`torch.nn.Module` instances as loss:\n >>> type(parse_loss(nn.MSELoss()))\n <class 'torch.nn.modules.loss.MSELoss'>\n\n \"\"\"\n return _parse(identifier, loss, 'loss')\n\n\ndef _parse_metric(\n metric: typing.Union[str, typing.Type[BaseMetric], BaseMetric],\n Metrix: typing.Type[BaseMetric]\n) -> BaseMetric:\n \"\"\"\n Parse metric.\n\n :param metrc: Input metric in any form.\n :param Metrix: Base Metric class. Either\n :class:`matchzoo.engine.base_metric.RankingMetric` or\n :class:`matchzoo.engine.base_metric.ClassificationMetric`.\n :return: A :class:`BaseMetric` instance\n \"\"\"\n if isinstance(metric, str):\n metric = metric.lower() # ignore case\n for subclass in Metrix.__subclasses__():\n if metric == subclass.ALIAS or metric in subclass.ALIAS:\n return subclass()\n elif isinstance(metric, Metrix):\n return metric\n elif issubclass(metric, Metrix):\n return metric()\n raise ValueError('%s can not be used in current task.' 
% metric)\n\n\ndef parse_metric(\n metric: typing.Union[str, typing.Type[BaseMetric], BaseMetric],\n task: str\n) -> BaseMetric:\n \"\"\"\n Parse input metric in any form into a :class:`BaseMetric` instance.\n\n :param metric: Input metric in any form.\n :param task: Task type for determining specific metric.\n :return: A :class:`BaseMetric` instance\n\n Examples::\n >>> from matchzoo import metrics\n >>> from matchzoo.utils import parse_metric\n\n Use `str` as MatchZoo metrics:\n >>> mz_metric = parse_metric('map', 'ranking')\n >>> type(mz_metric)\n <class 'matchzoo.metrics.mean_average_precision.MeanAveragePrecision'>\n\n Use :class:`matchzoo.engine.BaseMetric` subclasses as MatchZoo metrics:\n >>> type(parse_metric(metrics.AveragePrecision, 'ranking'))\n <class 'matchzoo.metrics.average_precision.AveragePrecision'>\n\n Use :class:`matchzoo.engine.BaseMetric` instances as MatchZoo metrics:\n >>> type(parse_metric(metrics.AveragePrecision(), 'ranking'))\n <class 'matchzoo.metrics.average_precision.AveragePrecision'>\n\n \"\"\"\n if task is None:\n raise ValueError(\n 'Should specify one `BaseTask`.'\n )\n if task == 'ranking':\n return _parse_metric(metric, RankingMetric)\n if task == 'classification':\n return _parse_metric(metric, ClassificationMetric)\n else:\n raise ValueError(\n 'Should be a Ranking or Classification task.'\n )\n\n\ndef parse_optimizer(\n identifier: typing.Union[str, typing.Type[optim.Optimizer]],\n) -> optim.Optimizer:\n \"\"\"\n Parse input metric in any form into a :class:`Optimizer` class.\n\n :param optimizer: Input optimizer in any form.\n :return: A :class:`Optimizer` class\n\n Examples::\n >>> from torch import optim\n >>> from matchzoo.utils import parse_optimizer\n\n Use `str` as optimizer:\n >>> parse_optimizer('adam')\n <class 'torch.optim.adam.Adam'>\n\n Use :class:`torch.optim.Optimizer` subclasses as optimizer:\n >>> parse_optimizer(optim.Adam)\n <class 'torch.optim.adam.Adam'>\n\n \"\"\"\n if isinstance(identifier, str):\n identifier = identifier.lower() # ignore case\n if identifier in optimizer:\n return optimizer[identifier]\n else:\n raise ValueError(\n 'Could not interpret optimizer identifier: ' + str(identifier)\n )\n elif issubclass(identifier, optim.Optimizer):\n return identifier\n else:\n raise ValueError(\n 'Could not interpret optimizer identifier: ' + str(identifier)\n )\n"
] | [
[
"torch.norm",
"torch.cuda.manual_seed",
"torch.manual_seed",
"numpy.arange",
"torch.from_numpy",
"numpy.stack",
"numpy.random.shuffle",
"torch.nn.utils.rnn.pack_padded_sequence",
"torch.sort",
"numpy.isscalar",
"torch.nn.init.xavier_uniform_",
"numpy.argsort",
"numpy.array",
"torch.cumsum",
"torch.nn.functional.pad"
],
[
"torch.nn.Softmax",
"torch.nn.Hardshrink",
"torch.nn.GLU",
"torch.nn.ELU",
"torch.nn.BCEWithLogitsLoss",
"torch.nn.L1Loss",
"torch.nn.MultiMarginLoss",
"torch.nn.CrossEntropyLoss",
"torch.nn.Softplus",
"torch.nn.MultiLabelSoftMarginLoss",
"torch.nn.Softsign",
"torch.nn.Sigmoid",
"torch.nn.LogSigmoid",
"torch.nn.SELU",
"torch.nn.SoftMarginLoss",
"torch.nn.SmoothL1Loss",
"torch.nn.Tanhshrink",
"torch.nn.ReLU6",
"torch.nn.LogSoftmax",
"torch.nn.NLLLoss",
"torch.nn.PReLU",
"torch.nn.Softmax2d",
"torch.nn.BCELoss",
"torch.nn.Softmin",
"torch.nn.LeakyReLU",
"torch.nn.CosineEmbeddingLoss",
"torch.nn.Hardtanh",
"torch.nn.MarginRankingLoss",
"torch.nn.KLDivLoss",
"torch.nn.Softshrink",
"torch.nn.HingeEmbeddingLoss",
"torch.nn.MultiLabelMarginLoss",
"torch.nn.Tanh",
"torch.nn.RReLU",
"torch.nn.PoissonNLLLoss",
"torch.nn.TripletMarginLoss",
"torch.nn.ReLU",
"torch.nn.MSELoss"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
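The utility file in the record above pairs get_sorted_index_and_reverse_index with packing_sequence so that variable-length batches can be fed to pack_padded_sequence and later restored to their original order. The short numpy sketch below (lengths are made up) replays that index arithmetic and checks that the restoring index really undoes the sort.

    import numpy as np

    lens = np.array([3, 7, 5])                # per-example sequence lengths
    new_indices = np.argsort(-lens)           # order that sorts lengths descending
    old_indices = np.arange(len(lens))
    r = np.stack([new_indices, old_indices], axis=1)
    r = r[np.argsort(r[:, 0])]
    restoring_indices = r[:, 1]               # permutation that undoes the sort
    sorted_lens = lens[new_indices]           # [7, 5, 3], the order pack_padded_sequence expects
    assert (sorted_lens[restoring_indices] == lens).all()

Applying restoring_indices to the padded RNN output puts the batch back into its original order, which is why both index arrays are returned together.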
kjarczak/balticlsc_module | [
"d104c66fbfeb2147e8a40a0fa5170326843854c5"
] | [
"examples/face_recogniser/content/processing.py"
] | [
"import os\nfrom typing import List, Tuple, Dict\n\nimport face_recognition\n\nfrom matplotlib import pyplot, patches\n\nfrom PIL import Image\n\nimport numpy as np\n\nfrom balticlsc.access.ftp import upload_file, get_connection\nfrom balticlsc.configs.credential.ftp import FTPCredential\nfrom balticlsc.scheme.api import init_baltic_api\nfrom balticlsc.scheme.logger import logger\nfrom balticlsc.scheme.pin import Pin, MissingPin, PinAttribute, ValuesAttribute\nfrom balticlsc.scheme.processing import ProcessingInterface\nfrom balticlsc.scheme.utils import camel_to_snake, get_random_output_folder\n\nMODULE_VERSION = 'latest'\n\n\nclass Processing(ProcessingInterface):\n def process(self, msg_uid: str, input_pin: Pin, output_pin_name_to_value: Dict[str, Pin]) -> None:\n logger.info('module version = ' + MODULE_VERSION)\n logger.info('starting processing for input pin=\"' + str(input_pin) + '\"')\n input_access_credential = input_pin.getattr(PinAttribute.ACCESS_CREDENTIAL)\n input_folder = input_pin.getattr(PinAttribute.ACCESS_PATH)\n\n if input_access_credential is None:\n raise ValueError(f'missing access credential in the input pin={str(input_pin)}')\n\n if input_folder is None:\n raise ValueError(f'missing access path in the input pin={str(input_pin)}')\n\n input_ftp_credential = FTPCredential(**input_access_credential)\n # START # Establish the output access credential and folder # START #\n output_pin_name: str = 'Output'\n\n if output_pin_name not in output_pin_name_to_value:\n error_msg = 'missing pin with name=\"' + output_pin_name + '\" in output pins config'\n logger.error(error_msg)\n raise MissingPin([pin for pin in output_pin_name_to_value.values()], error_msg)\n\n output_pin = output_pin_name_to_value[output_pin_name]\n logger.info('loading output pin=' + str(output_pin))\n output_access_credential = output_pin.getattr(PinAttribute.ACCESS_CREDENTIAL)\n\n if output_access_credential is None:\n logger.info('output pin access credentials is None, using input access credentials')\n output_ftp_credential = input_ftp_credential\n else:\n output_access_credential = {camel_to_snake(key): value for key, value in output_access_credential.items()}\n\n if str(output_access_credential) == str(input_access_credential):\n logger.info('input and output access credential are the same')\n output_ftp_credential = input_ftp_credential\n else:\n output_ftp_credential = FTPCredential(**output_access_credential)\n\n output_access_path = output_pin.getattr(PinAttribute.ACCESS_PATH)\n\n if output_access_path is None:\n logger.info('access path is not provided in output config')\n logger.info('setting random generated string as output folder name')\n output_folder = get_random_output_folder(input_folder)\n else:\n output_access_path = {camel_to_snake(key): value for key, value in output_access_path.items()}\n\n if 'resource_path' not in output_access_path:\n logger.info('missing \"resource_path\" value in output access path')\n logger.info('setting random generated string as output folder name')\n output_folder = get_random_output_folder(input_folder)\n else:\n output_folder = output_access_path['resource_path']\n logger.info('setting output folder based on output pin config \"resource_path\"=' + output_folder)\n # STOP # Establish output credentials and folder # STOP #\n logger.info('connecting to input ftp server: ' + input_ftp_credential.host)\n input_ftp = get_connection(input_ftp_credential)\n\n if output_ftp_credential != input_ftp_credential:\n logger.info('connecting to output ftp server: ' + 
output_ftp_credential.host)\n output_ftp = get_connection(output_ftp_credential)\n else:\n logger.info('using the same connection as output ftp')\n output_ftp = input_ftp\n # START # process and send files # START #\n logger.info('changing ftp working directory to \"' + input_folder + '\"')\n input_ftp.cwd(input_folder)\n logger.info('working directory changed')\n logger.info('listing files in the working directory ...')\n filenames: List[str] = input_ftp.nlst()\n logger.info('handling ' + str(len(filenames)) + ' files')\n os.makedirs('tmp', exist_ok=True)\n\n for filename in filenames:\n if not filename.lower().endswith(('.png', '.jpg', '.jpeg', '.tiff', '.bmp', '.gif')):\n logger.warning('wrong format of the file \"' + filename + '\", omitting')\n continue\n\n logger.info('downloading file \"' + filename + '\"')\n filepath = 'tmp/' + filename\n # Save the image locally\n with open(filepath, 'wb') as file:\n input_ftp.retrbinary(\"RETR \" + filename, file.write)\n # Mark faces and save the image\n image = np.array(Image.open(filepath))\n im = Image.fromarray(image)\n im.save(filepath)\n height: int = image.shape[0]\n width: int = image.shape[1]\n dpi: int = 100\n faces_coords: List[Tuple[int]] = face_recognition.face_locations(image)\n figure = pyplot.figure(frameon=False, dpi=dpi)\n figure.set_size_inches(width / dpi, height / dpi)\n ax = pyplot.Axes(figure, [0., 0., 1., 1.])\n ax.set_axis_off()\n figure.add_axes(ax)\n ax.imshow(image)\n logger.info('adding ' + str(len(faces_coords)) + ' faces to image \"' + filename + '\"')\n fig = pyplot.gcf()\n fig.savefig(fname=filepath, dpi=dpi, bbox_inches='tight')\n\n for index in range(len(faces_coords)):\n x_start = faces_coords[index][3]\n y_start = faces_coords[index][0]\n x_width = (faces_coords[index][1] - faces_coords[index][3])\n y_height = (faces_coords[index][2] - faces_coords[index][0])\n rect = patches.Rectangle((x_start, y_start), x_width, y_height,\n edgecolor='r', facecolor=\"none\")\n ax.add_patch(rect)\n\n pyplot.savefig(fname=filepath, dpi=dpi, bbox_inches='tight')\n pyplot.close()\n # Send file to ftp\n with open(filepath, 'rb') as file:\n logger.info('uploading file \"' + filename + '\" into ' + output_folder)\n upload_file(filename, output_folder, output_ftp, file)\n file.close() # close file and FTP\n\n input_ftp.cwd(input_folder)\n # STOP # process and send files # STOP #\n input_ftp.quit()\n\n if output_ftp_credential != input_ftp_credential:\n output_ftp.quit()\n\n rest_client.send_output_token(\n base_msg_uid=msg_uid,\n values={\n ValuesAttribute.RESOURCE_PATH: output_folder\n },\n output_pin_name=output_pin.getattr(PinAttribute.NAME))\n rest_client.send_ack_token(\n msg_uids=[msg_uid],\n is_final=True,\n is_failed=False,\n )\n\n\napp, rest_client = init_baltic_api(Processing)\n"
] | [
[
"matplotlib.patches.Rectangle",
"matplotlib.pyplot.Axes",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.gcf",
"matplotlib.pyplot.close",
"matplotlib.pyplot.figure"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
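The processing module in the kjarczak/balticlsc_module record draws face bounding boxes with matplotlib patches before re-uploading each image. The minimal sketch below isolates just that drawing step; the placeholder image and the single (top, right, bottom, left) tuple are stand-ins for a downloaded photo and for the output of face_recognition.face_locations.

    import numpy as np
    from matplotlib import pyplot, patches

    image = np.zeros((240, 320, 3), dtype=np.uint8)   # placeholder RGB image
    faces_coords = [(50, 200, 150, 100)]              # (top, right, bottom, left)

    figure, ax = pyplot.subplots(dpi=100)
    ax.set_axis_off()
    ax.imshow(image)
    for top, right, bottom, left in faces_coords:
        ax.add_patch(patches.Rectangle((left, top), right - left, bottom - top,
                                       edgecolor='r', facecolor='none'))
    pyplot.savefig('faces.png', dpi=100, bbox_inches='tight')
    pyplot.close()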
waterzxj/UNF | [
"5eda8e7c60116735f595f4b21b24547708b36cf5",
"5eda8e7c60116735f595f4b21b24547708b36cf5",
"5eda8e7c60116735f595f4b21b24547708b36cf5"
] | [
"UNF/training/metric.py",
"UNF/modules/embedding/embedding.py",
"UNF/models/predictor.py"
] | [
"#coding:utf-8\n\nimport torch\n\nfrom learner_util import get_ner_BIO\n\n\nclass Metric(object):\n def __call__(self,\n predictions,\n gold_labels,\n mask=None):\n \"\"\"\n metric的抽象类\n\n :params predictions 预测结果的tensor\n :params gold_labels 实际结果的tensor\n :mask mask\n \"\"\"\n raise NotImplementedError\n\n def get_metric(self, reset=False):\n \"\"\"\n 返回metric的指标\n \"\"\"\n raise NotImplementedError\n\n def reset(self):\n \"\"\"\n 重置内部状态\n \"\"\"\n raise NotImplementedError\n\n @staticmethod\n def unwrap_to_tensors(*tensors):\n \"\"\"\n 把tensor安全的copy到cpu进行操作,避免gpu的oom\n \"\"\"\n return (x.detach().cpu() if isinstance(x, torch.Tensor) else x for x in tensors)\n\n @classmethod\n def from_option(cls, conf):\n return cls(**conf)\n\n\nclass F1Measure(Metric):\n def __init__(self, positive_label):\n \"\"\"\n 准确率、召回率、F值的评价指标\n \"\"\"\n super(F1Measure, self).__init__()\n self._positive_label = positive_label\n self._true_positives = 0.0\n self._true_negatives = 0.0\n self._false_positives = 0.0\n self._false_negatives = 0.0\n \n def __call__(self,\n predictions,\n gold_labels,\n mask=None):\n predictions, gold_labels, mask = self.unwrap_to_tensors(predictions, gold_labels, mask)\n num_classes = predictions.size(-1)\n if (gold_labels >= num_classes).any():\n raise Exception(\"A gold label passed to F1Measure contains an id >= {}, \"\n \"the number of classes.\".format(num_classes))\n if mask is None:\n mask = torch.ones_like(gold_labels)\n mask = mask.float()\n gold_labels = gold_labels.float()\n\n self.update(predictions, gold_labels, mask)\n\n def update(self, predictions, gold_labels, mask):\n positive_label_mask = gold_labels.eq(self._positive_label).float()\n negative_label_mask = 1.0 - positive_label_mask\n\n argmax_predictions = predictions.max(-1)[1].float().squeeze(-1)\n\n # True Negatives: correct non-positive predictions.\n correct_null_predictions = (argmax_predictions !=\n self._positive_label).float() * negative_label_mask\n self._true_negatives += (correct_null_predictions.float() * mask).sum()\n\n # True Positives: correct positively labeled predictions.\n correct_non_null_predictions = (argmax_predictions ==\n self._positive_label).float() * positive_label_mask\n self._true_positives += (correct_non_null_predictions * mask).sum()\n\n # False Negatives: incorrect negatively labeled predictions.\n incorrect_null_predictions = (argmax_predictions !=\n self._positive_label).float() * positive_label_mask\n self._false_negatives += (incorrect_null_predictions * mask).sum()\n\n # False Positives: incorrect positively labeled predictions\n incorrect_non_null_predictions = (argmax_predictions ==\n self._positive_label).float() * negative_label_mask\n self._false_positives += (incorrect_non_null_predictions * mask).sum()\n\n def get_metric(self, reset=False):\n \"\"\"\n 返回准确率、召回率、F值评价指标\n \"\"\"\n # print('TP',self._true_positives,'TN',self._true_negatives,'FP',self._false_positives,'FN',self._false_negatives)\n\n precision = float(self._true_positives) / float(self._true_positives + self._false_positives + 1e-13)\n recall = float(self._true_positives) / float(self._true_positives + self._false_negatives + 1e-13)\n f1_measure = 2. 
* ((precision * recall) / (precision + recall + 1e-13))\n if reset:\n self.reset()\n return {\"precision\":precision, \"recall\": recall, \"f1_measure\":f1_measure}\n\n def reset(self):\n self._true_positives = 0.0\n self._true_negatives = 0.0\n self._false_positives = 0.0\n self._false_negatives = 0.0\n\n\nclass NerF1Measure(Metric):\n def __init__(self, label_vocab):\n self.golden_num = 0.0\n self.predict_num = 0.0\n self.right_num = 0.0\n self.label_vocab = label_vocab\n\n def reset(self):\n \"\"\"\n 重置内部状态\n \"\"\"\n self.golden_num = 0.0\n self.predict_num = 0.0\n self.right_num = 0.0\n\n def get_metric(self, reset=False):\n \"\"\"\n 返回metric的指标\n \"\"\"\n if self.predict_num == 0.0:\n precision = -1\n else:\n precision = (self.right_num+0.0)/self.predict_num\n\n if self.golden_num == 0.0:\n recall = -1\n else:\n recall = (self.right_num+0.0)/self.golden_num\n\n if (precision == -1) or (recall == -1) or (precision+recall) <= 0.:\n f_measure = -1\n else:\n f_measure = 2*precision*recall/(precision+recall)\n\n if reset:\n self.reset()\n\n return {\"precision\":precision, \"recall\": recall, \"f1_measure\":f_measure}\n\n def update(self, gold_matrix, pred_matrix):\n right_ner = list(set(gold_matrix).intersection(set(pred_matrix)))\n self.golden_num += len(gold_matrix)\n self.predict_num += len(pred_matrix)\n self.right_num += len(right_ner)\n\n def __call__(self,\n predictions,\n gold_labels,\n mask=None):\n \"\"\"\n metric的抽象类\n\n :params predictions 预测结果的tensor\n :params gold_labels 实际结果的tensor\n :mask mask\n \"\"\"\n batch_size = gold_labels.size(0)\n seq_len = gold_labels.size(1)\n predictions, gold_labels, mask = self.unwrap_to_tensors(predictions, gold_labels,\n mask)\n\n predictions = predictions.tolist()\n gold_labels = gold_labels.tolist()\n mask = mask.tolist()\n\n for idx in range(batch_size):\n pred = [self.label_vocab[predictions[idx][idy]] for idy in range(seq_len) if mask[idx][idy] != 0]\n gold = [self.label_vocab[gold_labels[idx][idy]] for idy in range(seq_len) if mask[idx][idy] != 0]\n\n\n gold_matrix = get_ner_BIO(gold)\n pred_matrix = get_ner_BIO(pred)\n self.update(gold_matrix, pred_matrix)\n\n\n\n\n\n",
"#coding:utf-8\n\"\"\"\nEmbedding类的抽象\n\"\"\"\nimport os\nimport sys\n\nimport torch\nfrom torch import nn\nimport torch.nn.functional as F\n\nfrom modules.module_util import init_tensor\nfrom modules.base_type import InitType, FAN_MODE, ActivationType\n\n\nclass BaseEmbedding(nn.Module):\n \"\"\"\n Emebdding类的基类\n :params dim int类型,embedding的维度大小\n :params vocab_size int类型\n :params device string or [string1, string2],计算的后端,默认是cpu\n :params init_type string, 初始化的计算方式 ,默认采用uniform初始化\n :params dropout float\n \"\"\"\n\n def __init__(self, dim, vocab_size,\n device=None, dropout=0.0):\n\n super(BaseEmbedding, self).__init__()\n self.dim = dim\n self.vocab_size = vocab_size\n self.device = device\n self.dropout = nn.Dropout(p=dropout)\n\n \n\n @classmethod\n def from_dict(cls, params):\n return cls(**params)\n\n def forward(self, input):\n raise Exception(\"BaseEmbedding forward method not implemented!\")\n\n\nclass TokenEmbedding(BaseEmbedding):\n def __init__(self, dim, vocab_size, device=None,\n dropout=0.0, \n init_type=InitType.XAVIER_NORMAL,\n low=0, high=1, mean=0, std=1,\n activation_type=ActivationType.NONE,\n fan_mode=FAN_MODE.FAN_IN, negative_slope=0\n ):\n \"\"\"\n Embedding类的基础类\n\n :params dim int类型,embedding的维度大小\n :params vocab_size int类型\n :params device string or [string1, string2],计算的后端,默认是cpu\n :params init_type string, 初始化的计算方式 ,默认采用uniform初始化\n :params dropout float\n \"\"\"\n super(TokenEmbedding, self).__init__(dim, vocab_size, device,\n dropout)\n\n self.embeddings = nn.Embedding(vocab_size, dim)\n embedding_lookup_table = init_tensor(tensor=torch.empty(vocab_size, dim),\n init_type=init_type, low=low, high=high, mean=mean, std=std,\n activation_type=activation_type, fan_mode=fan_mode, \n negative_slope=negative_slope)\n\n self.embeddings.weight.data.copy_(embedding_lookup_table)\n \n def forward(self, input):\n embedding = self.embeddings(input)\n return self.dropout(embedding)\n\n @classmethod\n def from_pretrained(cls, vectors, vocab_map=None):\n \"\"\"\n copy从dataloader每个域加载好的预训练的词向量\n\n :params vectors Vector类型\n \"\"\"\n if isinstance(path, (str)):\n raise Exception(\"Load embedding from path not implemented!\")\n \n self.embeddings.weight.data.copy_(vectors)\n\n\n \n\n\n",
"#coding:utf-8\nimport os \nimport json\n\nimport torch\nfrom torch import nn\nimport torch.nn.functional as F\n\nfrom models.model_util import Config\nfrom models.dpcnn import DpCnn\nfrom models.fasttext import FastText\nfrom models.leam import LEAM\nfrom models.self_attention import SelfAttention\nfrom models.textcnn import TextCnn\n\n\nclass Predictor(nn.Module):\n def __init__(self, model_save_path, device=None, model_type=None):\n super(Predictor, self).__init__()\n model_conf = os.path.join(model_save_path, \"conf.json\")\n vocab_path = os.path.join(model_save_path, \"vocab.txt\")\n target_path = os.path.join(model_save_path, \"target.txt\")\n\n self.model_type = model_type\n self.model = self.model_loader(Config.from_json_file(model_conf))\n self.model.load_state_dict(torch.load(os.path.join(model_save_path, \"best.th\")))\n self.model.eval()\n \n self.device = device\n if self.device is not None:\n self.model.to(device)\n\n self.vocab = self.load_vocab(vocab_path)\n self.target = self.load_vocab(target_path, reverse=True)\n\n def model_loader(self, conf):\n name = self.model_type.lower()\n if name == \"textcnn\":\n model = TextCnn(**conf.__dict__)\n elif name == \"fastext\":\n model = FastText(**conf.__dict__)\n elif name == \"dpcnn\":\n model = DpCnn(**conf.__dict__)\n elif name == \"leam\":\n model = LEAM(**conf.__dict__)\n elif name == \"self-attention\":\n model = SelfAttention(**conf.__dict__)\n else:\n raise Exception(\"name:%s model not implemented!\" % (name))\n\n return model\n\n def predict(self, input, **kwargs):\n input = input.split()\n input_ids = [self.vocab.get(item, 0) for item in input]\n\n input_ids = torch.LongTensor(input_ids)\n if self.device is not None:\n input_ids = input_ids.to(self.device)\n\n mask = (input_ids != 1).long()\n\n res = self.model.predict(input_ids, mask)\n res = res.detach().cpu().tolist()[0]\n return res\n\n def load_vocab(self, path, reverse=False):\n res = {}\n tmp = json.load(open(path))\n for index, word in enumerate(tmp):\n if reverse:\n res[index] = word\n else:\n res[word] = index\n return res\n"
] | [
[
"torch.ones_like"
],
[
"torch.nn.Dropout",
"torch.empty",
"torch.nn.Embedding"
],
[
"torch.LongTensor"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
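The F1Measure class in the waterzxj/UNF record accumulates TP, TN, FP and FN counts from argmax predictions and derives precision, recall and F1 with a 1e-13 smoothing term. The self-contained snippet below reproduces that counting on four toy two-class examples (positive label id 1) and lands on precision = recall = F1 = 0.5.

    import torch

    predictions = torch.tensor([[0.9, 0.1],   # pred 0, gold 0 -> TN
                                [0.2, 0.8],   # pred 1, gold 1 -> TP
                                [0.3, 0.7],   # pred 1, gold 0 -> FP
                                [0.6, 0.4]])  # pred 0, gold 1 -> FN
    gold = torch.tensor([0, 1, 0, 1]).float()
    positive = 1

    argmax = predictions.max(-1)[1].float()
    pos_mask = gold.eq(positive).float()
    neg_mask = 1.0 - pos_mask
    tp = ((argmax == positive).float() * pos_mask).sum()
    fp = ((argmax == positive).float() * neg_mask).sum()
    fn = ((argmax != positive).float() * pos_mask).sum()
    precision = tp / (tp + fp + 1e-13)
    recall = tp / (tp + fn + 1e-13)
    f1 = 2 * precision * recall / (precision + recall + 1e-13)
    print(precision.item(), recall.item(), f1.item())  # 0.5 0.5 0.5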
sibo/pysimm_tacticity | [
"cfb20851b26b87b736dbb6a2f4c4e7b668d680d5"
] | [
"pysimm/apps/random_walk.py"
] | [
"# ******************************************************************************\n# pysimm.apps.random_walk module\n# ******************************************************************************\n#\n# psuedo random walk algorithm written using pysimm tools\n#\n# ******************************************************************************\n# License\n# ******************************************************************************\n# The MIT License (MIT)\n#\n# Copyright (c) 2016 Michael E. Fortunato, Coray M. Colina\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n\n\nfrom time import strftime\nfrom itertools import permutations, izip\n\nimport numpy as np\n\nfrom pysimm import system, lmps, forcefield, calc\nfrom pysimm import error_print\n\n\ndef find_last_backbone_vector(s, m):\n \"\"\"pysimm.apps.random_walk.find_last_backbone_vector\n\n Finds vector between backbone atoms in terminal monomer. 
Requires current system s, and reference monomer m.\n\n Args:\n s: :class:`~pysimm.system.System` object\n m: :class:`~pysimm.system.System` object\n Returns:\n list of vector components\n \"\"\"\n head_pos = [0, 0, 0]\n tail_pos = [0, 0, 0]\n for p in s.particles[-1*m.particles.count:]:\n if p.linker == 'head':\n head_pos = [p.x, p.y, p.z]\n elif p.linker == 'tail':\n tail_pos = [p.x, p.y, p.z]\n return [head_pos[0] - tail_pos[0], head_pos[1] - tail_pos[1], head_pos[2] - tail_pos[2]]\n\n\ndef copolymer(m, nmon, s_=None, **kwargs):\n \"\"\"pysimm.apps.random_walk.copolymer\n\n Builds copolymer using random walk methodology using pattern\n\n Args:\n m: list of reference monomer :class:`~pysimm.system.System`s\n nmon: total number of monomers to add to chain\n s_: :class:`~pysimm.system.System` in which to build polymer chain (None)\n settings: dictionary of simulation settings\n density: density at which to build polymer (0.3)\n forcefield: :class:`~pysimm.forcefield.Forcefield` object to acquire new force field parameters\n capped: True/False if monomers are capped\n unwrap: True to unwrap final system\n traj: True to build xyz trajectory of polymer growth (True)\n pattern: list of pattern for monomer repeat units, should match length of m ([1 for _ in range(len(m))])\n limit: during MD, limit atomic displacement by this max value (LAMMPS ONLY)\n sim: :class:`~pysimm.lmps.Simulation` object for relaxation between polymer growth\n Returns:\n new copolymer :class:`~pysimm.system.System`\n \"\"\"\n m = [x.copy() for x in m]\n\n settings = kwargs.get('settings', {})\n density = kwargs.get('density', 0.3)\n f = kwargs.get('forcefield')\n capped = kwargs.get('capped')\n unwrap = kwargs.get('unwrap')\n traj = kwargs.get('traj', True)\n pattern = kwargs.get('pattern', [1 for _ in range(len(m))])\n limit = kwargs.get('limit', 0.1)\n sim = kwargs.get('sim')\n\n for m_ in m:\n m_.add_particle_bonding()\n for p in m_.particles:\n if p.type.name.find('@') >= 0 and p.type.name.split('@')[0].find('H'):\n p.linker = 'head'\n elif p.type.name.find('@') >= 0 and p.type.name.split('@')[0].find('T'):\n p.linker = 'tail'\n m_.remove_linker_types()\n\n if s_ is None:\n s = system.replicate(m[0], 1, density=density/nmon)\n else:\n s = system.replicate(m[0], 1, s_=s_, density=density/nmon)\n print('%s: %s/%s monomers added' % (strftime('%H:%M:%S'), 1, nmon))\n\n for p in s.particles:\n if p.linker == 'head':\n last_head = p\n\n elif p.linker == 'tail':\n last_tail = p\n\n for m_ in m:\n if capped:\n m_.particles.remove(1)\n m_.remove_spare_bonding()\n m_.add_particle_bonding()\n\n s.add_particle_bonding()\n \n if traj:\n s.write_xyz('random_walk.xyz')\n\n temp_nmon = 1\n\n while True:\n\n m_ = m.pop(0)\n m.append(m_)\n p_ = pattern.pop(0)\n pattern.append(p_)\n\n if temp_nmon == 1 and p_ == 1:\n m_ = m.pop(0)\n m.append(m_)\n p_ = pattern.pop(0)\n pattern.append(p_)\n elif temp_nmon == 1:\n p_ -= 1\n\n for insert in range(p_):\n\n head = None\n tail = None\n\n backbone_vector = np.array([last_head.x - last_tail.x,\n last_head.y - last_tail.y,\n last_head.z - last_tail.z])\n\n ref_head = None\n ref_tail = None\n for p in m_.particles:\n if p.linker == 'head':\n ref_head = p\n elif p.linker == 'tail':\n ref_tail = p\n if ref_head and ref_tail:\n ref_backbone_vector = np.array([ref_head.x - ref_tail.x,\n ref_head.y - ref_tail.y,\n ref_head.z - ref_tail.z])\n rot_matrix = calc.find_rotation(ref_backbone_vector, backbone_vector)\n m_.rotate(around=ref_tail, rot_matrix=rot_matrix)\n translation_vector = [last_tail.x - 
ref_tail.x,\n last_tail.y - ref_tail.y,\n last_tail.z - ref_tail.z]\n for p in m_.particles:\n p.x = p.x + translation_vector[0] + 3*backbone_vector[0]\n p.y = p.y + translation_vector[1] + 3*backbone_vector[1]\n p.z = p.z + translation_vector[2] + 3*backbone_vector[2]\n else:\n print('reference molecule has no head or tail')\n\n n = m_.copy()\n\n if capped:\n s.particles.remove(s.particles.count)\n s.remove_spare_bonding()\n s.add_particle_bonding()\n\n s.add(n, change_dim=False)\n\n s.add_particle_bonding()\n\n head = last_head\n for p in s.particles[-1*n.particles.count:]:\n if p.linker == 'tail':\n tail = p\n\n s.make_new_bonds(head, tail, f)\n temp_nmon += 1\n print('%s: %s/%s monomers added' % (strftime('%H:%M:%S'), temp_nmon, nmon))\n\n if unwrap:\n s.unwrap()\n \n if sim is None:\n sim = lmps.Simulation(s, name='relax_%03d' % (temp_nmon), log='relax.log', **settings)\n sim.add_md(ensemble='nve', limit=limit, **settings)\n sim.add_min(**settings)\n if isinstance(sim, lmps.Simulation):\n sim.system = s\n sim.name = 'relax_%03d' % (temp_nmon)\n sim.run(np=settings.get('np'))\n\n if unwrap:\n s.unwrap()\n\n if unwrap:\n s.wrap()\n\n for p in s.particles[-1*n.particles.count:]:\n if p.linker == 'head':\n last_head = p\n elif p.linker == 'tail':\n last_tail = p\n\n if temp_nmon >= nmon:\n break\n \n if unwrap:\n if not s.unwrap():\n error_print('something went wrong')\n return s\n \n if traj:\n s.write_xyz('random_walk.xyz', append=True)\n \n if unwrap:\n s.wrap()\n \n for p in s.particles:\n if p not in s.molecules[p.molecule.tag].particles:\n s.molecules[p.molecule.tag].particles.add(p)\n\n s.write_lammps('polymer.lmps')\n s.unwrap()\n s.write_xyz('polymer.xyz')\n\n return s\n\n\ndef random_walk(m, nmon, s_=None, **kwargs):\n \"\"\"pysimm.apps.random_walk.random_walk\n\n Builds homopolymer using random walk methodology\n\n Args:\n m: reference monomer :class:`~pysimm.system.System`\n nmon: total number of monomers to add to chain\n s_: :class:`~pysimm.system.System` in which to build polymer chain (None)\n extra_bonds: EXPERMINTAL, True if making ladder backbone polymer\n settings: dictionary of simulation settings\n density: density at which to build polymer (0.3)\n forcefield: :class:`~pysimm.forcefield.Forcefield` object to acquire new force field parameters\n capped: True/False if monomers are capped\n unwrap: True to unwrap final system\n traj: True to build xyz trajectory of polymer growth (True)\n limit: during MD, limit atomic displacement by this max value (LAMMPS ONLY)\n sim: :class:`~pysimm.lmps.Simulation` object for relaxation between polymer growth\n Returns:\n new polymer :class:`~pysimm.system.System`\n \"\"\"\n m = m.copy()\n\n extra_bonds = kwargs.get('extra_bonds', False)\n\n settings = kwargs.get('settings', {})\n density = kwargs.get('density', 0.3)\n f = kwargs.get('forcefield')\n capped = kwargs.get('capped')\n unwrap = kwargs.get('unwrap')\n traj = kwargs.get('traj', True)\n limit = kwargs.get('limit', 0.1)\n sim = kwargs.get('sim')\n\n m.add_particle_bonding()\n\n for p in m.particles:\n if p.type.name.find('@') >= 0 and p.type.name.split('@')[0].find('H'):\n p.linker = 'head'\n elif p.type.name.find('@') >= 0 and p.type.name.split('@')[0].find('T'):\n p.linker = 'tail'\n\n m.remove_linker_types()\n\n if s_ is None:\n s = system.replicate(m, 1, density=density/nmon)\n else:\n s = system.replicate(m, 1, s_=s_, density=None)\n print('%s: %s/%s monomers added' % (strftime('%H:%M:%S'), 1, nmon))\n\n if traj:\n s.write_xyz('random_walk.xyz')\n\n if capped:\n 
m.particles.remove(1)\n m.remove_spare_bonding()\n m.add_particle_bonding()\n\n for insertion in range(nmon - 1):\n\n head = None\n tail = None\n\n backbone_vector = np.array(find_last_backbone_vector(s, m))\n\n for p, p_ in izip(s.particles[-1*m.particles.count:], m.particles):\n p_.x = p.x + 3*backbone_vector[0]\n p_.y = p.y + 3*backbone_vector[1]\n p_.z = p.z + 3*backbone_vector[2]\n\n n = m.copy()\n\n if capped:\n s.particles.remove(s.particles.count)\n s.remove_spare_bonding()\n s.add_particle_bonding()\n\n if extra_bonds:\n heads = []\n for p in s.particles[-1*n.particles.count:]:\n if p.linker == 'head':\n heads.append(p)\n else:\n for p in s.particles[-1*n.particles.count:]:\n if p.linker == 'head':\n head = p\n\n s.add(n, change_dim=False)\n\n s.add_particle_bonding()\n\n if extra_bonds:\n tails = []\n for p in s.particles[-1*n.particles.count:]:\n if p.linker == 'tail':\n tails.append(p)\n else:\n for p in s.particles[-1*n.particles.count:]:\n if p.linker == 'tail':\n tail = p\n\n for p in s.particles:\n if not p.bonded_to:\n print(p.tag)\n\n if head and tail:\n s.make_new_bonds(head, tail, f)\n print('%s: %s/%s monomers added' % (strftime('%H:%M:%S'), insertion+2, nmon))\n elif extra_bonds and len(heads) == len(tails):\n for h, t in izip(heads, tails):\n s.make_new_bonds(h, t, f)\n print('%s: %s/%s monomers added' % (strftime('%H:%M:%S'), insertion+2, nmon))\n else:\n print('cannot find head and tail')\n\n if sim is None:\n sim = lmps.Simulation(s, name='relax_%03d' % (insertion+2), log='relax.log', **settings)\n sim.add_md(ensemble='nve', limit=limit, **settings)\n sim.add_min(**settings)\n if isinstance(sim, lmps.Simulation):\n sim.system = s\n sim.name = 'relax_%03d' % (insertion+2)\n sim.run(np=settings.get('np'))\n\n if unwrap:\n if not s.unwrap():\n error_print('something went wrong')\n return s\n\n if traj:\n s.write_xyz('random_walk.xyz', append=True)\n\n if unwrap:\n s.wrap()\n \n for p in s.particles:\n if p not in s.molecules[p.molecule.tag].particles:\n s.molecules[p.molecule.tag].particles.add(p)\n\n s.write_lammps('polymer.lmps')\n s.unwrap()\n s.write_xyz('polymer.xyz')\n\n return s\n"
] | [
[
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
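In the sibo/pysimm_tacticity record, each new monomer is positioned by translating a copy of the reference monomer so that its tail sits three backbone vectors beyond the current chain end (the copolymer branch adds translation_vector plus 3*backbone_vector to every particle). The plain numpy sketch below uses made-up coordinates to show that arithmetic in isolation.

    import numpy as np

    last_tail = np.array([0.0, 0.0, 0.0])
    last_head = np.array([1.5, 0.0, 0.0])
    backbone_vector = last_head - last_tail

    ref_tail = np.array([10.0, 10.0, 10.0])          # tail of the reference monomer copy
    monomer_xyz = np.array([[10.0, 10.0, 10.0],      # its tail ...
                            [11.5, 10.0, 10.0]])     # ... and head

    translation = last_tail - ref_tail
    placed = monomer_xyz + translation + 3 * backbone_vector
    print(placed)   # tail at [4.5, 0, 0], head at [6.0, 0, 0]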
llimeht/sasview | [
"d0c10746a2397c5021ed8bbc842ba99243a9b0ac"
] | [
"test/sascalculator/utest_sas_gen.py"
] | [
"\"\"\"\nUnit tests for the sas_gen\n\"\"\"\n\nimport os.path\nimport warnings\nwarnings.simplefilter(\"ignore\")\n\nimport unittest\nimport numpy as np\n\nfrom sas.sascalc.calculator import sas_gen\n\n\ndef find(filename):\n return os.path.join(os.path.dirname(__file__), 'data', filename)\n\n\nclass sas_gen_test(unittest.TestCase):\n\n def setUp(self):\n self.sldloader = sas_gen.SLDReader()\n self.pdbloader = sas_gen.PDBReader()\n self.omfloader = sas_gen.OMFReader()\n\n def test_sldreader(self):\n \"\"\"\n Test .sld file loaded\n \"\"\"\n f = self.sldloader.read(find(\"sld_file.sld\"))\n self.assertEqual(f.pos_x[0], -40.5)\n self.assertEqual(f.pos_y[0], -13.5)\n self.assertEqual(f.pos_z[0], -13.5)\n\n def test_pdbreader(self):\n \"\"\"\n Test .pdb file loaded\n \"\"\"\n f = self.pdbloader.read(find(\"c60.pdb\"))\n self.assertEqual(f.pos_x[0], -0.733)\n self.assertEqual(f.pos_y[0], -1.008)\n self.assertEqual(f.pos_z[0], 3.326)\n\n def test_omfreader(self):\n \"\"\"\n Test .omf file loaded\n \"\"\"\n f = self.omfloader.read(find(\"A_Raw_Example-1.omf\"))\n output = sas_gen.OMF2SLD()\n output.set_data(f)\n self.assertEqual(f.mx[0], 0)\n self.assertEqual(f.my[0], 0)\n self.assertEqual(f.mz[0], 0)\n self.assertEqual(output.pos_x[0], 0.0)\n self.assertEqual(output.pos_y[0], 0.0)\n self.assertEqual(output.pos_z[0], 0.0)\n\n def test_calculator(self):\n \"\"\"\n Test that the calculator calculates.\n \"\"\"\n f = self.omfloader.read(find(\"A_Raw_Example-1.omf\"))\n omf2sld = sas_gen.OMF2SLD()\n omf2sld.set_data(f)\n model = sas_gen.GenSAS()\n model.set_sld_data(omf2sld.output)\n x = np.linspace(0, 0.1, 11)[1:]\n model.runXY([x, x])\n\n\nif __name__ == '__main__':\n unittest.main()\n\n"
] | [
[
"numpy.linspace"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
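test_calculator in the llimeht/sasview record evaluates GenSAS on a small q grid built with numpy.linspace; the one-liner below just shows that grid, where the leading element is sliced away, presumably so that q = 0 is not passed to the calculator.

    import numpy as np

    x = np.linspace(0, 0.1, 11)[1:]
    print(x)   # 0.01, 0.02, ..., 0.1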
ddasdkimo/Towards-Realtime-MOT | [
"cfe0e26331969450b6e2a645dfa5c14947514ba5"
] | [
"track.py"
] | [
"import os\r\nimport os.path as osp\r\nimport cv2\r\nimport logging\r\nimport argparse\r\nimport motmetrics as mm\r\n\r\nimport torch\r\nfrom tracker.multitracker import JDETracker\r\nfrom utils import visualization as vis\r\nfrom utils.log import logger\r\nfrom utils.timer import Timer\r\nfrom utils.evaluation import Evaluator\r\nfrom utils.parse_config import parse_model_cfg\r\nimport utils.datasets as datasets\r\nfrom utils.utils import *\r\n\r\n\r\ndef write_results(filename, results, data_type):\r\n if data_type == 'mot':\r\n save_format = '{frame},{id},{x1},{y1},{w},{h},1,-1,-1,-1\\n'\r\n elif data_type == 'kitti':\r\n save_format = '{frame} {id} pedestrian 0 0 -10 {x1} {y1} {x2} {y2} -10 -10 -10 -1000 -1000 -1000 -10\\n'\r\n else:\r\n raise ValueError(data_type)\r\n\r\n with open(filename, 'w') as f:\r\n for frame_id, tlwhs, track_ids in results:\r\n if data_type == 'kitti':\r\n frame_id -= 1\r\n for tlwh, track_id in zip(tlwhs, track_ids):\r\n if track_id < 0:\r\n continue\r\n x1, y1, w, h = tlwh\r\n x2, y2 = x1 + w, y1 + h\r\n line = save_format.format(frame=frame_id, id=track_id, x1=x1, y1=y1, x2=x2, y2=y2, w=w, h=h)\r\n f.write(line)\r\n logger.info('save results to {}'.format(filename))\r\ndef conversion_frame_init(opt,frame_rate):\r\n global tracker,timer,results,frame_id,objopt,resultscamera\r\n objopt = opt\r\n tracker = JDETracker(opt, frame_rate=frame_rate)\r\n timer = Timer()\r\n results = []\r\n resultscamera = [None] * 1000\r\n \r\n frame_id = 0\r\n\r\ndef conversion_frame(img, img0):\r\n global tracker,timer,resultscamera,frame_id,objopt\r\n # run tracking\r\n timer.tic()\r\n blob = torch.from_numpy(img).cuda().unsqueeze(0)\r\n online_targets = tracker.update(blob, img0)\r\n online_tlwhs = []\r\n online_ids = []\r\n for t in online_targets:\r\n tlwh = t.tlwh\r\n tid = t.track_id\r\n vertical = tlwh[2] / tlwh[3] > 1.6\r\n if tlwh[2] * tlwh[3] > objopt.min_box_area and not vertical:\r\n online_tlwhs.append(tlwh)\r\n online_ids.append(tid)\r\n timer.toc()\r\n count = frame_id%1000\r\n resultscamera[count] = (frame_id + 1, online_tlwhs, online_ids)\r\n online_im = vis.plot_tracking(img0, online_tlwhs, online_ids, frame_id=frame_id,\r\n fps=1. 
/ timer.average_time)\r\n frame_id += 1\r\n return online_im,resultscamera[count]\r\n\r\ndef eval_seq(opt, dataloader, data_type, result_filename, save_dir=None, show_image=True, frame_rate=30):\r\n '''\r\n Processes the video sequence given and provides the output of tracking result (write the results in video file)\r\n\r\n It uses JDE model for getting information about the online targets present.\r\n\r\n Parameters\r\n ----------\r\n opt : Namespace\r\n Contains information passed as commandline arguments.\r\n\r\n dataloader : LoadVideo\r\n Instance of LoadVideo class used for fetching the image sequence and associated data.\r\n\r\n data_type : String\r\n Type of dataset corresponding(similar) to the given video.\r\n\r\n result_filename : String\r\n The name(path) of the file for storing results.\r\n\r\n save_dir : String\r\n Path to the folder for storing the frames containing bounding box information (Result frames).\r\n\r\n show_image : bool\r\n Option for shhowing individial frames during run-time.\r\n\r\n frame_rate : int\r\n Frame-rate of the given video.\r\n\r\n Returns\r\n -------\r\n (Returns are not significant here)\r\n frame_id : int\r\n Sequence number of the last sequence\r\n '''\r\n\r\n if save_dir:\r\n mkdir_if_missing(save_dir)\r\n tracker = JDETracker(opt, frame_rate=frame_rate)\r\n timer = Timer()\r\n results = []\r\n frame_id = 0\r\n for path, img, img0 in dataloader:\r\n if frame_id % 20 == 0:\r\n logger.info('Processing frame {} ({:.2f} fps)'.format(frame_id, 1./max(1e-5, timer.average_time)))\r\n\r\n # run tracking\r\n timer.tic()\r\n blob = torch.from_numpy(img).cuda().unsqueeze(0)\r\n online_targets = tracker.update(blob, img0)\r\n online_tlwhs = []\r\n online_ids = []\r\n for t in online_targets:\r\n tlwh = t.tlwh\r\n tid = t.track_id\r\n vertical = tlwh[2] / tlwh[3] > 1.6\r\n if tlwh[2] * tlwh[3] > opt.min_box_area and not vertical:\r\n online_tlwhs.append(tlwh)\r\n online_ids.append(tid)\r\n timer.toc()\r\n # save results\r\n results.append((frame_id + 1, online_tlwhs, online_ids))\r\n if show_image or save_dir is not None:\r\n online_im = vis.plot_tracking(img0, online_tlwhs, online_ids, frame_id=frame_id,\r\n fps=1. 
/ timer.average_time)\r\n if show_image:\r\n cv2.imshow('online_im', online_im)\r\n if save_dir is not None:\r\n cv2.imwrite(os.path.join(save_dir, '{:05d}.jpg'.format(frame_id)), online_im)\r\n frame_id += 1\r\n # save results\r\n write_results(result_filename, results, data_type)\r\n return frame_id, timer.average_time, timer.calls\r\n\r\n\r\ndef main(opt, data_root='/data/MOT16/train', det_root=None, seqs=('MOT16-05',), exp_name='demo', \r\n save_images=False, save_videos=False, show_image=True):\r\n logger.setLevel(logging.INFO)\r\n result_root = os.path.join(data_root, '..', 'results', exp_name)\r\n mkdir_if_missing(result_root)\r\n data_type = 'mot'\r\n\r\n # Read config\r\n cfg_dict = parse_model_cfg(opt.cfg)\r\n opt.img_size = [int(cfg_dict[0]['width']), int(cfg_dict[0]['height'])]\r\n\r\n # run tracking\r\n accs = []\r\n n_frame = 0\r\n timer_avgs, timer_calls = [], []\r\n for seq in seqs:\r\n output_dir = os.path.join(data_root, '..','outputs', exp_name, seq) if save_images or save_videos else None\r\n\r\n logger.info('start seq: {}'.format(seq))\r\n dataloader = datasets.LoadImages(osp.join(data_root, seq, 'img1'), opt.img_size)\r\n result_filename = os.path.join(result_root, '{}.txt'.format(seq))\r\n meta_info = open(os.path.join(data_root, seq, 'seqinfo.ini')).read() \r\n frame_rate = int(meta_info[meta_info.find('frameRate')+10:meta_info.find('\\nseqLength')])\r\n nf, ta, tc = eval_seq(opt, dataloader, data_type, result_filename,\r\n save_dir=output_dir, show_image=show_image, frame_rate=frame_rate)\r\n n_frame += nf\r\n timer_avgs.append(ta)\r\n timer_calls.append(tc)\r\n\r\n # eval\r\n logger.info('Evaluate seq: {}'.format(seq))\r\n evaluator = Evaluator(data_root, seq, data_type)\r\n accs.append(evaluator.eval_file(result_filename))\r\n if save_videos:\r\n output_video_path = osp.join(output_dir, '{}.mp4'.format(seq))\r\n cmd_str = 'ffmpeg -f image2 -i {}/%05d.jpg -c:v copy {}'.format(output_dir, output_video_path)\r\n os.system(cmd_str)\r\n timer_avgs = np.asarray(timer_avgs)\r\n timer_calls = np.asarray(timer_calls)\r\n all_time = np.dot(timer_avgs, timer_calls)\r\n avg_time = all_time / np.sum(timer_calls)\r\n logger.info('Time elapsed: {:.2f} seconds, FPS: {:.2f}'.format(all_time, 1.0 / avg_time))\r\n\r\n # get summary\r\n metrics = mm.metrics.motchallenge_metrics\r\n mh = mm.metrics.create()\r\n summary = Evaluator.get_summary(accs, seqs, metrics)\r\n strsummary = mm.io.render_summary(\r\n summary,\r\n formatters=mh.formatters,\r\n namemap=mm.io.motchallenge_metric_names\r\n )\r\n print(strsummary)\r\n Evaluator.save_summary(summary, os.path.join(result_root, 'summary_{}.xlsx'.format(exp_name)))\r\n\r\n\r\n\r\nif __name__ == '__main__':\r\n parser = argparse.ArgumentParser(prog='track.py')\r\n parser.add_argument('--cfg', type=str, default='cfg/yolov3.cfg', help='cfg file path')\r\n parser.add_argument('--weights', type=str, default='weights/latest.pt', help='path to weights file')\r\n parser.add_argument('--iou-thres', type=float, default=0.5, help='iou threshold required to qualify as detected')\r\n parser.add_argument('--conf-thres', type=float, default=0.5, help='object confidence threshold')\r\n parser.add_argument('--nms-thres', type=float, default=0.4, help='iou threshold for non-maximum suppression')\r\n parser.add_argument('--min-box-area', type=float, default=200, help='filter out tiny boxes')\r\n parser.add_argument('--track-buffer', type=int, default=30, help='tracking buffer')\r\n parser.add_argument('--test-mot16', action='store_true', help='tracking 
buffer')\r\n parser.add_argument('--save-images', action='store_true', help='save tracking results (image)')\r\n parser.add_argument('--save-videos', action='store_true', help='save tracking results (video)')\r\n opt = parser.parse_args()\r\n print(opt, end='\\n\\n')\r\n \r\n if not opt.test_mot16:\r\n seqs_str = '''MOT17-02-SDP\r\n MOT17-04-SDP\r\n MOT17-05-SDP\r\n MOT17-09-SDP\r\n MOT17-10-SDP\r\n MOT17-11-SDP\r\n MOT17-13-SDP\r\n '''\r\n data_root = '/home/wangzd/datasets/MOT/MOT17/images/train'\r\n else:\r\n seqs_str = '''MOT16-01\r\n MOT16-03\r\n MOT16-06\r\n MOT16-07\r\n MOT16-08\r\n MOT16-12\r\n MOT16-14'''\r\n data_root = '/home/wangzd/datasets/MOT/MOT16/images/test'\r\n seqs = [seq.strip() for seq in seqs_str.split()]\r\n\r\n main(opt,\r\n data_root=data_root,\r\n seqs=seqs,\r\n exp_name=opt.weights.split('/')[-2],\r\n show_image=False,\r\n save_images=opt.save_images, \r\n save_videos=opt.save_videos)\r\n\r\n"
] | [
[
"torch.from_numpy"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
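write_results in the ddasdkimo/Towards-Realtime-MOT record serialises each tracked box as one comma-separated MOT line per (frame, id) pair. The tiny standalone example below formats a single made-up detection with the same template.

    save_format = '{frame},{id},{x1},{y1},{w},{h},1,-1,-1,-1\n'
    frame_id, track_id = 1, 7
    x1, y1, w, h = 100.0, 50.0, 40.0, 80.0
    line = save_format.format(frame=frame_id, id=track_id, x1=x1, y1=y1, w=w, h=h)
    print(line, end='')   # 1,7,100.0,50.0,40.0,80.0,1,-1,-1,-1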
erikw/taiga_stats | [
"7e28ffff5169707e248be6a4ab6e31326fc2ca85"
] | [
"taiga_stats/helpers.py"
] | [
"import datetime as dt\nimport sys\n\nimport matplotlib\n\nimport taiga_stats.constants as c\n\nmatplotlib.use(\"TkAgg\") # Reference: https://stackoverflow.com/a/48374671/265508\n\n\nDOT_HEADER_FMT = \"\"\"digraph {:s} {{\n labelloc=\"t\";\n //labelfontsize=\"40\"\n label=\"{:s}\";\n //size=\"7.5,10\"\n ratio=\"compress\"\n //orientation=landscape\n\"\"\"\n\n\ndef get_tag_str(tag):\n return \"\" if tag == c.TAG_MATCH_ALL else tag\n\n\ndef get_stories_with_tag(project, tag):\n uss = project.list_user_stories()\n ret_uss = None\n if tag == c.TAG_MATCH_ALL:\n ret_uss = uss\n else:\n ret_uss = []\n for us in uss:\n if us.tags and tag in us.tags:\n ret_uss.append(us)\n\n if ret_uss is None or len(ret_uss) == 0:\n print(\n \"Warning: no userstories matching '{:s}' was found.\".format(tag),\n file=sys.stderr,\n )\n sys.exit(1)\n return ret_uss\n\n\ndef get_us_stauts_id_from_name(project, name):\n statuses = project.list_user_story_statuses()\n for status in statuses:\n if status.name == name:\n return status.id\n return None\n\n\ndef get_us_status_name_from_id(project, status_id):\n statuses = project.list_user_story_statuses()\n for status in statuses:\n if status.id == status_id:\n return status.name\n return None\n\n\ndef remove_closed_stories(_project, uss):\n ret_uss = []\n for us in uss:\n if not us.is_closed:\n ret_uss.append(us)\n return ret_uss\n\n\ndef get_statuses_sorted_by_order(project):\n statuses = project.list_user_story_statuses()\n return sorted(statuses, key=lambda status: status.order)\n\n\ndef get_statuses_sorted_by_id(project):\n statuses = project.list_user_story_statuses()\n return sorted(statuses, key=lambda status: status.id)\n\n\ndef get_status_id_sorted(project):\n return [status.id for status in get_statuses_sorted_by_order(project)]\n\n\ndef get_status_and_names_sorted(project):\n status_ids = get_status_id_sorted(project)[::-1]\n status_names = []\n for status_id in status_ids:\n status_names.append(get_us_status_name_from_id(project, status_id))\n\n return status_ids, status_names\n\n\ndef get_dot_header(name, title):\n return DOT_HEADER_FMT.format(name, title)\n\n\ndef get_dot_footer():\n return \"}\"\n\n\ndef read_daily_cfd(path, tag):\n data_file = c.CFD_DATA_FILE_FMT.format(get_tag_str(tag))\n data_path = \"{:s}/{:s}\".format(path, data_file)\n data = []\n try:\n with open(data_path, \"r\", encoding=\"utf-8\") as fdata:\n row = 0\n for line in fdata:\n line = line.rstrip()\n parts = line.split(\"\\t\")\n if row == 0:\n data = [[] for _ in range(len(parts) + 1)]\n else:\n for col in range(len(parts)):\n value = parts[col]\n if col == 0: # First col is dates\n value = dt.datetime.strptime(value, \"%Y-%m-%d\")\n elif col == 1: # Second col is annotations\n pass\n else:\n value = int(value)\n data[col].append(value)\n\n row += 1\n except IOError as e:\n print(\n \"Could not read {:s}, error: {:s}\".format(data_path, str(e)),\n file=sys.stderr,\n )\n sys.exit(2)\n\n return data\n\n\nclass assert_args:\n \"\"\"\n Assert that the given arguments exists.\n \"\"\"\n\n def __init__(self, *args):\n self.needed_args = args\n\n def __call__(self, func):\n dec = self\n\n def wrapper(args):\n for arg in dec.needed_args:\n if arg not in args or args[arg] is None:\n print(\"Required argument ''{:s}' was not supplied on commandline or set in config file.\".format(arg))\n return 1\n func(args)\n\n return wrapper\n"
] | [
[
"matplotlib.use"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
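The assert_args decorator in the erikw/taiga_stats record guards command functions against missing or None arguments. A hypothetical use, assuming the helpers module from the record is importable, looks like this; the second call is rejected before the command body runs.

    from taiga_stats.helpers import assert_args

    @assert_args('project_id', 'tag')
    def cmd_report(args):
        print('reporting tag', args['tag'], 'for project', args['project_id'])

    cmd_report({'project_id': 42, 'tag': 'backend'})    # requirements met, command runs
    cmd_report({'project_id': None, 'tag': 'backend'})  # None value: prints the error and returns 1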
mayankj/xView2-Solution | [
"804aa15a3d9f28c7c1d73e50ce0ed0c359a0493e",
"804aa15a3d9f28c7c1d73e50ce0ed0c359a0493e"
] | [
"xview/models/unetv2.py",
"fit_predict.py"
] | [
"from functools import partial\r\nfrom typing import List, Union, Callable\r\n\r\nimport torch\r\nfrom pytorch_toolbelt.modules import ABN, ACT_RELU, ACT_SWISH\r\nfrom pytorch_toolbelt.modules import encoders as E\r\nfrom pytorch_toolbelt.modules.decoders import DecoderModule\r\nfrom pytorch_toolbelt.modules.encoders import EncoderModule\r\nfrom torch import nn\r\nfrom torch.nn import functional as F\r\n\r\nfrom .common import disaster_type_classifier, damage_types_classifier\r\nfrom ..dataset import OUTPUT_MASK_KEY, DISASTER_TYPE_KEY, DISASTER_TYPES, DAMAGE_TYPE_KEY, DAMAGE_TYPES\r\n\r\n__all__ = [\"UnetV2SegmentationModel\"]\r\n\r\n\r\nclass ConvBottleneck(nn.Module):\r\n def __init__(self, in_channels, out_channels):\r\n super().__init__()\r\n self.seq = nn.Sequential(nn.Conv2d(in_channels, out_channels, 3, padding=1), nn.ReLU(inplace=True))\r\n\r\n def forward(self, dec, enc):\r\n x = torch.cat([dec, enc], dim=1)\r\n return self.seq(x)\r\n\r\n\r\nclass UnetDecoderBlock(nn.Module):\r\n def __init__(self, in_channels, middle_channels, out_channels):\r\n super().__init__()\r\n self.layer = nn.Sequential(\r\n nn.Upsample(scale_factor=2), nn.Conv2d(in_channels, out_channels, 3, padding=1), nn.ReLU(inplace=True)\r\n )\r\n\r\n def forward(self, x):\r\n return self.layer(x)\r\n\r\n\r\nclass UNetDecoderV2(DecoderModule):\r\n def __init__(\r\n self,\r\n feature_maps: List[int],\r\n decoder_features: List[int],\r\n mask_channels: int,\r\n last_upsample_filters=None,\r\n dropout=0.0,\r\n abn_block=ABN,\r\n ):\r\n super().__init__()\r\n\r\n if not isinstance(decoder_features, list):\r\n decoder_features = [decoder_features * (2 ** i) for i in range(len(feature_maps))]\r\n\r\n if last_upsample_filters is None:\r\n last_upsample_filters = decoder_features[0]\r\n\r\n self.encoder_features = feature_maps\r\n self.decoder_features = decoder_features\r\n self.decoder_stages = nn.ModuleList([self.get_decoder(idx) for idx in range(0, len(self.decoder_features))])\r\n\r\n self.bottlenecks = nn.ModuleList(\r\n [\r\n ConvBottleneck(self.encoder_features[-i - 2] + f, f)\r\n for i, f in enumerate(reversed(self.decoder_features[:]))\r\n ]\r\n )\r\n\r\n self.output_filters = decoder_features\r\n\r\n self.last_upsample = UnetDecoderBlock(decoder_features[0], last_upsample_filters, last_upsample_filters)\r\n\r\n self.final = nn.Conv2d(last_upsample_filters, mask_channels, kernel_size=1)\r\n\r\n def get_decoder(self, layer):\r\n in_channels = (\r\n self.encoder_features[layer + 1]\r\n if layer + 1 == len(self.decoder_features)\r\n else self.decoder_features[layer + 1]\r\n )\r\n return UnetDecoderBlock(in_channels, self.decoder_features[layer], self.decoder_features[max(layer, 0)])\r\n\r\n def forward(self, feature_maps):\r\n\r\n last_dec_out = feature_maps[-1]\r\n\r\n x = last_dec_out\r\n for idx, bottleneck in enumerate(self.bottlenecks):\r\n rev_idx = -(idx + 1)\r\n decoder = self.decoder_stages[rev_idx]\r\n x = decoder(x)\r\n x = bottleneck(x, feature_maps[rev_idx - 1])\r\n\r\n x = self.last_upsample(x)\r\n\r\n f = self.final(x)\r\n\r\n return f\r\n\r\n\r\nclass UnetV2SegmentationModel(nn.Module):\r\n def __init__(\r\n self,\r\n encoder: EncoderModule,\r\n num_classes: int,\r\n disaster_type_classes: int,\r\n damage_type_classes: int,\r\n unet_channels: List[int],\r\n dropout=0.25,\r\n abn_block: Union[ABN, Callable[[int], nn.Module]] = ABN,\r\n full_size_mask=True,\r\n ):\r\n super().__init__()\r\n self.encoder = encoder\r\n\r\n feature_maps = [2 * fm for fm in encoder.output_filters]\r\n\r\n self.decoder = 
UNetDecoderV2(\r\n feature_maps=feature_maps,\r\n decoder_features=unet_channels,\r\n mask_channels=num_classes,\r\n dropout=dropout,\r\n abn_block=abn_block,\r\n )\r\n\r\n self.full_size_mask = full_size_mask\r\n if disaster_type_classes is not None:\r\n self.disaster_type_classifier = disaster_type_classifier(\r\n feature_maps[-1], disaster_type_classes, dropout=dropout\r\n )\r\n else:\r\n self.disaster_type_classifier = None\r\n\r\n if damage_type_classes is not None:\r\n self.damage_types_classifier = damage_types_classifier(\r\n feature_maps[-1], damage_type_classes, dropout=dropout\r\n )\r\n else:\r\n self.damage_types_classifier = None\r\n\r\n def forward(self, x):\r\n batch_size = x.size(0)\r\n pre, post = x[:, 0:3, ...], x[:, 3:6, ...]\r\n\r\n if self.training:\r\n x = torch.cat([pre, post], dim=0)\r\n features = self.encoder(x)\r\n features = [torch.cat([f[0:batch_size], f[batch_size : batch_size * 2]], dim=1) for f in features]\r\n else:\r\n pre_features, post_features = self.encoder(pre), self.encoder(post)\r\n features = [torch.cat([pre, post], dim=1) for pre, post in zip(pre_features, post_features)]\r\n\r\n # Decode mask\r\n mask = self.decoder(features)\r\n\r\n if self.full_size_mask:\r\n mask = F.interpolate(mask, size=x.size()[2:], mode=\"bilinear\", align_corners=False)\r\n\r\n output = {OUTPUT_MASK_KEY: mask}\r\n\r\n if self.disaster_type_classifier is not None:\r\n disaster_type = self.disaster_type_classifier(features[-1])\r\n output[DISASTER_TYPE_KEY] = disaster_type\r\n\r\n if self.damage_types_classifier is not None:\r\n damage_types = self.damage_types_classifier(features[-1])\r\n output[DAMAGE_TYPE_KEY] = damage_types\r\n\r\n return output\r\n\r\n\r\ndef efficientb3_unet_v2(input_channels=6, num_classes=5, dropout=0.0, pretrained=True, classifiers=True):\r\n encoder = E.EfficientNetB3Encoder(pretrained=pretrained,\r\n layers=[0, 1, 2, 4, 6],\r\n abn_params={\"activation\": ACT_RELU})\r\n return UnetV2SegmentationModel(\r\n encoder,\r\n num_classes=num_classes,\r\n disaster_type_classes=len(DISASTER_TYPES) if classifiers else None,\r\n damage_type_classes=len(DAMAGE_TYPES) if classifiers else None,\r\n unet_channels=[64, 128, 256, 256],\r\n dropout=dropout,\r\n abn_block=partial(ABN, activation=ACT_RELU),\r\n )\r\n\r\n\r\ndef densenet121_unet_v2(input_channels=6, num_classes=5, dropout=0.0, pretrained=True, classifiers=True):\r\n encoder = E.DenseNet121Encoder(pretrained=pretrained, layers=[0, 1, 2, 3, 4])\r\n return UnetV2SegmentationModel(\r\n encoder,\r\n num_classes=num_classes,\r\n disaster_type_classes=len(DISASTER_TYPES) if classifiers else None,\r\n damage_type_classes=len(DAMAGE_TYPES) if classifiers else None,\r\n unet_channels=[64, 128, 256, 256],\r\n dropout=dropout,\r\n abn_block=partial(ABN, activation=ACT_RELU),\r\n )\r\n\r\n\r\ndef densenet169_unet_v2(input_channels=6, num_classes=5, dropout=0.0, pretrained=True, classifiers=True):\r\n encoder = E.DenseNet169Encoder(pretrained=pretrained, layers=[0, 1, 2, 3, 4])\r\n return UnetV2SegmentationModel(\r\n encoder,\r\n num_classes=num_classes,\r\n disaster_type_classes=len(DISASTER_TYPES) if classifiers else None,\r\n damage_type_classes=len(DAMAGE_TYPES) if classifiers else None,\r\n unet_channels=[128, 128, 256, 256],\r\n dropout=dropout,\r\n abn_block=partial(ABN, activation=ACT_RELU),\r\n )\r\n\r\ndef resnet18_unet_v2(input_channels=6, num_classes=5, dropout=0.0, pretrained=True, classifiers=True):\r\n encoder = E.Resnet18Encoder(pretrained=pretrained, layers=[0, 1, 2, 3, 4])\r\n return 
UnetV2SegmentationModel(\r\n encoder,\r\n num_classes=num_classes,\r\n disaster_type_classes=len(DISASTER_TYPES) if classifiers else None,\r\n damage_type_classes=len(DAMAGE_TYPES) if classifiers else None,\r\n unet_channels=[64, 128, 256, 256],\r\n dropout=dropout,\r\n abn_block=partial(ABN, activation=ACT_RELU),\r\n )\r\ndef resnet34_unet_v2(input_channels=6, num_classes=5, dropout=0.0, pretrained=True, classifiers=True):\r\n encoder = E.Resnet34Encoder(pretrained=pretrained, layers=[0, 1, 2, 3, 4])\r\n return UnetV2SegmentationModel(\r\n encoder,\r\n num_classes=num_classes,\r\n disaster_type_classes=len(DISASTER_TYPES) if classifiers else None,\r\n damage_type_classes=len(DAMAGE_TYPES) if classifiers else None,\r\n unet_channels=[64, 128, 256, 256],\r\n dropout=dropout,\r\n abn_block=partial(ABN, activation=ACT_RELU),\r\n )\r\n\r\n\r\ndef resnet50_unet_v2(input_channels=6, num_classes=5, dropout=0.0, pretrained=True, classifiers=True):\r\n encoder = E.Resnet50Encoder(pretrained=pretrained, layers=[0, 1, 2, 3, 4])\r\n return UnetV2SegmentationModel(\r\n encoder,\r\n num_classes=num_classes,\r\n disaster_type_classes=len(DISASTER_TYPES) if classifiers else None,\r\n damage_type_classes=len(DAMAGE_TYPES) if classifiers else None,\r\n unet_channels=[96, 128, 256, 256],\r\n dropout=dropout,\r\n abn_block=partial(ABN, activation=ACT_RELU),\r\n )\r\n\r\n\r\ndef resnet101_unet_v2(input_channels=6, num_classes=5, dropout=0.0, pretrained=True, classifiers=True):\r\n encoder = E.Resnet101Encoder(pretrained=pretrained, layers=[0, 1, 2, 3, 4])\r\n return UnetV2SegmentationModel(\r\n encoder,\r\n num_classes=num_classes,\r\n disaster_type_classes=len(DISASTER_TYPES) if classifiers else None,\r\n damage_type_classes=len(DAMAGE_TYPES) if classifiers else None,\r\n unet_channels=[64, 128, 256, 384],\r\n dropout=dropout,\r\n abn_block=partial(ABN, activation=ACT_RELU),\r\n )\r\n\r\n\r\ndef seresnext50_unet_v2(input_channels=6, num_classes=5, dropout=0.0, pretrained=True, classifiers=True):\r\n encoder = E.SEResNeXt50Encoder(pretrained=pretrained, layers=[0, 1, 2, 3, 4])\r\n return UnetV2SegmentationModel(\r\n encoder,\r\n num_classes=num_classes,\r\n disaster_type_classes=len(DISASTER_TYPES) if classifiers else None,\r\n damage_type_classes=len(DAMAGE_TYPES) if classifiers else None,\r\n unet_channels=[64, 128, 256, 256],\r\n dropout=dropout,\r\n abn_block=partial(ABN, activation=ACT_RELU),\r\n )\r\n\r\n\r\ndef seresnext101_unet_v2(input_channels=6, num_classes=5, dropout=0.0, pretrained=True, classifiers=True):\r\n encoder = E.SEResNeXt101Encoder(pretrained=pretrained, layers=[0, 1, 2, 3, 4])\r\n return UnetV2SegmentationModel(\r\n encoder,\r\n num_classes=num_classes,\r\n disaster_type_classes=len(DISASTER_TYPES) if classifiers else None,\r\n damage_type_classes=len(DAMAGE_TYPES) if classifiers else None,\r\n unet_channels=[128, 128, 256, 384],\r\n dropout=dropout,\r\n abn_block=partial(ABN, activation=ACT_RELU),\r\n )\r\n",
"from __future__ import absolute_import\n\nimport argparse\nimport collections\nimport gc\nimport json\nimport os\nfrom datetime import datetime\n\nimport torch\nfrom catalyst.dl import SupervisedRunner, OptimizerCallback, SchedulerCallback\nfrom catalyst.dl.callbacks import CriterionAggregatorCallback, AccuracyCallback\nfrom catalyst.utils import load_checkpoint, unpack_checkpoint\nfrom pytorch_toolbelt.optimization.functional import get_lr_decay_parameters\nfrom pytorch_toolbelt.utils import fs, torch_utils\nfrom pytorch_toolbelt.utils.catalyst import ShowPolarBatchesCallback, ConfusionMatrixCallback\nfrom pytorch_toolbelt.utils.random import set_manual_seed\nfrom pytorch_toolbelt.utils.torch_utils import count_parameters, transfer_weights, get_optimizable_parameters\nfrom torch import nn\nfrom torch.optim.lr_scheduler import CyclicLR\nfrom torch.utils.data import DataLoader\n\nfrom xview.dataset import (\n INPUT_IMAGE_KEY,\n OUTPUT_MASK_KEY,\n INPUT_MASK_KEY,\n get_datasets,\n OUTPUT_MASK_4_KEY,\n UNLABELED_SAMPLE,\n get_pseudolabeling_dataset,\n DISASTER_TYPE_KEY,\n UNKNOWN_DISASTER_TYPE_CLASS,\n DISASTER_TYPES,\n OUTPUT_EMBEDDING_KEY,\n DAMAGE_TYPE_KEY,\n OUTPUT_MASK_8_KEY, OUTPUT_MASK_16_KEY, OUTPUT_MASK_32_KEY)\nfrom xview.metric import CompetitionMetricCallback\nfrom xview.models import get_model\nfrom xview.optim import get_optimizer\nfrom xview.pseudo import CEOnlinePseudolabelingCallback2d\nfrom xview.scheduler import get_scheduler\nfrom xview.train_utils import clean_checkpoint, report_checkpoint, get_criterion_callback\nfrom xview.visualization import draw_predictions\n\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-acc\", \"--accumulation-steps\", type=int, default=1, help=\"Number of batches to process\")\n parser.add_argument(\"--seed\", type=int, default=42, help=\"Random seed\")\n parser.add_argument(\"-v\", \"--verbose\", action=\"store_true\")\n parser.add_argument(\"--fast\", action=\"store_true\")\n parser.add_argument(\n \"-dd\", \"--data-dir\", type=str, required=True, help=\"Data directory for INRIA sattelite dataset\"\n )\n parser.add_argument(\"-m\", \"--model\", type=str, default=\"resnet34_fpncat128\", help=\"\")\n parser.add_argument(\"-b\", \"--batch-size\", type=int, default=8, help=\"Batch Size during training, e.g. 
-b 64\")\n parser.add_argument(\"-e\", \"--epochs\", type=int, default=100, help=\"Epoch to run\")\n # parser.add_argument('-es', '--early-stopping', type=int, default=None, help='Maximum number of epochs without improvement')\n # parser.add_argument('-fe', '--freeze-encoder', type=int, default=0, help='Freeze encoder parameters for N epochs')\n # parser.add_argument('-ft', '--fine-tune', action='store_true')\n parser.add_argument(\"-lr\", \"--learning-rate\", type=float, default=1e-3, help=\"Initial learning rate\")\n parser.add_argument(\n \"--disaster-type-loss\",\n type=str,\n default=None, # [[\"ce\", 1.0]],\n action=\"append\",\n nargs=\"+\",\n help=\"Criterion for classifying disaster type\",\n )\n parser.add_argument(\n \"--damage-type-loss\",\n type=str,\n default=None, # [[\"bce\", 1.0]],\n action=\"append\",\n nargs=\"+\",\n help=\"Criterion for classifying presence of building with particular damage type\",\n )\n\n parser.add_argument(\"-l\", \"--criterion\", type=str, default=None, action=\"append\", nargs=\"+\", help=\"Criterion\")\n parser.add_argument(\"--mask4\", type=str, default=None, action=\"append\", nargs=\"+\", help=\"Criterion for mask with stride 4\")\n parser.add_argument(\"--mask8\", type=str, default=None, action=\"append\", nargs=\"+\", help=\"Criterion for mask with stride 8\")\n parser.add_argument(\"--mask16\", type=str, default=None, action=\"append\", nargs=\"+\", help=\"Criterion for mask with stride 16\")\n parser.add_argument(\"--mask32\", type=str, default=None, action=\"append\", nargs=\"+\", help=\"Criterion for mask with stride 32\")\n parser.add_argument(\"--embedding\", type=str, default=None)\n\n parser.add_argument(\"-o\", \"--optimizer\", default=\"RAdam\", help=\"Name of the optimizer\")\n parser.add_argument(\n \"-c\", \"--checkpoint\", type=str, default=None, help=\"Checkpoint filename to use as initial model weights\"\n )\n parser.add_argument(\"-w\", \"--workers\", default=8, type=int, help=\"Num workers\")\n parser.add_argument(\"-a\", \"--augmentations\", default=\"safe\", type=str, help=\"Level of image augmentations\")\n parser.add_argument(\"--transfer\", default=None, type=str, help=\"\")\n parser.add_argument(\"--fp16\", action=\"store_true\")\n parser.add_argument(\"--size\", default=512, type=int)\n parser.add_argument(\"--fold\", default=0, type=int)\n parser.add_argument(\"-s\", \"--scheduler\", default=\"multistep\", type=str, help=\"\")\n parser.add_argument(\"-x\", \"--experiment\", default=None, type=str, help=\"\")\n parser.add_argument(\"-d\", \"--dropout\", default=0.0, type=float, help=\"Dropout before head layer\")\n parser.add_argument(\"--opl\", action=\"store_true\")\n parser.add_argument(\n \"--warmup\", default=0, type=int, help=\"Number of warmup epochs with reduced LR on encoder parameters\"\n )\n parser.add_argument(\"-wd\", \"--weight-decay\", default=0, type=float, help=\"L2 weight decay\")\n parser.add_argument(\"--show\", action=\"store_true\")\n parser.add_argument(\"--dsv\", action=\"store_true\")\n parser.add_argument(\"--balance\", action=\"store_true\")\n parser.add_argument(\"--only-buildings\", action=\"store_true\")\n parser.add_argument(\"--freeze-bn\", action=\"store_true\")\n parser.add_argument(\"--crops\", action=\"store_true\", help=\"Train on random crops\")\n parser.add_argument(\"--post-transform\", action=\"store_true\")\n\n args = parser.parse_args()\n set_manual_seed(args.seed)\n\n data_dir = args.data_dir\n num_workers = args.workers\n num_epochs = args.epochs\n learning_rate = 
args.learning_rate\n model_name = args.model\n optimizer_name = args.optimizer\n image_size = args.size, args.size\n fast = args.fast\n augmentations = args.augmentations\n fp16 = args.fp16\n scheduler_name = args.scheduler\n experiment = args.experiment\n dropout = args.dropout\n online_pseudolabeling = args.opl\n segmentation_losses = args.criterion\n verbose = args.verbose\n warmup = args.warmup\n show = args.show\n accumulation_steps = args.accumulation_steps\n weight_decay = args.weight_decay\n fold = args.fold\n balance = args.balance\n only_buildings = args.only_buildings\n freeze_bn = args.freeze_bn\n train_on_crops = args.crops\n enable_post_image_transform = args.post_transform\n disaster_type_loss = args.disaster_type_loss\n train_batch_size = args.batch_size\n embedding_criterion = args.embedding\n damage_type_loss = args.damage_type_loss\n\n # Compute batch size for validaion\n if train_on_crops:\n valid_batch_size = max(1, (train_batch_size * (image_size[0] * image_size[1])) // (1024 ** 2))\n else:\n valid_batch_size = train_batch_size\n\n run_train = num_epochs > 0\n\n model: nn.Module = get_model(model_name, dropout=dropout).cuda()\n\n if args.transfer:\n transfer_checkpoint = fs.auto_file(args.transfer)\n print(\"Transfering weights from model checkpoint\", transfer_checkpoint)\n checkpoint = load_checkpoint(transfer_checkpoint)\n pretrained_dict = checkpoint[\"model_state_dict\"]\n\n transfer_weights(model, pretrained_dict)\n\n if args.checkpoint:\n checkpoint = load_checkpoint(fs.auto_file(args.checkpoint))\n unpack_checkpoint(checkpoint, model=model)\n\n print(\"Loaded model weights from:\", args.checkpoint)\n report_checkpoint(checkpoint)\n\n if freeze_bn:\n torch_utils.freeze_bn(model)\n print(\"Freezing bn params\")\n\n runner = SupervisedRunner(input_key=INPUT_IMAGE_KEY, output_key=None)\n main_metric = \"weighted_f1\"\n cmd_args = vars(args)\n\n current_time = datetime.now().strftime(\"%b%d_%H_%M\")\n checkpoint_prefix = f\"{current_time}_{args.model}_{args.size}_fold{fold}\"\n\n if fp16:\n checkpoint_prefix += \"_fp16\"\n\n if fast:\n checkpoint_prefix += \"_fast\"\n\n if online_pseudolabeling:\n checkpoint_prefix += \"_opl\"\n\n if train_on_crops:\n checkpoint_prefix += \"_crops\"\n\n if experiment is not None:\n checkpoint_prefix = experiment\n\n log_dir = os.path.join(\"runs\", checkpoint_prefix)\n os.makedirs(log_dir, exist_ok=False)\n\n config_fname = os.path.join(log_dir, f\"{checkpoint_prefix}.json\")\n with open(config_fname, \"w\") as f:\n train_session_args = vars(args)\n f.write(json.dumps(train_session_args, indent=2))\n\n default_callbacks = [\n CompetitionMetricCallback(input_key=INPUT_MASK_KEY, output_key=OUTPUT_MASK_KEY, prefix=\"weighted_f1\"),\n ConfusionMatrixCallback(\n input_key=INPUT_MASK_KEY,\n output_key=OUTPUT_MASK_KEY,\n class_names=[\"land\", \"no_damage\", \"minor_damage\", \"major_damage\", \"destroyed\"],\n ignore_index=UNLABELED_SAMPLE,\n ),\n ]\n\n if show:\n default_callbacks += [\n ShowPolarBatchesCallback(draw_predictions, metric=main_metric + \"_batch\", minimize=False)\n ]\n\n train_ds, valid_ds, train_sampler = get_datasets(\n data_dir=data_dir,\n image_size=image_size,\n augmentation=augmentations,\n fast=fast,\n fold=fold,\n balance=balance,\n only_buildings=only_buildings,\n train_on_crops=train_on_crops,\n enable_post_image_transform=enable_post_image_transform,\n )\n\n # Pretrain/warmup\n if warmup:\n callbacks = default_callbacks.copy()\n criterions_dict = {}\n losses = []\n\n for criterion in segmentation_losses:\n if 
isinstance(criterion, (list, tuple)):\n loss_name, loss_weight = criterion\n else:\n loss_name, loss_weight = criterion, 1.0\n\n cd, criterion, criterion_name = get_criterion_callback(\n loss_name, input_key=INPUT_MASK_KEY, output_key=OUTPUT_MASK_KEY, loss_weight=float(loss_weight)\n )\n criterions_dict.update(cd)\n callbacks.append(criterion)\n losses.append(criterion_name)\n print(\"Using loss\", loss_name, loss_weight)\n\n if args.mask4 is not None:\n for criterion in args.mask4:\n if isinstance(criterion, (list, tuple)):\n loss_name, loss_weight = criterion\n else:\n loss_name, loss_weight = criterion, 1.0\n\n cd, criterion, criterion_name = get_criterion_callback(\n loss_name, input_key=INPUT_MASK_KEY, output_key=OUTPUT_MASK_4_KEY, loss_weight=float(loss_weight)\n )\n criterions_dict.update(cd)\n callbacks.append(criterion)\n losses.append(criterion_name)\n print(\"Using loss\", loss_name, loss_weight)\n\n callbacks += [\n CriterionAggregatorCallback(prefix=\"loss\", loss_keys=losses),\n OptimizerCallback(accumulation_steps=accumulation_steps, decouple_weight_decay=False),\n ]\n\n parameters = get_lr_decay_parameters(model.named_parameters(), learning_rate, {\"encoder\": 0.1})\n optimizer = get_optimizer(\"RAdam\", parameters, learning_rate=learning_rate * 0.1)\n\n loaders = collections.OrderedDict()\n loaders[\"train\"] = DataLoader(\n train_ds,\n batch_size=train_batch_size,\n num_workers=num_workers,\n pin_memory=True,\n drop_last=True,\n shuffle=train_sampler is None,\n sampler=train_sampler,\n )\n\n loaders[\"valid\"] = DataLoader(valid_ds, batch_size=valid_batch_size, num_workers=num_workers, pin_memory=True)\n\n runner.train(\n fp16=fp16,\n model=model,\n criterion=criterions_dict,\n optimizer=optimizer,\n scheduler=None,\n callbacks=callbacks,\n loaders=loaders,\n logdir=os.path.join(log_dir, \"warmup\"),\n num_epochs=warmup,\n verbose=verbose,\n main_metric=main_metric,\n minimize_metric=False,\n checkpoint_data={\"cmd_args\": cmd_args},\n )\n\n del optimizer, loaders\n\n best_checkpoint = os.path.join(log_dir, \"warmup\", \"checkpoints\", \"best.pth\")\n model_checkpoint = os.path.join(log_dir, \"warmup\", \"checkpoints\", f\"{checkpoint_prefix}_warmup.pth\")\n clean_checkpoint(best_checkpoint, model_checkpoint)\n\n torch.cuda.empty_cache()\n gc.collect()\n\n if run_train:\n loaders = collections.OrderedDict()\n callbacks = default_callbacks.copy()\n criterions_dict = {}\n losses = []\n\n if online_pseudolabeling:\n unlabeled_label = get_pseudolabeling_dataset(\n data_dir, include_masks=False, image_size=image_size, augmentation=None\n )\n\n unlabeled_train = get_pseudolabeling_dataset(\n data_dir,\n include_masks=True,\n image_size=image_size,\n augmentation=augmentations,\n train_on_crops=train_on_crops,\n enable_post_image_transform=enable_post_image_transform,\n )\n\n loaders[\"label\"] = DataLoader(\n unlabeled_label, batch_size=valid_batch_size, num_workers=num_workers, pin_memory=True\n )\n\n train_ds = train_ds + unlabeled_train\n train_sampler = None\n\n callbacks += [\n CEOnlinePseudolabelingCallback2d(\n unlabeled_train,\n pseudolabel_loader=\"label\",\n prob_threshold=0.75,\n output_key=OUTPUT_MASK_KEY,\n unlabeled_class=UNLABELED_SAMPLE,\n label_frequency=5,\n )\n ]\n\n print(\"Using online pseudolabeling with \", len(unlabeled_label), \"samples\")\n\n loaders[\"train\"] = DataLoader(\n train_ds,\n batch_size=train_batch_size,\n num_workers=num_workers,\n pin_memory=True,\n drop_last=True,\n shuffle=train_sampler is None,\n sampler=train_sampler,\n )\n\n 
loaders[\"valid\"] = DataLoader(valid_ds, batch_size=valid_batch_size, num_workers=num_workers, pin_memory=True)\n\n # Create losses\n for criterion in segmentation_losses:\n if isinstance(criterion, (list, tuple)) and len(criterion) == 2:\n loss_name, loss_weight = criterion\n else:\n loss_name, loss_weight = criterion[0], 1.0\n\n cd, criterion, criterion_name = get_criterion_callback(\n loss_name,\n prefix=\"segmentation\",\n input_key=INPUT_MASK_KEY,\n output_key=OUTPUT_MASK_KEY,\n loss_weight=float(loss_weight),\n )\n criterions_dict.update(cd)\n callbacks.append(criterion)\n losses.append(criterion_name)\n print(INPUT_MASK_KEY, \"Using loss\", loss_name, loss_weight)\n\n if args.mask4 is not None:\n for criterion in args.mask4:\n if isinstance(criterion, (list, tuple)):\n loss_name, loss_weight = criterion\n else:\n loss_name, loss_weight = criterion, 1.0\n\n cd, criterion, criterion_name = get_criterion_callback(\n loss_name,\n prefix=\"mask4\",\n input_key=INPUT_MASK_KEY,\n output_key=OUTPUT_MASK_4_KEY,\n loss_weight=float(loss_weight),\n )\n criterions_dict.update(cd)\n callbacks.append(criterion)\n losses.append(criterion_name)\n print(OUTPUT_MASK_4_KEY, \"Using loss\", loss_name, loss_weight)\n\n if args.mask8 is not None:\n for criterion in args.mask8:\n if isinstance(criterion, (list, tuple)):\n loss_name, loss_weight = criterion\n else:\n loss_name, loss_weight = criterion, 1.0\n\n cd, criterion, criterion_name = get_criterion_callback(\n loss_name,\n prefix=\"mask8\",\n input_key=INPUT_MASK_KEY,\n output_key=OUTPUT_MASK_8_KEY,\n loss_weight=float(loss_weight),\n )\n criterions_dict.update(cd)\n callbacks.append(criterion)\n losses.append(criterion_name)\n print(OUTPUT_MASK_8_KEY, \"Using loss\", loss_name, loss_weight)\n\n if args.mask16 is not None:\n for criterion in args.mask16:\n if isinstance(criterion, (list, tuple)):\n loss_name, loss_weight = criterion\n else:\n loss_name, loss_weight = criterion, 1.0\n\n cd, criterion, criterion_name = get_criterion_callback(\n loss_name,\n prefix=\"mask16\",\n input_key=INPUT_MASK_KEY,\n output_key=OUTPUT_MASK_16_KEY,\n loss_weight=float(loss_weight),\n )\n criterions_dict.update(cd)\n callbacks.append(criterion)\n losses.append(criterion_name)\n print(OUTPUT_MASK_16_KEY, \"Using loss\", loss_name, loss_weight)\n\n if args.mask32 is not None:\n for criterion in args.mask32:\n if isinstance(criterion, (list, tuple)):\n loss_name, loss_weight = criterion\n else:\n loss_name, loss_weight = criterion, 1.0\n\n cd, criterion, criterion_name = get_criterion_callback(\n loss_name,\n prefix=\"mask32\",\n input_key=INPUT_MASK_KEY,\n output_key=OUTPUT_MASK_32_KEY,\n loss_weight=float(loss_weight),\n )\n criterions_dict.update(cd)\n callbacks.append(criterion)\n losses.append(criterion_name)\n print(OUTPUT_MASK_32_KEY, \"Using loss\", loss_name, loss_weight)\n\n if disaster_type_loss is not None:\n callbacks += [\n ConfusionMatrixCallback(\n input_key=DISASTER_TYPE_KEY,\n output_key=DISASTER_TYPE_KEY,\n class_names=DISASTER_TYPES,\n ignore_index=UNKNOWN_DISASTER_TYPE_CLASS,\n prefix=f\"{DISASTER_TYPE_KEY}/confusion_matrix\",\n ),\n AccuracyCallback(\n input_key=DISASTER_TYPE_KEY,\n output_key=DISASTER_TYPE_KEY,\n prefix=f\"{DISASTER_TYPE_KEY}/accuracy\",\n activation=\"Softmax\",\n ),\n ]\n\n for criterion in disaster_type_loss:\n if isinstance(criterion, (list, tuple)):\n loss_name, loss_weight = criterion\n else:\n loss_name, loss_weight = criterion, 1.0\n\n cd, criterion, criterion_name = get_criterion_callback(\n loss_name,\n 
prefix=DISASTER_TYPE_KEY,\n input_key=DISASTER_TYPE_KEY,\n output_key=DISASTER_TYPE_KEY,\n loss_weight=float(loss_weight),\n ignore_index=UNKNOWN_DISASTER_TYPE_CLASS,\n )\n criterions_dict.update(cd)\n callbacks.append(criterion)\n losses.append(criterion_name)\n print(DISASTER_TYPE_KEY, \"Using loss\", loss_name, loss_weight)\n\n if damage_type_loss is not None:\n callbacks += [\n # MultilabelConfusionMatrixCallback(\n # input_key=DAMAGE_TYPE_KEY,\n # output_key=DAMAGE_TYPE_KEY,\n # class_names=DAMAGE_TYPES,\n # prefix=f\"{DAMAGE_TYPE_KEY}/confusion_matrix\",\n # ),\n AccuracyCallback(\n input_key=DAMAGE_TYPE_KEY,\n output_key=DAMAGE_TYPE_KEY,\n prefix=f\"{DAMAGE_TYPE_KEY}/accuracy\",\n activation=\"Sigmoid\",\n threshold=0.5,\n )\n ]\n\n for criterion in damage_type_loss:\n if isinstance(criterion, (list, tuple)):\n loss_name, loss_weight = criterion\n else:\n loss_name, loss_weight = criterion, 1.0\n\n cd, criterion, criterion_name = get_criterion_callback(\n loss_name,\n prefix=DAMAGE_TYPE_KEY,\n input_key=DAMAGE_TYPE_KEY,\n output_key=DAMAGE_TYPE_KEY,\n loss_weight=float(loss_weight),\n )\n criterions_dict.update(cd)\n callbacks.append(criterion)\n losses.append(criterion_name)\n print(DAMAGE_TYPE_KEY, \"Using loss\", loss_name, loss_weight)\n\n if embedding_criterion is not None:\n cd, criterion, criterion_name = get_criterion_callback(\n embedding_criterion,\n prefix=\"embedding\",\n input_key=INPUT_MASK_KEY,\n output_key=OUTPUT_EMBEDDING_KEY,\n loss_weight=1.0,\n )\n criterions_dict.update(cd)\n callbacks.append(criterion)\n losses.append(criterion_name)\n print(OUTPUT_EMBEDDING_KEY, \"Using loss\", embedding_criterion)\n\n callbacks += [\n CriterionAggregatorCallback(prefix=\"loss\", loss_keys=losses),\n OptimizerCallback(accumulation_steps=accumulation_steps, decouple_weight_decay=False),\n ]\n\n optimizer = get_optimizer(\n optimizer_name, get_optimizable_parameters(model), learning_rate, weight_decay=weight_decay\n )\n scheduler = get_scheduler(\n scheduler_name, optimizer, lr=learning_rate, num_epochs=num_epochs, batches_in_epoch=len(loaders[\"train\"])\n )\n if isinstance(scheduler, CyclicLR):\n callbacks += [SchedulerCallback(mode=\"batch\")]\n\n print(\"Train session :\", checkpoint_prefix)\n print(\" FP16 mode :\", fp16)\n print(\" Fast mode :\", args.fast)\n print(\" Epochs :\", num_epochs)\n print(\" Workers :\", num_workers)\n print(\" Data dir :\", data_dir)\n print(\" Log dir :\", log_dir)\n print(\"Data \")\n print(\" Augmentations :\", augmentations)\n print(\" Train size :\", len(loaders[\"train\"]), len(train_ds))\n print(\" Valid size :\", len(loaders[\"valid\"]), len(valid_ds))\n print(\" Image size :\", image_size)\n print(\" Train on crops :\", train_on_crops)\n print(\" Balance :\", balance)\n print(\" Buildings only :\", only_buildings)\n print(\" Post transform :\", enable_post_image_transform)\n print(\"Model :\", model_name)\n print(\" Parameters :\", count_parameters(model))\n print(\" Dropout :\", dropout)\n print(\"Optimizer :\", optimizer_name)\n print(\" Learning rate :\", learning_rate)\n print(\" Weight decay :\", weight_decay)\n print(\" Scheduler :\", scheduler_name)\n print(\" Batch sizes :\", train_batch_size, valid_batch_size)\n print(\" Criterion :\", segmentation_losses)\n print(\" Damage type :\", damage_type_loss)\n print(\" Disaster type :\", disaster_type_loss)\n print(\" Embedding :\", embedding_criterion)\n\n # model training\n runner.train(\n fp16=fp16,\n model=model,\n criterion=criterions_dict,\n optimizer=optimizer,\n 
scheduler=scheduler,\n callbacks=callbacks,\n loaders=loaders,\n logdir=os.path.join(log_dir, \"main\"),\n num_epochs=num_epochs,\n verbose=verbose,\n main_metric=main_metric,\n minimize_metric=False,\n checkpoint_data={\"cmd_args\": vars(args)},\n )\n\n # Training is finished. Let's run predictions using best checkpoint weights\n best_checkpoint = os.path.join(log_dir, \"main\", \"checkpoints\", \"best.pth\")\n\n model_checkpoint = os.path.join(log_dir, \"main\", \"checkpoints\", f\"{checkpoint_prefix}.pth\")\n clean_checkpoint(best_checkpoint, model_checkpoint)\n\n del optimizer, loaders\n\n\nif __name__ == \"__main__\":\n main()\n"
] | [
[
"torch.nn.ReLU",
"torch.nn.Conv2d",
"torch.nn.Upsample",
"torch.cat"
],
[
"torch.utils.data.DataLoader",
"torch.cuda.empty_cache"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
adelmuursepp/ML-React-App-Template | [
"d0afed66b8dd037464edc39b1be7709b6207e834"
] | [
"example/iris-data-classifier/ML-React-App-Template/service/model_generator.py"
] | [
"# Import libraries\nimport numpy as np\nprint('imported numpy')\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.externals import joblib\nimport pandas as pd\n\n\n\n\n\n#Otsustuspuud\nfrom sklearn.tree import DecisionTreeClassifier\n\nprint('imported all')\n\ndata_table = pd.read_csv('postags_lemmas_levels_data.csv')\ndata_table = data_table.drop(['Unnamed: 0','tekstikood', 'filename'], 1)\n\nprint('read data')\n\n# data_table.groupby(\"keeletase\").A.plot(kind='kde')\n#data_table.groupby(\"keeletase\").A.hist(alpha=0.4)|\n\nfrom sklearn.preprocessing import LabelEncoder\nlabelencoder_0 = LabelEncoder() #independent variable encoder\ndata_table.iloc[:,17] = labelencoder_0.fit_transform(data_table.iloc[:,17])\n\n#Transforming values into percentages of total and splitting into target and features\nfeatures = data_table.loc[:, \"A\":\"Z\"]\ntarget_var = data_table.loc[:, \"keeletase\"]\n\nprint('split to test and train')\n# X_train, X_test, y_train, y_test =\\\n# train_test_split(features.loc[:,'A':\"Z\"], target_var, test_size = 0.5, random_state=1111)\n\n\n\n# Get the dataset\n# dataset = datasets.load_iris()\n\n# Split the dataset into features and labels\nX = features\ny = target_var\n\n# Split the dataset into training (80%) and testing (20%) data\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 0, shuffle = True)\n\n# Build the classifier and make prediction\nclassifier = DecisionTreeClassifier()\nclassifier.fit(X_train, y_train)\nprint('fit trainging data')\nprediction = classifier.predict(X_test)\n\n# Print the confusion matrix\n\n\n# Save the model to disk\njoblib.dump(classifier, 'classifier.joblib')\n\n\n\n\n\n"
] | [
[
"sklearn.externals.joblib.dump",
"pandas.read_csv",
"sklearn.model_selection.train_test_split",
"sklearn.tree.DecisionTreeClassifier",
"sklearn.preprocessing.LabelEncoder"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
Garsiet/MchLE | [
"4afca0328a5710f16fa08f22b38431a6e84e6910"
] | [
"lab-10-2-mnist_nn.py"
] | [
"# Lab 10 MNIST and NN\nimport tensorflow as tf\nimport random\n# import matplotlib.pyplot as plt\n\nfrom tensorflow.examples.tutorials.mnist import input_data\n\ntf.set_random_seed(777) # reproducibility\n\nmnist = input_data.read_data_sets(\"MNIST_data/\", one_hot=True)\n# Check out https://www.tensorflow.org/get_started/mnist/beginners for\n# more information about the mnist dataset\n\n# parameters\nlearning_rate = 0.001\ntraining_epochs = 15\nbatch_size = 100\n\n# input place holders\nX = tf.placeholder(tf.float32, [None, 784])\nY = tf.placeholder(tf.float32, [None, 10])\n\n# weights & bias for nn layers\nW1 = tf.Variable(tf.random_normal([784, 256]))\nb1 = tf.Variable(tf.random_normal([256]))\nL1 = tf.nn.relu(tf.matmul(X, W1) + b1)\n\nW2 = tf.Variable(tf.random_normal([256, 256]))\nb2 = tf.Variable(tf.random_normal([256]))\nL2 = tf.nn.relu(tf.matmul(L1, W2) + b2)\n\nW3 = tf.Variable(tf.random_normal([256, 10]))\nb3 = tf.Variable(tf.random_normal([10]))\nhypothesis = tf.matmul(L2, W3) + b3\n\n# define cost/loss & optimizer\ncost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(\n logits=hypothesis, labels=Y))\noptimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)\n\n# initialize\nsess = tf.Session()\nsess.run(tf.global_variables_initializer())\n\n# train my model\nfor epoch in range(training_epochs):\n avg_cost = 0\n total_batch = int(mnist.train.num_examples / batch_size)\n\n for i in range(total_batch):\n batch_xs, batch_ys = mnist.train.next_batch(batch_size)\n feed_dict = {X: batch_xs, Y: batch_ys}\n c, _ = sess.run([cost, optimizer], feed_dict=feed_dict)\n avg_cost += c / total_batch\n\n print('Epoch:', '%04d' % (epoch + 1), 'cost =', '{:.9f}'.format(avg_cost))\n\nprint('Learning Finished!')\n\n# Test model and check accuracy\ncorrect_prediction = tf.equal(tf.argmax(hypothesis, 1), tf.argmax(Y, 1))\naccuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\nprint('Accuracy:', sess.run(accuracy, feed_dict={\n X: mnist.test.images, Y: mnist.test.labels}))\n\n# Get one and predict\nr = random.randint(0, mnist.test.num_examples - 1)\nprint(\"Label: \", sess.run(tf.argmax(mnist.test.labels[r:r + 1], 1)))\nprint(\"Prediction: \", sess.run(\n tf.argmax(hypothesis, 1), feed_dict={X: mnist.test.images[r:r + 1]}))\n\n# plt.imshow(mnist.test.images[r:r + 1].\n# reshape(28, 28), cmap='Greys', interpolation='nearest')\n# plt.show()\n\n'''\nEpoch: 0001 cost = 141.207671860\nEpoch: 0002 cost = 38.788445864\nEpoch: 0003 cost = 23.977515479\nEpoch: 0004 cost = 16.315132428\nEpoch: 0005 cost = 11.702554882\nEpoch: 0006 cost = 8.573139748\nEpoch: 0007 cost = 6.370995680\nEpoch: 0008 cost = 4.537178684\nEpoch: 0009 cost = 3.216900532\nEpoch: 0010 cost = 2.329708954\nEpoch: 0011 cost = 1.715552875\nEpoch: 0012 cost = 1.189857912\nEpoch: 0013 cost = 0.820965160\nEpoch: 0014 cost = 0.624131458\nEpoch: 0015 cost = 0.454633765\nLearning Finished!\nAccuracy: 0.9455\n'''\n"
] | [
[
"tensorflow.matmul",
"tensorflow.nn.softmax_cross_entropy_with_logits",
"tensorflow.cast",
"tensorflow.placeholder",
"tensorflow.global_variables_initializer",
"tensorflow.Session",
"tensorflow.train.AdamOptimizer",
"tensorflow.set_random_seed",
"tensorflow.argmax",
"tensorflow.examples.tutorials.mnist.input_data.read_data_sets",
"tensorflow.random_normal"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
}
] |
bell-one/pifuhd | [
"3221d266a042ad58de702e65e588ada5426b08f6"
] | [
"apps/recon.py"
] | [
"# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.\n\nimport sys\nimport os\n\nsys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))\nROOT_PATH = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n\nimport time\nimport json \nimport numpy as np\nimport cv2\nimport random\nimport torch\nimport torch.nn as nn\nfrom tqdm import tqdm\nfrom torch.utils.data import DataLoader\nimport matplotlib.pyplot as plt\nimport matplotlib.cm as cm\nimport matplotlib\nfrom numpy.linalg import inv\n\nfrom lib.options import BaseOptions\nfrom lib.mesh_util import save_obj_mesh_with_color, reconstruction\nfrom lib.data import EvalWPoseDataset, EvalDataset\nfrom lib.model import HGPIFuNetwNML, HGPIFuMRNet\nfrom lib.geometry import index\n\nfrom PIL import Image\n\nparser = BaseOptions()\n\ndef gen_mesh(res, net, cuda, data, save_path, thresh=0.5, use_octree=True, components=False):\n image_tensor_global = data['img_512'].to(device=cuda)\n image_tensor = data['img'].to(device=cuda)\n calib_tensor = data['calib'].to(device=cuda)\n\n net.filter_global(image_tensor_global)\n net.filter_local(image_tensor[:,None])\n\n try:\n if net.netG.netF is not None:\n image_tensor_global = torch.cat([image_tensor_global, net.netG.nmlF], 0)\n if net.netG.netB is not None:\n image_tensor_global = torch.cat([image_tensor_global, net.netG.nmlB], 0)\n except:\n pass\n \n b_min = data['b_min']\n b_max = data['b_max']\n try:\n save_img_path = save_path[:-4] + '.png'\n save_img_list = []\n for v in range(image_tensor_global.shape[0]):\n save_img = (np.transpose(image_tensor_global[v].detach().cpu().numpy(), (1, 2, 0)) * 0.5 + 0.5)[:, :, ::-1] * 255.0\n save_img_list.append(save_img)\n save_img = np.concatenate(save_img_list, axis=1)\n cv2.imwrite(save_img_path, save_img)\n\n verts, faces, _, _ = reconstruction(\n net, cuda, calib_tensor, res, b_min, b_max, thresh, use_octree=use_octree, num_samples=50000)\n verts_tensor = torch.from_numpy(verts.T).unsqueeze(0).to(device=cuda).float()\n # if 'calib_world' in data:\n # calib_world = data['calib_world'].numpy()[0]\n # verts = np.matmul(np.concatenate([verts, np.ones_like(verts[:,:1])],1), inv(calib_world).T)[:,:3]\n\n color = np.zeros(verts.shape)\n interval = 50000\n for i in range(len(color) // interval + 1):\n left = i * interval\n if i == len(color) // interval:\n right = -1\n else:\n right = (i + 1) * interval\n net.calc_normal(verts_tensor[:, None, :, left:right], calib_tensor[:,None], calib_tensor)\n nml = net.nmls.detach().cpu().numpy()[0] * 0.5 + 0.5\n color[left:right] = nml.T\n\n save_obj_mesh_with_color(save_path, verts, faces, color)\n except Exception as e:\n print(e)\n\n\ndef gen_mesh_imgColor(res, net, cuda, data, save_path, thresh=0.5, use_octree=True, components=False):\n image_tensor_global = data['img_512'].to(device=cuda)\n image_tensor = data['img'].to(device=cuda)\n calib_tensor = data['calib'].to(device=cuda)\n\n net.filter_global(image_tensor_global)\n net.filter_local(image_tensor[:,None])\n\n try:\n if net.netG.netF is not None:\n image_tensor_global = torch.cat([image_tensor_global, net.netG.nmlF], 0)\n if net.netG.netB is not None:\n image_tensor_global = torch.cat([image_tensor_global, net.netG.nmlB], 0)\n except:\n pass\n\n b_min = data['b_min']\n b_max = data['b_max']\n try:\n save_img_path = save_path[:-4] + '.png'\n save_img_list = []\n for v in range(image_tensor_global.shape[0]):\n save_img = (np.transpose(image_tensor_global[v].detach().cpu().numpy(), (1, 2, 0)) * 0.5 + 0.5)[:, :, ::-1] * 
255.0\n save_img_list.append(save_img)\n save_img = np.concatenate(save_img_list, axis=1)\n cv2.imwrite(save_img_path, save_img)\n\n verts, faces, _, _ = reconstruction(\n net, cuda, calib_tensor, res, b_min, b_max, thresh, use_octree=use_octree, num_samples=100000)\n verts_tensor = torch.from_numpy(verts.T).unsqueeze(0).to(device=cuda).float()\n\n # if this returns error, projection must be defined somewhere else\n xyz_tensor = net.projection(verts_tensor, calib_tensor[:1])\n uv = xyz_tensor[:, :2, :]\n color = index(image_tensor[:1], uv).detach().cpu().numpy()[0].T\n color = color * 0.5 + 0.5\n\n if 'calib_world' in data:\n calib_world = data['calib_world'].numpy()[0]\n verts = np.matmul(np.concatenate([verts, np.ones_like(verts[:,:1])],1), inv(calib_world).T)[:,:3]\n\n save_obj_mesh_with_color(save_path, verts, faces, color)\n\n except Exception as e:\n print(e)\n\n\ndef recon(opt, use_rect=False):\n # load checkpoints\n state_dict_path = None\n if opt.load_netMR_checkpoint_path is not None:\n state_dict_path = opt.load_netMR_checkpoint_path\n elif opt.resume_epoch < 0:\n state_dict_path = '%s/%s_train_latest' % (opt.checkpoints_path, opt.name)\n opt.resume_epoch = 0\n else:\n state_dict_path = '%s/%s_train_epoch_%d' % (opt.checkpoints_path, opt.name, opt.resume_epoch)\n \n start_id = opt.start_id\n end_id = opt.end_id\n\n cuda = torch.device('cuda:%d' % opt.gpu_id if torch.cuda.is_available() else 'cpu')\n\n state_dict = None\n if state_dict_path is not None and os.path.exists(state_dict_path):\n print('Resuming from ', state_dict_path)\n state_dict = torch.load(state_dict_path, map_location=cuda) \n print('Warning: opt is overwritten.')\n dataroot = opt.dataroot\n resolution = opt.resolution\n results_path = opt.results_path\n loadSize = opt.loadSize\n \n opt = state_dict['opt']\n opt.dataroot = dataroot\n opt.resolution = resolution\n opt.results_path = results_path\n opt.loadSize = loadSize\n else:\n raise Exception('failed loading state dict!', state_dict_path)\n \n # parser.print_options(opt)\n\n if use_rect:\n test_dataset = EvalDataset(opt)\n else:\n test_dataset = EvalWPoseDataset(opt)\n\n print('test data size: ', len(test_dataset))\n projection_mode = test_dataset.projection_mode\n\n opt_netG = state_dict['opt_netG']\n netG = HGPIFuNetwNML(opt_netG, projection_mode).to(device=cuda)\n netMR = HGPIFuMRNet(opt, netG, projection_mode).to(device=cuda)\n\n def set_eval():\n netG.eval()\n\n # load checkpoints\n netMR.load_state_dict(state_dict['model_state_dict'])\n\n os.makedirs(opt.checkpoints_path, exist_ok=True)\n os.makedirs(opt.results_path, exist_ok=True)\n os.makedirs('%s/%s/recon' % (opt.results_path, opt.name), exist_ok=True)\n\n if start_id < 0:\n start_id = 0\n if end_id < 0:\n end_id = len(test_dataset)\n\n ## test\n with torch.no_grad():\n set_eval()\n\n print('generate mesh (test) ...')\n for i in tqdm(range(start_id, end_id)):\n if i >= len(test_dataset):\n break\n \n # for multi-person processing, set it to False\n if True:\n test_data = test_dataset[i]\n\n save_path = '%s/%s/recon/result_%s_%d.obj' % (opt.results_path, opt.name, test_data['name'], opt.resolution)\n\n print(save_path)\n gen_mesh_imgColor(opt.resolution, netMR, cuda, test_data, save_path, components=opt.use_compose)\n else:\n for j in range(test_dataset.get_n_person(i)):\n test_dataset.person_id = j\n test_data = test_dataset[i]\n save_path = '%s/%s/recon/result_%s_%d.obj' % (opt.results_path, opt.name, test_data['name'], j)\n gen_mesh_imgColor(opt.resolution, netMR, cuda, test_data, save_path, 
components=opt.use_compose)\n\ndef reconWrapper(args=None, use_rect=False):\n opt = parser.parse(args)\n recon(opt, use_rect)\n\nif __name__ == '__main__':\n reconWrapper()\n \n"
] | [
[
"numpy.ones_like",
"torch.cat",
"torch.load",
"numpy.linalg.inv",
"torch.from_numpy",
"numpy.concatenate",
"torch.no_grad",
"torch.cuda.is_available",
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
rahulgovind/pysph | [
"3d493e6f2c5284ea9c0f0d008e4eb9a0870da0d9",
"3d493e6f2c5284ea9c0f0d008e4eb9a0870da0d9",
"3d493e6f2c5284ea9c0f0d008e4eb9a0870da0d9"
] | [
"pysph/sph/rigid_body.py",
"examples/cpy/axpb_jit.py",
"pysph/examples/surface_tension/square_droplet.py"
] | [
"# -*- coding: utf-8 -*-\n\"\"\"Rigid body related equations.\n\"\"\"\nfrom pysph.base.reduce_array import parallel_reduce_array\nfrom pysph.sph.equation import Equation\nfrom pysph.sph.integrator_step import IntegratorStep\nimport numpy as np\nimport numpy\nfrom math import sqrt\n\n\ndef skew(vec):\n import sympy as S\n x, y, z = vec[0], vec[1], vec[2]\n return S.Matrix([[0, -z, y], [z, 0, -x], [-y, x, 0]])\n\n\ndef get_alpha_dot():\n \"\"\"Use sympy to perform most of the math and use the resulting formulae\n to calculate:\n\n inv(I) (\\tau - w x (I w))\n \"\"\"\n import sympy as S\n ixx, iyy, izz, ixy, ixz, iyz = S.symbols(\"ixx, iyy, izz, ixy, ixz, iyz\")\n tx, ty, tz = S.symbols(\"tx, ty, tz\")\n wx, wy, wz = S.symbols('wx, wy, wz')\n tau = S.Matrix([tx, ty, tz])\n I = S.Matrix([[ixx, ixy, ixz], [ixy, iyy, iyz], [ixz, iyz, izz]])\n w = S.Matrix([wx, wy, wz])\n Iinv = I.inv()\n Iinv.simplify()\n # inv(I) (\\tau - w x (Iw))\n res = Iinv*(tau - w.cross(I*w))\n res.simplify()\n # Now do some awesome sympy magic.\n syms, result = S.cse(res, symbols=S.numbered_symbols('tmp'))\n for lhs, rhs in syms:\n print(\"%s = %s\" % (lhs, rhs))\n for i in range(3):\n print(\"omega_dot[%d] =\" % i, result[0][i])\n\n\ndef get_torque():\n \"\"\"Use sympy to perform some simple math.\n R x F\n C_m x F\n w x r\n \"\"\"\n import sympy as S\n x, y, z, fx, fy, fz = S.symbols(\"x, y, z, fx, fy, fz\")\n R = S.Matrix([x, y, z])\n F = S.Matrix([fx, fy, fz])\n print(\"Torque:\", R.cross(F))\n cx, cy, cz = S.symbols('cx, cy, cz')\n d = S.Matrix([cx, cy, cz])\n print(\"c_m x f = \", d.cross(F))\n wx, wy, wz = S.symbols('wx, wy, wz')\n rx, ry, rz = S.symbols('rx, ry, rz')\n w = S.Matrix([wx, wy, wz])\n r = S.Matrix([rx, ry, rz])\n print(\"w x r = %s\" % w.cross(r))\n\n\n# This is defined to silence editor warnings for the use of declare.\ndef declare(*args): pass\n\n\nclass RigidBodyMoments(Equation):\n def reduce(self, dst, t, dt):\n # FIXME: this will be slow in opencl\n nbody = declare('int')\n i = declare('int')\n base_mi = declare('int')\n base = declare('int')\n nbody = dst.num_body[0]\n if dst.gpu:\n dst.gpu.pull('omega', 'x', 'y', 'z', 'fx', 'fy', 'fz')\n\n d_mi = declare('object')\n m = declare('object')\n x = declare('object')\n y = declare('object')\n z = declare('object')\n fx = declare('object')\n fy = declare('object')\n fz = declare('object')\n d_mi = dst.mi\n cond = declare('object')\n for i in range(nbody):\n cond = dst.body_id == i\n base = i*16\n m = dst.m[cond]\n x = dst.x[cond]\n y = dst.y[cond]\n z = dst.z[cond]\n # Find the total_mass, center of mass and second moments.\n d_mi[base + 0] = numpy.sum(m)\n d_mi[base + 1] = numpy.sum(m*x)\n d_mi[base + 2] = numpy.sum(m*y)\n d_mi[base + 3] = numpy.sum(m*z)\n # Only do the lower triangle of values moments of inertia.\n d_mi[base + 4] = numpy.sum(m*(y*y + z*z))\n d_mi[base + 5] = numpy.sum(m*(x*x + z*z))\n d_mi[base + 6] = numpy.sum(m*(x*x + y*y))\n\n d_mi[base + 7] = -numpy.sum(m*x*y)\n d_mi[base + 8] = -numpy.sum(m*x*z)\n d_mi[base + 9] = -numpy.sum(m*y*z)\n\n # the total force and torque\n fx = dst.fx[cond]\n fy = dst.fy[cond]\n fz = dst.fz[cond]\n d_mi[base + 10] = numpy.sum(fx)\n d_mi[base + 11] = numpy.sum(fy)\n d_mi[base + 12] = numpy.sum(fz)\n\n # Calculate the torque and reduce it.\n d_mi[base + 13] = numpy.sum(y*fz - z*fy)\n d_mi[base + 14] = numpy.sum(z*fx - x*fz)\n d_mi[base + 15] = numpy.sum(x*fy - y*fx)\n\n # Reduce the temporary mi values in parallel across processors.\n d_mi[:] = parallel_reduce_array(dst.mi)\n\n # Set the reduced 
values.\n for i in range(nbody):\n base_mi = i*16\n base = i*3\n m = d_mi[base_mi + 0]\n dst.total_mass[i] = m\n cx = d_mi[base_mi + 1]/m\n cy = d_mi[base_mi + 2]/m\n cz = d_mi[base_mi + 3]/m\n dst.cm[base + 0] = cx\n dst.cm[base + 1] = cy\n dst.cm[base + 2] = cz\n\n # The actual moment of inertia about center of mass from parallel\n # axes theorem.\n ixx = d_mi[base_mi + 4] - (cy*cy + cz*cz)*m\n iyy = d_mi[base_mi + 5] - (cx*cx + cz*cz)*m\n izz = d_mi[base_mi + 6] - (cx*cx + cy*cy)*m\n ixy = d_mi[base_mi + 7] + cx*cy*m\n ixz = d_mi[base_mi + 8] + cx*cz*m\n iyz = d_mi[base_mi + 9] + cy*cz*m\n\n d_mi[base_mi + 0] = ixx\n d_mi[base_mi + 1] = ixy\n d_mi[base_mi + 2] = ixz\n d_mi[base_mi + 3] = ixy\n d_mi[base_mi + 4] = iyy\n d_mi[base_mi + 5] = iyz\n d_mi[base_mi + 6] = ixz\n d_mi[base_mi + 7] = iyz\n d_mi[base_mi + 8] = izz\n\n fx = d_mi[base_mi + 10]\n fy = d_mi[base_mi + 11]\n fz = d_mi[base_mi + 12]\n dst.force[base + 0] = fx\n dst.force[base + 1] = fy\n dst.force[base + 2] = fz\n\n # Acceleration of CM.\n dst.ac[base + 0] = fx/m\n dst.ac[base + 1] = fy/m\n dst.ac[base + 2] = fz/m\n\n # Find torque about the Center of Mass and not origin.\n tx = d_mi[base_mi + 13]\n ty = d_mi[base_mi + 14]\n tz = d_mi[base_mi + 15]\n tx -= cy*fz - cz*fy\n ty -= -cx*fz + cz*fx\n tz -= cx*fy - cy*fx\n dst.torque[base + 0] = tx\n dst.torque[base + 1] = ty\n dst.torque[base + 2] = tz\n\n wx = dst.omega[base + 0]\n wy = dst.omega[base + 1]\n wz = dst.omega[base + 2]\n # Find omega_dot from: omega_dot = inv(I) (\\tau - w x (Iw))\n # This was done using the sympy code above.\n tmp0 = iyz**2\n tmp1 = ixy**2\n tmp2 = ixz**2\n tmp3 = ixx*iyy\n tmp4 = ixy*ixz\n tmp5 = 1./(ixx*tmp0 + iyy*tmp2 - 2*iyz*tmp4 + izz*tmp1 - izz*tmp3)\n tmp6 = ixy*izz - ixz*iyz\n tmp7 = ixz*wx + iyz*wy + izz*wz\n tmp8 = ixx*wx + ixy*wy + ixz*wz\n tmp9 = tmp7*wx - tmp8*wz + ty\n tmp10 = ixy*iyz - ixz*iyy\n tmp11 = ixy*wx + iyy*wy + iyz*wz\n tmp12 = -tmp11*wx + tmp8*wy + tz\n tmp13 = tmp11*wz - tmp7*wy + tx\n tmp14 = ixx*iyz - tmp4\n dst.omega_dot[base + 0] = tmp5*(-tmp10*tmp12 -\n tmp13*(iyy*izz - tmp0) + tmp6*tmp9)\n dst.omega_dot[base + 1] = tmp5*(tmp12*tmp14 +\n tmp13*tmp6 - tmp9*(ixx*izz - tmp2))\n dst.omega_dot[base + 2] = tmp5*(-tmp10*tmp13 -\n tmp12*(-tmp1 + tmp3) + tmp14*tmp9)\n if dst.gpu:\n dst.gpu.push(\n 'total_mass', 'mi', 'cm', 'force', 'ac', 'torque',\n 'omega_dot'\n )\n\n\nclass RigidBodyMotion(Equation):\n def initialize(self, d_idx, d_x, d_y, d_z, d_u, d_v, d_w,\n d_cm, d_vc, d_ac, d_omega, d_body_id):\n base = declare('int')\n base = d_body_id[d_idx]*3\n wx = d_omega[base + 0]\n wy = d_omega[base + 1]\n wz = d_omega[base + 2]\n rx = d_x[d_idx] - d_cm[base + 0]\n ry = d_y[d_idx] - d_cm[base + 1]\n rz = d_z[d_idx] - d_cm[base + 2]\n\n d_u[d_idx] = d_vc[base + 0] + wy*rz - wz*ry\n d_v[d_idx] = d_vc[base + 1] + wz*rx - wx*rz\n d_w[d_idx] = d_vc[base + 2] + wx*ry - wy*rx\n\n\nclass BodyForce(Equation):\n def __init__(self, dest, sources, gx=0.0, gy=0.0, gz=0.0):\n self.gx = gx\n self.gy = gy\n self.gz = gz\n super(BodyForce, self).__init__(dest, sources)\n\n def initialize(self, d_idx, d_m, d_fx, d_fy, d_fz, d_num_body, d_mi):\n d_fx[d_idx] = d_m[d_idx]*self.gx\n d_fy[d_idx] = d_m[d_idx]*self.gy\n d_fz[d_idx] = d_m[d_idx]*self.gz\n\n\nclass SummationDensityBoundary(Equation):\n r\"\"\"Equation to find the density of the\n fluid particle due to any boundary or a rigid body\n\n :math:`\\rho_a = \\sum_b {\\rho}_fluid V_b W_{ab}`\n\n \"\"\"\n def __init__(self, dest, sources, fluid_rho=1000.0):\n self.fluid_rho = fluid_rho\n 
super(SummationDensityBoundary, self).__init__(dest, sources)\n\n def loop(self, d_idx, d_rho, s_idx, s_m, s_V, WIJ):\n d_rho[d_idx] += self.fluid_rho * s_V[s_idx] * WIJ\n\n\nclass NumberDensity(Equation):\n def initialize(self, d_idx, d_V):\n d_V[d_idx] = 0.0\n\n def loop(self, d_idx, d_V, WIJ):\n d_V[d_idx] += WIJ\n\n\nclass SummationDensityRigidBody(Equation):\n def __init__(self, dest, sources, rho0):\n self.rho0 = rho0\n super(SummationDensityRigidBody, self).__init__(dest, sources)\n\n def initialize(self, d_idx, d_rho):\n d_rho[d_idx] = 0.0\n\n def loop(self, d_idx, d_rho, s_idx, s_V, WIJ):\n d_rho[d_idx] += self.rho0/s_V[s_idx]*WIJ\n\n\nclass ViscosityRigidBody(Equation):\n\n \"\"\"The viscous acceleration on the fluid/solid due to a boundary.\n Implemented from Akinci et al. http://dx.doi.org/10.1145/2185520.2185558\n\n Use this with the fluid as a destination and body as source.\n \"\"\"\n\n def __init__(self, dest, sources, rho0, nu):\n self.nu = nu\n self.rho0 = rho0\n super(ViscosityRigidBody, self).__init__(dest, sources)\n\n def loop(self, d_idx, d_m, d_au, d_av, d_aw, d_rho,\n s_idx, s_V, s_fx, s_fy, s_fz,\n EPS, VIJ, XIJ, R2IJ, DWIJ):\n phi_b = self.rho0/(s_V[s_idx]*d_rho[d_idx])\n vijdotxij = min(VIJ[0]*XIJ[0] + VIJ[1]*XIJ[1] + VIJ[2]*XIJ[2], 0.0)\n\n fac = self.nu*phi_b*vijdotxij/(R2IJ + EPS)\n ax = fac*DWIJ[0]\n ay = fac*DWIJ[1]\n az = fac*DWIJ[2]\n d_au[d_idx] += ax\n d_av[d_idx] += ay\n d_aw[d_idx] += az\n s_fx[s_idx] += -d_m[d_idx]*ax\n s_fy[s_idx] += -d_m[d_idx]*ay\n s_fz[s_idx] += -d_m[d_idx]*az\n\n\nclass PressureRigidBody(Equation):\n\n \"\"\"The pressure acceleration on the fluid/solid due to a boundary.\n Implemented from Akinci et al. http://dx.doi.org/10.1145/2185520.2185558\n\n Use this with the fluid as a destination and body as source.\n \"\"\"\n\n def __init__(self, dest, sources, rho0):\n self.rho0 = rho0\n super(PressureRigidBody, self).__init__(dest, sources)\n\n def loop(self, d_idx, d_m, d_rho, d_au, d_av, d_aw, d_p,\n s_idx, s_V, s_fx, s_fy, s_fz, DWIJ):\n rho1 = 1.0/d_rho[d_idx]\n fac = -d_p[d_idx]*rho1*rho1*self.rho0/s_V[s_idx]\n ax = fac*DWIJ[0]\n ay = fac*DWIJ[1]\n az = fac*DWIJ[2]\n d_au[d_idx] += ax\n d_av[d_idx] += ay\n d_aw[d_idx] += az\n s_fx[s_idx] += -d_m[d_idx]*ax\n s_fy[s_idx] += -d_m[d_idx]*ay\n s_fz[s_idx] += -d_m[d_idx]*az\n\n\nclass AkinciRigidFluidCoupling(Equation):\n \"\"\"Force between a solid sphere and a SPH fluid particle. 
This is\n implemented using Akinci's[1] force and additional force from solid\n bodies pressure which is implemented by Liu[2]\n\n [1]'Versatile Rigid-Fluid Coupling for Incompressible SPH'\n\n URL: https://graphics.ethz.ch/~sobarbar/papers/Sol12/Sol12.pdf\n\n [2]A 3D Simulation of a Moving Solid in Viscous Free-Surface Flows by\n Coupling SPH and DEM\n\n https://doi.org/10.1155/2017/3174904\n\n\n Note: Here forces for both the phases are added at once.\n Please make sure that this force is applied only once\n for both the particle properties.\n\n \"\"\"\n def __init__(self, dest, sources, fluid_rho=1000):\n super(AkinciRigidFluidCoupling, self).__init__(dest, sources)\n self.fluid_rho = fluid_rho\n\n def loop(self, d_idx, d_m, d_rho, d_au, d_av, d_aw, d_p,\n s_idx, s_V, s_fx, s_fy, s_fz, DWIJ, s_m, s_p, s_rho):\n\n psi = s_V[s_idx] * self.fluid_rho\n\n _t1 = 2 * d_p[d_idx] / (d_rho[d_idx]**2)\n\n d_au[d_idx] += -psi * _t1 * DWIJ[0]\n d_av[d_idx] += -psi * _t1 * DWIJ[1]\n d_aw[d_idx] += -psi * _t1 * DWIJ[2]\n\n s_fx[s_idx] += d_m[d_idx] * psi * _t1 * DWIJ[0]\n s_fy[s_idx] += d_m[d_idx] * psi * _t1 * DWIJ[1]\n s_fz[s_idx] += d_m[d_idx] * psi * _t1 * DWIJ[2]\n\n\nclass LiuFluidForce(Equation):\n \"\"\"Force between a solid sphere and a SPH fluid particle. This is\n implemented using Akinci's[1] force and additional force from solid\n bodies pressure which is implemented by Liu[2]\n\n [1]'Versatile Rigid-Fluid Coupling for Incompressible SPH'\n\n URL: https://graphics.ethz.ch/~sobarbar/papers/Sol12/Sol12.pdf\n\n [2]A 3D Simulation of a Moving Solid in Viscous Free-Surface Flows by\n Coupling SPH and DEM\n\n https://doi.org/10.1155/2017/3174904\n\n\n Note: Here forces for both the phases are added at once.\n Please make sure that this force is applied only once\n for both the particle properties.\n\n \"\"\"\n def __init__(self, dest, sources):\n super(LiuFluidForce, self).__init__(dest, sources)\n\n def loop(self, d_idx, d_m, d_rho, d_au, d_av, d_aw, d_p,\n s_idx, s_V, s_fx, s_fy, s_fz, DWIJ, s_m, s_p, s_rho):\n _t1 = s_p[s_idx] / (s_rho[s_idx]**2) + d_p[d_idx] / (d_rho[d_idx]**2)\n\n d_au[d_idx] += -s_m[s_idx] * _t1 * DWIJ[0]\n d_av[d_idx] += -s_m[s_idx] * _t1 * DWIJ[1]\n d_aw[d_idx] += -s_m[s_idx] * _t1 * DWIJ[2]\n\n s_fx[s_idx] += d_m[d_idx] * s_m[s_idx] * _t1 * DWIJ[0]\n s_fy[s_idx] += d_m[d_idx] * s_m[s_idx] * _t1 * DWIJ[1]\n s_fz[s_idx] += d_m[d_idx] * s_m[s_idx] * _t1 * DWIJ[2]\n\n\nclass RigidBodyForceGPUGems(Equation):\n \"\"\"This is inspired from\n http://http.developer.nvidia.com/GPUGems3/gpugems3_ch29.html\n and\n BK Mishra's article on DEM\n http://dx.doi.org/10.1016/S0301-7516(03)00032-2\n A review of computer simulation of tumbling mills by the discrete element\n method: Part I - contact mechanics\n \"\"\"\n def __init__(self, dest, sources, k=1.0, d=1.0, eta=1.0, kt=1.0):\n \"\"\"Note that d is a factor multiplied with the \"h\" of the particle.\n \"\"\"\n self.k = k\n self.d = d\n self.eta = eta\n self.kt = kt\n super(RigidBodyForceGPUGems, self).__init__(dest, sources)\n\n def loop(self, d_idx, d_fx, d_fy, d_fz, d_h, d_total_mass, XIJ,\n RIJ, R2IJ, VIJ):\n vijdotrij = VIJ[0]*XIJ[0] + VIJ[1]*XIJ[1] + VIJ[2]*XIJ[2]\n if RIJ > 1e-9:\n vijdotrij_r2ij = vijdotrij/R2IJ\n nij_x = XIJ[0]/RIJ\n nij_y = XIJ[1]/RIJ\n nij_z = XIJ[2]/RIJ\n else:\n vijdotrij_r2ij = 0.0\n nij_x = 0.0\n nij_y = 0.0\n nij_z = 0.0\n vijt_x = VIJ[0] - vijdotrij_r2ij*XIJ[0]\n vijt_y = VIJ[1] - vijdotrij_r2ij*XIJ[1]\n vijt_z = VIJ[2] - vijdotrij_r2ij*XIJ[2]\n\n d = self.d*d_h[d_idx]\n fac = 
self.k*d_total_mass[0]/d*max(d - RIJ, 0.0)\n\n d_fx[d_idx] += fac*nij_x - self.eta*VIJ[0] - self.kt*vijt_x\n d_fy[d_idx] += fac*nij_y - self.eta*VIJ[1] - self.kt*vijt_y\n d_fz[d_idx] += fac*nij_z - self.eta*VIJ[2] - self.kt*vijt_z\n\n\nclass RigidBodyCollision(Equation):\n \"\"\"Force between two spheres is implemented using DEM contact force law.\n\n Refer https://doi.org/10.1016/j.powtec.2011.09.019 for more\n information.\n\n Open-source MFIX-DEM software for gas–solids flows:\n Part I—Verification studies .\n\n \"\"\"\n def __init__(self, dest, sources, kn=1e3, mu=0.5, en=0.8):\n \"\"\"Initialise the required coefficients for force calculation.\n\n\n Keyword arguments:\n kn -- Normal spring stiffness (default 1e3)\n mu -- friction coefficient (default 0.5)\n en -- coefficient of restitution (0.8)\n\n Given these coefficients, tangential spring stiffness, normal and\n tangential damping coefficient are calculated by default.\n\n \"\"\"\n self.kn = kn\n self.kt = 2. / 7. * kn\n m_eff = np.pi * 0.5**2 * 1e-6 * 2120\n self.gamma_n = -(2 * np.sqrt(kn * m_eff) * np.log(en)) / (\n np.sqrt(np.pi**2 + np.log(en)**2))\n self.gamma_t = 0.5 * self.gamma_n\n self.mu = mu\n super(RigidBodyCollision, self).__init__(dest, sources)\n\n def loop(self, d_idx, d_fx, d_fy, d_fz, d_h, d_total_mass, d_rad_s,\n d_tang_disp_x, d_tang_disp_y, d_tang_disp_z, d_tang_velocity_x,\n d_tang_velocity_y, d_tang_velocity_z, s_idx, s_rad_s, XIJ, RIJ,\n R2IJ, VIJ):\n overlap = 0\n if RIJ > 1e-9:\n overlap = d_rad_s[d_idx] + s_rad_s[s_idx] - RIJ\n\n if overlap > 0:\n # normal vector passing from particle i to j\n nij_x = -XIJ[0] / RIJ\n nij_y = -XIJ[1] / RIJ\n nij_z = -XIJ[2] / RIJ\n\n # overlap speed: a scalar\n vijdotnij = VIJ[0] * nij_x + VIJ[1] * nij_y + VIJ[2] * nij_z\n\n # normal velocity\n vijn_x = vijdotnij * nij_x\n vijn_y = vijdotnij * nij_y\n vijn_z = vijdotnij * nij_z\n\n # normal force with conservative and dissipation part\n fn_x = -self.kn * overlap * nij_x - self.gamma_n * vijn_x\n fn_y = -self.kn * overlap * nij_y - self.gamma_n * vijn_y\n fn_z = -self.kn * overlap * nij_z - self.gamma_n * vijn_z\n\n # ----------------------Tangential force---------------------- #\n\n # tangential velocity\n d_tang_velocity_x[d_idx] = VIJ[0] - vijn_x\n d_tang_velocity_y[d_idx] = VIJ[1] - vijn_y\n d_tang_velocity_z[d_idx] = VIJ[2] - vijn_z\n\n dtvx = d_tang_velocity_x[d_idx]\n dtvy = d_tang_velocity_y[d_idx]\n dtvz = d_tang_velocity_z[d_idx]\n _tang = sqrt(dtvx*dtvx + dtvy*dtvy + dtvz*dtvz)\n\n # tangential unit vector\n tij_x = 0\n tij_y = 0\n tij_z = 0\n if _tang > 0:\n tij_x = d_tang_velocity_x[d_idx] / _tang\n tij_y = d_tang_velocity_y[d_idx] / _tang\n tij_z = d_tang_velocity_z[d_idx] / _tang\n\n # damping force or dissipation\n ft_x_d = -self.gamma_t * d_tang_velocity_x[d_idx]\n ft_y_d = -self.gamma_t * d_tang_velocity_y[d_idx]\n ft_z_d = -self.gamma_t * d_tang_velocity_z[d_idx]\n\n # tangential spring force\n ft_x_s = -self.kt * d_tang_disp_x[d_idx]\n ft_y_s = -self.kt * d_tang_disp_y[d_idx]\n ft_z_s = -self.kt * d_tang_disp_z[d_idx]\n\n ft_x = ft_x_d + ft_x_s\n ft_y = ft_y_d + ft_y_s\n ft_z = ft_z_d + ft_z_s\n\n # coulomb law\n ftij = sqrt((ft_x**2) + (ft_y**2) + (ft_z**2))\n fnij = sqrt((fn_x**2) + (fn_y**2) + (fn_z**2))\n\n _fnij = self.mu * fnij\n\n if _fnij < ftij:\n ft_x = -_fnij * tij_x\n ft_y = -_fnij * tij_y\n ft_z = -_fnij * tij_z\n\n d_fx[d_idx] += fn_x + ft_x\n d_fy[d_idx] += fn_y + ft_y\n d_fz[d_idx] += fn_z + ft_z\n else:\n d_tang_velocity_x[d_idx] = 0\n d_tang_velocity_y[d_idx] = 0\n 
d_tang_velocity_z[d_idx] = 0\n\n d_tang_disp_x[d_idx] = 0\n d_tang_disp_y[d_idx] = 0\n d_tang_disp_z[d_idx] = 0\n\n\nclass RigidBodyWallCollision(Equation):\n \"\"\"Force between sphere and a wall is implemented using\n DEM contact force law.\n\n Refer https://doi.org/10.1016/j.powtec.2011.09.019 for more\n information.\n\n Open-source MFIX-DEM software for gas–solids flows:\n Part I—Verification studies .\n\n \"\"\"\n def __init__(self, dest, sources, kn=1e3, mu=0.5, en=0.8):\n \"\"\"Initialise the required coefficients for force calculation.\n\n\n Keyword arguments:\n kn -- Normal spring stiffness (default 1e3)\n mu -- friction coefficient (default 0.5)\n en -- coefficient of restitution (0.8)\n\n Given these coefficients, tangential spring stiffness, normal and\n tangential damping coefficient are calculated by default.\n\n \"\"\"\n self.kn = kn\n self.kt = 2. / 7. * kn\n m_eff = np.pi * 0.5**2 * 1e-6 * 2120\n self.gamma_n = -(2 * np.sqrt(kn * m_eff) * np.log(en)) / (\n np.sqrt(np.pi**2 + np.log(en)**2))\n print(self.gamma_n)\n self.gamma_t = 0.5 * self.gamma_n\n self.mu = mu\n super(RigidBodyWallCollision, self).__init__(dest, sources)\n\n def loop(self, d_idx, d_fx, d_fy, d_fz, d_h, d_total_mass, d_rad_s,\n d_tang_disp_x, d_tang_disp_y, d_tang_disp_z, d_tang_velocity_x,\n d_tang_velocity_y, d_tang_velocity_z, s_idx, XIJ, RIJ,\n R2IJ, VIJ, s_nx, s_ny, s_nz):\n # check overlap amount\n overlap = d_rad_s[d_idx] - (XIJ[0] * s_nx[s_idx] + XIJ[1] *\n s_ny[s_idx] + XIJ[2] * s_nz[s_idx])\n\n if overlap > 0:\n # basic variables: normal vector\n nij_x = -s_nx[s_idx]\n nij_y = -s_ny[s_idx]\n nij_z = -s_nz[s_idx]\n\n # overlap speed: a scalar\n vijdotnij = VIJ[0] * nij_x + VIJ[1] * nij_y + VIJ[2] * nij_z\n\n # normal velocity\n vijn_x = vijdotnij * nij_x\n vijn_y = vijdotnij * nij_y\n vijn_z = vijdotnij * nij_z\n\n # normal force with conservative and dissipation part\n fn_x = -self.kn * overlap * nij_x - self.gamma_n * vijn_x\n fn_y = -self.kn * overlap * nij_y - self.gamma_n * vijn_y\n fn_z = -self.kn * overlap * nij_z - self.gamma_n * vijn_z\n\n # ----------------------Tangential force---------------------- #\n\n # tangential velocity\n d_tang_velocity_x[d_idx] = VIJ[0] - vijn_x\n d_tang_velocity_y[d_idx] = VIJ[1] - vijn_y\n d_tang_velocity_z[d_idx] = VIJ[2] - vijn_z\n\n _tang = (\n (d_tang_velocity_x[d_idx]**2) + (d_tang_velocity_y[d_idx]**2) +\n (d_tang_velocity_z[d_idx]**2))**(1. / 2.)\n\n # tangential unit vector\n tij_x = 0\n tij_y = 0\n tij_z = 0\n if _tang > 0:\n tij_x = d_tang_velocity_x[d_idx] / _tang\n tij_y = d_tang_velocity_y[d_idx] / _tang\n tij_z = d_tang_velocity_z[d_idx] / _tang\n\n # damping force or dissipation\n ft_x_d = -self.gamma_t * d_tang_velocity_x[d_idx]\n ft_y_d = -self.gamma_t * d_tang_velocity_y[d_idx]\n ft_z_d = -self.gamma_t * d_tang_velocity_z[d_idx]\n\n # tangential spring force\n ft_x_s = -self.kt * d_tang_disp_x[d_idx]\n ft_y_s = -self.kt * d_tang_disp_y[d_idx]\n ft_z_s = -self.kt * d_tang_disp_z[d_idx]\n\n ft_x = ft_x_d + ft_x_s\n ft_y = ft_y_d + ft_y_s\n ft_z = ft_z_d + ft_z_s\n\n # coulomb law\n ftij = ((ft_x**2) + (ft_y**2) + (ft_z**2))**(1. / 2.)\n fnij = ((fn_x**2) + (fn_y**2) + (fn_z**2))**(1. 
/ 2.)\n\n _fnij = self.mu * fnij\n\n if _fnij < ftij:\n ft_x = -_fnij * tij_x\n ft_y = -_fnij * tij_y\n ft_z = -_fnij * tij_z\n\n d_fx[d_idx] += fn_x + ft_x\n d_fy[d_idx] += fn_y + ft_y\n d_fz[d_idx] += fn_z + ft_z\n # print(d_fz[d_idx])\n else:\n d_tang_velocity_x[d_idx] = 0\n d_tang_velocity_y[d_idx] = 0\n d_tang_velocity_z[d_idx] = 0\n\n d_tang_disp_x[d_idx] = 0\n d_tang_disp_y[d_idx] = 0\n d_tang_disp_z[d_idx] = 0\n\n\nclass EulerStepRigidBody(IntegratorStep):\n \"\"\"Fast but inaccurate integrator. Use this for testing\"\"\"\n def initialize(self):\n pass\n\n def stage1(self, d_idx, d_u, d_v, d_w, d_x, d_y, d_z,\n d_omega, d_omega_dot, d_vc, d_ac, d_num_body,\n dt=0.0):\n _i = declare('int')\n _j = declare('int')\n base = declare('int')\n if d_idx == 0:\n for _i in range(d_num_body[0]):\n base = 3*_i\n for _j in range(3):\n d_vc[base + _j] += d_ac[base + _j]*dt\n d_omega[base + _j] += d_omega_dot[base + _j]*dt\n\n d_x[d_idx] += dt*d_u[d_idx]\n d_y[d_idx] += dt*d_v[d_idx]\n d_z[d_idx] += dt*d_w[d_idx]\n\n\nclass RK2StepRigidBody(IntegratorStep):\n def initialize(self, d_idx, d_x, d_y, d_z, d_x0, d_y0, d_z0,\n d_omega, d_omega0, d_vc, d_vc0, d_num_body):\n _i = declare('int')\n _j = declare('int')\n base = declare('int')\n if d_idx == 0:\n for _i in range(d_num_body[0]):\n base = 3*_i\n for _j in range(3):\n d_vc0[base + _j] = d_vc[base + _j]\n d_omega0[base + _j] = d_omega[base + _j]\n\n d_x0[d_idx] = d_x[d_idx]\n d_y0[d_idx] = d_y[d_idx]\n d_z0[d_idx] = d_z[d_idx]\n\n def stage1(self, d_idx, d_u, d_v, d_w, d_x, d_y, d_z, d_x0, d_y0, d_z0,\n d_omega, d_omega_dot, d_vc, d_ac, d_omega0, d_vc0, d_num_body,\n dt=0.0):\n dtb2 = 0.5*dt\n _i = declare('int')\n j = declare('int')\n base = declare('int')\n if d_idx == 0:\n for _i in range(d_num_body[0]):\n base = 3*_i\n for j in range(3):\n d_vc[base + j] = d_vc0[base + j] + d_ac[base + j]*dtb2\n d_omega[base + j] = (d_omega0[base + j] +\n d_omega_dot[base + j]*dtb2)\n\n d_x[d_idx] = d_x0[d_idx] + dtb2*d_u[d_idx]\n d_y[d_idx] = d_y0[d_idx] + dtb2*d_v[d_idx]\n d_z[d_idx] = d_z0[d_idx] + dtb2*d_w[d_idx]\n\n def stage2(self, d_idx, d_u, d_v, d_w, d_x, d_y, d_z, d_x0, d_y0, d_z0,\n d_omega, d_omega_dot, d_vc, d_ac, d_omega0, d_vc0, d_num_body,\n dt=0.0):\n _i = declare('int')\n j = declare('int')\n base = declare('int')\n if d_idx == 0:\n for _i in range(d_num_body[0]):\n base = 3*_i\n for j in range(3):\n d_vc[base + j] = d_vc0[base + j] + d_ac[base + j]*dt\n d_omega[base + j] = (d_omega0[base + j] +\n d_omega_dot[base + j]*dt)\n\n d_x[d_idx] = d_x0[d_idx] + dt*d_u[d_idx]\n d_y[d_idx] = d_y0[d_idx] + dt*d_v[d_idx]\n d_z[d_idx] = d_z0[d_idx] + dt*d_w[d_idx]\n",
"\"\"\"Shows the use of annotate without any type information.\nThe type information is extracted from the arguments passed\nand the function is annotated and compiled at runtime.\n\"\"\"\n\nfrom pysph.cpy.api import annotate, Elementwise, wrap, get_config\nimport numpy as np\n\n\n@annotate\ndef axpb(i, x, y, a, b):\n xi = declare('double')\n xi = x[i]\n y[i] = a * sin(xi) + b\n\n\nx = np.linspace(0, 1, 10000)\ny = np.zeros_like(x)\na = 2.0\nb = 3.0\n\nbackend = 'opencl'\nget_config().use_openmp = True\nx, y = wrap(x, y, backend=backend)\ne = Elementwise(axpb, backend=backend)\ne(x, y, a, b)\n",
"\"\"\"Deformation of a square droplet. (15 minutes)\n\n\n_______________________________\n| |\n| |\n| 0 |\n| |\n| ___________ |\n| | | |\n| | 1 | |\n| | | |\n| |_________| |\n| |\n| |\n| |\n| |\n|_____________________________|\n\n\n\nInitially, two two fluids of the same density are distinguished by a\ncolor index assigned to them and allowed to settle under the effects\nof surface tension. It is expected that the surface tension at the\ninterface between the two fluids deforms the initially square droplet\ninto a cirular droplet to minimize the interface area/length.\n\nThe references for this problem are\n\n - J. Morris \"Simulating surface tension with smoothed particle\n hydrodynamics\", 2000, IJNMF, 33, pp 333--353 [JM00]\n\n - S. Adami, X.Y. Hu, N.A. Adams \"A new surface tension formulation\n for multi-phase SPH using a reproducing divergence approximation\",\n 2010, JCP, 229, pp 5011--5021 [AHA10]\n\n - M. S. Shadloo, M. Yildiz \"Numerical modelling of Kelvin-Helmholtz\n instability using smoothed particle hydrodynamics\", IJNME, 2011,\n 87, pp 988--1006 [SY11]\n\nThe surface-tension model used currently is the CSF model based on\ninterface curvature and normals computed from the color function.\n\n\"\"\"\nimport numpy\n\n# Particle generator\nfrom pysph.base.utils import get_particle_array\nfrom pysph.base.kernels import CubicSpline, WendlandQuintic, Gaussian, \\\n QuinticSpline\n\n# SPH Equations and Group\nfrom pysph.sph.equation import Group\n\nfrom pysph.sph.wc.viscosity import ClearyArtificialViscosity\n\nfrom pysph.sph.wc.transport_velocity import SummationDensity, \\\n MomentumEquationPressureGradient,\\\n SolidWallPressureBC, SolidWallNoSlipBC, StateEquation,\\\n MomentumEquationArtificialStress, MomentumEquationViscosity\n\nfrom pysph.sph.surface_tension import InterfaceCurvatureFromNumberDensity, \\\n ShadlooYildizSurfaceTensionForce, CSFSurfaceTensionForce, \\\n SmoothedColor, AdamiColorGradient, MorrisColorGradient, \\\n SY11DiracDelta, AdamiReproducingDivergence, SY11ColorGradient\n\nfrom pysph.sph.gas_dynamics.basic import ScaleSmoothingLength\n\n# PySPH solver and application\nfrom pysph.solver.application import Application\nfrom pysph.solver.solver import Solver\n\n# Integrators and Steppers\nfrom pysph.sph.integrator_step import TransportVelocityStep\nfrom pysph.sph.integrator import PECIntegrator\n\n# Domain manager for periodic domains\nfrom pysph.base.nnps import DomainManager\n\n# problem parameters\ndim = 2\ndomain_width = 1.0\ndomain_height = 1.0\n\n# numerical constants\nsigma = 1.0\n\n# set factor1 to [0.5 ~ 1.0] to simulate a thick or thin\n# interface. Larger values result in a thick interface.\nfactor1 = 0.8\nfactor2 = 1./factor1\n\n# discretization parameters\ndx = dy = 0.0125\ndxb2 = dyb2 = 0.5 * dx\nvolume = dx*dx\nhdx = 1.3\nh0 = hdx * dx\nrho0 = 1000.0\nc0 = 20.0\np0 = c0*c0*rho0\nnu = 1.0/rho0\n\n# correction factor for Morris's Method I. 
Set with_morris_correction\n# to True when using this correction.\nepsilon = 0.01/h0\n\n# time steps\ntf = 1.0\ndt_cfl = 0.25 * h0/(1.1*c0)\ndt_viscous = 0.125 * h0**2/nu\ndt_force = 1.0\n\ndt = 0.9 * min(dt_cfl, dt_viscous, dt_force)\n\n\nclass SquareDroplet(Application):\n def add_user_options(self, group):\n choices = ['adami', 'morris', 'shadloo']\n group.add_argument(\n \"--scheme\", action=\"store\", dest=\"scheme\", default='morris',\n choices=choices,\n help=\"Specify scheme to use among %s\" % choices\n )\n\n def create_particles(self):\n x, y = numpy.mgrid[dxb2:domain_width:dx, dyb2:domain_height:dy]\n x = x.ravel()\n y = y.ravel()\n\n m = numpy.ones_like(x) * volume * rho0\n rho = numpy.ones_like(x) * rho0\n h = numpy.ones_like(x) * h0\n cs = numpy.ones_like(x) * c0\n\n # additional properties required for the fluid.\n additional_props = [\n # volume inverse or number density\n 'V',\n\n # color and gradients\n 'color', 'scolor', 'cx', 'cy', 'cz', 'cx2', 'cy2', 'cz2',\n\n # discretized interface normals and dirac delta\n 'nx', 'ny', 'nz', 'ddelta',\n\n # interface curvature\n 'kappa',\n\n # transport velocities\n 'uhat', 'vhat', 'what', 'auhat', 'avhat', 'awhat',\n\n # imposed accelerations on the solid wall\n 'ax', 'ay', 'az', 'wij',\n\n # velocity of magnitude squared\n 'vmag2',\n\n # variable to indicate reliable normals and normalizing\n # constant\n 'N', 'wij_sum',\n\n ]\n\n # get the fluid particle array\n fluid = get_particle_array(\n name='fluid', x=x, y=y, h=h, m=m, rho=rho, cs=cs,\n additional_props=additional_props)\n\n # set the color of the inner square\n for i in range(x.size):\n if ((fluid.x[i] > 0.35) and (fluid.x[i] < 0.65)):\n if ((fluid.y[i] > 0.35) and (fluid.y[i] < 0.65)):\n fluid.color[i] = 1.0\n\n # particle volume\n fluid.V[:] = 1./volume\n\n # set additional output arrays for the fluid\n fluid.add_output_arrays(['V', 'color', 'cx', 'cy', 'nx', 'ny',\n 'ddelta', 'kappa', 'N', 'scolor', 'p'])\n\n print(\"2D Square droplet deformation with %d fluid particles\" % (\n fluid.get_number_of_particles()))\n\n return [fluid, ]\n\n def create_domain(self):\n return DomainManager(\n xmin=0, xmax=domain_width, ymin=0, ymax=domain_height,\n periodic_in_x=True, periodic_in_y=True)\n\n def create_solver(self):\n kernel = QuinticSpline(dim=2)\n integrator = PECIntegrator(fluid=TransportVelocityStep())\n solver = Solver(\n kernel=kernel, dim=dim, integrator=integrator,\n dt=dt, tf=tf, adaptive_timestep=False)\n return solver\n\n def create_equations(self):\n sy11_equations = [\n # We first compute the mass and number density of the fluid\n # phase. This is used in all force computations henceforth. The\n # number density (1/volume) is explicitly set for the solid phase\n # and this isn't modified for the simulation.\n Group(equations=[\n SummationDensity(dest='fluid', sources=['fluid'])\n ]),\n\n # Given the updated number density for the fluid, we can update\n # the fluid pressure. Additionally, we can compute the Shepard\n # Filtered velocity required for the no-penetration boundary\n # condition. 
Also compute the gradient of the color function to\n # compute the normal at the interface.\n Group(equations=[\n StateEquation(dest='fluid', sources=None, rho0=rho0,\n p0=p0),\n SY11ColorGradient(dest='fluid', sources=['fluid'])\n ]),\n\n #################################################################\n # Begin Surface tension formulation\n #################################################################\n # Scale the smoothing lengths to determine the interface\n # quantities.\n Group(equations=[\n ScaleSmoothingLength(dest='fluid', sources=None,\n factor=factor1)\n ], update_nnps=False),\n\n # Compute the discretized dirac delta with respect to the new\n # smoothing length.\n Group(equations=[\n SY11DiracDelta(dest='fluid', sources=['fluid'])\n ],\n ),\n\n # Compute the interface curvature using the modified smoothing\n # length and interface normals computed in the previous Group.\n Group(equations=[\n InterfaceCurvatureFromNumberDensity(\n dest='fluid', sources=['fluid'],\n with_morris_correction=True),\n ], ),\n\n # Now rescale the smoothing length to the original value for the\n # rest of the computations.\n Group(equations=[\n ScaleSmoothingLength(dest='fluid', sources=None,\n factor=factor2)\n ], update_nnps=False,\n ),\n #################################################################\n # End Surface tension formulation\n #################################################################\n\n # The main acceleration block\n Group(\n equations=[\n\n # Gradient of pressure for the fluid phase using the\n # number density formulation. No penetration boundary\n # condition using Adami et al's generalized wall boundary\n # condition. The extrapolated pressure and density on the\n # wall particles is used in the gradient of pressure to\n # simulate a repulsive force.\n MomentumEquationPressureGradient(\n dest='fluid', sources=['fluid'], pb=p0),\n\n # Artificial viscosity for the fluid phase.\n MomentumEquationViscosity(\n dest='fluid', sources=['fluid'], nu=nu),\n\n # Surface tension force for the SY11 formulation\n ShadlooYildizSurfaceTensionForce(dest='fluid',\n sources=None,\n sigma=sigma),\n\n # Artificial stress for the fluid phase\n MomentumEquationArtificialStress(dest='fluid',\n sources=['fluid']),\n\n ], )\n ]\n\n morris_equations = [\n\n # We first compute the mass and number density of the fluid\n # phase. This is used in all force computations henceforth. The\n # number density (1/volume) is explicitly set for the solid phase\n # and this isn't modified for the simulation.\n Group(equations=[\n SummationDensity(dest='fluid', sources=['fluid'])\n ]),\n\n # Given the updated number density for the fluid, we can update\n # the fluid pressure. Additionally, we can compute the Shepard\n # Filtered velocity required for the no-penetration boundary\n # condition. Also compute the smoothed color based on the color\n # index for a particle.\n Group(equations=[\n StateEquation(dest='fluid', sources=None, rho0=rho0,\n p0=p0),\n SmoothedColor(dest='fluid', sources=['fluid']),\n ]),\n\n #################################################################\n # Begin Surface tension formulation\n #################################################################\n # Compute the gradient of the smoothed color field. 
At the end of\n # this Group, we will have the interface normals and the\n # discretized dirac delta function for the fluid-fluid interface.\n Group(equations=[\n MorrisColorGradient(dest='fluid', sources=['fluid'],\n epsilon=epsilon),\n ],\n ),\n\n # Compute the interface curvature computed in the previous Group.\n Group(equations=[\n InterfaceCurvatureFromNumberDensity(\n dest='fluid', sources=['fluid'],\n with_morris_correction=True),\n ], ),\n #################################################################\n # End Surface tension formulation\n #################################################################\n\n # The main acceleration block\n Group(\n equations=[\n\n # Gradient of pressure for the fluid phase\n MomentumEquationPressureGradient(\n dest='fluid', sources=['fluid'], pb=p0),\n\n # Artificial viscosity for the fluid phase.\n MomentumEquationViscosity(\n dest='fluid', sources=['fluid'], nu=nu),\n\n # Surface tension force for the Morris formulation\n CSFSurfaceTensionForce(dest='fluid', sources=None,\n sigma=sigma),\n\n # Artificial stress for the fluid phase\n MomentumEquationArtificialStress(dest='fluid',\n sources=['fluid']),\n\n ], )\n ]\n\n adami_equations = [\n\n # We first compute the mass and number density of the fluid\n # phase. This is used in all force computations henceforth. The\n # number density (1/volume) is explicitly set for the solid phase\n # and this isn't modified for the simulation.\n Group(equations=[\n SummationDensity(dest='fluid', sources=['fluid'])\n ]),\n\n # Given the updated number density for the fluid, we can update\n # the fluid pressure. Additionally, we can compute the Shepard\n # Filtered velocity required for the no-penetration boundary\n # condition.\n Group(equations=[\n StateEquation(dest='fluid', sources=None, rho0=rho0,\n p0=p0),\n ]),\n\n #################################################################\n # Begin Surface tension formulation\n #################################################################\n # Compute the gradient of the color field.\n Group(equations=[\n AdamiColorGradient(dest='fluid', sources=['fluid']),\n ],\n ),\n\n # Compute the interface curvature using the color gradients\n # computed in the previous Group.\n Group(equations=[\n AdamiReproducingDivergence(dest='fluid', sources=['fluid'],\n dim=2),\n ], ),\n #################################################################\n # End Surface tension formulation\n #################################################################\n\n # The main acceleration block\n Group(\n equations=[\n\n # Gradient of pressure for the fluid phase\n MomentumEquationPressureGradient(\n dest='fluid', sources=['fluid'], pb=p0),\n\n # Artificial viscosity for the fluid phase.\n MomentumEquationViscosity(\n dest='fluid', sources=['fluid'], nu=nu),\n\n # Surface tension force for the CSF formulation\n CSFSurfaceTensionForce(dest='fluid', sources=None,\n sigma=sigma),\n\n # Artificial stress for the fluid phase\n MomentumEquationArtificialStress(dest='fluid',\n sources=['fluid']),\n\n ], )\n ]\n\n if self.options.scheme == 'morris':\n return morris_equations\n elif self.options.scheme == 'adami':\n return adami_equations\n else:\n return sy11_equations\n\n\nif __name__ == '__main__':\n app = SquareDroplet()\n app.run()\n"
] | [
[
"numpy.log",
"numpy.sum",
"numpy.sqrt"
],
[
"numpy.zeros_like",
"numpy.linspace"
],
[
"numpy.ones_like"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
yoxu515/aot-benchmark | [
"99f74f051c91ac221e44f3edab3534ae4dd233f7",
"99f74f051c91ac221e44f3edab3534ae4dd233f7"
] | [
"tools/train.py",
"networks/layers/transformer.py"
] | [
"import importlib\nimport random\nimport sys\n\nsys.setrecursionlimit(10000)\nsys.path.append('.')\nsys.path.append('..')\n\nimport torch.multiprocessing as mp\n\nfrom networks.managers.trainer import Trainer\n\n\ndef main_worker(gpu, cfg, enable_amp=True):\n # Initiate a training manager\n trainer = Trainer(rank=gpu, cfg=cfg, enable_amp=enable_amp)\n # Start Training\n trainer.sequential_training()\n\n\ndef main():\n import argparse\n parser = argparse.ArgumentParser(description=\"Train VOS\")\n parser.add_argument('--exp_name', type=str, default='')\n parser.add_argument('--stage', type=str, default='pre')\n parser.add_argument('--model', type=str, default='aott')\n\n parser.add_argument('--start_gpu', type=int, default=0)\n parser.add_argument('--gpu_num', type=int, default=-1)\n parser.add_argument('--batch_size', type=int, default=-1)\n parser.add_argument('--dist_url', type=str, default='')\n parser.add_argument('--amp', action='store_true')\n parser.set_defaults(amp=False)\n\n parser.add_argument('--pretrained_path', type=str, default='')\n\n parser.add_argument('--datasets', nargs='+', type=str, default=[])\n parser.add_argument('--lr', type=float, default=-1.)\n parser.add_argument('--total_step', type=int, default=-1.)\n parser.add_argument('--start_step', type=int, default=-1.)\n\n args = parser.parse_args()\n\n engine_config = importlib.import_module('configs.' + args.stage)\n\n cfg = engine_config.EngineConfig(args.exp_name, args.model)\n\n if len(args.datasets) > 0:\n cfg.DATASETS = args.datasets\n\n cfg.DIST_START_GPU = args.start_gpu\n if args.gpu_num > 0:\n cfg.TRAIN_GPUS = args.gpu_num\n if args.batch_size > 0:\n cfg.TRAIN_BATCH_SIZE = args.batch_size\n\n if args.pretrained_path != '':\n cfg.PRETRAIN_MODEL = args.pretrained_path\n\n if args.lr > 0:\n cfg.TRAIN_LR = args.lr\n\n if args.total_step > 0:\n cfg.TRAIN_TOTAL_STEPS = args.total_step\n\n if args.start_step > 0:\n cfg.TRAIN_START_STEP = args.start_step\n\n if args.dist_url == '':\n cfg.DIST_URL = 'tcp://127.0.0.1:123' + str(random.randint(0, 9)) + str(\n random.randint(0, 9))\n else:\n cfg.DIST_URL = args.dist_url\n # Use torch.multiprocessing.spawn to launch distributed processes\n mp.spawn(main_worker, nprocs=cfg.TRAIN_GPUS, args=(cfg, args.amp))\n\n\nif __name__ == '__main__':\n main()\n",
"import torch.nn.functional as F\nfrom torch import nn\n\nfrom networks.layers.basic import DropPath, GroupNorm1D, GNActDWConv2d, seq_to_2d\nfrom networks.layers.attention import MultiheadAttention, MultiheadLocalAttentionV2, MultiheadLocalAttentionV3\n\n\ndef _get_norm(indim, type='ln', groups=8):\n if type == 'gn':\n return GroupNorm1D(indim, groups)\n else:\n return nn.LayerNorm(indim)\n\n\ndef _get_activation_fn(activation):\n \"\"\"Return an activation function given a string\"\"\"\n if activation == \"relu\":\n return F.relu\n if activation == \"gelu\":\n return F.gelu\n if activation == \"glu\":\n return F.glu\n raise RuntimeError(\n F\"activation should be relu/gele/glu, not {activation}.\")\n\n\nclass LongShortTermTransformer(nn.Module):\n def __init__(self,\n num_layers=2,\n d_model=256,\n self_nhead=8,\n att_nhead=8,\n dim_feedforward=1024,\n emb_dropout=0.,\n droppath=0.1,\n lt_dropout=0.,\n st_dropout=0.,\n droppath_lst=False,\n droppath_scaling=False,\n activation=\"gelu\",\n return_intermediate=False,\n intermediate_norm=True,\n final_norm=True):\n\n super().__init__()\n self.intermediate_norm = intermediate_norm\n self.final_norm = final_norm\n self.num_layers = num_layers\n self.return_intermediate = return_intermediate\n\n self.emb_dropout = nn.Dropout(emb_dropout, True)\n\n layers = []\n for idx in range(num_layers):\n if droppath_scaling:\n if num_layers == 1:\n droppath_rate = 0\n else:\n droppath_rate = droppath * idx / (num_layers - 1)\n else:\n droppath_rate = droppath\n layers.append(\n LongShortTermTransformerBlock(d_model, self_nhead, att_nhead,\n dim_feedforward, droppath_rate,\n lt_dropout, st_dropout,\n droppath_lst, activation))\n self.layers = nn.ModuleList(layers)\n\n num_norms = num_layers - 1 if intermediate_norm else 0\n if final_norm:\n num_norms += 1\n self.decoder_norms = [\n _get_norm(d_model, type='ln') for _ in range(num_norms)\n ] if num_norms > 0 else None\n\n if self.decoder_norms is not None:\n self.decoder_norms = nn.ModuleList(self.decoder_norms)\n\n def forward(self,\n tgt,\n long_term_memories,\n short_term_memories,\n curr_id_emb=None,\n self_pos=None,\n size_2d=None):\n\n output = self.emb_dropout(tgt)\n\n intermediate = []\n intermediate_memories = []\n\n for idx, layer in enumerate(self.layers):\n output, memories = layer(output,\n long_term_memories[idx] if\n long_term_memories is not None else None,\n short_term_memories[idx] if\n short_term_memories is not None else None,\n curr_id_emb=curr_id_emb,\n self_pos=self_pos,\n size_2d=size_2d)\n\n if self.return_intermediate:\n intermediate.append(output)\n intermediate_memories.append(memories)\n\n if self.decoder_norms is not None:\n if self.final_norm:\n output = self.decoder_norms[-1](output)\n\n if self.return_intermediate:\n intermediate.pop()\n intermediate.append(output)\n\n if self.intermediate_norm:\n for idx in range(len(intermediate) - 1):\n intermediate[idx] = self.decoder_norms[idx](\n intermediate[idx])\n\n if self.return_intermediate:\n return intermediate, intermediate_memories\n\n return output, memories\n\n\nclass LongShortTermTransformerBlock(nn.Module):\n def __init__(self,\n d_model,\n self_nhead,\n att_nhead,\n dim_feedforward=1024,\n droppath=0.1,\n lt_dropout=0.,\n st_dropout=0.,\n droppath_lst=False,\n activation=\"gelu\",\n local_dilation=1,\n enable_corr=True):\n super().__init__()\n\n # Self-attention\n self.norm1 = _get_norm(d_model)\n self.self_attn = MultiheadAttention(d_model, self_nhead)\n\n # Long Short-Term Attention\n self.norm2 = _get_norm(d_model)\n 
self.linear_Q = nn.Linear(d_model, d_model)\n self.linear_V = nn.Linear(d_model, d_model)\n\n self.long_term_attn = MultiheadAttention(d_model,\n att_nhead,\n use_linear=False,\n dropout=lt_dropout)\n if enable_corr:\n try:\n import spatial_correlation_sampler\n MultiheadLocalAttention = MultiheadLocalAttentionV2\n except Exception as inst:\n print(inst)\n print(\n \"Failed to import PyTorch Correlation. For better efficiency, please install it.\"\n )\n MultiheadLocalAttention = MultiheadLocalAttentionV3\n else:\n MultiheadLocalAttention = MultiheadLocalAttentionV3\n self.short_term_attn = MultiheadLocalAttention(d_model,\n att_nhead,\n dilation=local_dilation,\n use_linear=False,\n dropout=st_dropout)\n\n self.droppath_lst = droppath_lst\n\n # Feed-forward\n self.norm3 = _get_norm(d_model)\n self.linear1 = nn.Linear(d_model, dim_feedforward)\n self.activation = GNActDWConv2d(dim_feedforward)\n self.linear2 = nn.Linear(dim_feedforward, d_model)\n\n self.droppath = DropPath(droppath, batch_dim=1)\n self._init_weight()\n\n def with_pos_embed(self, tensor, pos=None):\n size = tensor.size()\n if len(size) == 4 and pos is not None:\n n, c, h, w = size\n pos = pos.view(h, w, n, c).permute(2, 3, 0, 1)\n return tensor if pos is None else tensor + pos\n\n def forward(self,\n tgt,\n long_term_memory=None,\n short_term_memory=None,\n curr_id_emb=None,\n self_pos=None,\n size_2d=(30, 30)):\n\n # Self-attention\n _tgt = self.norm1(tgt)\n q = k = self.with_pos_embed(_tgt, self_pos)\n v = _tgt\n tgt2 = self.self_attn(q, k, v)[0]\n\n tgt = tgt + self.droppath(tgt2)\n\n # Long Short-Term Attention\n _tgt = self.norm2(tgt)\n\n curr_Q = self.linear_Q(_tgt)\n curr_K = curr_Q\n curr_V = _tgt\n\n local_Q = seq_to_2d(curr_Q, size_2d)\n\n if curr_id_emb is not None:\n global_K = curr_K\n global_V = self.linear_V(curr_V + curr_id_emb)\n local_K = seq_to_2d(global_K, size_2d)\n local_V = seq_to_2d(global_V, size_2d)\n else:\n global_K, global_V = long_term_memory\n local_K, local_V = short_term_memory\n\n tgt2 = self.long_term_attn(curr_Q, global_K, global_V)[0]\n tgt3 = self.short_term_attn(local_Q, local_K, local_V)[0]\n\n if self.droppath_lst:\n tgt = tgt + self.droppath(tgt2 + tgt3)\n else:\n tgt = tgt + tgt2 + tgt3\n\n # Feed-forward\n _tgt = self.norm3(tgt)\n\n tgt2 = self.linear2(self.activation(self.linear1(_tgt), size_2d))\n\n tgt = tgt + self.droppath(tgt2)\n\n return tgt, [[curr_K, curr_V], [global_K, global_V],\n [local_K, local_V]]\n\n def _init_weight(self):\n for p in self.parameters():\n if p.dim() > 1:\n nn.init.xavier_uniform_(p)\n"
] | [
[
"torch.multiprocessing.spawn"
],
[
"torch.nn.Dropout",
"torch.nn.ModuleList",
"torch.nn.LayerNorm",
"torch.nn.Linear",
"torch.nn.init.xavier_uniform_"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
ghislainp/mishchenko_brf | [
"de7fe70730b53f17fb7e7aa9a45f08bf7d97abd1"
] | [
"tests/test_mishchenko_refllib.py"
] | [
"#!/usr/bin/env python\n\n\"\"\"Tests for `mishchenko_brf` package.\"\"\"\n\nimport numpy as np\n\nfrom mishchenko_brf.lib.refl import brf\n\n\ndef test_brf():\n \"\"\"Sample pytest test function with the pytest fixture as an argument.\"\"\"\n # from bs4 import BeautifulSoup\n # assert 'GitHub' in BeautifulSoup(response.content).title.string\n\n ssalb, _, legendre = setup()\n\n _, spherical_albedo, albedo, _, r = brf(ssalb, len(legendre), legendre)\n\n exptected_spherical_albedo, expected_albedo, expected_r1 = results()\n\n np.testing.assert_allclose(albedo, expected_albedo, atol=1e-6, rtol=0)\n\n r1 = np.concatenate([r[1, i, : i + 1] for i in range(r.shape[1])])\n np.testing.assert_allclose(r1, expected_r1, atol=1e-5, rtol=0)\n\n\ndef setup():\n\n ssalb = 0.85404045e00\n # 642\n Legendre_coef = [\n 0.1000000e01,\n 0.2512562e01,\n 0.3759305e01,\n 0.4408389e01,\n 0.5536463e01,\n 0.6260982e01,\n 0.7525636e01,\n 0.8312788e01,\n 0.9542491e01,\n 0.1040885e02,\n 0.1151645e02,\n 0.1244280e02,\n 0.1343854e02,\n 0.1442002e02,\n 0.1533074e02,\n 0.1628946e02,\n 0.1717182e02,\n 0.1807816e02,\n 0.1898665e02,\n 0.1978204e02,\n 0.2073036e02,\n 0.2142049e02,\n 0.2241713e02,\n 0.2301598e02,\n 0.2401247e02,\n 0.2456069e02,\n 0.2552589e02,\n 0.2607232e02,\n 0.2695832e02,\n 0.2752722e02,\n 0.2831653e02,\n 0.2892665e02,\n 0.2962000e02,\n 0.3025463e02,\n 0.3086891e02,\n 0.3150598e02,\n 0.3207453e02,\n 0.3268248e02,\n 0.3323146e02,\n 0.3378450e02,\n 0.3433640e02,\n 0.3482245e02,\n 0.3538333e02,\n 0.3580127e02,\n 0.3636525e02,\n 0.3672771e02,\n 0.3728034e02,\n 0.3760455e02,\n 0.3812729e02,\n 0.3843143e02,\n 0.3890899e02,\n 0.3920687e02,\n 0.3962926e02,\n 0.3992750e02,\n 0.4029233e02,\n 0.4059085e02,\n 0.4090206e02,\n 0.4119521e02,\n 0.4146075e02,\n 0.4174035e02,\n 0.4196960e02,\n 0.4222747e02,\n 0.4242858e02,\n 0.4265858e02,\n 0.4283710e02,\n 0.4303618e02,\n 0.4319451e02,\n 0.4336267e02,\n 0.4350045e02,\n 0.4364008e02,\n 0.4375514e02,\n 0.4386992e02,\n 0.4395939e02,\n 0.4405318e02,\n 0.4411450e02,\n 0.4419058e02,\n 0.4422209e02,\n 0.4428264e02,\n 0.4428387e02,\n 0.4432995e02,\n 0.4430155e02,\n 0.4433324e02,\n 0.4427669e02,\n 0.4429349e02,\n 0.4421068e02,\n 0.4421187e02,\n 0.4410481e02,\n 0.4408970e02,\n 0.4396023e02,\n 0.4392847e02,\n 0.4377812e02,\n 0.4372965e02,\n 0.4355963e02,\n 0.4349478e02,\n 0.4330600e02,\n 0.4322534e02,\n 0.4301853e02,\n 0.4292279e02,\n 0.4269857e02,\n 0.4258853e02,\n 0.4234756e02,\n 0.4222396e02,\n 0.4196694e02,\n 0.4183048e02,\n 0.4155822e02,\n 0.4140946e02,\n 0.4112286e02,\n 0.4096230e02,\n 0.4066235e02,\n 0.4049042e02,\n 0.4017813e02,\n 0.3999523e02,\n 0.3967166e02,\n 0.3947818e02,\n 0.3914435e02,\n 0.3894071e02,\n 0.3859761e02,\n 0.3838426e02,\n 0.3803282e02,\n 0.3781024e02,\n 0.3745135e02,\n 0.3722008e02,\n 0.3685457e02,\n 0.3661513e02,\n 0.3624382e02,\n 0.3599677e02,\n 0.3562045e02,\n 0.3536629e02,\n 0.3498576e02,\n 0.3472502e02,\n 0.3434105e02,\n 0.3407421e02,\n 0.3368756e02,\n 0.3341511e02,\n 0.3302651e02,\n 0.3274894e02,\n 0.3235911e02,\n 0.3207689e02,\n 0.3168649e02,\n 0.3140011e02,\n 0.3100977e02,\n 0.3071973e02,\n 0.3033004e02,\n 0.3003681e02,\n 0.2964833e02,\n 0.2935240e02,\n 0.2896567e02,\n 0.2866749e02,\n 0.2828303e02,\n 0.2798304e02,\n 0.2760134e02,\n 0.2729996e02,\n 0.2692148e02,\n 0.2661913e02,\n 0.2624432e02,\n 0.2594138e02,\n 0.2557065e02,\n 0.2526752e02,\n 0.2490123e02,\n 0.2459831e02,\n 0.2423680e02,\n 0.2393445e02,\n 0.2357803e02,\n 0.2327663e02,\n 0.2292556e02,\n 0.2262546e02,\n 0.2228000e02,\n 0.2198153e02,\n 0.2164193e02,\n 0.2134540e02,\n 0.2101185e02,\n 
0.2071756e02,\n 0.2039027e02,\n 0.2009849e02,\n 0.1977763e02,\n 0.1948862e02,\n 0.1917433e02,\n 0.1888834e02,\n 0.1858075e02,\n 0.1829802e02,\n 0.1799722e02,\n 0.1771798e02,\n 0.1742405e02,\n 0.1714851e02,\n 0.1686151e02,\n 0.1658986e02,\n 0.1630983e02,\n 0.1604226e02,\n 0.1576923e02,\n 0.1550590e02,\n 0.1523988e02,\n 0.1498093e02,\n 0.1472193e02,\n 0.1446749e02,\n 0.1421550e02,\n 0.1396569e02,\n 0.1372068e02,\n 0.1347561e02,\n 0.1323754e02,\n 0.1299730e02,\n 0.1276613e02,\n 0.1253080e02,\n 0.1230645e02,\n 0.1207611e02,\n 0.1185852e02,\n 0.1163322e02,\n 0.1142231e02,\n 0.1120210e02,\n 0.1099778e02,\n 0.1078270e02,\n 0.1058487e02,\n 0.1037494e02,\n 0.1018351e02,\n 0.9978738e01,\n 0.9793600e01,\n 0.9593997e01,\n 0.9415044e01,\n 0.9220600e01,\n 0.9047715e01,\n 0.8858418e01,\n 0.8691482e01,\n 0.8507312e01,\n 0.8346198e01,\n 0.8167129e01,\n 0.8011710e01,\n 0.7837708e01,\n 0.7687854e01,\n 0.7518876e01,\n 0.7374456e01,\n 0.7210453e01,\n 0.7071336e01,\n 0.6912254e01,\n 0.6778307e01,\n 0.6624084e01,\n 0.6495174e01,\n 0.6345745e01,\n 0.6221738e01,\n 0.6077033e01,\n 0.5957794e01,\n 0.5817740e01,\n 0.5703134e01,\n 0.5567654e01,\n 0.5457548e01,\n 0.5326563e01,\n 0.5220821e01,\n 0.5094248e01,\n 0.4992739e01,\n 0.4870492e01,\n 0.4773085e01,\n 0.4655075e01,\n 0.4561642e01,\n 0.4447778e01,\n 0.4358190e01,\n 0.4248381e01,\n 0.4162514e01,\n 0.4056666e01,\n 0.3974395e01,\n 0.3872413e01,\n 0.3793618e01,\n 0.3695406e01,\n 0.3619966e01,\n 0.3525429e01,\n 0.3453228e01,\n 0.3362271e01,\n 0.3293193e01,\n 0.3205718e01,\n 0.3139651e01,\n 0.3055564e01,\n 0.2992397e01,\n 0.2911601e01,\n 0.2851228e01,\n 0.2773628e01,\n 0.2715944e01,\n 0.2641446e01,\n 0.2586348e01,\n 0.2514857e01,\n 0.2462248e01,\n 0.2393671e01,\n 0.2343453e01,\n 0.2277698e01,\n 0.2229778e01,\n 0.2166754e01,\n 0.2121041e01,\n 0.2060659e01,\n 0.2017065e01,\n 0.1959237e01,\n 0.1917674e01,\n 0.1862314e01,\n 0.1822700e01,\n 0.1769722e01,\n 0.1731977e01,\n 0.1681298e01,\n 0.1645344e01,\n 0.1596882e01,\n 0.1562643e01,\n 0.1516319e01,\n 0.1483723e01,\n 0.1439458e01,\n 0.1408435e01,\n 0.1366152e01,\n 0.1336633e01,\n 0.1296260e01,\n 0.1268180e01,\n 0.1229642e01,\n 0.1202937e01,\n 0.1166165e01,\n 0.1140775e01,\n 0.1105699e01,\n 0.1081566e01,\n 0.1048119e01,\n 0.1025186e01,\n 0.9933033e00,\n 0.9715168e00,\n 0.9411347e00,\n 0.9204422e00,\n 0.8914999e00,\n 0.8718511e00,\n 0.8442892e00,\n 0.8256361e00,\n 0.7993970e00,\n 0.7816934e00,\n 0.7567216e00,\n 0.7399231e00,\n 0.7161648e00,\n 0.7002287e00,\n 0.6776319e00,\n 0.6625175e00,\n 0.6410319e00,\n 0.6267001e00,\n 0.6062772e00,\n 0.5926905e00,\n 0.5732835e00,\n 0.5604061e00,\n 0.5419698e00,\n 0.5297674e00,\n 0.5122584e00,\n 0.5006981e00,\n 0.4840745e00,\n 0.4731249e00,\n 0.4573463e00,\n 0.4469774e00,\n 0.4320051e00,\n 0.4221882e00,\n 0.4079849e00,\n 0.3986924e00,\n 0.3852225e00,\n 0.3764283e00,\n 0.3636572e00,\n 0.3553362e00,\n 0.3432310e00,\n 0.3353594e00,\n 0.3238883e00,\n 0.3164434e00,\n 0.3055761e00,\n 0.2985361e00,\n 0.2882435e00,\n 0.2815877e00,\n 0.2718419e00,\n 0.2655505e00,\n 0.2563248e00,\n 0.2503791e00,\n 0.2416479e00,\n 0.2360299e00,\n 0.2277687e00,\n 0.2224615e00,\n 0.2146470e00,\n 0.2096341e00,\n 0.2022440e00,\n 0.1975101e00,\n 0.1905229e00,\n 0.1860533e00,\n 0.1794487e00,\n 0.1752294e00,\n 0.1689879e00,\n 0.1650056e00,\n 0.1591086e00,\n 0.1553506e00,\n 0.1497804e00,\n 0.1462348e00,\n 0.1409744e00,\n 0.1376297e00,\n 0.1326632e00,\n 0.1295086e00,\n 0.1248204e00,\n 0.1218456e00,\n 0.1174212e00,\n 0.1146165e00,\n 0.1104419e00,\n 0.1077980e00,\n 0.1038600e00,\n 0.1013680e00,\n 0.9765404e-01,\n 0.9530568e-01,\n 
0.9180371e-01,\n 0.8959107e-01,\n 0.8628968e-01,\n 0.8420526e-01,\n 0.8109362e-01,\n 0.7913032e-01,\n 0.7619812e-01,\n 0.7434919e-01,\n 0.7158666e-01,\n 0.6984571e-01,\n 0.6724356e-01,\n 0.6560456e-01,\n 0.6315396e-01,\n 0.6161117e-01,\n 0.5930377e-01,\n 0.5785177e-01,\n 0.5567961e-01,\n 0.5431328e-01,\n 0.5226885e-01,\n 0.5098332e-01,\n 0.4905947e-01,\n 0.4785016e-01,\n 0.4604013e-01,\n 0.4490269e-01,\n 0.4320006e-01,\n 0.4213037e-01,\n 0.4052908e-01,\n 0.3952327e-01,\n 0.3801755e-01,\n 0.3707194e-01,\n 0.3565637e-01,\n 0.3476748e-01,\n 0.3343689e-01,\n 0.3260145e-01,\n 0.3135097e-01,\n 0.3056588e-01,\n 0.2939089e-01,\n 0.2865322e-01,\n 0.2754936e-01,\n 0.2685635e-01,\n 0.2581950e-01,\n 0.2516853e-01,\n 0.2419479e-01,\n 0.2358340e-01,\n 0.2266910e-01,\n 0.2209496e-01,\n 0.2123660e-01,\n 0.2069752e-01,\n 0.1989183e-01,\n 0.1938574e-01,\n 0.1862960e-01,\n 0.1815454e-01,\n 0.1744504e-01,\n 0.1699918e-01,\n 0.1633353e-01,\n 0.1591513e-01,\n 0.1529073e-01,\n 0.1489815e-01,\n 0.1431255e-01,\n 0.1394424e-01,\n 0.1339511e-01,\n 0.1304962e-01,\n 0.1253477e-01,\n 0.1221073e-01,\n 0.1172810e-01,\n 0.1142421e-01,\n 0.1097185e-01,\n 0.1068691e-01,\n 0.1026299e-01,\n 0.9995839e-02,\n 0.9598632e-02,\n 0.9348197e-02,\n 0.8976075e-02,\n 0.8741341e-02,\n 0.8392774e-02,\n 0.8172784e-02,\n 0.7846332e-02,\n 0.7640186e-02,\n 0.7334491e-02,\n 0.7141344e-02,\n 0.6855129e-02,\n 0.6674184e-02,\n 0.6406246e-02,\n 0.6236754e-02,\n 0.5985964e-02,\n 0.5827218e-02,\n 0.5592512e-02,\n 0.5443850e-02,\n 0.5224227e-02,\n 0.5085025e-02,\n 0.4879544e-02,\n 0.4749216e-02,\n 0.4556995e-02,\n 0.4434989e-02,\n 0.4255197e-02,\n 0.4140997e-02,\n 0.3972854e-02,\n 0.3865974e-02,\n 0.3708747e-02,\n 0.3608730e-02,\n 0.3461733e-02,\n 0.3368151e-02,\n 0.3230736e-02,\n 0.3143185e-02,\n 0.3014746e-02,\n 0.2932847e-02,\n 0.2812813e-02,\n 0.2736210e-02,\n 0.2624045e-02,\n 0.2552404e-02,\n 0.2447605e-02,\n 0.2380611e-02,\n 0.2282708e-02,\n 0.2220067e-02,\n 0.2128618e-02,\n 0.2070056e-02,\n 0.1984648e-02,\n 0.1929906e-02,\n 0.1850152e-02,\n 0.1798987e-02,\n 0.1724522e-02,\n 0.1676708e-02,\n 0.1607190e-02,\n 0.1562511e-02,\n 0.1497620e-02,\n 0.1455875e-02,\n 0.1395308e-02,\n 0.1356308e-02,\n 0.1299785e-02,\n 0.1263353e-02,\n 0.1210611e-02,\n 0.1176583e-02,\n 0.1127375e-02,\n 0.1095598e-02,\n 0.1049696e-02,\n 0.1020025e-02,\n 0.9772121e-03,\n 0.9495118e-03,\n 0.9095846e-03,\n 0.8837259e-03,\n 0.8464937e-03,\n 0.8223557e-03,\n 0.7876397e-03,\n 0.7651098e-03,\n 0.7327438e-03,\n 0.7117181e-03,\n 0.6815480e-03,\n 0.6619300e-03,\n 0.6338120e-03,\n 0.6155113e-03,\n 0.5893102e-03,\n 0.5722408e-03,\n 0.5478283e-03,\n 0.5319081e-03,\n 0.5091634e-03,\n 0.4943152e-03,\n 0.4731258e-03,\n 0.4592787e-03,\n 0.4395410e-03,\n 0.4266303e-03,\n 0.4082493e-03,\n 0.3962155e-03,\n 0.3791019e-03,\n 0.3678880e-03,\n 0.3519565e-03,\n 0.3415071e-03,\n 0.3266762e-03,\n 0.3169383e-03,\n 0.3031319e-03,\n 0.2940574e-03,\n 0.2812067e-03,\n 0.2727531e-03,\n 0.2607960e-03,\n 0.2529247e-03,\n 0.2418025e-03,\n 0.2344753e-03,\n 0.2241307e-03,\n 0.2173091e-03,\n 0.2076865e-03,\n 0.2013343e-03,\n 0.1923831e-03,\n 0.1864691e-03,\n 0.1781456e-03,\n 0.1726433e-03,\n 0.1649075e-03,\n 0.1597912e-03,\n 0.1526032e-03,\n 0.1478454e-03,\n 0.1411648e-03,\n 0.1367381e-03,\n 0.1305278e-03,\n 0.1264096e-03,\n 0.1206392e-03,\n 0.1168121e-03,\n 0.1114548e-03,\n 0.1079011e-03,\n 0.1029286e-03,\n 0.9962750e-04,\n 0.9500928e-04,\n 0.9194008e-04,\n 0.8765030e-04,\n 0.8479853e-04,\n 0.8081775e-04,\n 0.7817267e-04,\n 0.7448255e-04,\n 0.7203030e-04,\n 0.6860758e-04,\n 0.6633051e-04,\n 0.6315326e-04,\n 
0.6103854e-04,\n 0.5809158e-04,\n 0.5613237e-04,\n 0.5340456e-04,\n 0.5159216e-04,\n 0.4906537e-04,\n 0.4738409e-04,\n 0.4504024e-04,\n 0.4348066e-04,\n 0.4130949e-04,\n 0.3986734e-04,\n 0.3786116e-04,\n 0.3653038e-04,\n 0.3467413e-04,\n 0.3343939e-04,\n 0.3171734e-04,\n 0.3057505e-04,\n 0.2898660e-04,\n 0.2793625e-04,\n 0.2646920e-04,\n 0.2549635e-04,\n 0.2413735e-04,\n 0.2323900e-04,\n 0.2198645e-04,\n 0.2116049e-04,\n 0.2000504e-04,\n 0.1924253e-04,\n 0.1817554e-04,\n 0.1747326e-04,\n 0.1649066e-04,\n 0.1584529e-04,\n 0.1494019e-04,\n 0.1434660e-04,\n 0.1351308e-04,\n 0.1296814e-04,\n 0.1220153e-04,\n 0.1170191e-04,\n 0.1099720e-04,\n 0.1053953e-04,\n 0.9892289e-05,\n ]\n\n return (\n ssalb,\n len(Legendre_coef),\n np.pad(Legendre_coef, (0, 700 - len(Legendre_coef))),\n )\n\n\ndef results():\n spherical_albedo = 0.1400516239529828\n\n albedo = [\n 0.57934552e00,\n 0.55945677e00,\n 0.53431237e00,\n 0.50788230e00,\n 0.48296762e00,\n 0.46127653e00,\n 0.44329438e00,\n 0.42849159e00,\n 0.41589457e00,\n 0.40462923e00,\n 0.39412692e00,\n 0.38407087e00,\n 0.37428829e00,\n 0.36468229e00,\n 0.35519615e00,\n 0.34579977e00,\n 0.33647874e00,\n 0.32722980e00,\n 0.31805637e00,\n 0.30896705e00,\n 0.29997292e00,\n 0.29108667e00,\n 0.28232241e00,\n 0.27369434e00,\n 0.26521713e00,\n 0.25690463e00,\n 0.24876949e00,\n 0.24082196e00,\n 0.23306957e00,\n 0.22551830e00,\n 0.21817389e00,\n 0.21104220e00,\n 0.20412904e00,\n 0.19744009e00,\n 0.19098036e00,\n 0.18475346e00,\n 0.17876221e00,\n 0.17300782e00,\n 0.16749054e00,\n 0.16220950e00,\n 0.15716265e00,\n 0.15234718e00,\n 0.14775957e00,\n 0.14339539e00,\n 0.13924994e00,\n 0.13531761e00,\n 0.13159263e00,\n 0.12806895e00,\n 0.12473993e00,\n 0.12159910e00,\n 0.11863959e00,\n 0.11585440e00,\n 0.11323670e00,\n 0.11077949e00,\n 0.10847593e00,\n 0.10631904e00,\n 0.10430222e00,\n 0.10241879e00,\n 0.10066233e00,\n 0.99026598e-01,\n 0.97505502e-01,\n 0.96093059e-01,\n 0.94783649e-01,\n 0.93571737e-01,\n 0.92452131e-01,\n 0.91419615e-01,\n 0.90469383e-01,\n 0.89596771e-01,\n 0.88797286e-01,\n 0.88066630e-01,\n 0.87400697e-01,\n 0.86795583e-01,\n 0.86247541e-01,\n 0.85752994e-01,\n 0.85308485e-01,\n 0.84910698e-01,\n 0.84556349e-01,\n 0.84242381e-01,\n 0.83965667e-01,\n 0.83723314e-01,\n 0.83512425e-01,\n 0.83330259e-01,\n 0.83174184e-01,\n 0.83041623e-01,\n 0.82930155e-01,\n 0.82837544e-01,\n 0.82761563e-01,\n 0.82700156e-01,\n 0.82651392e-01,\n 0.82613394e-01,\n 0.82584500e-01,\n 0.82563184e-01,\n 0.82548007e-01,\n 0.82537644e-01,\n 0.82530975e-01,\n 0.82526997e-01,\n 0.82524881e-01,\n 0.82523920e-01,\n 0.82523584e-01,\n 0.82523517e-01,\n ]\n\n expected_r1 = np.array(\n [\n 0.38368369e03,\n 0.25770578e03,\n 0.23945151e03,\n 0.16855780e03,\n 0.18244296e03,\n 0.16236093e03,\n 0.10911653e03,\n 0.12755070e03,\n 0.12647316e03,\n 0.10830920e03,\n 0.67582947e02,\n 0.83225288e02,\n 0.89081558e02,\n 0.82212189e02,\n 0.66570000e02,\n 0.39153576e02,\n 0.50326321e02,\n 0.57166462e02,\n 0.56031170e02,\n 0.47971886e02,\n 0.36434990e02,\n 0.21253523e02,\n 0.28237167e02,\n 0.33642113e02,\n 0.34688950e02,\n 0.31254200e02,\n 0.25037840e02,\n 0.18253815e02,\n 0.11248275e02,\n 0.15133494e02,\n 0.18625698e02,\n 0.20033745e02,\n 0.18957489e02,\n 0.16075739e02,\n 0.12519300e02,\n 0.92340946e01,\n 0.62269855e01,\n 0.82350597e01,\n 0.10240828e02,\n 0.11357998e02,\n 0.11247568e02,\n 0.10105079e02,\n 0.84183950e01,\n 0.66705170e01,\n 0.51578894e01,\n 0.37987945e01,\n 0.48435707e01,\n 0.59637489e01,\n 0.67243404e01,\n 0.69045143e01,\n 0.65221829e01,\n 0.57613211e01,\n 0.48524532e01,\n 
0.39752564e01,\n 0.32225568e01,\n 0.25505664e01,\n 0.31431477e01,\n 0.38052323e01,\n 0.43142323e01,\n 0.45352106e01,\n 0.44408669e01,\n 0.40962334e01,\n 0.36127684e01,\n 0.30965683e01,\n 0.26170671e01,\n 0.22043598e01,\n 0.18349921e01,\n 0.22099471e01,\n 0.26385496e01,\n 0.29933913e01,\n 0.31896119e01,\n 0.31966636e01,\n 0.30375271e01,\n 0.27696035e01,\n 0.24563296e01,\n 0.21455364e01,\n 0.18630074e01,\n 0.16174023e01,\n 0.13863298e01,\n 0.16447055e01,\n 0.19445310e01,\n 0.22046304e01,\n 0.23685496e01,\n 0.24110959e01,\n 0.23400669e01,\n 0.21870027e01,\n 0.19907905e01,\n 0.17838671e01,\n 0.15866874e01,\n 0.14085795e01,\n 0.12514458e01,\n 0.10881330e01,\n 0.12770401e01,\n 0.14988452e01,\n 0.16977799e01,\n 0.18342333e01,\n 0.18884524e01,\n 0.18623250e01,\n 0.17742110e01,\n 0.16486713e01,\n 0.15075257e01,\n 0.13662242e01,\n 0.12339157e01,\n 0.11142954e01,\n 0.10071722e01,\n 0.88128895e00,\n 0.10257436e01,\n 0.11969687e01,\n 0.13544649e01,\n 0.14691297e01,\n 0.15254281e01,\n 0.15228883e01,\n 0.14728941e01,\n 0.13916924e01,\n 0.12941229e01,\n 0.11911522e01,\n 0.10903227e01,\n 0.99648142e00,\n 0.91146982e00,\n 0.83437926e00,\n 0.73236907e00,\n 0.84691751e00,\n 0.98372436e00,\n 0.11120189e01,\n 0.12095475e01,\n 0.12638915e01,\n 0.12736813e01,\n 0.12464422e01,\n 0.11935405e01,\n 0.11255139e01,\n 0.10501394e01,\n 0.97279239e00,\n 0.89753741e00,\n 0.82755452e00,\n 0.76412642e00,\n 0.70607662e00,\n 0.62143141e00,\n 0.71498531e00,\n 0.82739562e00,\n 0.93439400e00,\n 0.10183749e01,\n 0.10692183e01,\n 0.10852710e01,\n 0.10717980e01,\n 0.10371528e01,\n 0.98918498e00,\n 0.93369889e00,\n 0.87458736e00,\n 0.81457925e00,\n 0.75610143e00,\n 0.70160747e00,\n 0.65229672e00,\n 0.60690910e00,\n 0.53620493e00,\n 0.61444676e00,\n 0.70891893e00,\n 0.79989344e00,\n 0.87304217e00,\n 0.91996312e00,\n 0.93897974e00,\n 0.93395931e00,\n 0.91129071e00,\n 0.87703383e00,\n 0.83570266e00,\n 0.79033947e00,\n 0.74295175e00,\n 0.69498348e00,\n 0.64796978e00,\n 0.60397410e00,\n 0.56429613e00,\n 0.52768981e00,\n 0.46893141e00,\n 0.53562319e00,\n 0.61647099e00,\n 0.69504291e00,\n 0.75940472e00,\n 0.80246264e00,\n 0.82262319e00,\n 0.82284969e00,\n 0.80819505e00,\n 0.78347129e00,\n 0.75227189e00,\n 0.71705294e00,\n 0.67947024e00,\n 0.64061898e00,\n 0.60127056e00,\n 0.56240505e00,\n 0.52581406e00,\n 0.49295956e00,\n 0.46267310e00,\n 0.41456842e00,\n 0.47228998e00,\n 0.54249328e00,\n 0.61122215e00,\n 0.66834646e00,\n 0.70778871e00,\n 0.72807664e00,\n 0.73156416e00,\n 0.72236371e00,\n 0.70439237e00,\n 0.68056595e00,\n 0.65288788e00,\n 0.62274611e00,\n 0.59110469e00,\n 0.55858999e00,\n 0.52560019e00,\n 0.49273926e00,\n 0.46156633e00,\n 0.43370983e00,\n 0.40812615e00,\n 0.36973703e00,\n 0.42031151e00,\n 0.48198968e00,\n 0.54273206e00,\n 0.59380746e00,\n 0.62994283e00,\n 0.64979416e00,\n 0.65528655e00,\n 0.64984703e00,\n 0.63673460e00,\n 0.61836016e00,\n 0.59637630e00,\n 0.57194269e00,\n 0.54589856e00,\n 0.51883745e00,\n 0.49112943e00,\n 0.46296096e00,\n 0.43467191e00,\n 0.40760121e00,\n 0.38351870e00,\n 0.36154264e00,\n 0.33207551e00,\n 0.37681752e00,\n 0.43150941e00,\n 0.48563948e00,\n 0.53159100e00,\n 0.56474036e00,\n 0.58386314e00,\n 0.59056675e00,\n 0.58775848e00,\n 0.57819819e00,\n 0.56391406e00,\n 0.54628950e00,\n 0.52629930e00,\n 0.50466305e00,\n 0.48191690e00,\n 0.45844156e00,\n 0.43446112e00,\n 0.41004205e00,\n 0.38533735e00,\n 0.36147287e00,\n 0.34032100e00,\n 0.32119045e00,\n 0.29983068e00,\n 0.33969635e00,\n 0.38853076e00,\n 0.43707687e00,\n 0.47863159e00,\n 0.50910699e00,\n 0.52738410e00,\n 0.53481984e00,\n 0.53390729e00,\n 
0.52700996e00,\n 0.51585591e00,\n 0.50161958e00,\n 0.48513207e00,\n 0.46701470e00,\n 0.44774175e00,\n 0.42767024e00,\n 0.40705225e00,\n 0.38602614e00,\n 0.36459178e00,\n 0.34277132e00,\n 0.32148623e00,\n 0.30266759e00,\n 0.28583673e00,\n 0.27165741e00,\n 0.30735224e00,\n 0.35116890e00,\n 0.39490715e00,\n 0.43263298e00,\n 0.46071306e00,\n 0.47812337e00,\n 0.48601636e00,\n 0.48654056e00,\n 0.48172772e00,\n 0.47305444e00,\n 0.46152285e00,\n 0.44784895e00,\n 0.43257853e00,\n 0.41613755e00,\n 0.39885530e00,\n 0.38097894e00,\n 0.36268044e00,\n 0.34404564e00,\n 0.32503796e00,\n 0.30558982e00,\n 0.28643203e00,\n 0.26951250e00,\n 0.25458133e00,\n 0.24664548e00,\n 0.27872956e00,\n 0.31819224e00,\n 0.35773003e00,\n 0.39206341e00,\n 0.41795400e00,\n 0.43447414e00,\n 0.44261932e00,\n 0.44425684e00,\n 0.44113833e00,\n 0.43451858e00,\n 0.42523941e00,\n 0.41390744e00,\n 0.40100044e00,\n 0.38690761e00,\n 0.37194157e00,\n 0.35634446e00,\n 0.34029481e00,\n 0.32391262e00,\n 0.30725011e00,\n 0.29025167e00,\n 0.27279079e00,\n 0.25542563e00,\n 0.24008393e00,\n 0.22675417e00,\n 0.22445151e00,\n 0.25343820e00,\n 0.28914347e00,\n 0.32500827e00,\n 0.35629919e00,\n 0.38011837e00,\n 0.39564186e00,\n 0.40376574e00,\n 0.40615430e00,\n 0.40434766e00,\n 0.39941984e00,\n 0.39206272e00,\n 0.38276103e00,\n 0.37190381e00,\n 0.35982931e00,\n 0.34683278e00,\n 0.33315977e00,\n 0.31900054e00,\n 0.30449098e00,\n 0.28971705e00,\n 0.27470860e00,\n 0.25940111e00,\n 0.24363182e00,\n 0.22780687e00,\n 0.21380231e00,\n 0.20184751e00,\n 0.20519748e00,\n 0.23159876e00,\n 0.26413625e00,\n 0.29684672e00,\n 0.32543322e00,\n 0.34727564e00,\n 0.36164755e00,\n 0.36939174e00,\n 0.37204832e00,\n 0.37103242e00,\n 0.36731219e00,\n 0.36147776e00,\n 0.35390341e00,\n 0.34486532e00,\n 0.33460709e00,\n 0.32336712e00,\n 0.31137651e00,\n 0.29884037e00,\n 0.28591970e00,\n 0.27272525e00,\n 0.25932097e00,\n 0.24572186e00,\n 0.23185994e00,\n 0.21755250e00,\n 0.20307408e00,\n 0.19022347e00,\n 0.17946769e00,\n 0.18898228e00,\n 0.21327148e00,\n 0.24319792e00,\n 0.27327064e00,\n 0.29953399e00,\n 0.31957966e00,\n 0.33274972e00,\n 0.33984205e00,\n 0.34230980e00,\n 0.34150216e00,\n 0.33835727e00,\n 0.33344826e00,\n 0.32711285e00,\n 0.31955174e00,\n 0.31089956e00,\n 0.30128181e00,\n 0.29084933e00,\n 0.27977982e00,\n 0.26825124e00,\n 0.25640994e00,\n 0.24435396e00,\n 0.23213391e00,\n 0.21975470e00,\n 0.20714773e00,\n 0.19412118e00,\n 0.18083785e00,\n 0.16899987e00,\n 0.15930425e00,\n 0.17541476e00,\n 0.19795303e00,\n 0.22571321e00,\n 0.25359881e00,\n 0.27793473e00,\n 0.29647639e00,\n 0.30860057e00,\n 0.31503823e00,\n 0.31714112e00,\n 0.31618607e00,\n 0.31309542e00,\n 0.30847403e00,\n 0.30271024e00,\n 0.29603517e00,\n 0.28855911e00,\n 0.28031746e00,\n 0.27133235e00,\n 0.26166755e00,\n 0.25144571e00,\n 0.24082130e00,\n 0.22993420e00,\n 0.21887848e00,\n 0.20769787e00,\n 0.19639082e00,\n 0.18488961e00,\n 0.17300032e00,\n 0.16079190e00,\n 0.14985578e00,\n 0.14110740e00,\n 0.16366631e00,\n 0.18467116e00,\n 0.21054901e00,\n 0.23656210e00,\n 0.25929046e00,\n 0.27663431e00,\n 0.28799024e00,\n 0.29400581e00,\n 0.29590416e00,\n 0.29484844e00,\n 0.29169577e00,\n 0.28704572e00,\n 0.28133944e00,\n 0.27490255e00,\n 0.26794240e00,\n 0.26054066e00,\n 0.25267535e00,\n 0.24428359e00,\n 0.23534042e00,\n 0.22590491e00,\n 0.21610361e00,\n 0.20607288e00,\n 0.19590905e00,\n 0.18565454e00,\n 0.17530420e00,\n 0.16479163e00,\n 0.15392394e00,\n 0.14269111e00,\n 0.13256522e00,\n 0.12466694e00,\n 0.15291582e00,\n 0.17249423e00,\n 0.19663572e00,\n 0.22094680e00,\n 0.24225558e00,\n 0.25860691e00,\n 
0.26942277e00,\n 0.27527362e00,\n 0.27725279e00,\n 0.27639005e00,\n 0.27343193e00,\n 0.26890206e00,\n 0.26321408e00,\n 0.25673062e00,\n 0.24976483e00,\n 0.24254556e00,\n 0.23517576e00,\n 0.22762197e00,\n 0.21976374e00,\n 0.21149129e00,\n 0.20278960e00,\n 0.19374785e00,\n 0.18449736e00,\n 0.17514092e00,\n 0.16572388e00,\n 0.15623912e00,\n 0.14661992e00,\n 0.13667518e00,\n 0.12632957e00,\n 0.11693286e00,\n 0.10979707e00,\n 0.14267196e00,\n 0.16087982e00,\n 0.18335804e00,\n 0.20604582e00,\n 0.22601375e00,\n 0.24145372e00,\n 0.25182438e00,\n 0.25763780e00,\n 0.25987828e00,\n 0.25945812e00,\n 0.25701439e00,\n 0.25296855e00,\n 0.24764267e00,\n 0.24133593e00,\n 0.23435625e00,\n 0.22701317e00,\n 0.21957387e00,\n 0.21219650e00,\n 0.20488037e00,\n 0.19748402e00,\n 0.18982655e00,\n 0.18181197e00,\n 0.17347980e00,\n 0.16495080e00,\n 0.15633532e00,\n 0.14768384e00,\n 0.13898759e00,\n 0.13017787e00,\n 0.12106603e00,\n 0.11152479e00,\n 0.10278418e00,\n 0.96333064e-01,\n 0.13278867e00,\n 0.14967601e00,\n 0.17054874e00,\n 0.19166234e00,\n 0.21031891e00,\n 0.22485405e00,\n 0.23476954e00,\n 0.24053511e00,\n 0.24305005e00,\n 0.24313784e00,\n 0.24135487e00,\n 0.23804264e00,\n 0.23343392e00,\n 0.22772875e00,\n 0.22114034e00,\n 0.21392128e00,\n 0.20636588e00,\n 0.19877388e00,\n 0.19137226e00,\n 0.18422794e00,\n 0.17721902e00,\n 0.17011781e00,\n 0.16274700e00,\n 0.15508358e00,\n 0.14722840e00,\n 0.13929746e00,\n 0.13134745e00,\n 0.12336826e00,\n 0.11528943e00,\n 0.10692697e00,\n 0.98114364e-01,\n 0.89966424e-01,\n 0.84133029e-01,\n 0.12330588e00,\n 0.13893479e00,\n 0.15827183e00,\n 0.17786814e00,\n 0.19524175e00,\n 0.20886324e00,\n 0.21827731e00,\n 0.22391967e00,\n 0.22662000e00,\n 0.22713451e00,\n 0.22596700e00,\n 0.22341314e00,\n 0.21965274e00,\n 0.21481460e00,\n 0.20901735e00,\n 0.20240258e00,\n 0.19516377e00,\n 0.18756257e00,\n 0.17991112e00,\n 0.17249928e00,\n 0.16548070e00,\n 0.15879016e00,\n 0.15218471e00,\n 0.14541364e00,\n 0.13838096e00,\n 0.13115591e00,\n 0.12385615e00,\n 0.11654575e00,\n 0.10921578e00,\n 0.10179584e00,\n 0.94108447e-01,\n 0.85958004e-01,\n 0.78349575e-01,\n 0.73076993e-01,\n 0.11430455e00,\n 0.12874915e00,\n 0.14663576e00,\n 0.16478880e00,\n 0.18092515e00,\n 0.19363941e00,\n 0.20251557e00,\n 0.20795847e00,\n 0.21073578e00,\n 0.21154700e00,\n 0.21085797e00,\n 0.20893978e00,\n 0.20595059e00,\n 0.20198931e00,\n 0.19712524e00,\n 0.19142093e00,\n 0.18495877e00,\n 0.17787308e00,\n 0.17037868e00,\n 0.16277534e00,\n 0.15539503e00,\n 0.14847951e00,\n 0.14204761e00,\n 0.13587566e00,\n 0.12965593e00,\n 0.12321350e00,\n 0.11657458e00,\n 0.10985404e00,\n 0.10312499e00,\n 0.96382633e-01,\n 0.89558579e-01,\n 0.82482606e-01,\n 0.74937083e-01,\n 0.67823343e-01,\n 0.63062489e-01,\n 0.10584079e00,\n 0.11918116e00,\n 0.13571160e00,\n 0.15250790e00,\n 0.16746905e00,\n 0.17930275e00,\n 0.18762796e00,\n 0.19281991e00,\n 0.19558841e00,\n 0.19658093e00,\n 0.19623034e00,\n 0.19479063e00,\n 0.19241145e00,\n 0.18918501e00,\n 0.18516850e00,\n 0.18039672e00,\n 0.17489515e00,\n 0.16870056e00,\n 0.16189243e00,\n 0.15463088e00,\n 0.14718156e00,\n 0.13989125e00,\n 0.13307634e00,\n 0.12685405e00,\n 0.12105249e00,\n 0.11533057e00,\n 0.10943508e00,\n 0.10333905e00,\n 0.97149357e-01,\n 0.90949543e-01,\n 0.84741533e-01,\n 0.78459755e-01,\n 0.71940817e-01,\n 0.64950287e-01,\n 0.58292422e-01,\n 0.53999700e-01,\n 0.97934157e-01,\n 0.11025076e00,\n 0.12552127e00,\n 0.14105226e00,\n 0.15490949e00,\n 0.16590366e00,\n 0.17368492e00,\n 0.17860012e00,\n 0.18130451e00,\n 0.18239634e00,\n 0.18227696e00,\n 0.18118428e00,\n 
0.17926148e00,\n 0.17660023e00,\n 0.17325978e00,\n 0.16927499e00,\n 0.16466121e00,\n 0.15942113e00,\n 0.15355882e00,\n 0.14710733e00,\n 0.14017075e00,\n 0.13296603e00,\n 0.12582819e00,\n 0.11912578e00,\n 0.11307607e00,\n 0.10758535e00,\n 0.10230618e00,\n 0.96913703e-01,\n 0.91320075e-01,\n 0.85618503e-01,\n 0.79903029e-01,\n 0.74183889e-01,\n 0.68398476e-01,\n 0.62389236e-01,\n 0.55908926e-01,\n 0.49671900e-01,\n 0.45807466e-01,\n 0.90577021e-01,\n 0.10194721e00,\n 0.11605130e00,\n 0.13040775e00,\n 0.14323507e00,\n 0.15343815e00,\n 0.16069512e00,\n 0.16532598e00,\n 0.16793491e00,\n 0.16907367e00,\n 0.16911317e00,\n 0.16827461e00,\n 0.16669342e00,\n 0.16445951e00,\n 0.16163501e00,\n 0.15826119e00,\n 0.15436088e00,\n 0.14993919e00,\n 0.14498582e00,\n 0.13948333e00,\n 0.13342866e00,\n 0.12687427e00,\n 0.11998184e00,\n 0.11305442e00,\n 0.10648688e00,\n 0.10058656e00,\n 0.95354967e-01,\n 0.90465494e-01,\n 0.85533582e-01,\n 0.80405675e-01,\n 0.75155161e-01,\n 0.69885492e-01,\n 0.64615801e-01,\n 0.59286319e-01,\n 0.53744264e-01,\n 0.47732841e-01,\n 0.41884389e-01,\n 0.38411867e-01,\n 0.83746620e-01,\n 0.94243065e-01,\n 0.10726915e00,\n 0.12053798e00,\n 0.13240825e00,\n 0.14187106e00,\n 0.14862999e00,\n 0.15297991e00,\n 0.15547749e00,\n 0.15663114e00,\n 0.15678266e00,\n 0.15613645e00,\n 0.15481880e00,\n 0.15291539e00,\n 0.15048827e00,\n 0.14758278e00,\n 0.14422987e00,\n 0.14044581e00,\n 0.13623075e00,\n 0.13156785e00,\n 0.12642694e00,\n 0.12077967e00,\n 0.11463551e00,\n 0.10810074e00,\n 0.10143317e00,\n 0.95029272e-01,\n 0.89268476e-01,\n 0.84254339e-01,\n 0.79705626e-01,\n 0.75194120e-01,\n 0.70498869e-01,\n 0.65667070e-01,\n 0.60809318e-01,\n 0.55953730e-01,\n 0.51043399e-01,\n 0.45929730e-01,\n 0.40349029e-01,\n 0.34859274e-01,\n 0.31745121e-01,\n 0.77413671e-01,\n 0.87103941e-01,\n 0.99134557e-01,\n 0.11139757e00,\n 0.12238043e00,\n 0.13115339e00,\n 0.13744320e00,\n 0.14152151e00,\n 0.14390105e00,\n 0.14504991e00,\n 0.14528263e00,\n 0.14478727e00,\n 0.14368038e00,\n 0.14204189e00,\n 0.13993120e00,\n 0.13739403e00,\n 0.13446525e00,\n 0.13116941e00,\n 0.12751934e00,\n 0.12351336e00,\n 0.11913250e00,\n 0.11434042e00,\n 0.10909266e00,\n 0.10336579e00,\n 0.97215243e-01,\n 0.90846524e-01,\n 0.84636919e-01,\n 0.79015903e-01,\n 0.74187510e-01,\n 0.69935963e-01,\n 0.65807395e-01,\n 0.61514482e-01,\n 0.57072140e-01,\n 0.52595474e-01,\n 0.48121743e-01,\n 0.43596964e-01,\n 0.38876079e-01,\n 0.33690531e-01,\n 0.28531600e-01,\n 0.25744777e-01,\n 0.71547434e-01,\n 0.80494061e-01,\n 0.91605820e-01,\n 0.10293934e00,\n 0.11310040e00,\n 0.12123200e00,\n 0.12708212e00,\n 0.13090093e00,\n 0.13316067e00,\n 0.13429219e00,\n 0.13458471e00,\n 0.13421088e00,\n 0.13327757e00,\n 0.13185826e00,\n 0.13000821e00,\n 0.12777114e00,\n 0.12518245e00,\n 0.12227035e00,\n 0.11905541e00,\n 0.11554869e00,\n 0.11174847e00,\n 0.10763626e00,\n 0.10317403e00,\n 0.98308414e-01,\n 0.92992358e-01,\n 0.87237559e-01,\n 0.81194960e-01,\n 0.75207628e-01,\n 0.69733076e-01,\n 0.65067738e-01,\n 0.61075501e-01,\n 0.57294834e-01,\n 0.53375702e-01,\n 0.49295910e-01,\n 0.45172136e-01,\n 0.41050550e-01,\n 0.36880266e-01,\n 0.32519296e-01,\n 0.27695602e-01,\n 0.22840958e-01,\n 0.20352198e-01,\n 0.66117376e-01,\n 0.74378133e-01,\n 0.84641933e-01,\n 0.95116824e-01,\n 0.10451740e00,\n 0.11205351e00,\n 0.11749266e00,\n 0.12106522e00,\n 0.12320609e00,\n 0.12431186e00,\n 0.12464833e00,\n 0.12437376e00,\n 0.12358582e00,\n 0.12235164e00,\n 0.12072182e00,\n 0.11873700e00,\n 0.11643104e00,\n 0.11383259e00,\n 0.11096542e00,\n 0.10784754e00,\n 0.10448926e00,\n 
# [Omitted: a large inline dump of raw floating-point values (thousands of unlabeled
#  numeric constants separated by literal "\n" escapes). No caption, variable name, or
#  structure was recoverable from this block, so only this placeholder is kept.]
0.98667610e-04,\n 0.10172475e-03,\n 0.10353317e-03,\n 0.10445446e-03,\n 0.10473084e-03,\n 0.10450836e-03,\n 0.10388237e-03,\n 0.10292211e-03,\n 0.10168495e-03,\n 0.10021854e-03,\n 0.98558390e-04,\n 0.96735770e-04,\n 0.94745832e-04,\n 0.92573500e-04,\n 0.90232592e-04,\n 0.87755776e-04,\n 0.85199157e-04,\n 0.82488186e-04,\n 0.79575933e-04,\n 0.76551951e-04,\n 0.73451745e-04,\n 0.70241033e-04,\n 0.66919587e-04,\n 0.63485968e-04,\n 0.60029004e-04,\n 0.56497996e-04,\n 0.52914555e-04,\n 0.49328813e-04,\n 0.45763074e-04,\n 0.42167223e-04,\n 0.38533406e-04,\n 0.34952976e-04,\n 0.31360858e-04,\n 0.27841659e-04,\n 0.24351986e-04,\n 0.21002805e-04,\n 0.17696189e-04,\n 0.14492869e-04,\n 0.11447947e-04,\n 0.84757721e-05,\n 0.56412364e-05,\n 0.29090747e-05,\n 0.20986288e-06,\n -0.24671158e-05,\n -0.51494958e-05,\n -0.78480261e-05,\n -0.10632524e-04,\n -0.13520086e-04,\n -0.16503614e-04,\n -0.19662311e-04,\n -0.22948981e-04,\n -0.26503118e-04,\n -0.30458663e-04,\n -0.35155379e-04,\n -0.41044819e-04,\n -0.49301310e-04,\n -0.62018436e-04,\n -0.83043000e-04,\n -0.11966786e-03,\n -0.18399033e-03,\n -0.29346853e-03,\n -0.46542627e-03,\n -0.69852220e-03,\n -0.93878357e-03,\n -0.10528294e-02,\n -0.88006578e-03,\n -0.38117066e-03,\n 0.22924476e-03,\n 0.59057289e-03,\n 0.52561943e-03,\n 0.18708916e-03,\n -0.14889365e-03,\n -0.34005527e-03,\n -0.41272707e-03,\n -0.45099761e-03,\n -0.51413663e-03,\n -0.62613422e-03,\n -0.79706579e-03,\n -0.10450790e-02,\n -0.14138672e-02,\n -0.19733894e-02,\n -0.27403242e-02,\n -0.34682453e-02,\n -0.35488785e-02,\n -0.24659703e-02,\n -0.48296375e-03,\n 0.14571503e-02,\n 0.25734871e-02,\n 0.27371510e-02,\n 0.22676513e-02,\n 0.15553402e-02,\n 0.87312493e-03,\n 0.21932714e-04,\n 0.24863417e-04,\n 0.28513090e-04,\n 0.32239484e-04,\n 0.35579265e-04,\n 0.38247381e-04,\n 0.40160809e-04,\n 0.41405037e-04,\n 0.42140975e-04,\n 0.42515869e-04,\n 0.42628493e-04,\n 0.42538381e-04,\n 0.42284017e-04,\n 0.41893109e-04,\n 0.41388936e-04,\n 0.40791474e-04,\n 0.40115974e-04,\n 0.39374940e-04,\n 0.38564984e-04,\n 0.37680649e-04,\n 0.36728081e-04,\n 0.35719502e-04,\n 0.34679109e-04,\n 0.33576438e-04,\n 0.32390075e-04,\n 0.31158215e-04,\n 0.29896764e-04,\n 0.28590852e-04,\n 0.27239597e-04,\n 0.25841982e-04,\n 0.24434608e-04,\n 0.22996630e-04,\n 0.21537227e-04,\n 0.20077452e-04,\n 0.18627210e-04,\n 0.17164399e-04,\n 0.15684327e-04,\n 0.14226660e-04,\n 0.12765439e-04,\n 0.11332698e-04,\n 0.99115950e-05,\n 0.85493384e-05,\n 0.72026537e-05,\n 0.58994046e-05,\n 0.46591872e-05,\n 0.34499117e-05,\n 0.22957561e-05,\n 0.11843766e-05,\n 0.84998014e-07,\n -0.10040685e-05,\n -0.20963864e-05,\n -0.31941836e-05,\n -0.43282666e-05,\n -0.55030100e-05,\n -0.67179099e-05,\n -0.80033133e-05,\n -0.93413128e-05,\n -0.10787697e-04,\n -0.12397514e-04,\n -0.14309594e-04,\n -0.16705344e-04,\n -0.20065328e-04,\n -0.25239178e-04,\n -0.33791446e-04,\n -0.48691152e-04,\n -0.74860320e-04,\n -0.11940804e-03,\n -0.18940083e-03,\n -0.28432420e-03,\n -0.38224252e-03,\n -0.42882701e-03,\n -0.35856038e-03,\n -0.15526464e-03,\n 0.93624702e-04,\n 0.24092782e-03,\n 0.21429047e-03,\n 0.76112388e-04,\n -0.60849816e-04,\n -0.13861567e-03,\n -0.16805886e-03,\n -0.18353794e-03,\n -0.20919893e-03,\n -0.25477342e-03,\n -0.32431912e-03,\n -0.42517454e-03,\n -0.57509379e-03,\n -0.80277218e-03,\n -0.11159416e-02,\n -0.14153966e-02,\n -0.14518298e-02,\n -0.10097986e-02,\n -0.19427406e-03,\n 0.60561736e-03,\n 0.10654813e-02,\n 0.11315914e-02,\n 0.93659549e-03,\n 0.64201583e-03,\n 0.36030522e-03,\n 0.14866883e-03,\n 0.41634707e-05,\n 0.47198023e-05,\n 
0.54126149e-05,\n 0.61199912e-05,\n 0.67539763e-05,\n 0.72604594e-05,\n 0.76236788e-05,\n 0.78598650e-05,\n 0.79995607e-05,\n 0.80707214e-05,\n 0.80921054e-05,\n 0.80750178e-05,\n 0.80267509e-05,\n 0.79525425e-05,\n 0.78568110e-05,\n 0.77433706e-05,\n 0.76151509e-05,\n 0.74745158e-05,\n 0.73207625e-05,\n 0.71528843e-05,\n 0.69720704e-05,\n 0.67805959e-05,\n 0.65831077e-05,\n 0.63738212e-05,\n 0.61485744e-05,\n 0.59146905e-05,\n 0.56752488e-05,\n 0.54273883e-05,\n 0.51709094e-05,\n 0.49056021e-05,\n 0.46384298e-05,\n 0.43654286e-05,\n 0.40883574e-05,\n 0.38112394e-05,\n 0.35359881e-05,\n 0.32583384e-05,\n 0.29773378e-05,\n 0.27006156e-05,\n 0.24232718e-05,\n 0.21512824e-05,\n 0.18814886e-05,\n 0.16229329e-05,\n 0.13672592e-05,\n 0.11198919e-05,\n 0.88442783e-06,\n 0.65489411e-06,\n 0.43578387e-06,\n 0.22484058e-06,\n 0.16117639e-07,\n -0.19059540e-06,\n -0.39797052e-06,\n -0.60633920e-06,\n -0.82165008e-06,\n -0.10446255e-05,\n -0.12752704e-05,\n -0.15192638e-05,\n -0.17732648e-05,\n -0.20478196e-05,\n -0.23534012e-05,\n -0.27163851e-05,\n -0.31711115e-05,\n -0.38089131e-05,\n -0.47909707e-05,\n -0.64142382e-05,\n -0.92423534e-05,\n -0.14209581e-04,\n -0.22665536e-04,\n -0.35952206e-04,\n -0.53973064e-04,\n -0.72565315e-04,\n -0.81414371e-04,\n -0.68077694e-04,\n -0.29477938e-04,\n 0.17784128e-04,\n 0.45754921e-04,\n 0.40691128e-04,\n 0.14446910e-04,\n -0.11559983e-04,\n -0.26320620e-04,\n -0.31904819e-04,\n -0.34839621e-04,\n -0.39709423e-04,\n -0.48360336e-04,\n -0.61561077e-04,\n -0.80702943e-04,\n -0.10915505e-03,\n -0.15237252e-03,\n -0.21185735e-03,\n -0.26881794e-03,\n -0.27586648e-03,\n -0.19191137e-03,\n -0.36795147e-04,\n 0.11542100e-03,\n 0.20291690e-03,\n 0.21544744e-03,\n 0.17828873e-03,\n 0.12219916e-03,\n 0.68575428e-04,\n 0.28294970e-04,\n 0.53851386e-05,\n ]\n )\n return spherical_albedo, albedo, expected_r1\n"
] | [
[
"numpy.array",
"numpy.testing.assert_allclose"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
ririw/scipy | [
"680ecf8c52966343827903e6b7983b1ef7323fe2"
] | [
"scipy/sparse/compressed.py"
] | [
"\"\"\"Base class for sparse matrix formats using compressed storage.\"\"\"\nfrom __future__ import division, print_function, absolute_import\n\n__all__ = []\n\nfrom warnings import warn\nimport operator\n\nimport numpy as np\nfrom scipy._lib._util import _prune_array\n\nfrom .base import spmatrix, isspmatrix, SparseEfficiencyWarning\nfrom .data import _data_matrix, _minmax_mixin\nfrom .dia import dia_matrix\nfrom . import _sparsetools\nfrom ._sparsetools import (get_csr_submatrix, csr_sample_offsets, csr_todense,\n csr_sample_values, csr_row_index, csr_row_slice,\n csr_column_index1, csr_column_index2)\nfrom ._index import IndexMixin\nfrom .sputils import (upcast, upcast_char, to_native, isdense, isshape,\n getdtype, isscalarlike, isintlike, get_index_dtype,\n downcast_intp_index, get_sum_dtype, check_shape,\n matrix, asmatrix, is_pydata_spmatrix)\n\n\nclass _cs_matrix(_data_matrix, _minmax_mixin, IndexMixin):\n \"\"\"base matrix class for compressed row- and column-oriented matrices\"\"\"\n\n def __init__(self, arg1, shape=None, dtype=None, copy=False):\n _data_matrix.__init__(self)\n\n if isspmatrix(arg1):\n if arg1.format == self.format and copy:\n arg1 = arg1.copy()\n else:\n arg1 = arg1.asformat(self.format)\n self._set_self(arg1)\n\n elif isinstance(arg1, tuple):\n if isshape(arg1):\n # It's a tuple of matrix dimensions (M, N)\n # create empty matrix\n self._shape = check_shape(arg1)\n M, N = self.shape\n # Select index dtype large enough to pass array and\n # scalar parameters to sparsetools\n idx_dtype = get_index_dtype(maxval=max(M, N))\n self.data = np.zeros(0, getdtype(dtype, default=float))\n self.indices = np.zeros(0, idx_dtype)\n self.indptr = np.zeros(self._swap((M, N))[0] + 1,\n dtype=idx_dtype)\n else:\n if len(arg1) == 2:\n # (data, ij) format\n from .coo import coo_matrix\n other = self.__class__(coo_matrix(arg1, shape=shape))\n self._set_self(other)\n elif len(arg1) == 3:\n # (data, indices, indptr) format\n (data, indices, indptr) = arg1\n\n # Select index dtype large enough to pass array and\n # scalar parameters to sparsetools\n maxval = None\n if shape is not None:\n maxval = max(shape)\n idx_dtype = get_index_dtype((indices, indptr),\n maxval=maxval,\n check_contents=True)\n\n self.indices = np.array(indices, copy=copy,\n dtype=idx_dtype)\n self.indptr = np.array(indptr, copy=copy, dtype=idx_dtype)\n self.data = np.array(data, copy=copy, dtype=dtype)\n else:\n raise ValueError(\"unrecognized {}_matrix \"\n \"constructor usage\".format(self.format))\n\n else:\n # must be dense\n try:\n arg1 = np.asarray(arg1)\n except Exception:\n raise ValueError(\"unrecognized {}_matrix constructor usage\"\n \"\".format(self.format))\n from .coo import coo_matrix\n self._set_self(self.__class__(coo_matrix(arg1, dtype=dtype)))\n\n # Read matrix dimensions given, if any\n if shape is not None:\n self._shape = check_shape(shape)\n else:\n if self.shape is None:\n # shape not already set, try to infer dimensions\n try:\n major_dim = len(self.indptr) - 1\n minor_dim = self.indices.max() + 1\n except Exception:\n raise ValueError('unable to infer matrix dimensions')\n else:\n self._shape = check_shape(self._swap((major_dim,\n minor_dim)))\n\n if dtype is not None:\n self.data = self.data.astype(dtype, copy=False)\n\n self.check_format(full_check=False)\n\n def getnnz(self, axis=None):\n if axis is None:\n return int(self.indptr[-1])\n else:\n if axis < 0:\n axis += 2\n axis, _ = self._swap((axis, 1 - axis))\n _, N = self._swap(self.shape)\n if axis == 0:\n return 
np.bincount(downcast_intp_index(self.indices),\n minlength=N)\n elif axis == 1:\n return np.diff(self.indptr)\n raise ValueError('axis out of bounds')\n\n getnnz.__doc__ = spmatrix.getnnz.__doc__\n\n def _set_self(self, other, copy=False):\n \"\"\"take the member variables of other and assign them to self\"\"\"\n\n if copy:\n other = other.copy()\n\n self.data = other.data\n self.indices = other.indices\n self.indptr = other.indptr\n self._shape = check_shape(other.shape)\n\n def check_format(self, full_check=True):\n \"\"\"check whether the matrix format is valid\n\n Parameters\n ----------\n full_check : bool, optional\n If `True`, rigorous check, O(N) operations. Otherwise\n basic check, O(1) operations (default True).\n \"\"\"\n # use _swap to determine proper bounds\n major_name, minor_name = self._swap(('row', 'column'))\n major_dim, minor_dim = self._swap(self.shape)\n\n # index arrays should have integer data types\n if self.indptr.dtype.kind != 'i':\n warn(\"indptr array has non-integer dtype ({})\"\n \"\".format(self.indptr.dtype.name), stacklevel=3)\n if self.indices.dtype.kind != 'i':\n warn(\"indices array has non-integer dtype ({})\"\n \"\".format(self.indices.dtype.name), stacklevel=3)\n\n idx_dtype = get_index_dtype((self.indptr, self.indices))\n self.indptr = np.asarray(self.indptr, dtype=idx_dtype)\n self.indices = np.asarray(self.indices, dtype=idx_dtype)\n self.data = to_native(self.data)\n\n # check array shapes\n for x in [self.data.ndim, self.indices.ndim, self.indptr.ndim]:\n if x != 1:\n raise ValueError('data, indices, and indptr should be 1-D')\n\n # check index pointer\n if (len(self.indptr) != major_dim + 1):\n raise ValueError(\"index pointer size ({}) should be ({})\"\n \"\".format(len(self.indptr), major_dim + 1))\n if (self.indptr[0] != 0):\n raise ValueError(\"index pointer should start with 0\")\n\n # check index and data arrays\n if (len(self.indices) != len(self.data)):\n raise ValueError(\"indices and data should have the same size\")\n if (self.indptr[-1] > len(self.indices)):\n raise ValueError(\"Last value of index pointer should be less than \"\n \"the size of index and data arrays\")\n\n self.prune()\n\n if full_check:\n # check format validity (more expensive)\n if self.nnz > 0:\n if self.indices.max() >= minor_dim:\n raise ValueError(\"{} index values must be < {}\"\n \"\".format(minor_name, minor_dim))\n if self.indices.min() < 0:\n raise ValueError(\"{} index values must be >= 0\"\n \"\".format(minor_name))\n if np.diff(self.indptr).min() < 0:\n raise ValueError(\"index pointer values must form a \"\n \"non-decreasing sequence\")\n\n # if not self.has_sorted_indices():\n # warn('Indices were not in sorted order. Sorting indices.')\n # self.sort_indices()\n # assert(self.has_sorted_indices())\n # TODO check for duplicates?\n\n #######################\n # Boolean comparisons #\n #######################\n\n def _scalar_binopt(self, other, op):\n \"\"\"Scalar version of self._binopt, for cases in which no new nonzeros\n are added. 
Produces a new spmatrix in canonical form.\n \"\"\"\n self.sum_duplicates()\n res = self._with_data(op(self.data, other), copy=True)\n res.eliminate_zeros()\n return res\n\n def __eq__(self, other):\n # Scalar other.\n if isscalarlike(other):\n if np.isnan(other):\n return self.__class__(self.shape, dtype=np.bool_)\n\n if other == 0:\n warn(\"Comparing a sparse matrix with 0 using == is inefficient\"\n \", try using != instead.\", SparseEfficiencyWarning,\n stacklevel=3)\n all_true = self.__class__(np.ones(self.shape, dtype=np.bool_))\n inv = self._scalar_binopt(other, operator.ne)\n return all_true - inv\n else:\n return self._scalar_binopt(other, operator.eq)\n # Dense other.\n elif isdense(other):\n return self.todense() == other\n # Pydata sparse other.\n elif is_pydata_spmatrix(other):\n return NotImplemented\n # Sparse other.\n elif isspmatrix(other):\n warn(\"Comparing sparse matrices using == is inefficient, try using\"\n \" != instead.\", SparseEfficiencyWarning, stacklevel=3)\n # TODO sparse broadcasting\n if self.shape != other.shape:\n return False\n elif self.format != other.format:\n other = other.asformat(self.format)\n res = self._binopt(other, '_ne_')\n all_true = self.__class__(np.ones(self.shape, dtype=np.bool_))\n return all_true - res\n else:\n return False\n\n def __ne__(self, other):\n # Scalar other.\n if isscalarlike(other):\n if np.isnan(other):\n warn(\"Comparing a sparse matrix with nan using != is\"\n \" inefficient\", SparseEfficiencyWarning, stacklevel=3)\n all_true = self.__class__(np.ones(self.shape, dtype=np.bool_))\n return all_true\n elif other != 0:\n warn(\"Comparing a sparse matrix with a nonzero scalar using !=\"\n \" is inefficient, try using == instead.\",\n SparseEfficiencyWarning, stacklevel=3)\n all_true = self.__class__(np.ones(self.shape), dtype=np.bool_)\n inv = self._scalar_binopt(other, operator.eq)\n return all_true - inv\n else:\n return self._scalar_binopt(other, operator.ne)\n # Dense other.\n elif isdense(other):\n return self.todense() != other\n # Pydata sparse other.\n elif is_pydata_spmatrix(other):\n return NotImplemented\n # Sparse other.\n elif isspmatrix(other):\n # TODO sparse broadcasting\n if self.shape != other.shape:\n return True\n elif self.format != other.format:\n other = other.asformat(self.format)\n return self._binopt(other, '_ne_')\n else:\n return True\n\n def _inequality(self, other, op, op_name, bad_scalar_msg):\n # Scalar other.\n if isscalarlike(other):\n if 0 == other and op_name in ('_le_', '_ge_'):\n raise NotImplementedError(\" >= and <= don't work with 0.\")\n elif op(0, other):\n warn(bad_scalar_msg, SparseEfficiencyWarning)\n other_arr = np.empty(self.shape, dtype=np.result_type(other))\n other_arr.fill(other)\n other_arr = self.__class__(other_arr)\n return self._binopt(other_arr, op_name)\n else:\n return self._scalar_binopt(other, op)\n # Dense other.\n elif isdense(other):\n return op(self.todense(), other)\n # Sparse other.\n elif isspmatrix(other):\n # TODO sparse broadcasting\n if self.shape != other.shape:\n raise ValueError(\"inconsistent shapes\")\n elif self.format != other.format:\n other = other.asformat(self.format)\n if op_name not in ('_ge_', '_le_'):\n return self._binopt(other, op_name)\n\n warn(\"Comparing sparse matrices using >= and <= is inefficient, \"\n \"using <, >, or !=, instead.\", SparseEfficiencyWarning)\n all_true = self.__class__(np.ones(self.shape, dtype=np.bool_))\n res = self._binopt(other, '_gt_' if op_name == '_le_' else '_lt_')\n return all_true - res\n else:\n 
raise ValueError(\"Operands could not be compared.\")\n\n def __lt__(self, other):\n return self._inequality(other, operator.lt, '_lt_',\n \"Comparing a sparse matrix with a scalar \"\n \"greater than zero using < is inefficient, \"\n \"try using >= instead.\")\n\n def __gt__(self, other):\n return self._inequality(other, operator.gt, '_gt_',\n \"Comparing a sparse matrix with a scalar \"\n \"less than zero using > is inefficient, \"\n \"try using <= instead.\")\n\n def __le__(self, other):\n return self._inequality(other, operator.le, '_le_',\n \"Comparing a sparse matrix with a scalar \"\n \"greater than zero using <= is inefficient, \"\n \"try using > instead.\")\n\n def __ge__(self, other):\n return self._inequality(other, operator.ge, '_ge_',\n \"Comparing a sparse matrix with a scalar \"\n \"less than zero using >= is inefficient, \"\n \"try using < instead.\")\n\n #################################\n # Arithmetic operator overrides #\n #################################\n\n def _add_dense(self, other):\n if other.shape != self.shape:\n raise ValueError('Incompatible shapes.')\n dtype = upcast_char(self.dtype.char, other.dtype.char)\n order = self._swap('CF')[0]\n result = np.array(other, dtype=dtype, order=order, copy=True)\n M, N = self._swap(self.shape)\n y = result if result.flags.c_contiguous else result.T\n csr_todense(M, N, self.indptr, self.indices, self.data, y)\n return matrix(result, copy=False)\n\n def _add_sparse(self, other):\n return self._binopt(other, '_plus_')\n\n def _sub_sparse(self, other):\n return self._binopt(other, '_minus_')\n\n def multiply(self, other):\n \"\"\"Point-wise multiplication by another matrix, vector, or\n scalar.\n \"\"\"\n # Scalar multiplication.\n if isscalarlike(other):\n return self._mul_scalar(other)\n # Sparse matrix or vector.\n if isspmatrix(other):\n if self.shape == other.shape:\n other = self.__class__(other)\n return self._binopt(other, '_elmul_')\n # Single element.\n elif other.shape == (1, 1):\n return self._mul_scalar(other.toarray()[0, 0])\n elif self.shape == (1, 1):\n return other._mul_scalar(self.toarray()[0, 0])\n # A row times a column.\n elif self.shape[1] == 1 and other.shape[0] == 1:\n return self._mul_sparse_matrix(other.tocsc())\n elif self.shape[0] == 1 and other.shape[1] == 1:\n return other._mul_sparse_matrix(self.tocsc())\n # Row vector times matrix. other is a row.\n elif other.shape[0] == 1 and self.shape[1] == other.shape[1]:\n other = dia_matrix((other.toarray().ravel(), [0]),\n shape=(other.shape[1], other.shape[1]))\n return self._mul_sparse_matrix(other)\n # self is a row.\n elif self.shape[0] == 1 and self.shape[1] == other.shape[1]:\n copy = dia_matrix((self.toarray().ravel(), [0]),\n shape=(self.shape[1], self.shape[1]))\n return other._mul_sparse_matrix(copy)\n # Column vector times matrix. 
other is a column.\n elif other.shape[1] == 1 and self.shape[0] == other.shape[0]:\n other = dia_matrix((other.toarray().ravel(), [0]),\n shape=(other.shape[0], other.shape[0]))\n return other._mul_sparse_matrix(self)\n # self is a column.\n elif self.shape[1] == 1 and self.shape[0] == other.shape[0]:\n copy = dia_matrix((self.toarray().ravel(), [0]),\n shape=(self.shape[0], self.shape[0]))\n return copy._mul_sparse_matrix(other)\n else:\n raise ValueError(\"inconsistent shapes\")\n\n # Assume other is a dense matrix/array, which produces a single-item\n # object array if other isn't convertible to ndarray.\n other = np.atleast_2d(other)\n\n if other.ndim != 2:\n return np.multiply(self.toarray(), other)\n # Single element / wrapped object.\n if other.size == 1:\n return self._mul_scalar(other.flat[0])\n # Fast case for trivial sparse matrix.\n elif self.shape == (1, 1):\n return np.multiply(self.toarray()[0, 0], other)\n\n from .coo import coo_matrix\n ret = self.tocoo()\n # Matching shapes.\n if self.shape == other.shape:\n data = np.multiply(ret.data, other[ret.row, ret.col])\n # Sparse row vector times...\n elif self.shape[0] == 1:\n if other.shape[1] == 1: # Dense column vector.\n data = np.multiply(ret.data, other)\n elif other.shape[1] == self.shape[1]: # Dense matrix.\n data = np.multiply(ret.data, other[:, ret.col])\n else:\n raise ValueError(\"inconsistent shapes\")\n row = np.repeat(np.arange(other.shape[0]), len(ret.row))\n col = np.tile(ret.col, other.shape[0])\n return coo_matrix((data.view(np.ndarray).ravel(), (row, col)),\n shape=(other.shape[0], self.shape[1]),\n copy=False)\n # Sparse column vector times...\n elif self.shape[1] == 1:\n if other.shape[0] == 1: # Dense row vector.\n data = np.multiply(ret.data[:, None], other)\n elif other.shape[0] == self.shape[0]: # Dense matrix.\n data = np.multiply(ret.data[:, None], other[ret.row])\n else:\n raise ValueError(\"inconsistent shapes\")\n row = np.repeat(ret.row, other.shape[1])\n col = np.tile(np.arange(other.shape[1]), len(ret.col))\n return coo_matrix((data.view(np.ndarray).ravel(), (row, col)),\n shape=(self.shape[0], other.shape[1]),\n copy=False)\n # Sparse matrix times dense row vector.\n elif other.shape[0] == 1 and self.shape[1] == other.shape[1]:\n data = np.multiply(ret.data, other[:, ret.col].ravel())\n # Sparse matrix times dense column vector.\n elif other.shape[1] == 1 and self.shape[0] == other.shape[0]:\n data = np.multiply(ret.data, other[ret.row].ravel())\n else:\n raise ValueError(\"inconsistent shapes\")\n ret.data = data.view(np.ndarray).ravel()\n return ret\n\n ###########################\n # Multiplication handlers #\n ###########################\n\n def _mul_vector(self, other):\n M, N = self.shape\n\n # output array\n result = np.zeros(M, dtype=upcast_char(self.dtype.char,\n other.dtype.char))\n\n # csr_matvec or csc_matvec\n fn = getattr(_sparsetools, self.format + '_matvec')\n fn(M, N, self.indptr, self.indices, self.data, other, result)\n\n return result\n\n def _mul_multivector(self, other):\n M, N = self.shape\n n_vecs = other.shape[1] # number of column vectors\n\n result = np.zeros((M, n_vecs),\n dtype=upcast_char(self.dtype.char, other.dtype.char))\n\n # csr_matvecs or csc_matvecs\n fn = getattr(_sparsetools, self.format + '_matvecs')\n fn(M, N, n_vecs, self.indptr, self.indices, self.data,\n other.ravel(), result.ravel())\n\n return result\n\n def _mul_sparse_matrix(self, other):\n M, K1 = self.shape\n K2, N = other.shape\n\n major_axis = self._swap((M, N))[0]\n other = 
self.__class__(other) # convert to this format\n\n idx_dtype = get_index_dtype((self.indptr, self.indices,\n other.indptr, other.indices))\n\n fn = getattr(_sparsetools, self.format + '_matmat_maxnnz')\n nnz = fn(M, N,\n np.asarray(self.indptr, dtype=idx_dtype),\n np.asarray(self.indices, dtype=idx_dtype),\n np.asarray(other.indptr, dtype=idx_dtype),\n np.asarray(other.indices, dtype=idx_dtype))\n\n idx_dtype = get_index_dtype((self.indptr, self.indices,\n other.indptr, other.indices),\n maxval=nnz)\n\n indptr = np.empty(major_axis + 1, dtype=idx_dtype)\n indices = np.empty(nnz, dtype=idx_dtype)\n data = np.empty(nnz, dtype=upcast(self.dtype, other.dtype))\n\n fn = getattr(_sparsetools, self.format + '_matmat')\n fn(M, N, np.asarray(self.indptr, dtype=idx_dtype),\n np.asarray(self.indices, dtype=idx_dtype),\n self.data,\n np.asarray(other.indptr, dtype=idx_dtype),\n np.asarray(other.indices, dtype=idx_dtype),\n other.data,\n indptr, indices, data)\n\n return self.__class__((data, indices, indptr), shape=(M, N))\n\n def diagonal(self, k=0):\n rows, cols = self.shape\n if k <= -rows or k >= cols:\n raise ValueError(\"k exceeds matrix dimensions\")\n fn = getattr(_sparsetools, self.format + \"_diagonal\")\n y = np.empty(min(rows + min(k, 0), cols - max(k, 0)),\n dtype=upcast(self.dtype))\n fn(k, self.shape[0], self.shape[1], self.indptr, self.indices,\n self.data, y)\n return y\n\n diagonal.__doc__ = spmatrix.diagonal.__doc__\n\n #####################\n # Other binary ops #\n #####################\n\n def _maximum_minimum(self, other, npop, op_name, dense_check):\n if isscalarlike(other):\n if dense_check(other):\n warn(\"Taking maximum (minimum) with > 0 (< 0) number results\"\n \" to a dense matrix.\", SparseEfficiencyWarning,\n stacklevel=3)\n other_arr = np.empty(self.shape, dtype=np.asarray(other).dtype)\n other_arr.fill(other)\n other_arr = self.__class__(other_arr)\n return self._binopt(other_arr, op_name)\n else:\n self.sum_duplicates()\n new_data = npop(self.data, np.asarray(other))\n mat = self.__class__((new_data, self.indices, self.indptr),\n dtype=new_data.dtype, shape=self.shape)\n return mat\n elif isdense(other):\n return npop(self.todense(), other)\n elif isspmatrix(other):\n return self._binopt(other, op_name)\n else:\n raise ValueError(\"Operands not compatible.\")\n\n def maximum(self, other):\n return self._maximum_minimum(other, np.maximum,\n '_maximum_', lambda x: np.asarray(x) > 0)\n\n maximum.__doc__ = spmatrix.maximum.__doc__\n\n def minimum(self, other):\n return self._maximum_minimum(other, np.minimum,\n '_minimum_', lambda x: np.asarray(x) < 0)\n\n minimum.__doc__ = spmatrix.minimum.__doc__\n\n #####################\n # Reduce operations #\n #####################\n\n def sum(self, axis=None, dtype=None, out=None):\n \"\"\"Sum the matrix over the given axis. 
If the axis is None, sum\n over both rows and columns, returning a scalar.\n \"\"\"\n # The spmatrix base class already does axis=0 and axis=1 efficiently\n # so we only do the case axis=None here\n if (not hasattr(self, 'blocksize') and\n axis in self._swap(((1, -1), (0, 2)))[0]):\n # faster than multiplication for large minor axis in CSC/CSR\n res_dtype = get_sum_dtype(self.dtype)\n ret = np.zeros(len(self.indptr) - 1, dtype=res_dtype)\n\n major_index, value = self._minor_reduce(np.add)\n ret[major_index] = value\n ret = asmatrix(ret)\n if axis % 2 == 1:\n ret = ret.T\n\n if out is not None and out.shape != ret.shape:\n raise ValueError('dimensions do not match')\n\n return ret.sum(axis=(), dtype=dtype, out=out)\n # spmatrix will handle the remaining situations when axis\n # is in {None, -1, 0, 1}\n else:\n return spmatrix.sum(self, axis=axis, dtype=dtype, out=out)\n\n sum.__doc__ = spmatrix.sum.__doc__\n\n def _minor_reduce(self, ufunc, data=None):\n \"\"\"Reduce nonzeros with a ufunc over the minor axis when non-empty\n\n Can be applied to a function of self.data by supplying data parameter.\n\n Warning: this does not call sum_duplicates()\n\n Returns\n -------\n major_index : array of ints\n Major indices where nonzero\n\n value : array of self.dtype\n Reduce result for nonzeros in each major_index\n \"\"\"\n if data is None:\n data = self.data\n major_index = np.flatnonzero(np.diff(self.indptr))\n value = ufunc.reduceat(data,\n downcast_intp_index(self.indptr[major_index]))\n return major_index, value\n\n #######################\n # Getting and Setting #\n #######################\n\n def _get_intXint(self, row, col):\n M, N = self._swap(self.shape)\n major, minor = self._swap((row, col))\n indptr, indices, data = get_csr_submatrix(\n M, N, self.indptr, self.indices, self.data,\n major, major + 1, minor, minor + 1)\n return data.sum(dtype=self.dtype)\n\n def _get_sliceXslice(self, row, col):\n major, minor = self._swap((row, col))\n if major.step in (1, None) and minor.step in (1, None):\n return self._get_submatrix(major, minor, copy=True)\n return self._major_slice(major)._minor_slice(minor)\n\n def _get_arrayXarray(self, row, col):\n # inner indexing\n idx_dtype = self.indices.dtype\n M, N = self._swap(self.shape)\n major, minor = self._swap((row, col))\n major = np.asarray(major, dtype=idx_dtype)\n minor = np.asarray(minor, dtype=idx_dtype)\n\n val = np.empty(major.size, dtype=self.dtype)\n csr_sample_values(M, N, self.indptr, self.indices, self.data,\n major.size, major.ravel(), minor.ravel(), val)\n if major.ndim == 1:\n return asmatrix(val)\n return self.__class__(val.reshape(major.shape))\n\n def _get_columnXarray(self, row, col):\n # outer indexing\n major, minor = self._swap((row, col))\n return self._major_index_fancy(major)._minor_index_fancy(minor)\n\n def _major_index_fancy(self, idx):\n \"\"\"Index along the major axis where idx is an array of ints.\n \"\"\"\n idx_dtype = self.indices.dtype\n indices = np.asarray(idx, dtype=idx_dtype).ravel()\n\n _, N = self._swap(self.shape)\n M = len(indices)\n new_shape = self._swap((M, N))\n if M == 0:\n return self.__class__(new_shape)\n\n row_nnz = np.diff(self.indptr)\n idx_dtype = self.indices.dtype\n res_indptr = np.zeros(M+1, dtype=idx_dtype)\n np.cumsum(row_nnz[idx], out=res_indptr[1:])\n\n nnz = res_indptr[-1]\n res_indices = np.empty(nnz, dtype=idx_dtype)\n res_data = np.empty(nnz, dtype=self.dtype)\n csr_row_index(M, indices, self.indptr, self.indices, self.data,\n res_indices, res_data)\n\n return 
self.__class__((res_data, res_indices, res_indptr),\n shape=new_shape, copy=False)\n\n def _major_slice(self, idx, copy=False):\n \"\"\"Index along the major axis where idx is a slice object.\n \"\"\"\n if idx == slice(None):\n return self.copy() if copy else self\n\n M, N = self._swap(self.shape)\n start, stop, step = idx.indices(M)\n M = len(range(start, stop, step))\n new_shape = self._swap((M, N))\n if M == 0:\n return self.__class__(new_shape)\n\n row_nnz = np.diff(self.indptr)\n idx_dtype = self.indices.dtype\n res_indptr = np.zeros(M+1, dtype=idx_dtype)\n np.cumsum(row_nnz[idx], out=res_indptr[1:])\n\n if step == 1:\n all_idx = slice(self.indptr[start], self.indptr[stop])\n res_indices = np.array(self.indices[all_idx], copy=copy)\n res_data = np.array(self.data[all_idx], copy=copy)\n else:\n nnz = res_indptr[-1]\n res_indices = np.empty(nnz, dtype=idx_dtype)\n res_data = np.empty(nnz, dtype=self.dtype)\n csr_row_slice(start, stop, step, self.indptr, self.indices,\n self.data, res_indices, res_data)\n\n return self.__class__((res_data, res_indices, res_indptr),\n shape=new_shape, copy=False)\n\n def _minor_index_fancy(self, idx):\n \"\"\"Index along the minor axis where idx is an array of ints.\n \"\"\"\n idx_dtype = self.indices.dtype\n idx = np.asarray(idx, dtype=idx_dtype).ravel()\n\n M, N = self._swap(self.shape)\n k = len(idx)\n new_shape = self._swap((M, k))\n if k == 0:\n return self.__class__(new_shape)\n\n # pass 1: count idx entries and compute new indptr\n col_offsets = np.zeros(N, dtype=idx_dtype)\n res_indptr = np.empty_like(self.indptr)\n csr_column_index1(k, idx, M, N, self.indptr, self.indices,\n col_offsets, res_indptr)\n\n # pass 2: copy indices/data for selected idxs\n col_order = np.argsort(idx).astype(idx_dtype, copy=False)\n nnz = res_indptr[-1]\n res_indices = np.empty(nnz, dtype=idx_dtype)\n res_data = np.empty(nnz, dtype=self.dtype)\n csr_column_index2(col_order, col_offsets, len(self.indices),\n self.indices, self.data, res_indices, res_data)\n return self.__class__((res_data, res_indices, res_indptr),\n shape=new_shape, copy=False)\n\n def _minor_slice(self, idx, copy=False):\n \"\"\"Index along the minor axis where idx is a slice object.\n \"\"\"\n if idx == slice(None):\n return self.copy() if copy else self\n\n M, N = self._swap(self.shape)\n start, stop, step = idx.indices(N)\n N = len(range(start, stop, step))\n if N == 0:\n return self.__class__(self._swap((M, N)))\n if step == 1:\n return self._get_submatrix(minor=idx, copy=copy)\n # TODO: don't fall back to fancy indexing here\n return self._minor_index_fancy(np.arange(start, stop, step))\n\n def _get_submatrix(self, major=None, minor=None, copy=False):\n \"\"\"Return a submatrix of this matrix.\n\n major, minor: None, int, or slice with step 1\n \"\"\"\n M, N = self._swap(self.shape)\n i0, i1 = _process_slice(major, M)\n j0, j1 = _process_slice(minor, N)\n\n if i0 == 0 and j0 == 0 and i1 == M and j1 == N:\n return self.copy() if copy else self\n\n indptr, indices, data = get_csr_submatrix(\n M, N, self.indptr, self.indices, self.data, i0, i1, j0, j1)\n\n shape = self._swap((i1 - i0, j1 - j0))\n return self.__class__((data, indices, indptr), shape=shape,\n dtype=self.dtype, copy=False)\n\n def _set_intXint(self, row, col, x):\n i, j = self._swap((row, col))\n self._set_many(i, j, x)\n\n def _set_arrayXarray(self, row, col, x):\n i, j = self._swap((row, col))\n self._set_many(i, j, x)\n\n def _set_arrayXarray_sparse(self, row, col, x):\n # clear entries that will be overwritten\n 
self._zero_many(*self._swap((row, col)))\n\n M, N = row.shape # matches col.shape\n broadcast_row = M != 1 and x.shape[0] == 1\n broadcast_col = N != 1 and x.shape[1] == 1\n r, c = x.row, x.col\n x = np.asarray(x.data, dtype=self.dtype)\n if broadcast_row:\n r = np.repeat(np.arange(M), len(r))\n c = np.tile(c, M)\n x = np.tile(x, M)\n if broadcast_col:\n r = np.repeat(r, N)\n c = np.tile(np.arange(N), len(c))\n x = np.repeat(x, N)\n # only assign entries in the new sparsity structure\n i, j = self._swap((row[r, c], col[r, c]))\n self._set_many(i, j, x)\n\n def _setdiag(self, values, k):\n if 0 in self.shape:\n return\n\n M, N = self.shape\n broadcast = (values.ndim == 0)\n\n if k < 0:\n if broadcast:\n max_index = min(M + k, N)\n else:\n max_index = min(M + k, N, len(values))\n i = np.arange(max_index, dtype=self.indices.dtype)\n j = np.arange(max_index, dtype=self.indices.dtype)\n i -= k\n\n else:\n if broadcast:\n max_index = min(M, N - k)\n else:\n max_index = min(M, N - k, len(values))\n i = np.arange(max_index, dtype=self.indices.dtype)\n j = np.arange(max_index, dtype=self.indices.dtype)\n j += k\n\n if not broadcast:\n values = values[:len(i)]\n\n self[i, j] = values\n\n def _prepare_indices(self, i, j):\n M, N = self._swap(self.shape)\n\n def check_bounds(indices, bound):\n idx = indices.max()\n if idx >= bound:\n raise IndexError('index (%d) out of range (>= %d)' %\n (idx, bound))\n idx = indices.min()\n if idx < -bound:\n raise IndexError('index (%d) out of range (< -%d)' %\n (idx, bound))\n\n i = np.array(i, dtype=self.indices.dtype, copy=False, ndmin=1).ravel()\n j = np.array(j, dtype=self.indices.dtype, copy=False, ndmin=1).ravel()\n check_bounds(i, M)\n check_bounds(j, N)\n return i, j, M, N\n\n def _set_many(self, i, j, x):\n \"\"\"Sets value at each (i, j) to x\n\n Here (i,j) index major and minor respectively, and must not contain\n duplicate entries.\n \"\"\"\n i, j, M, N = self._prepare_indices(i, j)\n x = np.array(x, dtype=self.dtype, copy=False, ndmin=1).ravel()\n\n n_samples = x.size\n offsets = np.empty(n_samples, dtype=self.indices.dtype)\n ret = csr_sample_offsets(M, N, self.indptr, self.indices, n_samples,\n i, j, offsets)\n if ret == 1:\n # rinse and repeat\n self.sum_duplicates()\n csr_sample_offsets(M, N, self.indptr, self.indices, n_samples,\n i, j, offsets)\n\n if -1 not in offsets:\n # only affects existing non-zero cells\n self.data[offsets] = x\n return\n\n else:\n warn(\"Changing the sparsity structure of a {}_matrix is expensive.\"\n \" lil_matrix is more efficient.\".format(self.format),\n SparseEfficiencyWarning, stacklevel=3)\n # replace where possible\n mask = offsets > -1\n self.data[offsets[mask]] = x[mask]\n # only insertions remain\n mask = ~mask\n i = i[mask]\n i[i < 0] += M\n j = j[mask]\n j[j < 0] += N\n self._insert_many(i, j, x[mask])\n\n def _zero_many(self, i, j):\n \"\"\"Sets value at each (i, j) to zero, preserving sparsity structure.\n\n Here (i,j) index major and minor respectively.\n \"\"\"\n i, j, M, N = self._prepare_indices(i, j)\n\n n_samples = len(i)\n offsets = np.empty(n_samples, dtype=self.indices.dtype)\n ret = csr_sample_offsets(M, N, self.indptr, self.indices, n_samples,\n i, j, offsets)\n if ret == 1:\n # rinse and repeat\n self.sum_duplicates()\n csr_sample_offsets(M, N, self.indptr, self.indices, n_samples,\n i, j, offsets)\n\n # only assign zeros to the existing sparsity structure\n self.data[offsets[offsets > -1]] = 0\n\n def _insert_many(self, i, j, x):\n \"\"\"Inserts new nonzero at each (i, j) with value x\n\n Here 
(i,j) index major and minor respectively.\n i, j and x must be non-empty, 1d arrays.\n Inserts each major group (e.g. all entries per row) at a time.\n Maintains has_sorted_indices property.\n Modifies i, j, x in place.\n \"\"\"\n order = np.argsort(i, kind='mergesort') # stable for duplicates\n i = i.take(order, mode='clip')\n j = j.take(order, mode='clip')\n x = x.take(order, mode='clip')\n\n do_sort = self.has_sorted_indices\n\n # Update index data type\n idx_dtype = get_index_dtype((self.indices, self.indptr),\n maxval=(self.indptr[-1] + x.size))\n self.indptr = np.asarray(self.indptr, dtype=idx_dtype)\n self.indices = np.asarray(self.indices, dtype=idx_dtype)\n i = np.asarray(i, dtype=idx_dtype)\n j = np.asarray(j, dtype=idx_dtype)\n\n # Collate old and new in chunks by major index\n indices_parts = []\n data_parts = []\n ui, ui_indptr = np.unique(i, return_index=True)\n ui_indptr = np.append(ui_indptr, len(j))\n new_nnzs = np.diff(ui_indptr)\n prev = 0\n for c, (ii, js, je) in enumerate(zip(ui, ui_indptr, ui_indptr[1:])):\n # old entries\n start = self.indptr[prev]\n stop = self.indptr[ii]\n indices_parts.append(self.indices[start:stop])\n data_parts.append(self.data[start:stop])\n\n # handle duplicate j: keep last setting\n uj, uj_indptr = np.unique(j[js:je][::-1], return_index=True)\n if len(uj) == je - js:\n indices_parts.append(j[js:je])\n data_parts.append(x[js:je])\n else:\n indices_parts.append(j[js:je][::-1][uj_indptr])\n data_parts.append(x[js:je][::-1][uj_indptr])\n new_nnzs[c] = len(uj)\n\n prev = ii\n\n # remaining old entries\n start = self.indptr[ii]\n indices_parts.append(self.indices[start:])\n data_parts.append(self.data[start:])\n\n # update attributes\n self.indices = np.concatenate(indices_parts)\n self.data = np.concatenate(data_parts)\n nnzs = np.empty(self.indptr.shape, dtype=idx_dtype)\n nnzs[0] = idx_dtype(0)\n indptr_diff = np.diff(self.indptr)\n indptr_diff[ui] += new_nnzs\n nnzs[1:] = indptr_diff\n self.indptr = np.cumsum(nnzs, out=nnzs)\n\n if do_sort:\n # TODO: only sort where necessary\n self.has_sorted_indices = False\n self.sort_indices()\n\n self.check_format(full_check=False)\n\n ######################\n # Conversion methods #\n ######################\n\n def tocoo(self, copy=True):\n major_dim, minor_dim = self._swap(self.shape)\n minor_indices = self.indices\n major_indices = np.empty(len(minor_indices), dtype=self.indices.dtype)\n _sparsetools.expandptr(major_dim, self.indptr, major_indices)\n row, col = self._swap((major_indices, minor_indices))\n\n from .coo import coo_matrix\n return coo_matrix((self.data, (row, col)), self.shape, copy=copy,\n dtype=self.dtype)\n\n tocoo.__doc__ = spmatrix.tocoo.__doc__\n\n def toarray(self, order=None, out=None):\n if out is None and order is None:\n order = self._swap('cf')[0]\n out = self._process_toarray_args(order, out)\n if not (out.flags.c_contiguous or out.flags.f_contiguous):\n raise ValueError('Output array must be C or F contiguous')\n # align ideal order with output array order\n if out.flags.c_contiguous:\n x = self.tocsr()\n y = out\n else:\n x = self.tocsc()\n y = out.T\n M, N = x._swap(x.shape)\n csr_todense(M, N, x.indptr, x.indices, x.data, y)\n return out\n\n toarray.__doc__ = spmatrix.toarray.__doc__\n\n ##############################################################\n # methods that examine or modify the internal data structure #\n ##############################################################\n\n def eliminate_zeros(self):\n \"\"\"Remove zero entries from the matrix\n\n This is an *in 
place* operation\n \"\"\"\n M, N = self._swap(self.shape)\n _sparsetools.csr_eliminate_zeros(M, N, self.indptr, self.indices,\n self.data)\n self.prune() # nnz may have changed\n\n def __get_has_canonical_format(self):\n \"\"\"Determine whether the matrix has sorted indices and no duplicates\n\n Returns\n - True: if the above applies\n - False: otherwise\n\n has_canonical_format implies has_sorted_indices, so if the latter flag\n is False, so will the former be; if the former is found True, the\n latter flag is also set.\n \"\"\"\n\n # first check to see if result was cached\n if not getattr(self, '_has_sorted_indices', True):\n # not sorted => not canonical\n self._has_canonical_format = False\n elif not hasattr(self, '_has_canonical_format'):\n self.has_canonical_format = _sparsetools.csr_has_canonical_format(\n len(self.indptr) - 1, self.indptr, self.indices)\n return self._has_canonical_format\n\n def __set_has_canonical_format(self, val):\n self._has_canonical_format = bool(val)\n if val:\n self.has_sorted_indices = True\n\n has_canonical_format = property(fget=__get_has_canonical_format,\n fset=__set_has_canonical_format)\n\n def sum_duplicates(self):\n \"\"\"Eliminate duplicate matrix entries by adding them together\n\n The is an *in place* operation\n \"\"\"\n if self.has_canonical_format:\n return\n self.sort_indices()\n\n M, N = self._swap(self.shape)\n _sparsetools.csr_sum_duplicates(M, N, self.indptr, self.indices,\n self.data)\n\n self.prune() # nnz may have changed\n self.has_canonical_format = True\n\n def __get_sorted(self):\n \"\"\"Determine whether the matrix has sorted indices\n\n Returns\n - True: if the indices of the matrix are in sorted order\n - False: otherwise\n\n \"\"\"\n\n # first check to see if result was cached\n if not hasattr(self, '_has_sorted_indices'):\n self._has_sorted_indices = _sparsetools.csr_has_sorted_indices(\n len(self.indptr) - 1, self.indptr, self.indices)\n return self._has_sorted_indices\n\n def __set_sorted(self, val):\n self._has_sorted_indices = bool(val)\n\n has_sorted_indices = property(fget=__get_sorted, fset=__set_sorted)\n\n def sorted_indices(self):\n \"\"\"Return a copy of this matrix with sorted indices\n \"\"\"\n A = self.copy()\n A.sort_indices()\n return A\n\n # an alternative that has linear complexity is the following\n # although the previous option is typically faster\n # return self.toother().toother()\n\n def sort_indices(self):\n \"\"\"Sort the indices of this matrix *in place*\n \"\"\"\n\n if not self.has_sorted_indices:\n _sparsetools.csr_sort_indices(len(self.indptr) - 1, self.indptr,\n self.indices, self.data)\n self.has_sorted_indices = True\n\n def prune(self):\n \"\"\"Remove empty space after all non-zero elements.\n \"\"\"\n major_dim = self._swap(self.shape)[0]\n\n if len(self.indptr) != major_dim + 1:\n raise ValueError('index pointer has invalid length')\n if len(self.indices) < self.nnz:\n raise ValueError('indices array has fewer than nnz elements')\n if len(self.data) < self.nnz:\n raise ValueError('data array has fewer than nnz elements')\n\n self.indices = _prune_array(self.indices[:self.nnz])\n self.data = _prune_array(self.data[:self.nnz])\n\n def resize(self, *shape):\n shape = check_shape(shape)\n if hasattr(self, 'blocksize'):\n bm, bn = self.blocksize\n new_M, rm = divmod(shape[0], bm)\n new_N, rn = divmod(shape[1], bn)\n if rm or rn:\n raise ValueError(\"shape must be divisible into %s blocks. 
\"\n \"Got %s\" % (self.blocksize, shape))\n M, N = self.shape[0] // bm, self.shape[1] // bn\n else:\n new_M, new_N = self._swap(shape)\n M, N = self._swap(self.shape)\n\n if new_M < M:\n self.indices = self.indices[:self.indptr[new_M]]\n self.data = self.data[:self.indptr[new_M]]\n self.indptr = self.indptr[:new_M + 1]\n elif new_M > M:\n self.indptr = np.resize(self.indptr, new_M + 1)\n self.indptr[M + 1:].fill(self.indptr[M])\n\n if new_N < N:\n mask = self.indices < new_N\n if not np.all(mask):\n self.indices = self.indices[mask]\n self.data = self.data[mask]\n major_index, val = self._minor_reduce(np.add, mask)\n self.indptr.fill(0)\n self.indptr[1:][major_index] = val\n np.cumsum(self.indptr, out=self.indptr)\n\n self._shape = shape\n\n resize.__doc__ = spmatrix.resize.__doc__\n\n ###################\n # utility methods #\n ###################\n\n # needed by _data_matrix\n def _with_data(self, data, copy=True):\n \"\"\"Returns a matrix with the same sparsity structure as self,\n but with different data. By default the structure arrays\n (i.e. .indptr and .indices) are copied.\n \"\"\"\n if copy:\n return self.__class__((data, self.indices.copy(),\n self.indptr.copy()),\n shape=self.shape,\n dtype=data.dtype)\n else:\n return self.__class__((data, self.indices, self.indptr),\n shape=self.shape, dtype=data.dtype)\n\n def _binopt(self, other, op):\n \"\"\"apply the binary operation fn to two sparse matrices.\"\"\"\n other = self.__class__(other)\n\n # e.g. csr_plus_csr, csr_minus_csr, etc.\n fn = getattr(_sparsetools, self.format + op + self.format)\n\n maxnnz = self.nnz + other.nnz\n idx_dtype = get_index_dtype((self.indptr, self.indices,\n other.indptr, other.indices),\n maxval=maxnnz)\n indptr = np.empty(self.indptr.shape, dtype=idx_dtype)\n indices = np.empty(maxnnz, dtype=idx_dtype)\n\n bool_ops = ['_ne_', '_lt_', '_gt_', '_le_', '_ge_']\n if op in bool_ops:\n data = np.empty(maxnnz, dtype=np.bool_)\n else:\n data = np.empty(maxnnz, dtype=upcast(self.dtype, other.dtype))\n\n fn(self.shape[0], self.shape[1],\n np.asarray(self.indptr, dtype=idx_dtype),\n np.asarray(self.indices, dtype=idx_dtype),\n self.data,\n np.asarray(other.indptr, dtype=idx_dtype),\n np.asarray(other.indices, dtype=idx_dtype),\n other.data,\n indptr, indices, data)\n\n A = self.__class__((data, indices, indptr), shape=self.shape)\n A.prune()\n\n return A\n\n def _divide_sparse(self, other):\n \"\"\"\n Divide this matrix by a second sparse matrix.\n \"\"\"\n if other.shape != self.shape:\n raise ValueError('inconsistent shapes')\n\n r = self._binopt(other, '_eldiv_')\n\n if np.issubdtype(r.dtype, np.inexact):\n # Eldiv leaves entries outside the combined sparsity\n # pattern empty, so they must be filled manually.\n # Everything outside of other's sparsity is NaN, and everything\n # inside it is either zero or defined by eldiv.\n out = np.empty(self.shape, dtype=self.dtype)\n out.fill(np.nan)\n row, col = other.nonzero()\n out[row, col] = 0\n r = r.tocoo()\n out[r.row, r.col] = r.data\n out = matrix(out)\n else:\n # integers types go with nan <-> 0\n out = r\n\n return out\n\n\ndef _process_slice(sl, num):\n if sl is None:\n i0, i1 = 0, num\n elif isinstance(sl, slice):\n i0, i1, stride = sl.indices(num)\n if stride != 1:\n raise ValueError('slicing with step != 1 not supported')\n i0 = min(i0, i1) # give an empty slice when i0 > i1\n elif isintlike(sl):\n if sl < 0:\n sl += num\n i0, i1 = sl, sl + 1\n if i0 < 0 or i1 > num:\n raise IndexError('index out of bounds: 0 <= %d < %d <= %d' %\n (i0, i1, num))\n 
else:\n raise TypeError('expected slice or scalar')\n\n return i0, i1\n"
] | [
[
"numpy.resize",
"numpy.asarray",
"numpy.issubdtype",
"numpy.cumsum",
"numpy.concatenate",
"numpy.all",
"numpy.unique",
"numpy.empty_like",
"numpy.arange",
"numpy.diff",
"numpy.repeat",
"numpy.zeros",
"numpy.multiply",
"numpy.isnan",
"numpy.atleast_2d",
"numpy.argsort",
"numpy.array",
"scipy._lib._util._prune_array",
"numpy.tile",
"numpy.ones",
"numpy.result_type",
"numpy.empty"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
athatheo/House-GANs-Reproduction | [
"00cc807f1e74f88eef5ed81615bfd87a39c52f94"
] | [
"src/models.py"
] | [
"import torch\nfrom torch import cat\nfrom torch.nn import Conv2d\nfrom torch.nn import Linear\nfrom torch.nn import Module\nfrom torch.nn import ConvTranspose2d\nfrom torch.nn import LeakyReLU\nfrom torch.nn import Tanh\nfrom torch.nn import MaxPool2d\nfrom torch import zeros_like\n\n\nclass ConvMPN(Module):\n def __init__(self):\n super().__init__()\n self.conv1 = Conv2d(in_channels=3*16, out_channels=2*16, kernel_size=(3, 3), stride=(1, 1), padding=1)\n self.conv2 = Conv2d(in_channels=2*16, out_channels=2*16, kernel_size=(3, 3), stride=(1, 1), padding=1)\n self.conv3 = Conv2d(in_channels=2*16, out_channels=16, kernel_size=(3, 3), stride=(1, 1), padding=1)\n self.leaky_relu = LeakyReLU(0.1)\n\n def get_nodes(self, feature_vectors, edges, include_neighbours=True):\n device = feature_vectors.device\n nodes = zeros_like(feature_vectors, device=device)\n if include_neighbours:\n index = torch.where(edges[:, 1] > 0)\n else:\n index = torch.where(edges[:, 1] < 0)\n\n src = torch.cat([edges[index[0], 0], edges[index[0], 2]]).long()\n dst = torch.cat([edges[index[0], 2], edges[index[0], 0]]).long()\n src = feature_vectors[src.contiguous()]\n dst = dst.view(-1, 1, 1, 1).expand_as(src).to(device)\n return nodes.scatter_add(0, dst, src)\n\n def cat_nodes(self, feature_vectors, edges):\n neighbouring_nodes = self.get_nodes(feature_vectors, edges, include_neighbours=True, )\n non_neighbouring_nodes = self.get_nodes(feature_vectors, edges, include_neighbours=False)\n\n encoding = torch.cat([feature_vectors, neighbouring_nodes, non_neighbouring_nodes], 1)\n return encoding\n\n def forward(self, x, edges):\n x = self.cat_nodes(x, edges)\n x = self.conv1(x)\n x = self.leaky_relu(x)\n x = self.conv2(x)\n x = self.leaky_relu(x)\n x = self.conv3(x)\n x = self.leaky_relu(x)\n return x\n\n\nclass Generator(Module):\n def __init__(self):\n super().__init__()\n self.linear_reshape_1 = Linear(138, 1024)\n self.conv_mpn_1 = ConvMPN()\n self.upsample_1 = ConvTranspose2d(16, 16, 4, 2, 1)\n self.conv_mpn_2 = ConvMPN()\n self.upsample_2 = ConvTranspose2d(16, 16, 4, 2, 1)\n self.conv_1 = Conv2d(16, 256, 3, 1, 1)\n self.leaky_relu = LeakyReLU(0.1)\n self.conv_2 = Conv2d(256, 128, 3, 1, 1)\n self.conv_3 = Conv2d(128, 1, 3, 1, 1)\n self.tanh = Tanh()\n\n def forward(self, z, t, edges):\n z = z.view(-1, 128)#\n t = t.view(-1, 10) #\n x = cat([z, t], 1)\n x = self.linear_reshape_1(x)\n x = x.view(-1, 16, 8, 8)\n x = self.conv_mpn_1(x, edges).view(-1, *x.shape[1:])\n x = self.upsample_1(x)\n x = self.leaky_relu(x)\n x = self.conv_mpn_2(x, edges).view(-1, *x.shape[1:])\n x = self.upsample_2(x)\n x = self.leaky_relu(x)\n x = self.conv_1(x.view(-1, x.shape[1], *x.shape[2:]))\n x = self.leaky_relu(x)\n x = self.conv_2(x)\n x = self.leaky_relu(x)\n x = self.conv_3(x)\n x = self.tanh(x)\n x = x.view(-1, *x.shape[2:])\n return x\n\n\nclass Discriminator(Module):\n def __init__(self):\n super().__init__()\n self.linear_reshape_1 = Linear(10, 8192)\n self.leaky_relu = LeakyReLU(0.1)\n self.conv_1 = Conv2d(9, 16, 3, 1, 1, bias=True)\n self.conv_2 = Conv2d(16, 16, 3, 1, 1)\n self.conv_3 = Conv2d(16, 16, 3, 1, 1)\n self.conv_mpn_1 = ConvMPN()\n self.downsample_1 = Conv2d(16, 16, 3, 2, 1)\n self.conv_mpn_2 = ConvMPN()\n self.downsample_2 = Conv2d(16, 16, 3, 2, 1)\n self.dec_conv_1 = Conv2d(16, 256, 3, 2, 1)\n self.dec_conv_2 = Conv2d(256, 128, 3, 2, 1)\n self.dec_conv_3 = Conv2d(128, 128, 3, 2, 1)\n self.pool_reshape_linear = Linear(128, 1)\n\n def add_pool(self, x, nd_to_sample):\n dtype, device = x.dtype, x.device\n batch_size = 
torch.max(nd_to_sample) + 1\n pooled_x = torch.zeros(batch_size, x.shape[-1], device=device).float()\n pool_to = nd_to_sample.view(-1, 1).expand_as(x).to(device)\n pooled_x = pooled_x.scatter_add(0, pool_to, x)\n return pooled_x\n\n def forward(self, x, t, edges, nd_to_sample):\n x = x.view(-1, 1, 32, 32)\n t = self.linear_reshape_1(t)\n t = t.view(-1, 8, 32, 32)\n x = cat([x, t], 1)\n x = self.conv_1(x)\n x = self.leaky_relu(x)\n x = self.conv_2(x)\n x = self.leaky_relu(x)\n x = self.conv_3(x)\n x = self.leaky_relu(x)\n x = self.conv_mpn_1(x, edges)\n x = self.downsample_1(x)\n x = self.leaky_relu(x)\n x = self.conv_mpn_2(x, edges)\n x = self.downsample_2(x)\n x = self.leaky_relu(x)\n x = self.dec_conv_1(x)\n x = self.leaky_relu(x)\n x = self.dec_conv_2(x)\n x = self.leaky_relu(x)\n x = self.dec_conv_3(x)\n x = self.leaky_relu(x)\n x = x.view(-1, x.shape[1])\n x = self.add_pool(x, nd_to_sample)\n x = self.pool_reshape_linear(x)\n return x\n"
] | [
[
"torch.max",
"torch.nn.ConvTranspose2d",
"torch.cat",
"torch.zeros",
"torch.nn.Conv2d",
"torch.zeros_like",
"torch.nn.Tanh",
"torch.nn.Linear",
"torch.nn.LeakyReLU",
"torch.where"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
dyahadila/ood_cartography | [
"ff65bf2b1a170e2913f0019a15af3398a1808f0f"
] | [
"cartography/classification/glue_utils.py"
] | [
"import logging\nimport os\n\nfrom transformers import glue_compute_metrics\nfrom transformers import glue_convert_examples_to_features as convert_examples_to_features\nfrom transformers import glue_output_modes\nfrom transformers import glue_processors\n\nfrom transformers.data.processors.glue import MnliMismatchedProcessor\nfrom transformers.data.processors.utils import InputFeatures\nfrom transformers.file_utils import is_tf_available\nif is_tf_available():\n import tensorflow as tf\n\nlogging.basicConfig(\n format=\"%(asctime)s - %(levelname)s - %(name)s - %(message)s\", level=logging.INFO\n)\nlogger = logging.getLogger(__name__)\n\nfrom cartography.data_utils_glue import convert_string_to_unique_number\nfrom cartography.classification.mnli_utils import AdaptedMnliMismatchedProcessor, AdaptedMnliProcessor\nfrom cartography.classification.qnli_utils import AdaptedQnliProcessor\nfrom cartography.classification.snli_utils import SNLIProcessor\nfrom cartography.classification.winogrande_utils import WinograndeProcessor\nfrom cartography.classification.wnli_utils import AdaptedWnliProcessor\nfrom cartography.classification.rte_utils import AdaptedRteProcessor\n\nimport pandas as pd\n\n\nglue_processors[\"snli\"] = SNLIProcessor\nglue_processors[\"mnli\"] = AdaptedMnliProcessor\nglue_processors[\"mnli-mm\"] = AdaptedMnliMismatchedProcessor\nglue_processors[\"qnli\"] = AdaptedQnliProcessor\nglue_processors[\"winogrande\"] = WinograndeProcessor\nglue_processors[\"wnli\"] = AdaptedWnliProcessor\nglue_processors[\"rte\"] = AdaptedRteProcessor\n\nglue_output_modes[\"snli\"] = \"classification\"\nglue_output_modes[\"winogrande\"] = \"classification\"\n\n\n\nclass AdaptedInputFeatures(InputFeatures):\n def __init__(self, input_ids, attention_mask=None, token_type_ids=None, label=None, example_id=None, lex = None,\n const=None, subs=None, original_idx=None):\n self.input_ids = input_ids\n self.attention_mask = attention_mask\n self.token_type_ids = token_type_ids\n self.label = label\n self.example_id = example_id\n self.lex = lex\n self.const = const\n self.subs = subs\n self.original_idx = original_idx\n\n\ndef get_instance_heuristics(task, data_split):\n mode = data_split\n ### UNCOMMENT FOR MNLI\n if 'dev' in data_split:\n mode = 'dev'\n if task.upper() == 'MNLI':\n mode = 'dev_matched'\n\n df = pd.read_csv(\"/home/jusun/adila001/{}/{}_heuristic.tsv\".format(task.upper(), mode), delimiter=\"\\t|\\n\")\n lexical = df[\"lexical\"].tolist()\n if 'constituent' in set(df.columns):\n constituent = df[\"constituent\"].tolist()\n else:\n constituent = [0 for i in range(df.shape[0])]\n subsequence = df[\"subsequence\"].tolist()\n return lexical, constituent, subsequence\n\ndef adapted_glue_convert_examples_to_features(\n examples,\n tokenizer,\n max_length=512,\n task=None,\n label_list=None,\n output_mode=None,\n pad_on_left=False,\n pad_token=0,\n pad_token_segment_id=0,\n mask_padding_with_zero=True,\n heuristics=True,\n data_split='train',\n):\n \"\"\"\n Adapted from `transformers`. New functionality: also return an integer ID for each example.\n Loads a data file into a list of ``InputFeatures``\n\n Args:\n examples: List of ``InputExamples`` or ``tf.data.Dataset`` containing the examples.\n tokenizer: Instance of a tokenizer that will tokenize the examples\n max_length: Maximum example length\n task: GLUE task\n label_list: List of labels. Can be obtained from the processor using the ``processor.get_labels()`` method\n output_mode: String indicating the output mode. 
Either ``regression`` or ``classification``\n pad_on_left: If set to ``True``, the examples will be padded on the left rather than on the right (default)\n pad_token: Padding token\n pad_token_segment_id: The segment ID for the padding token (It is usually 0, but can vary such as for XLNet where it is 4)\n mask_padding_with_zero: If set to ``True``, the attention mask will be filled by ``1`` for actual values\n and by ``0`` for padded values. If set to ``False``, inverts it (``1`` for padded values, ``0`` for\n actual values)\n\n Returns:\n If the ``examples`` input is a ``tf.data.Dataset``, will return a ``tf.data.Dataset``\n containing the task-specific features. If the input is a list of ``InputExamples``, will return\n a list of task-specific ``InputFeatures`` which can be fed to the model.\n\n \"\"\"\n is_tf_dataset = False\n if is_tf_available() and isinstance(examples, tf.data.Dataset):\n is_tf_dataset = True\n\n if task is not None:\n processor = glue_processors[task]()\n if label_list is None:\n label_list = processor.get_labels()\n logger.info(\"Using label list %s for task %s\" % (label_list, task))\n if output_mode is None:\n output_mode = glue_output_modes[task]\n logger.info(\"Using output mode %s for task %s\" % (output_mode, task))\n\n label_map = {label: i for i, label in enumerate(label_list)}\n\n features = []\n lex = []\n const= []\n subs = []\n if heuristics==True:\n lex, const, subs = get_instance_heuristics(task, data_split)\n\n for (ex_index, example) in enumerate(examples):\n len_examples = 0\n if is_tf_dataset:\n example = processor.get_example_from_tensor_dict(example)\n example = processor.tfds_map(example)\n len_examples = tf.data.experimental.cardinality(examples)\n else:\n len_examples = len(examples)\n if ex_index % 10000 == 0:\n logger.info(\"Writing example %d/%d\" % (ex_index, len_examples))\n\n inputs = tokenizer.encode_plus(example.text_a, example.text_b, add_special_tokens=True, max_length=max_length,)\n input_ids, token_type_ids = inputs[\"input_ids\"], inputs[\"token_type_ids\"]\n\n # The mask has 1 for real tokens and 0 for padding tokens. 
Only real\n # tokens are attended to.\n attention_mask = [1 if mask_padding_with_zero else 0] * len(input_ids)\n\n # Zero-pad up to the sequence length.\n padding_length = max_length - len(input_ids)\n if pad_on_left:\n input_ids = ([pad_token] * padding_length) + input_ids\n attention_mask = ([0 if mask_padding_with_zero else 1] * padding_length) + attention_mask\n token_type_ids = ([pad_token_segment_id] * padding_length) + token_type_ids\n else:\n input_ids = input_ids + ([pad_token] * padding_length)\n attention_mask = attention_mask + ([0 if mask_padding_with_zero else 1] * padding_length)\n token_type_ids = token_type_ids + ([pad_token_segment_id] * padding_length)\n\n assert len(input_ids) == max_length, \"Error with input length {} vs {}\".format(len(input_ids), max_length)\n assert len(attention_mask) == max_length, \"Error with input length {} vs {}\".format(\n len(attention_mask), max_length\n )\n assert len(token_type_ids) == max_length, \"Error with input length {} vs {}\".format(\n len(token_type_ids), max_length\n )\n\n if output_mode == \"classification\":\n label = label_map[example.label]\n elif output_mode == \"regression\":\n label = float(example.label)\n else:\n raise KeyError(output_mode)\n\n example_int_id = convert_string_to_unique_number(example.guid)\n if ex_index < 5:\n logger.info(\"*** Example ***\")\n logger.info(f\"guid: {example_int_id}\")\n logger.info(\"input_ids: %s\" % \" \".join([str(x) for x in input_ids]))\n logger.info(\"attention_mask: %s\" % \" \".join([str(x) for x in attention_mask]))\n logger.info(\"token_type_ids: %s\" % \" \".join([str(x) for x in token_type_ids]))\n logger.info(\"label: %s (id = %d)\" % (example.label, label))\n\n features.append(\n AdaptedInputFeatures(input_ids=input_ids,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n label=label,\n example_id=example_int_id,\n lex=None if len(lex)==0 else lex[ex_index],\n const=None if len(const)==0 else const[ex_index],\n subs=None if len(const)==0else subs[ex_index],\n original_idx=ex_index))\n if is_tf_available() and is_tf_dataset:\n\n def gen():\n for ex in features:\n yield (\n {\n \"input_ids\": ex.input_ids,\n \"attention_mask\": ex.attention_mask,\n \"token_type_ids\": ex.token_type_ids,\n },\n ex.label,\n )\n\n return tf.data.Dataset.from_generator(\n gen,\n ({\"input_ids\": tf.int32, \"attention_mask\": tf.int32, \"token_type_ids\": tf.int32}, tf.int64),\n (\n {\n \"input_ids\": tf.TensorShape([None]),\n \"attention_mask\": tf.TensorShape([None]),\n \"token_type_ids\": tf.TensorShape([None]),\n },\n tf.TensorShape([]),\n ),\n )\n return features\n\n\ndef adapted_glue_compute_metrics(task_name, preds, labels):\n \"Adapted from `glue_compute_metrics` to also handle SNLI.\"\n try:\n return glue_compute_metrics(task_name, preds, labels)\n except KeyError:\n if task_name in [\"snli\", \"winogrande\", \"toxic\"]:\n # Since MNLI also uses accuracy.\n return glue_compute_metrics(\"mnli\", preds, labels)\n raise KeyError(task_name)\n\n"
] | [
[
"tensorflow.TensorShape",
"tensorflow.data.experimental.cardinality"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
Forest216/BigDL | [
"840da9a2eaf395978dd83730b02aa5e5dfbd7989",
"840da9a2eaf395978dd83730b02aa5e5dfbd7989",
"840da9a2eaf395978dd83730b02aa5e5dfbd7989",
"840da9a2eaf395978dd83730b02aa5e5dfbd7989",
"840da9a2eaf395978dd83730b02aa5e5dfbd7989",
"840da9a2eaf395978dd83730b02aa5e5dfbd7989"
] | [
"python/nano/src/bigdl/nano/automl/tf/objective.py",
"python/chronos/src/bigdl/chronos/autots/tspipeline.py",
"python/orca/test/bigdl/orca/ray/integration/test_yarn_reinit_raycontext.py",
"python/dllib/src/bigdl/dllib/utils/file_utils.py",
"python/orca/src/bigdl/orca/data/tf/data.py",
"python/dllib/src/bigdl/dllib/feature/dataset/mnist.py"
] | [
"#\n# Copyright 2016 The BigDL Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\n\nfrom selectors import EpollSelector\nfrom tensorflow.keras.backend import clear_session\nfrom tensorflow.keras.models import clone_model\nimport tensorflow as tf\nimport inspect\nimport copy\n\nfrom bigdl.nano.automl.hpo.backend import create_tfkeras_pruning_callback\nfrom bigdl.nano.utils.log4Error import invalidInputError\n\n\ndef _is_creator(model):\n return inspect.ismethod(model) or inspect.isfunction(model)\n\n\nclass Objective(object):\n \"\"\"The Tuning objective for HPO.\"\"\"\n\n def __init__(self,\n model=None,\n target_metric=None,\n pruning=False,\n backend=None,\n **kwargs\n ):\n \"\"\"\n Init the objective.\n\n :param: model: a model instance or a creator function.\n Defaults to None.\n :param: target_metric: str(optional): target metric to optimize.\n Defaults to None.\n :param: pruning: bool (optional): whether to enable pruning.\n Defaults to False.\n throw: ValueError: _description_\n \"\"\"\n if not _is_creator(model) and not isinstance(model, tf.keras.Model):\n invalidInputError(False,\n \"You should either pass a Tensorflo Keras model, or \"\n \"a model_creator to the Tuning objective.\")\n\n self.model_ = model\n self.target_metric_ = target_metric\n self.pruning = pruning\n self.backend = backend\n self.kwargs = kwargs\n\n @property\n def target_metric(self):\n \"\"\"Get the target metric.\"\"\"\n return self.target_metric_\n\n @target_metric.setter\n def target_metric(self, value):\n \"\"\"Set the target metric.\"\"\"\n # TODO add more validity check here\n self.target_metric_ = value\n\n def _prepare_fit_args(self, trial):\n # only do shallow copy and process/duplicate\n # specific args TODO: may need to handle more cases\n new_kwargs = copy.copy(self.kwargs)\n new_kwargs['verbose'] = 2\n\n # process batch size\n new_kwargs = self.backend.instantiate_param(trial, new_kwargs, 'batch_size')\n\n # process callbacks\n callbacks = new_kwargs.get('callbacks', None)\n callbacks = callbacks() if inspect.isfunction(callbacks) else callbacks\n\n if self.pruning:\n callbacks = callbacks or []\n prune_callback = create_tfkeras_pruning_callback(trial, self.target_metric)\n callbacks.append(prune_callback)\n\n new_kwargs['callbacks'] = callbacks\n return new_kwargs\n\n def __call__(self, trial):\n \"\"\"\n Execute Training and return target metric in each trial.\n\n :param: trial: the trial object which provides the hyperparameter combinition.\n :return: the target metric value.\n \"\"\"\n # Clear clutter from previous Keras session graphs.\n clear_session()\n # TODO may add data creator here, e.g. 
refresh data, reset generators, etc.\n # create model\n if _is_creator(self.model_):\n model = self.model_(trial)\n else:\n # copy model so that the original model is not changed\n # Need tests to check this path\n model = clone_model(self.model_)\n\n # fit\n new_kwargs = self._prepare_fit_args(trial)\n hist = model.fit(**new_kwargs)\n\n score = hist.history.get(self.target_metric, None)\n if score is not None:\n if isinstance(score, list):\n # score = score[-1]\n score = max(score)\n return score\n",
"#\n# Copyright 2016 The BigDL Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nimport os\nimport torch\nimport types\nimport numpy as np\n\nfrom bigdl.chronos.data import TSDataset\nfrom bigdl.chronos.metric.forecast_metrics import Evaluator\n\nDEFAULT_MODEL_INIT_DIR = \"model_init.ckpt\"\nDEFAULT_BEST_MODEL_DIR = \"best_model.ckpt\"\nDEFAULT_DATA_PROCESS_DIR = \"data_process.ckpt\"\nDEFAULT_BEST_CONFIG_DIR = \"best_config.ckpt\"\n\n\nclass TSPipeline:\n '''\n TSPipeline is an E2E solution for time series analysis (only forecasting task for now).\n You can use TSPipeline to:\n\n 1. Further development on the prototype. (predict, evaluate, incremental fit)\n\n 2. Deploy the model to their scenario. (save, load)\n '''\n def __init__(self,\n model,\n loss,\n optimizer,\n model_creator,\n loss_creator,\n optimizer_creator,\n best_config,\n **kwargs):\n from bigdl.nano.pytorch.trainer import Trainer\n\n # for runtime fit/predict/evaluate\n self._best_model = Trainer.compile(model=model,\n loss=loss,\n optimizer=optimizer)\n self._best_config = best_config\n self._onnxruntime_fp32 = None\n self._onnxruntime_int8 = None\n self._pytorch_int8 = None\n\n # for data postprocessing\n self._scaler = None\n self._scaler_index = None\n if \"scaler\" in kwargs.keys():\n self._scaler = kwargs[\"scaler\"]\n self._scaler_index = kwargs[\"scaler_index\"]\n\n # for save/load\n self.model_creator = model_creator\n self.loss_creator = loss_creator\n self.optimizer_creator = optimizer_creator\n\n def evaluate(self, data, metrics=['mse'], multioutput=\"uniform_average\",\n batch_size=32, quantize=False):\n '''\n Evaluate the time series pipeline.\n\n :param data: data can be a TSDataset or data creator.\n The TSDataset should follow the same operations as the training\n TSDataset used in AutoTSEstimator.fit.\n :param metrics: list of string or callable. e.g. ['mse'] or [customized_metrics]\n If callable function, it signature should be func(y_true, y_pred), where y_true and\n y_pred are numpy ndarray. The function should return a float value as evaluation\n result.\n :param multioutput: Defines aggregating of multiple output values.\n String in ['raw_values', 'uniform_average']. The value defaults to\n 'uniform_average'.\n :param batch_size: predict batch_size, the process will cost more time\n if batch_size is small while cost less memory. The param is only\n effective when data is a TSDataset. 
The values defaults to 32.\n :param quantize: if use the quantized model to predict.\n '''\n from bigdl.chronos.pytorch.utils import _pytorch_fashion_inference\n\n # predict\n if isinstance(data, TSDataset):\n x, y = self._tsdataset_to_numpy(data, is_predict=False)\n if quantize:\n yhat = _pytorch_fashion_inference(model=self._pytorch_int8,\n input_data=x,\n batch_size=batch_size)\n else:\n self._best_model.eval()\n yhat = _pytorch_fashion_inference(model=self._best_model,\n input_data=x,\n batch_size=batch_size)\n # unscale\n yhat = self._tsdataset_unscale(yhat)\n y = self._tsdataset_unscale(y)\n elif isinstance(data, types.FunctionType):\n yhat_list, y_list = [], []\n self._best_config.update({'batch_size': batch_size})\n for x, y in data(self._best_config):\n if quantize:\n yhat = _pytorch_fashion_inference(model=self._pytorch_int8,\n input_data=x.numpy())\n else:\n self._best_model.eval()\n yhat = _pytorch_fashion_inference(model=self._best_model,\n input_data=x.numpy())\n yhat_list.append(yhat)\n y_list.append(y)\n yhat = np.concatenate(yhat_list, axis=0)\n y = torch.cat(y_list, dim=0).numpy()\n else:\n from bigdl.nano.utils.log4Error import invalidInputError\n invalidInputError(False,\n \"We only support input tsdataset or data creator, \"\n f\"but found {data.__class__.__name__}.\")\n\n # evaluate\n aggregate = 'mean' if multioutput == 'uniform_average' else None\n eval_result = Evaluator.evaluate(metrics, y, yhat, aggregate=aggregate)\n return eval_result\n\n def evaluate_with_onnx(self, data, metrics=['mse'], multioutput=\"uniform_average\",\n batch_size=32, quantize=False):\n '''\n Evaluate the time series pipeline with onnx.\n\n :param data: data can be a TSDataset or data creator.\n The TSDataset should follow the same operations as the training\n TSDataset used in AutoTSEstimator.fit.\n :param metrics: list of string or callable. e.g. ['mse'] or [customized_metrics]\n If callable function, it signature should be func(y_true, y_pred), where y_true and\n y_pred are numpy ndarray. The function should return a float value as evaluation\n result.\n :param multioutput: Defines aggregating of multiple output values.\n String in ['raw_values', 'uniform_average']. The value defaults to\n 'uniform_average'.\n :param batch_size: predict batch_size, the process will cost more time\n if batch_size is small while cost less memory. The param is only\n effective when data is a TSDataset. 
The values defaults to 32.\n :param quantize: if use the quantized model to predict.\n '''\n from bigdl.chronos.pytorch import TSTrainer as Trainer\n from bigdl.chronos.pytorch.utils import _pytorch_fashion_inference\n from bigdl.nano.utils.log4Error import invalidInputError\n # predict with onnx\n if isinstance(data, TSDataset):\n x, y = self._tsdataset_to_numpy(data, is_predict=False)\n yhat = None\n if quantize:\n yhat = _pytorch_fashion_inference(model=self._onnxruntime_int8,\n input_data=x,\n batch_size=batch_size)\n else:\n if self._onnxruntime_fp32 is None:\n self._onnxruntime_fp32 = Trainer.trace(self._best_model,\n input_sample=torch.from_numpy(x[0:1]),\n accelerator=\"onnxruntime\")\n yhat = _pytorch_fashion_inference(model=self._onnxruntime_fp32,\n input_data=x,\n batch_size=batch_size)\n yhat = self._tsdataset_unscale(yhat)\n # unscale\n y = self._tsdataset_unscale(y)\n elif isinstance(data, types.FunctionType):\n yhat_list, y_list = [], []\n self._best_config.update({'batch_size': batch_size})\n yhat = None\n for x, y in data(self._best_config):\n if quantize:\n yhat = _pytorch_fashion_inference(model=self._onnxruntime_int8,\n input_data=x.numpy(),\n batch_size=batch_size)\n else:\n if self._onnxruntime_fp32 is None:\n self._onnxruntime_fp32 = Trainer.trace(self._best_model,\n input_sample=x[0:1],\n accelerator=\"onnxruntime\")\n yhat = _pytorch_fashion_inference(model=self._onnxruntime_fp32,\n input_data=x.numpy(),\n batch_size=batch_size)\n yhat_list.append(yhat)\n y_list.append(y)\n yhat = np.concatenate(yhat_list, axis=0)\n y = torch.cat(y_list, dim=0).numpy()\n else:\n invalidInputError(False,\n \"We only support input tsdataset or data creator, \"\n f\"but found {data.__class__.__name__}.\")\n # evaluate\n aggregate = 'mean' if multioutput == 'uniform_average' else None\n eval_result = Evaluator.evaluate(metrics, y, yhat, aggregate=aggregate)\n return eval_result\n\n def predict(self, data, batch_size=32, quantize=False):\n '''\n Rolling predict with time series pipeline.\n\n :param data: data can be a TSDataset or data creator.\n The TSDataset should follow the same operations as the training\n TSDataset used in AutoTSEstimator.fit.\n :param batch_size: predict batch_size, the process will cost more time\n if batch_size is small while cost less memory. The param is only\n effective when data is a TSDataset. 
The values defaults to 32.\n :param quantize: if use the quantized model to predict.\n '''\n from bigdl.chronos.pytorch.utils import _pytorch_fashion_inference\n from bigdl.nano.utils.log4Error import invalidInputError\n if isinstance(data, TSDataset):\n x, _ = self._tsdataset_to_numpy(data, is_predict=True)\n if quantize:\n yhat = _pytorch_fashion_inference(model=self._pytorch_int8,\n input_data=x,\n batch_size=batch_size)\n else:\n self._best_model.eval()\n yhat = _pytorch_fashion_inference(model=self._best_model,\n input_data=x,\n batch_size=batch_size)\n yhat = self._tsdataset_unscale(yhat)\n elif isinstance(data, types.FunctionType):\n yhat_list = []\n self._best_config.update({'batch_size': batch_size})\n for x, _ in data(self._best_config):\n if quantize:\n yhat = _pytorch_fashion_inference(model=self._pytorch_int8,\n input_data=x.numpy())\n else:\n self._best_model.eval()\n yhat = _pytorch_fashion_inference(model=self._best_model,\n input_data=x.numpy())\n yhat_list.append(yhat)\n yhat = np.concatenate(yhat_list, axis=0)\n else:\n invalidInputError(False,\n \"We only support input tsdataset or data creator, \"\n f\"but found {data.__class__.__name__}\")\n return yhat\n\n def predict_with_onnx(self, data, batch_size=32, quantize=False):\n '''\n Rolling predict with onnx with time series pipeline.\n\n :param data: data can be a TSDataset or data creator.\n The TSDataset should follow the same operations as the training\n TSDataset used in AutoTSEstimator.fit.\n :param batch_size: predict batch_size, the process will cost more time\n if batch_size is small while cost less memory. The param is only\n effective when data is a TSDataset. The values defaults to 32.\n :param quantize: if use the quantized model to predict.\n '''\n from bigdl.chronos.pytorch import TSTrainer as Trainer\n from bigdl.chronos.pytorch.utils import _pytorch_fashion_inference\n from bigdl.nano.utils.log4Error import invalidInputError\n if isinstance(data, TSDataset):\n x, _ = self._tsdataset_to_numpy(data, is_predict=True)\n yhat = None\n if quantize:\n yhat = _pytorch_fashion_inference(model=self._onnxruntime_int8,\n input_data=x,\n batch_size=batch_size)\n else:\n if self._onnxruntime_fp32 is None:\n self._onnxruntime_fp32 = Trainer.trace(self._best_model,\n input_sample=torch.from_numpy(x[0:1]),\n accelerator=\"onnxruntime\")\n yhat = _pytorch_fashion_inference(model=self._onnxruntime_fp32,\n input_data=x,\n batch_size=batch_size)\n yhat = self._tsdataset_unscale(yhat)\n elif isinstance(data, types.FunctionType):\n yhat = None\n yhat_list = []\n self._best_config.update({'batch_size': batch_size})\n for x, _ in data(self._best_config):\n if quantize:\n yhat = _pytorch_fashion_inference(model=self._onnxruntime_int8,\n input_data=x.numpy(),\n batch_size=batch_size)\n else:\n if self._onnxruntime_fp32 is None:\n self._onnxruntime_fp32 = Trainer.trace(self._best_model,\n input_sample=x[0:1],\n accelerator=\"onnxruntime\")\n yhat = _pytorch_fashion_inference(model=self._onnxruntime_fp32,\n input_data=x.numpy(),\n batch_size=batch_size)\n yhat_list.append(yhat)\n yhat = np.concatenate(yhat_list, axis=0)\n else:\n invalidInputError(False,\n \"We only support input tsdataset or data creator, \"\n f\"but found {data.__class__.__name__}\")\n return yhat\n\n def fit(self,\n data,\n validation_data=None,\n epochs=1,\n batch_size=None,\n **kwargs):\n '''\n Incremental fitting\n\n :param data: The data support following formats:\n\n | 1. 
data creator:\n | a function that takes a config dictionary as parameter and\n | returns a PyTorch DataLoader.\n |\n | 2. a bigdl.chronos.data.TSDataset:\n | the TSDataset should follow the same operations as the training\n | TSDataset used in `AutoTSEstimator.fit`.\n\n :param validation_data: validation data, same format as data.\n :param epochs: incremental fitting epoch. The value defaults to 1.\n :param metric: evaluate metric.\n :param batch_size: batch size, defaults to None, which takes the searched best batch_size.\n :param **kwargs: args to be passed to bigdl-nano trainer.\n '''\n from bigdl.chronos.pytorch import TSTrainer as Trainer\n from bigdl.nano.utils.log4Error import invalidInputError\n train_loader = None\n valid_loader = None\n if isinstance(data, TSDataset):\n if batch_size is None:\n batch_size = self._best_config[\"batch_size\"]\n train_loader = self._tsdataset_to_loader(data, batch_size=batch_size)\n if validation_data:\n valid_loader = self._tsdataset_to_loader(validation_data, batch_size=batch_size)\n elif isinstance(data, types.FunctionType):\n if batch_size:\n self._best_config.update({'batch_size': batch_size})\n train_loader = data(self._best_config)\n if validation_data:\n valid_loader = validation_data(self._best_config)\n else:\n invalidInputError(False,\n \"We only support input TSDataset or data creator, \"\n f\"but found {data.__class__.__name__}.\")\n\n self.trainer = Trainer(max_epochs=epochs, **kwargs)\n self.trainer.fit(self._best_model,\n train_dataloaders=train_loader,\n val_dataloaders=valid_loader)\n\n def save(self, file_path):\n '''\n Save the TSPipeline to a folder\n\n :param file_path: the folder location to save the pipeline\n '''\n import pickle\n if not os.path.isdir(file_path):\n os.mkdir(file_path)\n model_init_path = os.path.join(file_path, DEFAULT_MODEL_INIT_DIR)\n model_path = os.path.join(file_path, DEFAULT_BEST_MODEL_DIR)\n data_process_path = os.path.join(file_path, DEFAULT_DATA_PROCESS_DIR)\n best_config_path = os.path.join(file_path, DEFAULT_BEST_CONFIG_DIR)\n model_init = {\"model_creator\": self.model_creator,\n \"optimizer_creator\": self.optimizer_creator,\n \"loss_creator\": self.loss_creator}\n data_process = {\"scaler\": self._scaler,\n \"scaler_index\": self._scaler_index}\n with open(model_init_path, \"wb\") as f:\n pickle.dump(model_init, f)\n with open(data_process_path, \"wb\") as f:\n pickle.dump(data_process, f)\n with open(best_config_path, \"wb\") as f:\n pickle.dump(self._best_config, f)\n # self._best_model.save(model_path)\n torch.save(self._best_model.model.state_dict(), model_path)\n\n @staticmethod\n def load(file_path):\n '''\n Load the TSPipeline to a folder\n\n :param file_path: the folder location to load the pipeline\n '''\n import pickle\n model_init_path = os.path.join(file_path, DEFAULT_MODEL_INIT_DIR)\n model_path = os.path.join(file_path, DEFAULT_BEST_MODEL_DIR)\n data_process_path = os.path.join(file_path, DEFAULT_DATA_PROCESS_DIR)\n best_config_path = os.path.join(file_path, DEFAULT_BEST_CONFIG_DIR)\n with open(model_init_path, \"rb\") as f:\n model_init = pickle.load(f)\n with open(data_process_path, \"rb\") as f:\n data_process = pickle.load(f)\n with open(best_config_path, \"rb\") as f:\n best_config = pickle.load(f)\n\n model_creator = model_init[\"model_creator\"]\n optimizer_creator = model_init[\"optimizer_creator\"]\n loss_creator = model_init[\"loss_creator\"]\n\n model = model_creator(best_config)\n model.load_state_dict(torch.load(model_path))\n\n if isinstance(optimizer_creator, 
types.FunctionType):\n optimizer = optimizer_creator(model, best_config)\n else:\n optimizer = optimizer_creator(model.parameters(),\n lr=best_config.get('lr', 0.001))\n\n if isinstance(loss_creator, torch.nn.modules.loss._Loss):\n loss = loss_creator\n else:\n loss = loss_creator(best_config)\n\n return TSPipeline(model=model,\n loss=loss,\n optimizer=optimizer,\n model_creator=model_creator,\n loss_creator=loss_creator,\n optimizer_creator=optimizer_creator,\n best_config=best_config,\n **data_process)\n\n def quantize(self,\n calib_data,\n metric=None,\n conf=None,\n framework='pytorch_fx',\n approach='static',\n tuning_strategy='bayesian',\n relative_drop=None,\n absolute_drop=None,\n timeout=0,\n max_trials=1):\n \"\"\"\n Quantization TSPipeline.\n\n :param calib_data: Required for static quantization or evaluation.\n\n | 1. data creator:\n | a function that takes a config dictionary as parameter and\n | returns a PyTorch DataLoader.\n |\n | 2. a bigdl.chronos.data.TSDataset:\n | the TSDataset should follow the same operations as the training\n | TSDataset used in `AutoTSEstimator.fit`.\n |\n | 3. A torch.utils.data.dataloader.DataLoader object for calibration,\n | Users should set the configs correctly (e.g. past_seq_len, ...).\n | They can be found in TSPipeline._best_config.\n |\n | 4. A numpy ndarray tuple (x, y).\n | x's shape is (num_samples, past_seq_len, input_feature_dim).\n | y's shape is (num_samples, future_seq_len, output_feature_dim).\n | They can be found in TSPipeline._best_config.\n\n :param metric: A str represent the metrics for tunning the quality of\n quantization. You may choose from \"mse\", \"mae\", \"rmse\", \"r2\", \"mape\", \"smape\".\n :param conf: A path to conf yaml file for quantization. Default to None,\n using default config.\n :param framework: string or list, [{'pytorch'|'pytorch_fx'|'pytorch_ipex'},\n {'onnxrt_integerops'|'onnxrt_qlinearops'}]. Default: 'pytorch_fx'.\n Consistent with Intel Neural Compressor.\n :param approach: str, 'static' or 'dynamic'. Default to 'static'.\n :param tuning_strategy: str, 'bayesian', 'basic', 'mse' or 'sigopt'. Default to 'bayesian'.\n :param relative_drop: Float, tolerable ralative accuracy drop. Default to None,\n e.g. set to 0.1 means that we accept a 10% increase in the metrics error.\n :param absolute_drop: Float, tolerable ralative accuracy drop. Default to None,\n e.g. set to 5 means that we can only accept metrics smaller than 5.\n :param timeout: Tuning timeout (seconds). Default to 0, which means early stop.\n Combine with max_trials field to decide when to exit.\n :param max_trials: Max tune times. Default to 1. Combine with timeout field to\n decide when to exit. 
\"timeout=0, max_trials=1\" means it will try quantization\n only once and return satisfying best model.\n \"\"\"\n from torch.utils.data import DataLoader, TensorDataset\n from bigdl.chronos.data import TSDataset\n from bigdl.nano.utils.log4Error import invalidInputError\n # check model support for quantization\n from bigdl.chronos.autots.utils import check_quantize_available\n check_quantize_available(self._best_model.model)\n # calib data should be set if the forecaster is just loaded\n if calib_data is None and approach.startswith(\"static\"):\n invalidInputError(False,\n \"You must set a `calib_data` \"\n \"for quantization When you use 'static'.\")\n elif calib_data and approach.startswith(\"dynamic\"):\n invalidInputError(False,\n \"`calib_data` should be None When you use 'dynamic'.\")\n\n # preprocess data.\n from .utils import preprocess_quantize_data\n calib_data = preprocess_quantize_data(self, calib_data)\n\n # map metric str to function\n from bigdl.chronos.metric.forecast_metrics import TORCHMETRICS_REGRESSION_MAP\n if isinstance(metric, str):\n metric = TORCHMETRICS_REGRESSION_MAP[metric]\n\n # init acc criterion\n accuracy_criterion = None\n if relative_drop and absolute_drop:\n invalidInputError(False, \"Please unset either `relative_drop` or `absolute_drop`.\")\n if relative_drop:\n accuracy_criterion = {'relative': relative_drop, 'higher_is_better': False}\n if absolute_drop:\n accuracy_criterion = {'absolute': absolute_drop, 'higher_is_better': False}\n\n from bigdl.nano.pytorch.trainer import Trainer\n self._trainer = Trainer(logger=False, max_epochs=1,\n checkpoint_callback=False,\n use_ipex=False)\n\n # quantize\n framework = [framework] if isinstance(framework, str) else framework\n temp_quantized_model = None\n for framework_item in framework:\n accelerator, method = framework_item.split('_')\n if accelerator == 'pytorch':\n accelerator = None\n else:\n accelerator = 'onnxruntime'\n method = method[:-3]\n q_model = self._trainer.quantize(self._best_model,\n precision='int8',\n accelerator=accelerator,\n method=method,\n calib_dataloader=calib_data,\n metric=metric,\n conf=conf,\n approach=approach,\n tuning_strategy=tuning_strategy,\n accuracy_criterion=accuracy_criterion,\n timeout=timeout,\n max_trials=max_trials)\n if accelerator == \"onnxruntime\":\n self._onnxruntime_int8 = q_model\n if accelerator is None:\n self._pytorch_int8 = q_model\n\n def _tsdataset_to_loader(self, data, is_predict=False, batch_size=32):\n self._check_mixed_data_type_usage()\n lookback = self._best_config[\"past_seq_len\"]\n horizon = 0 if is_predict else self._best_config[\"future_seq_len\"]\n selected_features = self._best_config[\"selected_features\"]\n data_loader = data.to_torch_data_loader(batch_size=batch_size,\n roll=True,\n lookback=lookback,\n horizon=horizon,\n feature_col=selected_features)\n return data_loader\n\n def _tsdataset_to_numpy(self, data, is_predict=False):\n self._check_mixed_data_type_usage()\n lookback = self._best_config[\"past_seq_len\"]\n horizon = 0 if is_predict else self._best_config[\"future_seq_len\"]\n selected_features = self._best_config[\"selected_features\"]\n data.roll(lookback=lookback,\n horizon=horizon,\n feature_col=selected_features)\n return data.to_numpy()\n\n def _check_mixed_data_type_usage(self):\n from bigdl.nano.utils.log4Error import invalidInputError\n for key in (\"past_seq_len\", \"future_seq_len\", \"selected_features\"):\n if key not in self._best_config:\n invalidInputError(False,\n \"You use a data creator to fit your 
AutoTSEstimator, \"\n \"and use a TSDataset to predict/evaluate/fit on the TSPipeline.\"\n \"Please stick to the same data type.\")\n\n def _tsdataset_unscale(self, y):\n if self._scaler:\n from bigdl.chronos.data.utils.scale import unscale_timeseries_numpy\n y = unscale_timeseries_numpy(y, self._scaler, self._scaler_index)\n return y\n",
"#\n# Copyright 2016 The BigDL Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nimport time\n\nimport numpy as np\nimport ray\n\nfrom bigdl.dllib.nncontext import init_spark_on_yarn\nfrom bigdl.orca.ray import OrcaRayContext\n\nnp.random.seed(1337) # for reproducibility\n\n\[email protected]\nclass TestRay:\n def hostname(self):\n import socket\n return socket.gethostname()\n\n\nnode_num = 4\nsc = init_spark_on_yarn(\n hadoop_conf=\"/opt/work/hadoop-2.7.2/etc/hadoop/\",\n conda_name=\"rayexample\",\n num_executors=node_num,\n executor_cores=28,\n executor_memory=\"10g\",\n driver_memory=\"2g\",\n driver_cores=4,\n extra_executor_memory_for_ray=\"30g\")\nray_ctx = OrcaRayContext(sc=sc, object_store_memory=\"2g\")\nray_ctx.init()\nactors = [TestRay.remote() for i in range(0, node_num)]\nprint(ray.get([actor.hostname.remote() for actor in actors]))\nray_ctx.stop()\n# repeat\nray_ctx = OrcaRayContext(sc=sc, object_store_memory=\"1g\")\nray_ctx.init()\nactors = [TestRay.remote() for i in range(0, node_num)]\nprint(ray.get([actor.hostname.remote() for actor in actors]))\nray_ctx.stop()\n\nsc.stop()\ntime.sleep(3)\n",
"#\n# Copyright 2016 The BigDL Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\nfrom bigdl.dllib.utils.common import Sample as BSample, JTensor as BJTensor,\\\n JavaCreator, _get_gateway, _py2java, _java2py\nimport numpy as np\nimport os\nimport tempfile\nimport uuid\nimport functools\nimport glob\n\nfrom urllib.parse import urlparse\nfrom bigdl.dllib.utils.log4Error import *\n\n\ndef convert_to_safe_path(input_path, follow_symlinks=True):\n # resolves symbolic links\n if follow_symlinks:\n return os.path.realpath(input_path)\n # covert to abs path\n return os.path.abspath(input_path)\n\n\ndef to_list_of_numpy(elements):\n if isinstance(elements, np.ndarray):\n return [elements]\n elif np.isscalar(elements):\n return [np.array(elements)]\n elif not isinstance(elements, list):\n invalidInputError(False, \"Wrong type: %s\" % type(elements))\n\n results = []\n for element in elements:\n if np.isscalar(element):\n results.append(np.array(element))\n elif isinstance(element, np.ndarray):\n results.append(element)\n else:\n invalidInputError(False, \"Wrong type: %s\" % type(element))\n\n return results\n\n\ndef get_file_list(path, recursive=False):\n return callZooFunc(\"float\", \"listPaths\", path, recursive)\n\n\ndef exists(path):\n return callZooFunc(\"float\", \"exists\", path)\n\n\ndef mkdirs(path):\n callZooFunc(\"float\", \"mkdirs\", path)\n\n\ndef is_local_path(path):\n parse_result = urlparse(path)\n return len(parse_result.scheme.lower()) == 0 or parse_result.scheme.lower() == \"file\"\n\n\ndef append_suffix(prefix, path):\n # append suffix\n splits = path.split(\".\")\n if len(splits) > 0:\n file_name = prefix + \".\" + splits[-1]\n else:\n file_name = prefix\n\n return file_name\n\n\ndef enable_multi_fs_save(save_func):\n\n @functools.wraps(save_func)\n def save_mult_fs(obj, path, *args, **kwargs):\n if is_local_path(path):\n return save_func(obj, path, *args, **kwargs)\n else:\n file_name = str(uuid.uuid1())\n file_name = append_suffix(file_name, path)\n temp_path = os.path.join(tempfile.gettempdir(), file_name)\n\n try:\n result = save_func(obj, temp_path, *args, **kwargs)\n if \"overwrite\" in kwargs:\n put_local_file_to_remote(temp_path, path, over_write=kwargs['overwrite'])\n else:\n put_local_file_to_remote(temp_path, path)\n finally:\n os.remove(temp_path)\n return result\n\n return save_mult_fs\n\n\ndef enable_multi_fs_load_static(load_func):\n @functools.wraps(load_func)\n def multi_fs_load(path, *args, **kwargs):\n if is_local_path(path):\n return load_func(path, *args, **kwargs)\n else:\n file_name = str(uuid.uuid1())\n file_name = append_suffix(file_name, path)\n temp_path = os.path.join(tempfile.gettempdir(), file_name)\n get_remote_file_to_local(path, temp_path)\n try:\n return load_func(temp_path, *args, **kwargs)\n finally:\n os.remove(temp_path)\n\n return multi_fs_load\n\n\ndef enable_multi_fs_load(load_func):\n\n @functools.wraps(load_func)\n def multi_fs_load(obj, path, *args, **kwargs):\n if is_local_path(path):\n return load_func(obj, path, *args, 
**kwargs)\n else:\n file_name = str(uuid.uuid1())\n file_name = append_suffix(file_name, path)\n temp_path = os.path.join(tempfile.gettempdir(), file_name)\n get_remote_file_to_local(path, temp_path)\n try:\n return load_func(obj, temp_path, *args, **kwargs)\n finally:\n os.remove(temp_path)\n\n return multi_fs_load\n\n\ndef get_remote_file_to_local(remote_path, local_path, over_write=False):\n callZooFunc(\"float\", \"getRemoteFileToLocal\", remote_path, local_path, over_write)\n\n\ndef get_remote_dir_to_local(remote_dir, local_dir):\n # get remote file lists\n file_list = get_file_list(remote_dir)\n # get remote files to local\n [get_remote_file_to_local(file, os.path.join(local_dir, os.path.basename(file)))\n for file in file_list]\n\n\ndef get_remote_files_with_prefix_to_local(remote_path_prefix, local_dir):\n remote_dir = os.path.dirname(remote_path_prefix)\n prefix = os.path.basename(remote_path_prefix)\n # get remote file lists\n file_list = get_file_list(remote_dir)\n file_list = [file for file in file_list if os.path.basename(file).startswith(prefix)]\n # get remote files to local\n [get_remote_file_to_local(file, os.path.join(local_dir, os.path.basename(file)))\n for file in file_list]\n\n\ndef get_remote_dir_tree_to_local(remote_dir, local_dir):\n if os.path.exists(local_dir):\n os.makedirs(local_dir)\n # get remote file lists\n file_list = get_file_list(remote_dir, recursive=True)\n for file in file_list:\n local_subdir = os.path.join(local_dir, os.path.dirname(file)[len(remote_dir)+1:])\n filename = os.path.basename(file)\n if not os.path.exists(local_subdir):\n os.makedirs(local_subdir)\n get_remote_file_to_local(file, os.path.join(local_subdir, filename))\n\n\ndef put_local_file_to_remote(local_path, remote_path, over_write=False):\n callZooFunc(\"float\", \"putLocalFileToRemote\", local_path, remote_path, over_write)\n\n\ndef put_local_files_with_prefix_to_remote(local_path_prefix, remote_dir, over_write=False):\n # get local file lists\n file_list = glob.glob(local_path_prefix + \"*\")\n # get remote files to local\n [put_local_file_to_remote(file, os.path.join(remote_dir, os.path.basename(file)),\n over_write=over_write)\n for file in file_list]\n\n\ndef put_local_dir_tree_to_remote(local_dir, remote_dir, over_write=False):\n if not exists(remote_dir):\n mkdirs(remote_dir)\n for dirpath, dirnames, filenames in os.walk(local_dir):\n for d in dirnames:\n remote_subdir = os.path.join(remote_dir, dirpath[len(local_dir)+1:], d)\n if not exists(remote_subdir):\n mkdirs(remote_subdir)\n for f in filenames:\n remote_file = os.path.join(remote_dir, dirpath[len(local_dir)+1:], f)\n put_local_file_to_remote(os.path.join(dirpath, f), remote_file, over_write=over_write)\n\n\ndef set_core_number(num):\n callZooFunc(\"float\", \"setCoreNumber\", num)\n\n\ndef callZooFunc(bigdl_type, name, *args):\n \"\"\" Call API in PythonBigDL \"\"\"\n gateway = _get_gateway()\n args = [_py2java(gateway, a) for a in args]\n error = Exception(\"Cannot find function: %s\" % name)\n for jinvoker in JavaCreator.instance(bigdl_type, gateway).value:\n # hasattr(jinvoker, name) always return true here,\n # so you need to invoke the method to check if it exist or not\n try:\n api = getattr(jinvoker, name)\n java_result = api(*args)\n result = _java2py(gateway, java_result)\n except Exception as e:\n error = e\n if not (\"does not exist\" in str(e)\n and \"Method {}\".format(name) in str(e)):\n invalidOperationError(False, str(e), cause=e)\n else:\n return result\n invalidOperationError(False, str(error), 
cause=error)\n\n\nclass JTensor(BJTensor):\n\n def __init__(self, storage, shape, bigdl_type=\"float\", indices=None):\n super(JTensor, self).__init__(storage, shape, bigdl_type, indices)\n\n @classmethod\n def from_ndarray(cls, a_ndarray, bigdl_type=\"float\"):\n \"\"\"\n Convert a ndarray to a DenseTensor which would be used in Java side.\n \"\"\"\n if a_ndarray is None:\n return None\n invalidInputError(isinstance(a_ndarray, np.ndarray),\n \"input should be a np.ndarray, not %s\" % type(a_ndarray))\n return cls(a_ndarray,\n a_ndarray.shape,\n bigdl_type)\n\n\nclass Sample(BSample):\n\n def __init__(self, features, labels, bigdl_type=\"float\"):\n super(Sample, self).__init__(features, labels, bigdl_type)\n\n @classmethod\n def from_ndarray(cls, features, labels, bigdl_type=\"float\"):\n features = to_list_of_numpy(features)\n labels = to_list_of_numpy(labels)\n return cls(\n features=[JTensor(feature, feature.shape) for feature in features],\n labels=[JTensor(label, label.shape) for label in labels],\n bigdl_type=bigdl_type)\n",
"#\n# Copyright 2016 The BigDL Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\nimport tensorflow as tf\n\nfrom bigdl.orca.tfpark.tf_dataset import TensorMeta\nfrom bigdl.dllib.utils import nest\nfrom bigdl.orca.data import SparkXShards\nfrom bigdl.dllib.utils import log4Error\n\n\nclass Dataset(object):\n\n \"\"\"\n Represents a distributed set of elements backed by an RDD,\n which is created by applying tensorflow dataset transformations\n on each partitions.\n \"\"\"\n\n def __init__(self, xshards, create_dataset_fn):\n self.xshards = xshards\n self.create_dataset_fn = create_dataset_fn\n\n def as_graph_rdd(self, batch_per_shard, drop_remainder=True):\n\n create_dataset_fn = self.create_dataset_fn\n\n def to_dataset(iter):\n data_list = list(iter)\n\n import tensorflow as tf\n if not data_list:\n return []\n\n datasets = [create_dataset_fn(data) for data in data_list]\n from functools import reduce\n dataset = reduce(lambda x, y: x.concatenate(y), datasets)\n dataset = dataset.batch(batch_per_shard, drop_remainder)\n iterator = dataset.make_initializable_iterator()\n train_next_ops = nest.flatten(iterator.get_next())\n output_types = [t for t in nest.flatten(dataset.output_types)]\n output_types_enum = [t.as_datatype_enum for t in output_types]\n\n init_op_name = iterator.initializer.name\n table_init_op = tf.tables_initializer().name\n output_names = [op.name for op in train_next_ops]\n\n graph = train_next_ops[0].graph\n\n flatten_shapes = nest.flatten(dataset.output_shapes)\n\n flatten_shapes = [shape[1:] for shape in flatten_shapes]\n\n flatten_tensor_structure = [TensorMeta(dtype=output_types[i],\n shape=list(flatten_shapes[i]),\n name=\"zoo_input_{}\".format(i))\n for i in range(len(flatten_shapes))]\n structure = dataset.output_types\n if isinstance(structure, tf.DType):\n structure = (structure,)\n tensor_structure = nest.pack_sequence_as(structure,\n flatten_tensor_structure)\n\n meta_info = {\n \"init_op_name\": init_op_name,\n \"table_init_op\": table_init_op,\n \"output_names\": output_names,\n \"output_types\": output_types_enum,\n \"tensor_structure\": tensor_structure\n }\n\n return [(bytearray(graph.as_graph_def().SerializeToString()), meta_info)]\n\n graph_rdd_and_meta = self.xshards.rdd.mapPartitions(to_dataset)\n return graph_rdd_and_meta\n\n def as_tf_dataset_rdd(self):\n create_dataset_fn = self.create_dataset_fn\n\n def to_dataset(iter):\n\n data_list = list(iter)\n if not data_list:\n return []\n\n from tensorflow.python.distribute.coordinator.values import serialize_dataset_to_graph\n datasets = [create_dataset_fn(data) for data in data_list]\n from functools import reduce\n dataset = reduce(lambda x, y: x.concatenate(y), datasets)\n ds_def = serialize_dataset_to_graph(dataset).numpy()\n elem_spec = dataset.element_spec\n return [{\"ds_def\": ds_def, \"elem_spec\": elem_spec}]\n\n tf_dataset_rdd = self.xshards.rdd.mapPartitions(to_dataset)\n return tf_dataset_rdd\n\n @staticmethod\n def from_tensor_slices(xshards):\n return 
TensorSliceDataset(xshards)\n\n @staticmethod\n def from_feature_table(tbl):\n from bigdl.friesian.feature import FeatureTable\n from bigdl.friesian.feature.utils import featuretable_to_xshards\n log4Error.invalidInputError(isinstance(tbl, FeatureTable),\n \"Only Friesian FeatureTable is supported\")\n xshards = featuretable_to_xshards(tbl)\n return TensorSliceDataset(xshards)\n\n def map(self, map_func):\n\n return MapDataset(self, map_func)\n\n\nclass TensorSliceDataset(Dataset):\n\n def __init__(self, xshards):\n assert isinstance(xshards, SparkXShards), \\\n \"only datasets backed by a SparkXShards are supported\"\n\n self.xshards = xshards\n\n def create_dataset_fn(data):\n return tf.data.Dataset.from_tensor_slices(data)\n super().__init__(xshards, create_dataset_fn)\n\n\nclass MapDataset(Dataset):\n\n def __init__(self, input_dataset, map_func):\n\n create_pre_dataset_fn = input_dataset.create_dataset_fn\n\n def create_dataset_fn(data):\n dataset = create_pre_dataset_fn(data)\n return dataset.map(map_func)\n super().__init__(xshards=input_dataset.xshards,\n create_dataset_fn=create_dataset_fn)\n",
"#\n# Copyright 2016 The BigDL Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# Part of the code originally from Tensorflow\n\n\nimport gzip\n\nimport numpy\n\nfrom bigdl.dllib.feature.dataset import base\nfrom bigdl.dllib.feature.dataset.transformer import *\nfrom bigdl.dllib.utils.log4Error import *\n\n\nSOURCE_URL = 'https://ossci-datasets.s3.amazonaws.com/mnist/'\n\nTRAIN_MEAN = 0.13066047740239506 * 255\nTRAIN_STD = 0.3081078 * 255\nTEST_MEAN = 0.13251460696903547 * 255\nTEST_STD = 0.31048024 * 255\n\n\ndef _read32(bytestream):\n dt = numpy.dtype(numpy.uint32).newbyteorder('>')\n return numpy.frombuffer(bytestream.read(4), dtype=dt)[0]\n\n\ndef extract_images(f):\n \"\"\"Extract the images into a 4D uint8 numpy array [index, y, x, depth].\n\n :param: f: A file object that can be passed into a gzip reader.\n :return: data: A 4D unit8 numpy array [index, y, x, depth].\n :raise: ValueError: If the bytestream does not start with 2051.\n\n \"\"\"\n print('Extracting', f.name)\n with gzip.GzipFile(fileobj=f) as bytestream:\n magic = _read32(bytestream)\n if magic != 2051:\n invalidInputError(False,\n 'Invalid magic number %d in MNIST image file: %s' %\n (magic, f.name))\n num_images = _read32(bytestream)\n rows = _read32(bytestream)\n cols = _read32(bytestream)\n buf = bytestream.read(rows * cols * num_images)\n data = numpy.frombuffer(buf, dtype=numpy.uint8)\n data = data.reshape(num_images, rows, cols, 1)\n return data\n\n\ndef extract_labels(f):\n print('Extracting', f.name)\n with gzip.GzipFile(fileobj=f) as bytestream:\n magic = _read32(bytestream)\n if magic != 2049:\n invalidInputError(False,\n 'Invalid magic number %d in MNIST label file: %s' %\n (magic, f.name))\n num_items = _read32(bytestream)\n buf = bytestream.read(num_items)\n labels = numpy.frombuffer(buf, dtype=numpy.uint8)\n return labels\n\n\ndef read_data_sets(train_dir, data_type=\"train\"):\n \"\"\"\n Parse or download mnist data if train_dir is empty.\n\n :param: train_dir: The directory storing the mnist data\n\n :param: data_type: Reading training set or testing set.It can be either \"train\" or \"test\"\n\n :return:\n\n ``` (ndarray, ndarray) representing (features, labels) features is a 4D unit8 numpy array [\n index, y, x, depth] representing each pixel valued from 0 to 255. labels is 1D unit8 nunpy\n array representing the label valued from 0 to 9. 
```\n\n \"\"\"\n TRAIN_IMAGES = 'train-images-idx3-ubyte.gz'\n TRAIN_LABELS = 'train-labels-idx1-ubyte.gz'\n TEST_IMAGES = 't10k-images-idx3-ubyte.gz'\n TEST_LABELS = 't10k-labels-idx1-ubyte.gz'\n\n if data_type == \"train\":\n local_file = base.maybe_download(TRAIN_IMAGES, train_dir,\n SOURCE_URL + TRAIN_IMAGES)\n with open(local_file, 'rb') as f:\n train_images = extract_images(f)\n\n local_file = base.maybe_download(TRAIN_LABELS, train_dir,\n SOURCE_URL + TRAIN_LABELS)\n with open(local_file, 'rb') as f:\n train_labels = extract_labels(f)\n return train_images, train_labels\n\n else:\n local_file = base.maybe_download(TEST_IMAGES, train_dir,\n SOURCE_URL + TEST_IMAGES)\n with open(local_file, 'rb') as f:\n test_images = extract_images(f)\n\n local_file = base.maybe_download(TEST_LABELS, train_dir,\n SOURCE_URL + TEST_LABELS)\n with open(local_file, 'rb') as f:\n test_labels = extract_labels(f)\n return test_images, test_labels\n\n\ndef load_data(location=\"/tmp/mnist\"):\n (train_images, train_labels) = read_data_sets(location, \"train\")\n (test_images, test_labels) = read_data_sets(location, \"test\")\n X_train = normalizer(train_images, TRAIN_MEAN, TRAIN_STD)\n X_test = normalizer(test_images, TRAIN_MEAN, TRAIN_STD)\n Y_train = train_labels + 1\n Y_test = test_labels + 1\n return (X_train, Y_train), (X_test, Y_test)\n\n\nif __name__ == \"__main__\":\n train, _ = read_data_sets(\"/tmp/mnist/\", \"train\")\n test, _ = read_data_sets(\"/tmp/mnist\", \"test\")\n invalidInputError(numpy.abs(numpy.mean(train) - TRAIN_MEAN) / TRAIN_MEAN < 1e-7,\n f\"mean of train data doesn't match ${TRAIN_MEAN}\")\n invalidInputError(numpy.abs(numpy.std(train) - TRAIN_STD) / TRAIN_STD < 1e-7,\n f\"std of train data doesn't match ${TRAIN_STD}\")\n invalidInputError(numpy.abs(numpy.mean(test) - TEST_MEAN) / TEST_MEAN < 1e-7,\n f\"mean of test data doesn't match ${TEST_MEAN}\")\n invalidInputError(numpy.abs(numpy.std(test) - TEST_STD) / TEST_STD < 1e-7,\n f\"std of test data doesn't match ${TEST_STD_STD}\")\n"
] | [
[
"tensorflow.keras.backend.clear_session",
"tensorflow.keras.models.clone_model"
],
[
"numpy.concatenate",
"torch.cat",
"torch.from_numpy",
"torch.load"
],
[
"numpy.random.seed"
],
[
"numpy.array",
"numpy.isscalar"
],
[
"tensorflow.python.distribute.coordinator.values.serialize_dataset_to_graph",
"tensorflow.data.Dataset.from_tensor_slices",
"tensorflow.tables_initializer"
],
[
"numpy.frombuffer",
"numpy.std",
"numpy.mean",
"numpy.dtype"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.7",
"2.6",
"2.4",
"2.3",
"2.5",
"2.2"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"1.12",
"1.4",
"1.13",
"1.5",
"1.7",
"1.0",
"1.2"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
Khanhnn00/blind_sr_denoise | [
"3153f90d20fd884ab69b47c30c685e0175276055",
"3153f90d20fd884ab69b47c30c685e0175276055",
"3153f90d20fd884ab69b47c30c685e0175276055"
] | [
"DNCNN/common.py",
"DNCNN/networks/__init__.py",
"KernelGANFKP/model/model.py"
] | [
"import os\nimport random\nimport numpy as np\nimport scipy.misc as misc\nimport imageio\nfrom tqdm import tqdm\nimport cv2\nfrom PIL import Image\n\nimport torch\nimport torch.nn.functional as F\n\nIMG_EXTENSIONS = ['.jpg', '.JPG', '.jpeg', '.JPEG', '.png', '.PNG', '.ppm', '.PPM', '.bmp', '.BMP']\nBINARY_EXTENSIONS = ['.npy']\nBENCHMARK = ['Set5', 'Set14', 'B100', 'Urban100', 'Manga109', 'DIV2K', 'DF2K']\n\n\n####################\n# Files & IO\n####################\ndef is_image_file(filename):\n return any(filename.endswith(extension) for extension in IMG_EXTENSIONS)\n\n\ndef is_binary_file(filename):\n return any(filename.endswith(extension) for extension in BINARY_EXTENSIONS)\n\n\ndef _get_paths_from_images(path):\n assert os.path.isdir(path), '[Error] [%s] is not a valid directory' % path\n images = []\n for dirpath, _, fnames in sorted(os.walk(path)):\n for fname in sorted(fnames):\n if is_image_file(fname):\n img_path = os.path.join(dirpath, fname)\n images.append(img_path)\n assert images, '[%s] has no valid image file' % path\n return images\n\n\ndef _get_paths_from_binary(path):\n assert os.path.isdir(path), '[Error] [%s] is not a valid directory' % path\n files = []\n for dirpath, _, fnames in sorted(os.walk(path)):\n for fname in sorted(fnames):\n if is_binary_file(fname):\n binary_path = os.path.join(dirpath, fname)\n files.append(binary_path)\n assert files, '[%s] has no valid binary file' % path\n return files\n\n\ndef find_benchmark(dataroot):\n bm_list = [dataroot.find(bm)>=0 for bm in BENCHMARK]\n if not sum(bm_list) == 0:\n bm_idx = bm_list.index(True)\n bm_name = BENCHMARK[bm_idx]\n else:\n bm_name = 'MyImage'\n return bm_name\n\n\ndef read_img(path):\n # read image by misc or from .npy\n # return: Numpy float32, HWC, RGB, [0,255]\n img = imageio.imread(path, pilmode='RGB')\n if img.ndim == 2:\n img = np.expand_dims(img, axis=2)\n return img\n\n# image processing\n# process on numpy image\n####################\ndef im2tensor01(im_np):\n \"\"\"Convert numpy to tensor to the gpu\"\"\"\n im_np = im_np / 255.0 if im_np.dtype == 'uint8' else im_np\n im_np = np.ascontiguousarray(im_np)\n return torch.FloatTensor(np.transpose(im_np, (2, 0, 1)))\n\ndef tensor2im(im_t):\n \"\"\"Copy the tensor to the cpu & convert to range [0,255]\"\"\"\n im_np = np.clip(np.round((np.transpose(im_t.squeeze(0).detach().cpu().float().numpy(), (1, 2, 0)) + 1) / 2.0 * 255.0), 0, 255)\n return im_np.astype(np.uint8)\n\ndef get_patch(img_tar, patch_size):\n oh, ow = img_tar.shape[:2]\n\n ip = patch_size\n tp = ip\n ix = random.randrange(0, ow - ip + 1)\n iy = random.randrange(0, oh - ip + 1)\n tx, ty = ix, iy\n\n img_tar = img_tar[ty:ty + tp, tx:tx + tp, :]\n\n return img_tar\n\ndef augment(img_list, hflip=True, rot=True):\n # horizontal flip OR rotate\n hflip = hflip and random.random() < 0.5\n vflip = rot and random.random() < 0.5\n rot90 = rot and random.random() < 0.5\n\n def _augment(img):\n if hflip: img = img[:, ::-1, :]\n if vflip: img = img[::-1, :, :]\n if rot90: img = img.transpose(1, 0, 2)\n return img\n\n return [_augment(img) for img in img_list]\n\n\ndef modcrop(img_in, scale):\n img = np.copy(img_in)\n if img.ndim == 2:\n H, W = img.shape\n H_r, W_r = H % scale, W % scale\n img = img[:H - H_r, :W - W_r]\n elif img.ndim == 3:\n H, W, C = img.shape\n H_r, W_r = H % scale, W % scale\n img = img[:H - H_r, :W - W_r, :]\n else:\n raise ValueError('Wrong img ndim: [%d].' % img.ndim)\n return img\n",
"import functools\n\nimport torch\nimport torch.nn as nn\nfrom torch.nn import init\n# from .E_arch import Estimator\n# from .IRCNN import IRCNN\n\n####################\n# initialize\n####################\n\ndef weights_init_normal(m, std=0.02):\n classname = m.__class__.__name__\n if isinstance(m, (nn.Conv2d, nn.ConvTranspose2d)):\n if classname != \"MeanShift\":\n print('initializing [%s] ...' % classname)\n init.normal_(m.weight.data, 0.0, std)\n if m.bias is not None:\n m.bias.data.zero_()\n elif isinstance(m, (nn.Linear)):\n init.normal_(m.weight.data, 0.0, std)\n if m.bias is not None:\n m.bias.data.zero_()\n elif isinstance(m, (nn.BatchNorm2d)):\n init.normal_(m.weight.data, 1.0, std)\n init.constant_(m.bias.data, 0.0)\n\ndef weights_init_kaiming(m, scale=1):\n classname = m.__class__.__name__\n if isinstance(m, (nn.Conv2d, nn.ConvTranspose2d)):\n if classname != \"MeanShift\":\n print('initializing [%s] ...' % classname)\n init.kaiming_normal_(m.weight.data, a=0, mode='fan_in')\n m.weight.data *= scale\n if m.bias is not None:\n m.bias.data.zero_()\n elif isinstance(m, (nn.Linear)):\n init.kaiming_normal_(m.weight.data, a=0, mode='fan_in')\n m.weight.data *= scale\n if m.bias is not None:\n m.bias.data.zero_()\n elif isinstance(m, (nn.BatchNorm2d)):\n init.constant_(m.weight.data, 1.0)\n m.weight.data *= scale\n init.constant_(m.bias.data, 0.0)\n\ndef weights_init_orthogonal(m):\n classname = m.__class__.__name__\n if isinstance(m, (nn.Conv2d, nn.ConvTranspose2d)):\n if classname != \"MeanShift\":\n print('initializing [%s] ...' % classname)\n init.orthogonal_(m.weight.data, gain=1)\n if m.bias is not None:\n m.bias.data.zero_()\n elif isinstance(m, (nn.Linear)):\n init.orthogonal_(m.weight.data, gain=1)\n if m.bias is not None:\n m.bias.data.zero_()\n elif isinstance(m, (nn.BatchNorm2d)):\n init.normal_(m.weight.data, 1.0, 0.02)\n init.constant_(m.bias.data, 0.0)\n\ndef init_weights(net, init_type='kaiming', scale=1, std=0.02):\n # scale for 'kaiming', std for 'normal'.\n print('initialization method [%s]' % init_type)\n if init_type == 'normal':\n weights_init_normal_ = functools.partial(weights_init_normal, std=std)\n net.apply(weights_init_normal_)\n elif init_type == 'kaiming':\n weights_init_kaiming_ = functools.partial(weights_init_kaiming, scale=scale)\n net.apply(weights_init_kaiming_)\n elif init_type == 'orthogonal':\n net.apply(weights_init_orthogonal)\n else:\n raise NotImplementedError('initialization method [%s] is not implemented' % init_type)\n\ndef create_model(opt):\n if opt['mode'] == 'sr':\n net = define_net(opt['networks'])\n return net\n else:\n raise NotImplementedError(\"The mode [%s] of networks is not recognized.\" % opt['mode'])\n\ndef define_net(opt):\n which_model = opt['which_model'].upper()\n print('===> Building network [%s]...'%which_model)\n\n if which_model == 'ECNN':\n net = Estimator()\n elif which_model == 'IRCNN':\n net = IRCNN()\n else:\n raise NotImplementedError(\"Network [%s] is not recognized.\" % which_model)\n if torch.cuda.is_available():\n net = nn.DataParallel(net).cuda()\n\n return net",
"import torch\nfrom .loss import GANLoss, DownScaleLoss, SumOfWeightsLoss, BoundariesLoss, CentralizedLoss, SparsityLoss\nfrom .networks import Generator, Discriminator, weights_init_G, weights_init_D\nimport torch.nn.functional as F\nimport sys\nsys.path.append('../')\nfrom util import save_final_kernel_png, post_process_k_crop2, analytic_kernel, kernel_shift\n\n'''\n# ------------------------------------------\n# model of original KernelGAN\n# ------------------------------------------\n'''\n\n\nclass KernelGAN:\n '''\n # ------------------------------------------\n # (1) create model, loss and optimizer\n # ------------------------------------------\n '''\n # Constraint co-efficients\n lambda_sum2one = 0.5\n lambda_bicubic = 5\n lambda_boundaries = 0.5\n lambda_centralized = 0\n lambda_sparse = 0\n\n def __init__(self, conf):\n # Acquire configuration\n self.conf = conf\n\n # Define the GAN\n self.G = Generator(conf).cuda()\n self.D = Discriminator(conf).cuda()\n\n # Calculate D's input & output shape according to the shaving done by the networks\n self.d_input_shape = self.G.output_size\n self.d_output_shape = self.d_input_shape - self.D.forward_shave\n\n # Input tensors\n self.g_input = torch.FloatTensor(1, 3, conf.input_crop_size, conf.input_crop_size).cuda()\n self.d_input = torch.FloatTensor(1, 3, self.d_input_shape, self.d_input_shape).cuda()\n\n # The kernel G is imitating\n self.curr_k = torch.FloatTensor(conf.G_kernel_size, conf.G_kernel_size).cuda()\n\n # Losses\n self.GAN_loss_layer = GANLoss(d_last_layer_size=self.d_output_shape).cuda()\n self.bicubic_loss = DownScaleLoss(scale_factor=conf.scale_factor).cuda()\n self.sum2one_loss = SumOfWeightsLoss().cuda()\n self.boundaries_loss = BoundariesLoss(k_size=conf.G_kernel_size).cuda()\n self.centralized_loss = CentralizedLoss(k_size=conf.G_kernel_size, scale_factor=conf.scale_factor).cuda()\n self.sparse_loss = SparsityLoss().cuda()\n self.loss_bicubic = 0\n\n # Define loss function\n self.criterionGAN = self.GAN_loss_layer.forward\n\n # Initialize networks weights\n self.G.apply(weights_init_G)\n self.D.apply(weights_init_D)\n\n # Optimizers\n self.optimizer_G = torch.optim.Adam(self.G.parameters(), lr=conf.g_lr, betas=(conf.beta1, 0.999))\n self.optimizer_D = torch.optim.Adam(self.D.parameters(), lr=conf.d_lr, betas=(conf.beta1, 0.999))\n\n print('*' * 60 + '\\nSTARTED KernelGAN on: \\\"%s\\\"...' 
% conf.input_image_path)\n\n # noinspection PyUnboundLocalVariable\n def calc_curr_k(self):\n \"\"\"given a generator network, the function calculates the kernel it is imitating\"\"\"\n delta = torch.Tensor([1.]).unsqueeze(0).unsqueeze(-1).unsqueeze(-1).cuda()\n for ind, w in enumerate(self.G.parameters()):\n curr_k = F.conv2d(delta, w, padding=self.conf.G_kernel_size - 1) if ind == 0 else F.conv2d(curr_k, w)\n self.curr_k = curr_k.squeeze() # .flip([0, 1]) # no need to flip kernel because we use F.conv2d to blur\n\n '''\n # ---------------------\n # (2) training\n # ---------------------\n '''\n\n def train(self, g_input, d_input, iteration):\n self.set_input(g_input, d_input)\n total_loss_g, loss_g = self.train_g()\n loss_d = self.train_d()\n\n if (iteration % 10 == 0 or iteration == 1) and self.conf.verbose:\n k_2 = post_process_k_crop2(self.curr_k, n=self.conf.n_filtering)\n save_final_kernel_png(k_2, self.conf, self.conf.kernel_gt, iteration)\n print('\\n Iter {}, D_loss: {}, G_loss: {}, G_loss_total: {}'.format(iteration, loss_d.data, loss_g.data,\n total_loss_g.data))\n\n def set_input(self, g_input, d_input):\n self.g_input = g_input.contiguous()\n self.d_input = d_input.contiguous()\n\n '''\n # ---------------------\n # (2.1) training of G\n # ---------------------\n '''\n\n def train_g(self):\n # Zeroize gradients\n self.optimizer_G.zero_grad()\n # Generator forward pass\n g_pred = self.G.forward(self.g_input)\n # Pass Generators output through Discriminator\n d_pred_fake = self.D.forward(g_pred)\n # Calculate generator loss, based on discriminator prediction on generator result\n loss_g = self.criterionGAN(d_last_layer=d_pred_fake, is_d_input_real=True)\n # Sum all losses\n total_loss_g = loss_g + self.calc_constraints(g_pred)\n # Calculate gradients\n total_loss_g.backward()\n # Update weights\n self.optimizer_G.step()\n return total_loss_g, loss_g\n\n def calc_constraints(self, g_pred):\n # Calculate K which is equivalent to G\n self.calc_curr_k()\n # Calculate constraints\n self.loss_bicubic = self.bicubic_loss.forward(g_input=self.g_input, g_output=g_pred)\n loss_boundaries = self.boundaries_loss.forward(kernel=self.curr_k)\n loss_sum2one = self.sum2one_loss.forward(kernel=self.curr_k)\n loss_centralized = self.centralized_loss.forward(kernel=self.curr_k)\n loss_sparse = self.sparse_loss.forward(kernel=self.curr_k)\n # Apply constraints co-efficients\n return self.loss_bicubic * self.lambda_bicubic + loss_sum2one * self.lambda_sum2one + \\\n loss_boundaries * self.lambda_boundaries + loss_centralized * self.lambda_centralized + \\\n loss_sparse * self.lambda_sparse\n\n '''\n # ---------------------\n # (2.2) training of D\n # ---------------------\n '''\n\n def train_d(self):\n # Zeroize gradients\n self.optimizer_D.zero_grad()\n # Discriminator forward pass over real example\n d_pred_real = self.D.forward(self.d_input)\n # Discriminator forward pass over fake example (generated by generator)\n # Note that generator result is detached so that gradients are not propagating back through generator\n g_output = self.G.forward(self.g_input)\n d_pred_fake = self.D.forward((g_output + torch.randn_like(g_output) / 255.).detach())\n # Calculate discriminator loss\n loss_d_fake = self.criterionGAN(d_pred_fake, is_d_input_real=False)\n loss_d_real = self.criterionGAN(d_pred_real, is_d_input_real=True)\n loss_d = (loss_d_fake + loss_d_real) * 0.5\n # Calculate gradients, note that gradients are not propagating back through generator\n loss_d.backward()\n # Update weights, note that only 
discriminator weights are updated (by definition of the D optimizer)\n self.optimizer_D.step()\n return loss_d\n\n '''\n # ---------------------\n # (3) finish\n # ---------------------\n '''\n\n def finish(self):\n k_2 = post_process_k_crop2(self.curr_k, n=self.conf.n_filtering)\n save_final_kernel_png(k_2, self.conf, self.conf.kernel_gt)\n if self.conf.verbose:\n print(\n 'KernelGAN estimation complete! (see --%s-- folder)\\n' % self.conf.output_dir_path + '*' * 60 + '\\n\\n')\n\n if self.conf.X4:\n k_4 = analytic_kernel(k_2)\n k_4 = kernel_shift(k_4, 4)\n return k_4\n else:\n return k_2\n\n"
] | [
[
"numpy.ascontiguousarray",
"numpy.copy",
"numpy.expand_dims",
"numpy.transpose"
],
[
"torch.nn.init.constant_",
"torch.nn.init.normal_",
"torch.nn.init.orthogonal_",
"torch.cuda.is_available",
"torch.nn.DataParallel",
"torch.nn.init.kaiming_normal_"
],
[
"torch.randn_like",
"torch.nn.functional.conv2d",
"torch.FloatTensor",
"torch.Tensor"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
tinyrobots/Generalized-PixelVAE | [
"ee99634be08c726c3da7e8ba2675c8d1448e15af"
] | [
"fast_pixel_cnn_pp/test_end_to_end.py"
] | [
"from . import model\nfrom . import fast_nn\n\nimport tensorflow as tf\nimport numpy as np\n\nimport os\nimport unittest\n\n\nclass FastPixelCNNPPEndToEndTest(tf.test.TestCase):\n def test_end_to_end(self):\n with self.test_session() as sess:\n print('Creating model')\n image_size = (10, 32, 32, 4)\n batch_size, image_height, image_width, image_channels = image_size\n\n # Create placeholders.\n row_input = tf.placeholder(\n tf.float32, [batch_size, 1, image_width, image_channels],\n name='row_input')\n pixel_input = tf.placeholder(\n tf.float32, [batch_size, 1, 1, image_channels],\n name='pixel_input')\n row_id = tf.placeholder(tf.int32, [], name='row_id')\n col_id = tf.placeholder(tf.int32, [], name='col_id')\n ema = tf.train.ExponentialMovingAverage(0.9995)\n\n # Create the model.\n model_spec = tf.make_template('model', model.model_spec)\n sample, fast_nn_out, v_stack = model_spec(\n row_input, pixel_input, row_id, col_id, image_size)\n\n # Initialize the caches.\n cache_variables = [\n v for v in tf.global_variables() if 'cache' in v.name\n ]\n sess.run(tf.variables_initializer(cache_variables))\n\n # Load the pretrained model\n print('Restoring variables')\n vars_to_restore = {\n k: v\n for k, v in ema.variables_to_restore().items()\n if 'cache' not in k\n }\n saver = tf.train.Saver(vars_to_restore)\n ckpt_path = None\n assert ckpt_path, 'Provide a path to the checkpoint in this file'\n saver.restore(sess, ckpt_path)\n\n # Create the fixed random input.\n np.random.seed(2702)\n x = np.random.randint(0, 256, size=(10, 32, 32, 3))\n x = np.cast[np.float32]((x - 127.5) / 127.5)\n x_pad = np.concatenate(\n (x, np.ones((batch_size, 32, 32, 1))), axis=3)\n x_downshift = fast_nn.down_shift(x_pad)\n x_rightshift = fast_nn.right_shift(x_pad)\n\n # Holds the output.\n num_output_features = 10 * 10\n output_features = np.zeros(\n (batch_size, 32, 32, num_output_features))\n\n # Compute all features.\n print('Computing features')\n sess.run(fast_nn.reset_cache_op())\n for row in range(image_height):\n x_row_input = x_downshift[:, row:(row + 1), :, :]\n sess.run(v_stack, {row_input: x_row_input, row_id: row})\n\n for col in range(image_width):\n x_pixel_input = x_rightshift[:, row:(row + 1),\n col:(col + 1), :]\n feed_dict = {\n row_id: row,\n col_id: col,\n pixel_input: x_pixel_input\n }\n pixel_features = sess.run(fast_nn_out, feed_dict)\n output_features[:, row:(row + 1), col:(\n col + 1), :] = pixel_features\n\n ground_truth_file = os.path.join(os.path.dirname(os.path.realpath(__file__)),\n 'ground_truth_output.npy')\n ground_truth_features = np.load(ground_truth_file)\n total_features = np.prod(output_features[0].shape)\n for i in range(batch_size):\n self.assertTrue(\n np.allclose(\n output_features[i, :, :, :],\n ground_truth_features[i, :, :, :],\n atol=1e-4))\n"
] | [
[
"numpy.allclose",
"numpy.random.seed",
"tensorflow.global_variables",
"tensorflow.variables_initializer",
"tensorflow.placeholder",
"numpy.ones",
"tensorflow.train.ExponentialMovingAverage",
"tensorflow.make_template",
"numpy.prod",
"tensorflow.train.Saver",
"numpy.load",
"numpy.zeros",
"numpy.random.randint"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
}
] |
zhangyanyu0722/EC523_Project | [
"72673713bb798023e82ccc257e8c05459c34a4b9"
] | [
"carla-data-export/dataexport.py"
] | [
"\"\"\"\nThis file contains all the methods responsible for saving the generated data in the correct output format.\n\n\"\"\"\nimport cv2\nimport numpy as np\nimport os\nimport logging\nfrom utils import degrees_to_radians\nimport json\n\n\ndef save_groundplanes(planes_fname, player_measurements, lidar_height):\n from math import cos, sin\n \"\"\" Saves the groundplane vector of the current frame.\n The format of the ground plane file is first three lines describing the file (number of parameters).\n The next line is the three parameters of the normal vector, and the last is the height of the normal vector,\n which is the same as the distance to the camera in meters.\n \"\"\"\n rotation = player_measurements.transform.rotation\n pitch, roll = rotation.pitch, rotation.roll\n # Since measurements are in degrees, convert to radians\n pitch = degrees_to_radians(pitch)\n roll = degrees_to_radians(roll)\n # Rotate normal vector (y) wrt. pitch and yaw\n normal_vector = [cos(pitch)*sin(roll),\n -cos(pitch)*cos(roll),\n sin(pitch)\n ]\n normal_vector = map(str, normal_vector)\n with open(planes_fname, 'w') as f:\n f.write(\"# Plane\\n\")\n f.write(\"Width 4\\n\")\n f.write(\"Height 1\\n\")\n f.write(\"{} {}\\n\".format(\" \".join(normal_vector), lidar_height))\n logging.info(\"Wrote plane data to %s\", planes_fname)\n\n\ndef save_ref_files(OUTPUT_FOLDER, TIME_ON_NEW_EPISODE, PHASE, id):\n \"\"\" Appends the id of the given record to the files \"\"\"\n # for name in ['train.txt', 'val.txt', 'trainval.txt']:\n # path = os.path.join(OUTPUT_FOLDER, name)\n # with open(path, 'a') as f:\n # f.write(\"{0:06}\".format(id) + '\\n')\n # logging.info(\"Wrote reference files to %s\", path)\n\n prefix = os.path.join(\"\\\".\", \"data\", \"carla\", PHASE, \"label\", TIME_ON_NEW_EPISODE)\n name = \"{0:06}.json\\\"\".format(id)\n path = os.path.join(OUTPUT_FOLDER, \"label\", \"{}.json\".format(TIME_ON_NEW_EPISODE))\n\n with open(path, \"a\") as f:\n filePath = os.path.join(prefix, name)\n\n f.write(filePath + \"\\n\")\n\n logging.info(\"Wrote reference files to %s\", path)\n\n\ndef save_image_data(filename, image):\n logging.info(\"Wrote image data to %s\", filename)\n # Convert to correct color format\n color_fmt = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)\n cv2.imwrite(filename, color_fmt)\n\n\ndef save_lidar_data(filename, point_cloud, LIDAR_HEIGHT, format=\"bin\"):\n \"\"\" Saves lidar data to given filename, according to the lidar data format.\n bin is used for KITTI-data format, while .ply is the regular point cloud format\n In Unreal, the coordinate system of the engine is defined as, which is the same as the lidar points\n z\n ^ ^ x\n | /\n | /\n |/____> y\n This is a left-handed coordinate system, with x being forward, y to the right and z up\n See also https://github.com/carla-simulator/carla/issues/498\n However, the lidar coordinate system from KITTI is defined as\n z\n ^ ^ x\n | /\n | /\n y<____|/\n Which is a right handed coordinate sylstem\n Therefore, we need to flip the y axis of the lidar in order to get the correct lidar format for kitti.\n\n This corresponds to the following changes from Carla to Kitti\n Carla: X Y Z\n KITTI: X -Y Z\n NOTE: We do not flip the coordinate system when saving to .ply.\n \"\"\"\n logging.info(\"Wrote lidar data to %s\", filename)\n\n if format == \"bin\":\n lidar_array = [[point[0], -point[1], point[2], 1.0]\n for point in point_cloud]\n lidar_array = np.array(lidar_array).astype(np.float32)\n logging.debug(\"Lidar min/max of x: {} {}\".format(\n lidar_array[:, 
0].min(), lidar_array[:, 0].max()))\n logging.debug(\"Lidar min/max of y: {} {}\".format(\n lidar_array[:, 1].min(), lidar_array[:, 0].max()))\n logging.debug(\"Lidar min/max of z: {} {}\".format(\n lidar_array[:, 2].min(), lidar_array[:, 0].max()))\n lidar_array.tofile(filename)\n else:\n lidar_measurement.point_cloud.save_to_disk(filename)\n\n\ndef save_kitti_data(filename, datapoints):\n with open(filename, 'w') as f:\n # out_str = \"\\n\".join([str(point) for point in datapoints if point])\n # f.write(out_str)\n json.dump(datapoints, f)\n logging.info(\"Wrote kitti data to %s\", filename)\n\n\ndef save_calibration_matrices(filename, intrinsic_mat, extrinsic_mat):\n \"\"\" Saves the calibration matrices to a file.\n AVOD (and KITTI) refers to P as P=K*[R;t], so we will just store P.\n The resulting file will contain:\n 3x4 p0-p3 Camera P matrix. Contains extrinsic\n and intrinsic parameters. (P=K*[R;t])\n 3x3 r0_rect Rectification matrix, required to transform points\n from velodyne to camera coordinate frame.\n 3x4 tr_velodyne_to_cam Used to transform from velodyne to cam\n coordinate frame according to:\n Point_Camera = P_cam * R0_rect *\n Tr_velo_to_cam *\n Point_Velodyne.\n 3x4 tr_imu_to_velo Used to transform from imu to velodyne coordinate frame. This is not needed since we do not export\n imu data.\n \"\"\"\n # KITTI format demands that we flatten in row-major order\n ravel_mode = 'C'\n P0 = intrinsic_mat\n P0 = np.column_stack((P0, np.array([0, 0, 0])))\n P0 = np.ravel(P0, order=ravel_mode)\n R0 = np.identity(3)\n TR_velodyne = np.array([[0, -1, 0],\n [0, 0, -1],\n [1, 0, 0]])\n # Add translation vector from velo to camera. This is 0 because the position of camera and lidar is equal in our configuration.\n TR_velodyne = np.column_stack((TR_velodyne, np.array([0, 0, 0])))\n TR_imu_to_velo = np.identity(3)\n TR_imu_to_velo = np.column_stack((TR_imu_to_velo, np.array([0, 0, 0])))\n\n def write_flat(f, name, arr):\n f.write(\"{}: {}\\n\".format(name, ' '.join(\n map(str, arr.flatten(ravel_mode).squeeze()))))\n\n # All matrices are written on a line with spacing\n with open(filename, 'w') as f:\n for i in range(4): # Avod expects all 4 P-matrices even though we only use the first\n write_flat(f, \"P\" + str(i), P0)\n write_flat(f, \"R0_rect\", R0)\n write_flat(f, \"Tr_velo_to_cam\", TR_velodyne)\n write_flat(f, \"TR_imu_to_velo\", TR_imu_to_velo)\n logging.info(\"Wrote all calibration matrices to %s\", filename)\n"
] | [
[
"numpy.ravel",
"numpy.array",
"numpy.identity"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
lizhipengTouch/CSA-inpainting | [
"50602607ddc9153af5bfe627e355b0466fc4944f"
] | [
"models/vgg16.py"
] | [
"import torch\nimport torchvision\nfrom torchvision import models\nfrom collections import namedtuple\n\nclass Vgg16(torch.nn.Module):\n def __init__(self, requires_grad=False):\n super(Vgg16, self).__init__()\n vgg_pretrained_features = models.vgg16(pretrained=True).features # 获取预训练vgg网络层\n self.slice1 = torch.nn.Sequential()\n self.slice2 = torch.nn.Sequential()\n self.slice3 = torch.nn.Sequential()\n self.slice4 = torch.nn.Sequential()\n for x in range(5):\n self.slice1.add_module(str(x), vgg_pretrained_features[x])\n for x in range(5, 10):\n self.slice2.add_module(str(x), vgg_pretrained_features[x])\n for x in range(10, 17):\n self.slice3.add_module(str(x), vgg_pretrained_features[x])\n for x in range(17, 23):\n self.slice4.add_module(str(x), vgg_pretrained_features[x])\n if not requires_grad:\n for param in self.parameters():\n param.requires_grad = False\n\n def forward(self, X):\n h = self.slice1(X)\n h_relu1_2 = h\n h = self.slice2(h)\n h_relu2_2 = h\n h = self.slice3(h)\n h_relu3_3 = h\n h = self.slice4(h)\n h_relu4_3 = h\n vgg_outputs = namedtuple(\"VggOutputs\", ['relu1_2', 'relu2_2', 'relu3_3', 'relu4_3'])\n # 定义一个namedtuple类型数据,并包含列表中的属性。\n out = vgg_outputs(h_relu1_2, h_relu2_2, h_relu3_3, h_relu4_3)\n return out # 得到经过不同层的特征值"
] | [
[
"torch.nn.Sequential"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
alphaciel/Balancing-Robot-Raspberry-Pi-DIY | [
"8a61acf688ea0915017c40eaff3841a9b219f9b7"
] | [
"matplotlib/matplotlib_test/plot_lib_test.py"
] | [
"import matplotlib.pyplot as plt\nimport numpy as np\nfrom matplotlib.widgets import Slider, Button, RadioButtons\n\nfig = plt.figure()\nax = fig.add_subplot(111)\nfig.subplots_adjust(left=0.25, bottom=0.25)\nmin0 = 0\nmax0 = 25000\n\nim = max0 * np.random.random((10,10))\nim1 = ax.imshow(im)\nfig.colorbar(im1)\n\naxcolor = 'lightgoldenrodyellow'\naxmin = fig.add_axes([0.25, 0.1, 0.65, 0.03], axisbg=axcolor)\naxmax = fig.add_axes([0.25, 0.15, 0.65, 0.03], axisbg=axcolor)\n\nsmin = Slider(axmin, 'Min', 0, 30000, valinit=min0)\nsmax = Slider(axmax, 'Max', 0, 30000, valinit=max0)\n\ndef update(val):\n im1.set_clim([smin.val,smax.val])\n fig.canvas.draw()\nsmin.on_changed(update)\nsmax.on_changed(update)\n\nplt.show()"
] | [
[
"matplotlib.widgets.Slider",
"matplotlib.pyplot.show",
"numpy.random.random",
"matplotlib.pyplot.figure"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
Honghe/AnchorDETR | [
"fc3d45441241cd689b28878d3aa4b0bffb33a8b8"
] | [
"models/transformer.py"
] | [
"# ------------------------------------------------------------------------\n# Copyright (c) 2021 megvii-model. All Rights Reserved.\n# ------------------------------------------------------------------------\n# Modified from Deformable DETR (https://github.com/fundamentalvision/Deformable-DETR)\n# Copyright (c) 2020 SenseTime. All Rights Reserved.\n# ------------------------------------------------------------------------\n# Modified from DETR (https://github.com/facebookresearch/detr)\n# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved\n# ------------------------------------------------------------------------\nimport copy\nfrom typing import Optional, List\nimport math\n\nimport torch\nimport torch.nn.functional as F\nfrom torch import nn, Tensor\n\nfrom util.misc import inverse_sigmoid\n\n\nfrom models.row_column_decoupled_attention import MultiheadRCDA\n\nclass Transformer(nn.Module):\n def __init__(self, d_model=256, nhead=8,\n num_encoder_layers=6, num_decoder_layers=6, dim_feedforward=1024, dropout=0.,\n activation=\"relu\", num_feature_levels=3,num_query_position = 300,num_query_pattern=3,\n spatial_prior=\"learned\",attention_type=\"RCDA\"):\n super().__init__()\n\n self.d_model = d_model\n self.nhead = nhead\n\n self.attention_type = attention_type\n encoder_layer = TransformerEncoderLayerSpatial(d_model, dim_feedforward,\n dropout, activation, nhead , attention_type)\n encoder_layer_level = TransformerEncoderLayerLevel(d_model, dim_feedforward,\n dropout, activation, nhead)\n\n decoder_layer = TransformerDecoderLayer(d_model, dim_feedforward,\n dropout, activation, nhead,\n num_feature_levels, attention_type)\n\n if num_feature_levels == 1:\n self.num_encoder_layers_level = 0\n else:\n self.num_encoder_layers_level = num_encoder_layers // 2\n self.num_encoder_layers_spatial = num_encoder_layers - self.num_encoder_layers_level\n\n self.encoder_layers = _get_clones(encoder_layer, self.num_encoder_layers_spatial)\n self.encoder_layers_level = _get_clones(encoder_layer_level, self.num_encoder_layers_level)\n self.decoder_layers = _get_clones(decoder_layer, num_decoder_layers)\n\n self.spatial_prior=spatial_prior\n\n if num_feature_levels>1:\n self.level_embed = nn.Embedding(num_feature_levels, d_model)\n self.num_pattern = num_query_pattern\n self.pattern = nn.Embedding(self.num_pattern, d_model)\n\n self.num_position = num_query_position\n if self.spatial_prior == \"learned\":\n self.position = nn.Embedding(self.num_position, 2)\n\n self.adapt_pos2d = nn.Sequential(\n nn.Linear(d_model, d_model),\n nn.ReLU(),\n nn.Linear(d_model, d_model),\n )\n self.adapt_pos1d = nn.Sequential(\n nn.Linear(d_model, d_model),\n nn.ReLU(),\n nn.Linear(d_model, d_model),\n )\n\n self.num_layers = num_decoder_layers\n num_classes = 91\n\n self.class_embed = nn.Linear(d_model, num_classes)\n self.bbox_embed = MLP(d_model, d_model, 4, 3)\n\n self._reset_parameters()\n\n def _reset_parameters(self):\n\n num_pred = self.num_layers\n num_classes = 91\n prior_prob = 0.01\n bias_value = -math.log((1 - prior_prob) / prior_prob)\n self.class_embed.bias.data = torch.ones(num_classes) * bias_value\n\n nn.init.constant_(self.bbox_embed.layers[-1].weight.data, 0)\n nn.init.constant_(self.bbox_embed.layers[-1].bias.data, 0)\n if self.spatial_prior == \"learned\":\n nn.init.uniform_(self.position.weight.data, 0, 1)\n\n nn.init.constant_(self.bbox_embed.layers[-1].bias.data[2:], -2.0)\n self.class_embed = nn.ModuleList([self.class_embed for _ in range(num_pred)])\n self.bbox_embed = 
nn.ModuleList([self.bbox_embed for _ in range(num_pred)])\n\n\n def forward(self, srcs, masks):\n\n # prepare input for decoder\n bs, l, c, h, w = srcs.shape\n\n if self.spatial_prior == \"learned\":\n reference_points = self.position.weight.unsqueeze(0).repeat(bs, self.num_pattern, 1)\n elif self.spatial_prior == \"grid\":\n nx=ny=round(math.sqrt(self.num_position))\n self.num_position=nx*ny\n x = (torch.arange(nx) + 0.5) / nx\n y = (torch.arange(ny) + 0.5) / ny\n xy=torch.meshgrid(x,y)\n reference_points=torch.cat([xy[0].reshape(-1)[...,None],xy[1].reshape(-1)[...,None]],-1).cuda()\n reference_points = reference_points.unsqueeze(0).repeat(bs, self.num_pattern, 1)\n else:\n raise ValueError(f'unknown {self.spatial_prior} spatial prior')\n\n tgt = self.pattern.weight.reshape(1, self.num_pattern, 1, c).repeat(bs, 1, self.num_position, 1).reshape(\n bs, self.num_pattern * self.num_position, c)\n\n\n mask = masks[-1].unsqueeze(1).repeat(1,l,1,1).reshape(bs*l,h,w)\n pos_col, pos_row = mask2pos(mask)\n if self.attention_type==\"RCDA\":\n posemb_row = self.adapt_pos1d(pos2posemb1d(pos_row))\n posemb_col = self.adapt_pos1d(pos2posemb1d(pos_col))\n posemb_2d = None\n else:\n pos_2d = torch.cat([pos_row.unsqueeze(1).repeat(1, h, 1).unsqueeze(-1), pos_col.unsqueeze(2).repeat(1, 1, w).unsqueeze(-1)],dim=-1)\n posemb_2d = self.adapt_pos2d(pos2posemb2d(pos_2d))\n posemb_row = posemb_col = None\n\n outputs = srcs.reshape(bs * l, c, h, w)\n\n for idx in range(len(self.encoder_layers)):\n outputs = self.encoder_layers[idx](outputs, mask, posemb_row, posemb_col,posemb_2d)\n if idx < self.num_encoder_layers_level:\n outputs = self.encoder_layers_level[idx](outputs, level_emb=self.level_embed.weight.unsqueeze(1).unsqueeze(0).repeat(bs,1,1,1).reshape(bs*l,1,c))\n\n srcs = outputs.reshape(bs, l, c, h, w)\n\n output = tgt\n\n outputs_classes = []\n outputs_coords = []\n for lid, layer in enumerate(self.decoder_layers):\n output = layer(output, reference_points, srcs, mask, adapt_pos2d=self.adapt_pos2d,\n adapt_pos1d=self.adapt_pos1d, posemb_row=posemb_row, posemb_col=posemb_col,posemb_2d=posemb_2d)\n reference = inverse_sigmoid(reference_points)\n outputs_class = self.class_embed[lid](output)\n tmp = self.bbox_embed[lid](output)\n if reference.shape[-1] == 4:\n tmp += reference\n else:\n assert reference.shape[-1] == 2\n tmp[..., :2] += reference\n outputs_coord = tmp.sigmoid()\n outputs_classes.append(outputs_class[None,])\n outputs_coords.append(outputs_coord[None,])\n\n output = torch.cat(outputs_classes, dim=0), torch.cat(outputs_coords, dim=0)\n\n return output\n\n\nclass TransformerEncoderLayerSpatial(nn.Module):\n def __init__(self,\n d_model=256, d_ffn=1024,\n dropout=0., activation=\"relu\",\n n_heads=8, attention_type=\"RCDA\"):\n super().__init__()\n\n self.attention_type = attention_type\n if attention_type==\"RCDA\":\n attention_module=MultiheadRCDA\n elif attention_type == \"nn.MultiheadAttention\":\n attention_module=nn.MultiheadAttention\n else:\n raise ValueError(f'unknown {attention_type} attention_type')\n\n # self attention\n self.self_attn = attention_module(d_model, n_heads, dropout=dropout)\n self.dropout1 = nn.Dropout(dropout)\n self.norm1 = nn.LayerNorm(d_model)\n\n # ffn\n self.ffn = FFN(d_model, d_ffn, dropout, activation)\n\n @staticmethod\n def with_pos_embed(tensor, pos):\n return tensor if pos is None else tensor + pos\n\n def forward(self, src, padding_mask=None, posemb_row=None, posemb_col=None,posemb_2d=None):\n # self attention\n bz, c, h, w = src.shape\n src = src.permute(0, 
2, 3, 1)\n\n if self.attention_type==\"RCDA\":\n posemb_row = posemb_row.unsqueeze(1).repeat(1, h, 1, 1)\n posemb_col = posemb_col.unsqueeze(2).repeat(1, 1, w, 1)\n src2 = self.self_attn((src + posemb_row).reshape(bz, h * w, c), (src + posemb_col).reshape(bz, h * w, c),\n src + posemb_row, src + posemb_col,\n src, key_padding_mask=padding_mask)[0].transpose(0, 1).reshape(bz, h, w, c)\n else:\n src2 = self.self_attn((src + posemb_2d).reshape(bz, h * w, c).transpose(0, 1),\n (src + posemb_2d).reshape(bz, h * w, c).transpose(0, 1),\n src.reshape(bz, h * w, c).transpose(0, 1))[0].transpose(0, 1).reshape(bz, h, w, c)\n\n src = src + self.dropout1(src2)\n src = self.norm1(src)\n\n # ffn\n src = self.ffn(src)\n src = src.permute(0, 3, 1, 2)\n return src\n\n\nclass TransformerEncoderLayerLevel(nn.Module):\n def __init__(self,\n d_model=256, d_ffn=1024,\n dropout=0., activation=\"relu\",\n n_heads=8):\n super().__init__()\n\n # self attention\n self.self_attn_level = nn.MultiheadAttention(d_model, n_heads, dropout=dropout)\n self.dropout1 = nn.Dropout(dropout)\n self.norm1 = nn.LayerNorm(d_model)\n\n # ffn\n self.ffn = FFN(d_model, d_ffn, dropout, activation)\n\n @staticmethod\n def with_pos_embed(tensor, pos):\n return tensor if pos is None else tensor + pos\n\n def forward(self, src, level_emb=0):\n # self attention\n bz, c, h, w = src.shape\n src = src.permute(0, 2, 3, 1)\n\n src2 = self.self_attn_level(src.reshape(bz, h * w, c) + level_emb, src.reshape(bz, h * w, c) + level_emb,\n src.reshape(bz, h * w, c))[0].reshape(bz, h, w, c)\n\n src = src + self.dropout1(src2)\n src = self.norm1(src)\n\n # ffn\n src = self.ffn(src)\n src = src.permute(0, 3, 1, 2)\n return src\n\n\n\nclass TransformerDecoderLayer(nn.Module):\n def __init__(self, d_model=256, d_ffn=1024,\n dropout=0., activation=\"relu\", n_heads=8,\n n_levels=3, attention_type=\"RCDA\"):\n super().__init__()\n\n self.attention_type = attention_type\n self.attention_type = attention_type\n if attention_type==\"RCDA\":\n attention_module=MultiheadRCDA\n elif attention_type == \"nn.MultiheadAttention\":\n attention_module=nn.MultiheadAttention\n else:\n raise ValueError(f'unknown {attention_type} attention_type')\n\n # cross attention\n self.cross_attn = attention_module(d_model, n_heads, dropout=dropout)\n self.dropout1 = nn.Dropout(dropout)\n self.norm1 = nn.LayerNorm(d_model)\n\n # self attention\n self.self_attn = nn.MultiheadAttention(d_model, n_heads, dropout=dropout)\n self.dropout2 = nn.Dropout(dropout)\n self.norm2 = nn.LayerNorm(d_model)\n\n\n # level combination\n if n_levels>1:\n self.level_fc = nn.Linear(d_model * n_levels, d_model)\n\n # ffn\n self.ffn = FFN(d_model, d_ffn, dropout, activation)\n\n @staticmethod\n def with_pos_embed(tensor, pos):\n return tensor if pos is None else tensor + pos\n\n def forward(self, tgt, reference_points, srcs, src_padding_masks=None, adapt_pos2d=None,\n adapt_pos1d=None, posemb_row=None, posemb_col=None, posemb_2d=None):\n tgt_len = tgt.shape[1]\n\n query_pos = pos2posemb2d(reference_points.squeeze(2))\n query_pos = adapt_pos2d(query_pos)\n # self attention\n q = k = self.with_pos_embed(tgt, query_pos)\n tgt2 = self.self_attn(q.transpose(0, 1), k.transpose(0, 1), tgt.transpose(0, 1))[0].transpose(0, 1)\n tgt = tgt + self.dropout2(tgt2)\n tgt = self.norm2(tgt)\n\n bz, l, c, h, w = srcs.shape\n srcs = srcs.reshape(bz * l, c, h, w).permute(0, 2, 3, 1)\n\n if self.attention_type == \"RCDA\":\n query_pos_x = adapt_pos1d(pos2posemb1d(reference_points[..., 0]))\n query_pos_y = 
adapt_pos1d(pos2posemb1d(reference_points[..., 1]))\n posemb_row = posemb_row.unsqueeze(1).repeat(1, h, 1, 1)\n posemb_col = posemb_col.unsqueeze(2).repeat(1, 1, w, 1)\n src_row = src_col = srcs\n k_row = src_row + posemb_row\n k_col = src_col + posemb_col\n tgt2 = self.cross_attn((tgt + query_pos_x).repeat(l, 1, 1), (tgt + query_pos_y).repeat(l, 1, 1), k_row, k_col,\n srcs, key_padding_mask=src_padding_masks)[0].transpose(0, 1)\n else:\n tgt2 = self.cross_attn((tgt + query_pos).repeat(l, 1, 1).transpose(0, 1),\n (srcs + posemb_2d).reshape(bz * l, h * w, c).transpose(0,1),\n srcs.reshape(bz * l, h * w, c).transpose(0, 1))[0].transpose(0,1)\n\n if l > 1:\n tgt2 = self.level_fc(tgt2.reshape(bz, l, tgt_len, c).permute(0, 2, 3, 1).reshape(bz, tgt_len, c * l))\n\n tgt = tgt + self.dropout1(tgt2)\n tgt = self.norm1(tgt)\n\n # ffn\n tgt = self.ffn(tgt)\n\n return tgt\n\n\nclass FFN(nn.Module):\n\n def __init__(self, d_model=256, d_ffn=1024, dropout=0., activation='relu'):\n super().__init__()\n self.linear1 = nn.Linear(d_model, d_ffn)\n self.activation = _get_activation_fn(activation)\n self.dropout2 = nn.Dropout(dropout)\n self.linear2 = nn.Linear(d_ffn, d_model)\n self.dropout3 = nn.Dropout(dropout)\n self.norm2 = nn.LayerNorm(d_model)\n\n def forward(self, src):\n src2 = self.linear2(self.dropout2(self.activation(self.linear1(src))))\n src = src + self.dropout3(src2)\n src = self.norm2(src)\n return src\n\n\nclass MLP(nn.Module):\n\n def __init__(self, input_dim, hidden_dim, output_dim, num_layers):\n super().__init__()\n self.num_layers = num_layers\n h = [hidden_dim] * (num_layers - 1)\n self.layers = nn.ModuleList(nn.Linear(n, k) for n, k in zip([input_dim] + h, h + [output_dim]))\n\n def forward(self, x):\n for i, layer in enumerate(self.layers):\n x = F.relu(layer(x)) if i < self.num_layers - 1 else layer(x)\n return x\n\n\ndef _get_clones(module, N):\n return nn.ModuleList([copy.deepcopy(module) for i in range(N)])\n\n\ndef _get_activation_fn(activation):\n \"\"\"Return an activation function given a string\"\"\"\n if activation == \"relu\":\n return F.relu\n if activation == \"gelu\":\n return F.gelu\n if activation == \"glu\":\n return F.glu\n raise RuntimeError(F\"activation should be relu/gelu, not {activation}.\")\n\n\ndef build_transformer(args):\n return Transformer(\n d_model=args.hidden_dim,\n nhead=args.nheads,\n num_encoder_layers=args.enc_layers,\n num_decoder_layers=args.dec_layers,\n dim_feedforward=args.dim_feedforward,\n dropout=args.dropout,\n activation=\"relu\",\n num_feature_levels=args.num_feature_levels,\n num_query_position=args.num_query_position,\n num_query_pattern=args.num_query_pattern,\n spatial_prior=args.spatial_prior,\n attention_type=args.attention_type,\n)\n\n\n\n\n\ndef pos2posemb2d(pos, num_pos_feats=128, temperature=10000):\n scale = 2 * math.pi\n pos = pos * scale\n dim_t = torch.arange(num_pos_feats, dtype=torch.float32, device=pos.device)\n dim_t = temperature ** (2 * (dim_t // 2) / num_pos_feats)\n pos_x = pos[..., 0, None] / dim_t\n pos_y = pos[..., 1, None] / dim_t\n pos_x = torch.stack((pos_x[..., 0::2].sin(), pos_x[..., 1::2].cos()), dim=-1).flatten(-2)\n pos_y = torch.stack((pos_y[..., 0::2].sin(), pos_y[..., 1::2].cos()), dim=-1).flatten(-2)\n posemb = torch.cat((pos_y, pos_x), dim=-1)\n return posemb\n\n\ndef pos2posemb1d(pos, num_pos_feats=256, temperature=10000):\n scale = 2 * math.pi\n pos = pos * scale\n dim_t = torch.arange(num_pos_feats, dtype=torch.float32, device=pos.device)\n dim_t = temperature ** (2 * (dim_t // 2) / 
num_pos_feats)\n pos_x = pos[..., None] / dim_t\n posemb = torch.stack((pos_x[..., 0::2].sin(), pos_x[..., 1::2].cos()), dim=-1).flatten(-2)\n return posemb\n\n\ndef mask2pos(mask):\n not_mask = ~mask\n y_embed = not_mask[:, :, 0].cumsum(1, dtype=torch.float32)\n x_embed = not_mask[:, 0, :].cumsum(1, dtype=torch.float32)\n y_embed = (y_embed - 0.5) / y_embed[:, -1:]\n x_embed = (x_embed - 0.5) / x_embed[:, -1:]\n return y_embed, x_embed\n"
] | [
[
"torch.nn.Dropout",
"torch.nn.init.uniform_",
"torch.ones",
"torch.nn.MultiheadAttention",
"torch.cat",
"torch.nn.init.constant_",
"torch.nn.Embedding",
"torch.nn.LayerNorm",
"torch.nn.Linear",
"torch.arange",
"torch.nn.ReLU",
"torch.meshgrid"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
SIMEXP/nilearn | [
"4f51aea58f38689ca32c2edd748528d521e6cfb0",
"4f51aea58f38689ca32c2edd748528d521e6cfb0",
"4f51aea58f38689ca32c2edd748528d521e6cfb0",
"4f51aea58f38689ca32c2edd748528d521e6cfb0",
"4f51aea58f38689ca32c2edd748528d521e6cfb0"
] | [
"examples/01_plotting/plot_colormaps.py",
"nilearn/plotting/surf_plotting.py",
"nilearn/plotting/tests/test_html_stat_map.py",
"nilearn/plotting/cm.py",
"nilearn/decoding/tests/test_tv.py"
] | [
"\"\"\"\nMatplotlib colormaps in Nilearn\n================================\n\nVisualize HCP connectome workbench color maps shipped with Nilearn\nwhich can be used for plotting brain images on surface.\n\nSee :ref:`surface-plotting` for surface plotting details.\n\"\"\"\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom nilearn.plotting.cm import _cmap_d as nilearn_cmaps\nfrom nilearn.plotting import show\n\n###########################################################################\n# Plot color maps\n# ----------------\n\nnmaps = len(nilearn_cmaps)\na = np.outer(np.arange(0, 1, 0.01), np.ones(10))\n\n# Initialize the figure\nplt.figure(figsize=(10, 4.2))\nplt.subplots_adjust(top=0.4, bottom=0.05, left=0.01, right=0.99)\n\nfor index, cmap in enumerate(nilearn_cmaps):\n plt.subplot(1, nmaps + 1, index + 1)\n plt.imshow(a, cmap=nilearn_cmaps[cmap])\n plt.axis('off')\n plt.title(cmap, fontsize=10, va='bottom', rotation=90)\n\n###########################################################################\n# Plot matplotlib color maps\n# --------------------------\nplt.figure(figsize=(10, 5))\nplt.subplots_adjust(top=0.8, bottom=0.05, left=0.01, right=0.99)\ndeprecated_cmaps = ['Vega10', 'Vega20', 'Vega20b', 'Vega20c', 'spectral']\nm_cmaps = []\nfor m in plt.cm.datad:\n if not m.endswith(\"_r\") and m not in deprecated_cmaps:\n m_cmaps.append(m)\nm_cmaps.sort()\n\nfor index, cmap in enumerate(m_cmaps):\n plt.subplot(1, len(m_cmaps) + 1, index + 1)\n plt.imshow(a, cmap=plt.get_cmap(cmap), aspect='auto')\n plt.axis('off')\n plt.title(cmap, fontsize=10, va='bottom', rotation=90)\n\nshow()\n",
"\"\"\"\nFunctions for surface visualization.\nOnly matplotlib is required.\n\"\"\"\nimport numpy as np\n\nimport matplotlib.pyplot as plt\n\nfrom mpl_toolkits.mplot3d import Axes3D\n\nfrom matplotlib.colorbar import make_axes\nfrom matplotlib.cm import ScalarMappable, get_cmap\nfrom matplotlib.colors import Normalize, LinearSegmentedColormap\n\nfrom ..surface import load_surf_data, load_surf_mesh\nfrom .._utils.compat import _basestring\nfrom .img_plotting import _get_colorbar_and_data_ranges, _crop_colorbar\n\n\ndef plot_surf(surf_mesh, surf_map=None, bg_map=None,\n hemi='left', view='lateral', cmap=None, colorbar=False,\n avg_method='mean', threshold=None, alpha='auto',\n bg_on_data=False, darkness=1, vmin=None, vmax=None,\n cbar_vmin=None, cbar_vmax=None,\n title=None, output_file=None, axes=None, figure=None, **kwargs):\n \"\"\" Plotting of surfaces with optional background and data\n\n .. versionadded:: 0.3\n\n Parameters\n ----------\n surf_mesh: str or list of two numpy.ndarray\n Surface mesh geometry, can be a file (valid formats are\n .gii or Freesurfer specific files such as .orig, .pial,\n .sphere, .white, .inflated) or\n a list of two Numpy arrays, the first containing the x-y-z coordinates\n of the mesh vertices, the second containing the indices\n (into coords) of the mesh faces.\n\n surf_map: str or numpy.ndarray, optional.\n Data to be displayed on the surface mesh. Can be a file (valid formats\n are .gii, .mgz, .nii, .nii.gz, or Freesurfer specific files such as\n .thickness, .curv, .sulc, .annot, .label) or\n a Numpy array with a value for each vertex of the surf_mesh.\n\n bg_map: Surface data object (to be defined), optional,\n Background image to be plotted on the mesh underneath the\n surf_data in greyscale, most likely a sulcal depth map for\n realistic shading.\n\n hemi : {'left', 'right'}, default is 'left'\n Hemisphere to display.\n\n view: {'lateral', 'medial', 'dorsal', 'ventral', 'anterior', 'posterior'},\n default is 'lateral'\n View of the surface that is rendered.\n\n cmap: matplotlib colormap, str or colormap object, default is None\n To use for plotting of the stat_map. Either a string\n which is a name of a matplotlib colormap, or a matplotlib\n colormap object. If None, matplotlib default will be chosen\n\n colorbar : bool, optional, default is False\n If True, a colorbar of surf_map is displayed.\n\n avg_method: {'mean', 'median'}, default is 'mean'\n How to average vertex values to derive the face value, mean results\n in smooth, median in sharp boundaries.\n\n threshold : a number or None, default is None.\n If None is given, the image is not thresholded.\n If a number is given, it is used to threshold the image, values\n below the threshold (in absolute value) are plotted as transparent.\n\n alpha: float, alpha level of the mesh (not surf_data), default 'auto'\n If 'auto' is chosen, alpha will default to .5 when no bg_map\n is passed and to 1 if a bg_map is passed.\n\n bg_on_data: bool, default is False\n If True, and a bg_map is specified, the surf_data data is multiplied\n by the background image, so that e.g. 
sulcal depth is visible beneath\n the surf_data.\n NOTE: that this non-uniformly changes the surf_data values according\n to e.g the sulcal depth.\n\n darkness: float, between 0 and 1, default is 1\n Specifying the darkness of the background image.\n 1 indicates that the original values of the background are used.\n .5 indicates the background values are reduced by half before being\n applied.\n\n vmin, vmax: lower / upper bound to plot surf_data values\n If None , the values will be set to min/max of the data\n\n title : str, optional\n Figure title.\n\n output_file: str, or None, optional\n The name of an image file to export plot to. Valid extensions\n are .png, .pdf, .svg. If output_file is not None, the plot\n is saved to a file, and the display is closed.\n\n axes: instance of matplotlib axes, None, optional\n The axes instance to plot to. The projection must be '3d' (e.g.,\n `figure, axes = plt.subplots(subplot_kw={'projection': '3d'})`,\n where axes should be passed.).\n If None, a new axes is created.\n\n figure: instance of matplotlib figure, None, optional\n The figure instance to plot to. If None, a new figure is created.\n\n See Also\n --------\n nilearn.datasets.fetch_surf_fsaverage : For surface data object to be\n used as background map for this plotting function.\n\n nilearn.plotting.plot_surf_roi : For plotting statistical maps on brain\n surfaces.\n\n nilearn.plotting.plot_surf_stat_map : for plotting statistical maps on\n brain surfaces.\n \"\"\"\n\n # load mesh and derive axes limits\n mesh = load_surf_mesh(surf_mesh)\n coords, faces = mesh[0], mesh[1]\n limits = [coords.min(), coords.max()]\n\n # set view\n if hemi == 'right':\n if view == 'lateral':\n elev, azim = 0, 0\n elif view == 'medial':\n elev, azim = 0, 180\n elif view == 'dorsal':\n elev, azim = 90, 0\n elif view == 'ventral':\n elev, azim = 270, 0\n elif view == 'anterior':\n elev, azim = 0, 90\n elif view == 'posterior':\n elev, azim = 0, 270\n else:\n raise ValueError('view must be one of lateral, medial, '\n 'dorsal, ventral, anterior, or posterior')\n elif hemi == 'left':\n if view == 'medial':\n elev, azim = 0, 0\n elif view == 'lateral':\n elev, azim = 0, 180\n elif view == 'dorsal':\n elev, azim = 90, 0\n elif view == 'ventral':\n elev, azim = 270, 0\n elif view == 'anterior':\n elev, azim = 0, 90\n elif view == 'posterior':\n elev, azim = 0, 270\n else:\n raise ValueError('view must be one of lateral, medial, '\n 'dorsal, ventral, anterior, or posterior')\n else:\n raise ValueError('hemi must be one of right or left')\n\n # set alpha if in auto mode\n if alpha == 'auto':\n if bg_map is None:\n alpha = .5\n else:\n alpha = 1\n\n # if no cmap is given, set to matplotlib default\n if cmap is None:\n cmap = plt.cm.get_cmap(plt.rcParamsDefault['image.cmap'])\n else:\n # if cmap is given as string, translate to matplotlib cmap\n if isinstance(cmap, _basestring):\n cmap = plt.cm.get_cmap(cmap)\n\n # initiate figure and 3d axes\n if axes is None:\n if figure is None:\n figure = plt.figure()\n axes = Axes3D(figure, rect=[0, 0, 1, 1],\n xlim=limits, ylim=limits)\n else:\n if figure is None:\n figure = axes.get_figure()\n axes.set_xlim(*limits)\n axes.set_ylim(*limits)\n axes.view_init(elev=elev, azim=azim)\n axes.set_axis_off()\n\n # plot mesh without data\n p3dcollec = axes.plot_trisurf(coords[:, 0], coords[:, 1], coords[:, 2],\n triangles=faces, linewidth=0.,\n antialiased=False,\n color='white')\n\n # reduce viewing distance to remove space around mesh\n axes.dist = 8\n\n # set_facecolors function of 
Poly3DCollection is used as passing the\n # facecolors argument to plot_trisurf does not seem to work\n face_colors = np.ones((faces.shape[0], 4))\n\n if bg_map is None:\n bg_data = np.ones(coords.shape[0]) * 0.5\n\n else:\n bg_data = load_surf_data(bg_map)\n if bg_data.shape[0] != coords.shape[0]:\n raise ValueError('The bg_map does not have the same number '\n 'of vertices as the mesh.')\n\n bg_faces = np.mean(bg_data[faces], axis=1)\n if bg_faces.min() != bg_faces.max():\n bg_faces = bg_faces - bg_faces.min()\n bg_faces = bg_faces / bg_faces.max()\n # control background darkness\n bg_faces *= darkness\n face_colors = plt.cm.gray_r(bg_faces)\n\n # modify alpha values of background\n face_colors[:, 3] = alpha * face_colors[:, 3]\n # should it be possible to modify alpha of surf data as well?\n\n if surf_map is not None:\n surf_map_data = load_surf_data(surf_map)\n if len(surf_map_data.shape) is not 1:\n raise ValueError('surf_map can only have one dimension but has'\n '%i dimensions' % len(surf_map_data.shape))\n if surf_map_data.shape[0] != coords.shape[0]:\n raise ValueError('The surf_map does not have the same number '\n 'of vertices as the mesh.')\n\n # create face values from vertex values by selected avg methods\n if avg_method == 'mean':\n surf_map_faces = np.mean(surf_map_data[faces], axis=1)\n elif avg_method == 'median':\n surf_map_faces = np.median(surf_map_data[faces], axis=1)\n\n # if no vmin/vmax are passed figure them out from data\n if vmin is None:\n vmin = np.nanmin(surf_map_faces)\n if vmax is None:\n vmax = np.nanmax(surf_map_faces)\n\n # treshold if inidcated\n if threshold is None:\n kept_indices = np.arange(surf_map_faces.shape[0])\n else:\n kept_indices = np.where(np.abs(surf_map_faces) >= threshold)[0]\n\n surf_map_faces = surf_map_faces - vmin\n surf_map_faces = surf_map_faces / (vmax - vmin)\n\n # multiply data with background if indicated\n if bg_on_data:\n face_colors[kept_indices] = cmap(surf_map_faces[kept_indices])\\\n * face_colors[kept_indices]\n else:\n face_colors[kept_indices] = cmap(surf_map_faces[kept_indices])\n\n if colorbar:\n our_cmap = get_cmap(cmap)\n norm = Normalize(vmin=vmin, vmax=vmax)\n\n nb_ticks = 5\n ticks = np.linspace(vmin, vmax, nb_ticks)\n bounds = np.linspace(vmin, vmax, our_cmap.N)\n\n if threshold is not None:\n cmaplist = [our_cmap(i) for i in range(our_cmap.N)]\n # set colors to grey for absolute values < threshold\n istart = int(norm(-threshold, clip=True) *\n (our_cmap.N - 1))\n istop = int(norm(threshold, clip=True) *\n (our_cmap.N - 1))\n for i in range(istart, istop):\n cmaplist[i] = (0.5, 0.5, 0.5, 1.)\n our_cmap = LinearSegmentedColormap.from_list(\n 'Custom cmap', cmaplist, our_cmap.N)\n\n # we need to create a proxy mappable\n proxy_mappable = ScalarMappable(cmap=our_cmap, norm=norm)\n proxy_mappable.set_array(surf_map_faces)\n cax, kw = make_axes(axes, location='right', fraction=.1,\n shrink=.6, pad=.0)\n cbar = figure.colorbar(\n proxy_mappable, cax=cax, ticks=ticks,\n boundaries=bounds, spacing='proportional',\n format='%.2g', orientation='vertical')\n _crop_colorbar(cbar, cbar_vmin, cbar_vmax)\n\n p3dcollec.set_facecolors(face_colors)\n\n if title is not None:\n axes.set_title(title, position=(.5, .95))\n\n # save figure if output file is given\n if output_file is not None:\n figure.savefig(output_file)\n plt.close(figure)\n else:\n return figure\n\n\ndef plot_surf_stat_map(surf_mesh, stat_map, bg_map=None,\n hemi='left', view='lateral', threshold=None,\n alpha='auto', vmax=None, cmap='cold_hot',\n colorbar=True, 
symmetric_cbar=\"auto\", bg_on_data=False,\n darkness=1, title=None, output_file=None, axes=None,\n figure=None, **kwargs):\n \"\"\" Plotting a stats map on a surface mesh with optional background\n\n .. versionadded:: 0.3\n\n Parameters\n ----------\n surf_mesh : str or list of two numpy.ndarray\n Surface mesh geometry, can be a file (valid formats are\n .gii or Freesurfer specific files such as .orig, .pial,\n .sphere, .white, .inflated) or\n a list of two Numpy arrays, the first containing the x-y-z\n coordinates of the mesh vertices, the second containing the\n indices (into coords) of the mesh faces\n\n stat_map : str or numpy.ndarray\n Statistical map to be displayed on the surface mesh, can\n be a file (valid formats are .gii, .mgz, .nii, .nii.gz, or\n Freesurfer specific files such as .thickness, .curv, .sulc, .annot,\n .label) or\n a Numpy array with a value for each vertex of the surf_mesh.\n\n bg_map : Surface data object (to be defined), optional,\n Background image to be plotted on the mesh underneath the\n stat_map in greyscale, most likely a sulcal depth map for\n realistic shading.\n\n hemi : {'left', 'right'}, default is 'left'\n Hemispere to display.\n\n view: {'lateral', 'medial', 'dorsal', 'ventral', 'anterior', 'posterior'},\n default is 'lateral'\n View of the surface that is rendered.\n\n threshold : a number or None, default is None\n If None is given, the image is not thresholded.\n If a number is given, it is used to threshold the image,\n values below the threshold (in absolute value) are plotted\n as transparent.\n\n cmap : matplotlib colormap in str or colormap object, default 'cold_hot'\n To use for plotting of the stat_map. Either a string\n which is a name of a matplotlib colormap, or a matplotlib\n colormap object.\n\n colorbar : bool, optional, default is False\n If True, a symmetric colorbar of the statistical map is displayed.\n\n alpha : float, alpha level of the mesh (not the stat_map), default 'auto'\n If 'auto' is chosen, alpha will default to .5 when no bg_map is\n passed and to 1 if a bg_map is passed.\n\n vmax : upper bound for plotting of stat_map values.\n\n symmetric_cbar : bool or 'auto', optional, default 'auto'\n Specifies whether the colorbar should range from -vmax to vmax\n or from vmin to vmax. Setting to 'auto' will select the latter\n if the range of the whole image is either positive or negative.\n Note: The colormap will always range from -vmax to vmax.\n\n bg_on_data : bool, default is False\n If True, and a bg_map is specified, the stat_map data is multiplied\n by the background image, so that e.g. sulcal depth is visible beneath\n the stat_map.\n NOTE: that this non-uniformly changes the stat_map values according\n to e.g the sulcal depth.\n\n darkness: float, between 0 and 1, default 1\n Specifying the darkness of the background image. 1 indicates that the\n original values of the background are used. .5 indicates the\n background values are reduced by half before being applied.\n\n title : str, optional\n Figure title.\n\n output_file: str, or None, optional\n The name of an image file to export plot to. Valid extensions\n are .png, .pdf, .svg. If output_file is not None, the plot\n is saved to a file, and the display is closed.\n\n axes: instance of matplotlib axes, None, optional\n The axes instance to plot to. 
The projection must be '3d' (e.g.,\n `figure, axes = plt.subplots(subplot_kw={'projection': '3d'})`,\n where axes should be passed.).\n If None, a new axes is created.\n\n figure: instance of matplotlib figure, None, optional\n The figure instance to plot to. If None, a new figure is created.\n\n See Also\n --------\n nilearn.datasets.fetch_surf_fsaverage: For surface data object to be\n used as background map for this plotting function.\n\n nilearn.plotting.plot_surf: For brain surface visualization.\n \"\"\"\n\n # Call _get_colorbar_and_data_ranges to derive symmetric vmin, vmax\n # And colorbar limits depending on symmetric_cbar settings\n cbar_vmin, cbar_vmax, vmin, vmax = _get_colorbar_and_data_ranges(\n stat_map, vmax, symmetric_cbar, kwargs)\n\n display = plot_surf(\n surf_mesh, surf_map=stat_map, bg_map=bg_map, hemi=hemi, view=view,\n avg_method='mean', threshold=threshold, cmap=cmap, colorbar=colorbar,\n alpha=alpha, bg_on_data=bg_on_data, darkness=darkness, vmax=vmax,\n vmin=vmin, title=title, output_file=output_file, axes=axes,\n figure=figure, cbar_vmin=cbar_vmin, cbar_vmax=cbar_vmax, **kwargs)\n\n return display\n\n\ndef plot_surf_roi(surf_mesh, roi_map, bg_map=None,\n hemi='left', view='lateral', threshold=1e-14,\n alpha='auto', vmin=None, vmax=None, cmap='gist_ncar',\n bg_on_data=False, darkness=1, title=None,\n output_file=None, axes=None, figure=None, **kwargs):\n \"\"\" Plotting ROI on a surface mesh with optional background\n\n .. versionadded:: 0.3\n\n Parameters\n ----------\n surf_mesh : str or list of two numpy.ndarray\n Surface mesh geometry, can be a file (valid formats are\n .gii or Freesurfer specific files such as .orig, .pial,\n .sphere, .white, .inflated) or\n a list of two Numpy arrays, the first containing the x-y-z\n coordinates of the mesh vertices, the second containing the indices\n (into coords) of the mesh faces\n\n roi_map : str or numpy.ndarray or list of numpy.ndarray\n ROI map to be displayed on the surface mesh, can be a file\n (valid formats are .gii, .mgz, .nii, .nii.gz, or Freesurfer specific\n files such as .annot or .label), or\n a Numpy array with a value for each vertex of the surf_mesh.\n The value at each vertex one inside the ROI and zero inside ROI, or an\n integer giving the label number for atlases.\n\n hemi : {'left', 'right'}, default is 'left'\n Hemisphere to display.\n\n bg_map : Surface data object (to be defined), optional,\n Background image to be plotted on the mesh underneath the\n stat_map in greyscale, most likely a sulcal depth map for\n realistic shading.\n\n view: {'lateral', 'medial', 'dorsal', 'ventral', 'anterior', 'posterior'},\n default is 'lateral'\n View of the surface that is rendered.\n\n threshold: a number or None\n default is 1e-14 to threshold regions that are labelled 0. If you want\n to use 0 as a label, set threshold to None.\n\n cmap : matplotlib colormap str or colormap object, default 'gist_ncar'\n To use for plotting of the rois. Either a string which is a name\n of a matplotlib colormap, or a matplotlib colormap object.\n\n alpha : float, default is 'auto'\n Alpha level of the mesh (not the stat_map). If default,\n alpha will default to .5 when no bg_map is passed\n and to 1 if a bg_map is passed.\n\n bg_on_data : bool, default is False\n If True, and a bg_map is specified, the stat_map data is multiplied\n by the background image, so that e.g. sulcal depth is visible beneath\n the stat_map. 
Beware that this non-uniformly changes the stat_map\n values according to e.g the sulcal depth.\n\n darkness : float, between 0 and 1, default is 1\n Specifying the darkness of the background image. 1 indicates that the\n original values of the background are used. .5 indicates the background\n values are reduced by half before being applied.\n\n title : str, optional\n Figure title.\n\n output_file: str, or None, optional\n The name of an image file to export plot to. Valid extensions\n are .png, .pdf, .svg. If output_file is not None, the plot\n is saved to a file, and the display is closed.\n\n axes: Axes instance | None\n The axes instance to plot to. The projection must be '3d' (e.g.,\n `plt.subplots(subplot_kw={'projection': '3d'})`).\n If None, a new axes is created.\n\n figure: Figure instance | None\n The figure to plot to. If None, a new figure is created.\n\n See Also\n --------\n nilearn.datasets.fetch_surf_fsaverage: For surface data object to be\n used as background map for this plotting function.\n\n nilearn.plotting.plot_surf: For brain surface visualization.\n \"\"\"\n\n # preload roi and mesh to determine vmin, vmax and give more useful error\n # messages in case of wrong inputs\n\n roi = load_surf_data(roi_map)\n vmin, vmax = np.min(roi), 1 + np.max(roi)\n\n mesh = load_surf_mesh(surf_mesh)\n\n if len(roi.shape) is not 1:\n raise ValueError('roi_map can only have one dimension but has '\n '%i dimensions' % len(roi.shape))\n if roi.shape[0] != mesh[0].shape[0]:\n raise ValueError('roi_map does not have the same number of vertices '\n 'as the mesh. If you have a list of indices for the '\n 'ROI you can convert them into a ROI map like this:\\n'\n 'roi_map = np.zeros(n_vertices)\\n'\n 'roi_map[roi_idx] = 1')\n\n display = plot_surf(mesh, surf_map=roi, bg_map=bg_map,\n hemi=hemi, view=view, avg_method='median',\n threshold=threshold, cmap=cmap, alpha=alpha,\n bg_on_data=bg_on_data, darkness=darkness,\n vmin=vmin, vmax=vmax, title=title,\n output_file=output_file, axes=axes,\n figure=figure, **kwargs)\n\n return display\n",
"import warnings\nfrom io import BytesIO\n\nimport numpy as np\nfrom numpy.testing import assert_raises\n\nfrom nibabel import Nifti1Image\n\nfrom nilearn import datasets, image\nfrom nilearn.plotting import html_stat_map\nfrom nilearn.image import new_img_like\nfrom ..js_plotting_utils import colorscale\nfrom ..._utils.compat import _basestring\n\n\ndef _check_html(html_view):\n \"\"\" Check the presence of some expected code in the html viewer\n \"\"\"\n assert isinstance(html_view, html_stat_map.StatMapView)\n assert \"var brain =\" in str(html_view)\n assert \"overlayImg\" in str(html_view)\n\n\ndef _simulate_img(affine=np.eye(4)):\n \"\"\" Simulate data with one \"spot\"\n Returns: img, data\n \"\"\"\n data = np.zeros([8, 8, 8])\n data[4, 4, 4] = 1\n img = Nifti1Image(data, affine)\n return img, data\n\n\ndef _check_affine(affine):\n \"\"\" Check positive, isotropic, near-diagonal affine.\n \"\"\"\n assert(affine[0, 0] == affine[1, 1])\n assert(affine[2, 2] == affine[1, 1])\n assert(affine[0, 0] > 0)\n\n A, b = image.resampling.to_matrix_vector(affine)\n assert np.all((np.abs(A) > 0.001).sum(axis=0) == 1), (\n \"the affine transform was not near-diagonal\")\n\n\ndef test_data_to_sprite():\n\n # Simulate data and turn into sprite\n data = np.zeros([8, 8, 8])\n data[2:6, 2:6, 2:6] = 1\n sprite = html_stat_map._data_to_sprite(data)\n\n # Generate ground truth for the sprite\n Z = np.zeros([8, 8])\n Zr = np.zeros([2, 8])\n Cr = np.tile(np.array([[0, 0, 1, 1, 1, 1, 0, 0]]), [4, 1])\n C = np.concatenate((Zr, Cr, Zr), axis=0)\n gtruth = np.concatenate((np.concatenate((Z, Z, C), axis=1),\n np.concatenate((C, C, C), axis=1),\n np.concatenate((Z, Z, Z), axis=1)),\n axis=0)\n\n assert sprite.shape == gtruth.shape, \"shape of sprite not as expected\"\n assert (sprite == gtruth).all(), \"simulated sprite not as expected\"\n\n\ndef test_threshold_data():\n\n data = np.arange(-3, 4)\n\n # Check that an 'auto' threshold leaves at least one element\n data_t, mask, thresh = html_stat_map._threshold_data(data, threshold='auto')\n gtruth_m = np.array([False, True, True, True, True, True, False])\n gtruth_d = np.array([-3, 0, 0, 0, 0, 0, 3])\n assert (mask == gtruth_m).all()\n assert (data_t == gtruth_d).all()\n\n # Check that threshold=None keeps everything\n data_t, mask, thresh = html_stat_map._threshold_data(data, threshold=None)\n assert np.all(np.logical_not(mask))\n assert np.all(data_t == data)\n\n # Check positive threshold works\n data_t, mask, thresh = html_stat_map._threshold_data(data, threshold=1)\n gtruth = np.array([False, False, True, True, True, False, False])\n assert (mask == gtruth).all()\n\n # Check 0 threshold works\n data_t, mask, thresh = html_stat_map._threshold_data(data, threshold=0)\n gtruth = np.array([False, False, False, True, False, False, False])\n assert (mask == gtruth).all()\n\n # Check that overly lenient threshold returns array\n data = np.arange(3, 10)\n data_t, mask, thresh = html_stat_map._threshold_data(data, threshold=2)\n gtruth = np.full(7, False)\n assert (mask == gtruth).all()\n\n\ndef test_save_sprite():\n \"\"\"This test covers _save_sprite as well as _bytesIO_to_base64\n \"\"\"\n\n # Generate a simulated volume with a square inside\n data = np.zeros([2, 1, 1])\n data[0, 0, 0] = 1\n mask = data > 0\n\n # Save the sprite using BytesIO\n sprite_io = BytesIO()\n html_stat_map._save_sprite(data, sprite_io, vmin=0, vmax=1,\n mask=mask, format='png')\n\n # Load the sprite back in base64\n sprite_base64 = html_stat_map._bytesIO_to_base64(sprite_io)\n\n # Check 
the sprite is correct\n assert sprite_base64.startswith('iVBORw0KG')\n assert sprite_base64.endswith('ABJRU5ErkJggg==')\n\n\ndef test_save_cmap():\n \"\"\"This test covers _save_cmap as well as _bytesIO_to_base64\n \"\"\"\n\n # Save the cmap using BytesIO\n cmap_io = BytesIO()\n html_stat_map._save_cm(cmap_io, 'cold_hot', format='png', n_colors=2)\n\n # Load the colormap back in base64\n cmap_base64 = html_stat_map._bytesIO_to_base64(cmap_io)\n\n # Check the colormap is correct\n assert cmap_base64.startswith('iVBORw0KG')\n assert cmap_base64.endswith('ElFTkSuQmCC')\n\n\ndef test_mask_stat_map():\n\n # Generate simple simulated data with one \"spot\"\n img, data = _simulate_img()\n\n # Try not to threshold anything\n mask_img, img, data_t, thre = html_stat_map._mask_stat_map(img,\n threshold=None)\n assert np.max(mask_img.get_data()) == 0\n\n # Now threshold at zero\n mask_img, img, data_t, thre = html_stat_map._mask_stat_map(img,\n threshold=0)\n assert np.min((data == 0) == mask_img.get_data())\n\n\ndef test_load_bg_img():\n\n # Generate simple simulated data with non-diagonal affine\n affine = np.eye(4)\n affine[0, 0] = -1\n affine[0, 1] = 0.1\n img, data = _simulate_img(affine)\n\n # use empty bg_img\n bg_img, bg_min, bg_max, black_bg = html_stat_map._load_bg_img(img,\n bg_img=None)\n # Check positive isotropic, near-diagonal affine\n _check_affine(bg_img.affine)\n\n # Try to load the default background\n bg_img, bg_min, bg_max, black_bg = html_stat_map._load_bg_img(img)\n\n # Check positive isotropic, near-diagonal affine\n _check_affine(bg_img.affine)\n\n\ndef test_resample_stat_map():\n\n # Start with simple simulated data\n bg_img, data = _simulate_img()\n\n # Now double the voxel size and mess with the affine\n affine = 2 * np.eye(4)\n affine[3, 3] = 1\n affine[0, 1] = 0.1\n stat_map_img = Nifti1Image(data, affine)\n\n # Make a mask for the stat image\n mask_img = new_img_like(stat_map_img, data > 0, stat_map_img.affine)\n\n # Now run the resampling\n stat_map_img, mask_img = html_stat_map._resample_stat_map(\n stat_map_img, bg_img, mask_img, resampling_interpolation='nearest')\n\n # Check positive isotropic, near-diagonal affine\n _check_affine(stat_map_img.affine)\n _check_affine(mask_img.affine)\n\n # Check voxel size matches bg_img\n assert stat_map_img.affine[0, 0] == bg_img.affine[0, 0], (\n \"stat_map_img was not resampled at the resolution of background\")\n assert mask_img.affine[0, 0] == bg_img.affine[0, 0], (\n \"mask_img was not resampled at the resolution of background\")\n\n\ndef test_json_view_params():\n\n # Try to generate some sprite parameters\n params = html_stat_map._json_view_params(\n shape=[4, 4, 4], affine=np.eye(4), vmin=0, vmax=1,\n cut_slices=[1, 1, 1], black_bg=True, opacity=0.5, draw_cross=False,\n annotate=True, title=\"A test\", colorbar=True, value=True)\n\n # Just check that a structure was generated,\n # and test a single parameter\n assert params['overlay']['opacity'] == 0.5\n\n\ndef test_json_view_size():\n\n # Build some minimal sprite Parameters\n sprite_params = {'nbSlice': {'X': 4, 'Y': 4, 'Z': 4}}\n width, height = html_stat_map._json_view_size(sprite_params)\n\n # This is a simple case: height is 4 pixels, width 3 x 4 = 12 pixels\n # with an additional 120% height factor for annotations and margins\n ratio = 1.2 * 4 / 12\n\n # check we received the expected width and height\n width_exp = 600\n height_exp = np.ceil(ratio * 600)\n assert width == width_exp, \"html viewer does not have expected width\"\n assert height == height_exp, \"html 
viewer does not have expected height\"\n\n\ndef test_json_view_data():\n\n # simple simulated data for stat_img and background\n bg_img, data = _simulate_img()\n stat_map_img, data = _simulate_img()\n\n # make a mask\n mask_img = new_img_like(stat_map_img, data > 0, stat_map_img.affine)\n\n # Get color bar and data ranges\n colors = colorscale('cold_hot', data.ravel(), threshold=0,\n symmetric_cmap=True, vmax=1)\n\n # Build a sprite\n json_view = html_stat_map._json_view_data(\n bg_img, stat_map_img, mask_img, bg_min=0, bg_max=1, colors=colors,\n cmap='cold_hot', colorbar=True)\n\n # Check the presence of critical fields\n assert isinstance(json_view['bg_base64'], _basestring)\n assert isinstance(json_view['stat_map_base64'], _basestring)\n assert isinstance(json_view['cm_base64'], _basestring)\n\n return json_view, data\n\n\ndef test_json_view_to_html():\n\n # Re use the data simulated in another test\n json_view, data = test_json_view_data()\n json_view['params'] = html_stat_map._json_view_params(\n data.shape, np.eye(4), vmin=0, vmax=1, cut_slices=[1, 1, 1],\n black_bg=True, opacity=1, draw_cross=True, annotate=False,\n title=\"test\", colorbar=True)\n\n # Create a viewer\n html_view = html_stat_map._json_view_to_html(json_view)\n _check_html(html_view)\n\n\ndef test_get_cut_slices():\n\n # Generate simple simulated data with one \"spot\"\n img, data = _simulate_img()\n\n # Use automatic selection of coordinates\n cut_slices = html_stat_map._get_cut_slices(img, cut_coords=None,\n threshold=None)\n assert (cut_slices == [4, 4, 4]).all()\n\n # Check that using a single number for cut_coords raises an error\n assert_raises(ValueError, html_stat_map._get_cut_slices,\n img, cut_coords=4, threshold=None)\n\n # Check that it is possible to manually specify coordinates\n cut_slices = html_stat_map._get_cut_slices(img, cut_coords=[2, 2, 2],\n threshold=None)\n assert (cut_slices == [2, 2, 2]).all()\n\n # Check that the affine does not change where the cut is done\n affine = 2 * np.eye(4)\n img = Nifti1Image(data, affine)\n cut_slices = html_stat_map._get_cut_slices(img, cut_coords=None,\n threshold=None)\n assert (cut_slices == [4, 4, 4]).all()\n\n\ndef test_view_img():\n mni = datasets.load_mni152_template()\n with warnings.catch_warnings(record=True) as w:\n # Create a fake functional image by resample the template\n img = image.resample_img(mni, target_affine=3 * np.eye(3))\n html_view = html_stat_map.view_img(img)\n _check_html(html_view)\n html_view = html_stat_map.view_img(img, threshold='95%')\n _check_html(html_view)\n html_view = html_stat_map.view_img(img, bg_img=mni)\n _check_html(html_view)\n html_view = html_stat_map.view_img(img, bg_img=None)\n _check_html(html_view)\n html_view = html_stat_map.view_img(img, threshold=2., vmax=4.)\n _check_html(html_view)\n html_view = html_stat_map.view_img(img, symmetric_cmap=False)\n img_4d = image.new_img_like(img, img.get_data()[:, :, :, np.newaxis])\n assert len(img_4d.shape) == 4\n html_view = html_stat_map.view_img(img_4d, threshold=2., vmax=4.)\n _check_html(html_view)\n html_view = html_stat_map.view_img(img_4d, threshold=1e6)\n _check_html(html_view)\n\n # Check that all warnings were expected\n warnings_set = set(warning_.category for warning_ in w)\n expected_set = set([FutureWarning, UserWarning,\n DeprecationWarning])\n assert warnings_set.issubset(expected_set), (\n \"the following warnings were not expected: {}\").format(\n warnings_set.difference(expected_set))\n",
"# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-\n# vi: set ft=python sts=4 ts=4 sw=4 et:\n\"\"\"\nMatplotlib colormaps useful for neuroimaging.\n\"\"\"\nimport numpy as _np\n\nfrom matplotlib import cm as _cm\nfrom matplotlib import colors as _colors\n\n################################################################################\n# Custom colormaps for two-tailed symmetric statistics\n################################################################################\n\n################################################################################\n# Helper functions\n\ndef _rotate_cmap(cmap, swap_order=('green', 'red', 'blue')):\n \"\"\" Utility function to swap the colors of a colormap.\n \"\"\"\n orig_cdict = cmap._segmentdata.copy()\n\n cdict = dict()\n cdict['green'] = [(p, c1, c2)\n for (p, c1, c2) in orig_cdict[swap_order[0]]]\n cdict['blue'] = [(p, c1, c2)\n for (p, c1, c2) in orig_cdict[swap_order[1]]]\n cdict['red'] = [(p, c1, c2)\n for (p, c1, c2) in orig_cdict[swap_order[2]]]\n\n return cdict\n\n\ndef _pigtailed_cmap(cmap, swap_order=('green', 'red', 'blue')):\n \"\"\" Utility function to make a new colormap by concatenating a\n colormap with its reverse.\n \"\"\"\n orig_cdict = cmap._segmentdata.copy()\n\n cdict = dict()\n cdict['green'] = [(0.5*(1-p), c1, c2)\n for (p, c1, c2) in reversed(orig_cdict[swap_order[0]])]\n cdict['blue'] = [(0.5*(1-p), c1, c2)\n for (p, c1, c2) in reversed(orig_cdict[swap_order[1]])]\n cdict['red'] = [(0.5*(1-p), c1, c2)\n for (p, c1, c2) in reversed(orig_cdict[swap_order[2]])]\n\n for color in ('red', 'green', 'blue'):\n cdict[color].extend([(0.5*(1+p), c1, c2) \n for (p, c1, c2) in orig_cdict[color]])\n\n return cdict\n\n\ndef _concat_cmap(cmap1, cmap2):\n \"\"\" Utility function to make a new colormap by concatenating two\n colormaps.\n \"\"\"\n cdict = dict()\n\n cdict1 = cmap1._segmentdata.copy()\n cdict2 = cmap2._segmentdata.copy()\n if not hasattr(cdict1['red'], '__call__'):\n for c in ['red', 'green', 'blue']:\n cdict[c] = [(0.5*p, c1, c2) for (p, c1, c2) in cdict1[c]]\n else:\n for c in ['red', 'green', 'blue']:\n cdict[c] = []\n ps = _np.linspace(0, 1, 10)\n colors = cmap1(ps)\n for p, (r, g, b, a) in zip(ps, colors):\n cdict['red'].append((.5*p, r, r))\n cdict['green'].append((.5*p, g, g))\n cdict['blue'].append((.5*p, b, b))\n if not hasattr(cdict2['red'], '__call__'):\n for c in ['red', 'green', 'blue']:\n cdict[c].extend([(0.5*(1+p), c1, c2) for (p, c1, c2) in cdict2[c]])\n else:\n ps = _np.linspace(0, 1, 10)\n colors = cmap2(ps)\n for p, (r, g, b, a) in zip(ps, colors):\n cdict['red'].append((.5*(1+p), r, r))\n cdict['green'].append((.5*(1+p), g, g))\n cdict['blue'].append((.5*(1+p), b, b))\n\n return cdict\n\n\ndef alpha_cmap(color, name='', alpha_min=0.5, alpha_max=1.):\n \"\"\" Return a colormap with the given color, and alpha going from\n zero to 1.\n\n Parameters\n ----------\n color: (r, g, b), or a string\n A triplet of floats ranging from 0 to 1, or a matplotlib\n color string\n \"\"\"\n red, green, blue = _colors.colorConverter.to_rgb(color)\n if name == '' and hasattr(color, 'startswith'):\n name = color\n cmapspec = [(red, green, blue, 1.),\n (red, green, blue, 1.),\n ]\n cmap = _colors.LinearSegmentedColormap.from_list(\n '%s_transparent' % name, cmapspec, _cm.LUTSIZE)\n cmap._init()\n cmap._lut[:, -1] = _np.linspace(alpha_min, alpha_max, cmap._lut.shape[0])\n cmap._lut[-1, -1] = 0\n return cmap\n\n\n\n################################################################################\n# Our 
colormaps definition\n_cmaps_data = dict(\n cold_hot = _pigtailed_cmap(_cm.hot),\n cold_white_hot = _pigtailed_cmap(_cm.hot_r),\n brown_blue = _pigtailed_cmap(_cm.bone),\n cyan_copper = _pigtailed_cmap(_cm.copper),\n cyan_orange = _pigtailed_cmap(_cm.YlOrBr_r),\n blue_red = _pigtailed_cmap(_cm.Reds_r),\n brown_cyan = _pigtailed_cmap(_cm.Blues_r),\n purple_green = _pigtailed_cmap(_cm.Greens_r,\n swap_order=('red', 'blue', 'green')),\n purple_blue = _pigtailed_cmap(_cm.Blues_r,\n swap_order=('red', 'blue', 'green')),\n blue_orange = _pigtailed_cmap(_cm.Oranges_r,\n swap_order=('green', 'red', 'blue')),\n black_blue = _rotate_cmap(_cm.hot),\n black_purple = _rotate_cmap(_cm.hot,\n swap_order=('blue', 'red', 'green')),\n black_pink = _rotate_cmap(_cm.hot,\n swap_order=('blue', 'green', 'red')),\n black_green = _rotate_cmap(_cm.hot,\n swap_order=('red', 'blue', 'green')),\n black_red = _cm.hot._segmentdata.copy(),\n)\n\nif hasattr(_cm, 'ocean'):\n # MPL 0.99 doesn't have Ocean\n _cmaps_data['ocean_hot'] = _concat_cmap(_cm.ocean, _cm.hot_r)\nif hasattr(_cm, 'afmhot'): # or afmhot\n _cmaps_data['hot_white_bone'] = _concat_cmap(_cm.afmhot, _cm.bone_r)\n _cmaps_data['hot_black_bone'] = _concat_cmap(_cm.afmhot_r, _cm.bone)\n\n# Copied from matplotlib 1.2.0 for matplotlib 0.99 compatibility.\n_bwr_data = ((0.0, 0.0, 1.0), (1.0, 1.0, 1.0), (1.0, 0.0, 0.0))\n_cmaps_data['bwr'] = _colors.LinearSegmentedColormap.from_list(\n 'bwr', _bwr_data)._segmentdata.copy()\n\n################################################################################\n# Build colormaps and their reverse.\n_cmap_d = dict()\n\nfor _cmapname in list(_cmaps_data.keys()): # needed as dict changes within loop\n _cmapname_r = _cmapname + '_r'\n _cmapspec = _cmaps_data[_cmapname]\n _cmaps_data[_cmapname_r] = _cm.revcmap(_cmapspec)\n _cmap_d[_cmapname] = _colors.LinearSegmentedColormap(\n _cmapname, _cmapspec, _cm.LUTSIZE)\n _cmap_d[_cmapname_r] = _colors.LinearSegmentedColormap(\n _cmapname_r, _cmaps_data[_cmapname_r],\n _cm.LUTSIZE)\n\n################################################################################\n# A few transparent colormaps\nfor color, name in (((1, 0, 0), 'red'),\n ((0, 1, 0), 'green'),\n ((0, 0, 1), 'blue'),\n ):\n _cmap_d['%s_transparent' % name] = alpha_cmap(color, name=name)\n _cmap_d['%s_transparent_full_alpha_range' % name] = alpha_cmap(\n color, alpha_min=0,\n alpha_max=1, name=name)\n\n###############################################################################\n# HCP Connectome Workbench colormaps\n# As seen in https://github.com/Washington-University/workbench src/Pallete\nroy_big_bl = _np.array([(255, 255, 0), (255, 200, 0),\n (255, 120, 0), (255, 0, 0),\n (200, 0, 0), (150, 0, 0),\n (100, 0, 0), (60, 0, 0),\n (0, 0, 0), (0, 0, 80),\n (0, 0, 170), (75, 0, 125),\n (125, 0, 160), (75, 125, 0),\n (0, 200, 0), (0, 255, 0),\n (0, 255, 255), (0, 255, 255)][::-1]) / 255\n\nvideen_style = ['#000000', '#bbbbbb', '#dddddd', '#ffffff',\n '#ff388d', '#e251e2', '#10b010', '#00ff00',\n '#00ffff', '#000000', '#660033', '#33334c',\n '#4c4c7f', '#7f7fcc', '#00ff00', '#10b010',\n '#ffff00', '#ff9900', '#ff6900', '#ff0000']\n\n_cmap_d['roy_big_bl'] = _colors.LinearSegmentedColormap.from_list(\n 'roy_big_bl', roy_big_bl.tolist())\n_cmap_d['videen_style'] = _colors.LinearSegmentedColormap.from_list(\n 'videen_style', videen_style)\n\n# Save colormaps in the scope of the module\nlocals().update(_cmap_d)\n# Register cmaps in matplotlib too\nfor k, v in _cmap_d.items():\n _cm.register_cmap(name=k, 
cmap=v)\n\n\n################################################################################\n# Utility to replace a colormap by another in an interval\n################################################################################\n\ndef dim_cmap(cmap, factor=.3, to_white=True):\n \"\"\" Dim a colormap to white, or to black.\n \"\"\"\n assert factor >= 0 and factor <=1, ValueError(\n 'Dimming factor must be larger than 0 and smaller than 1, %s was passed.' \n % factor)\n if to_white:\n dimmer = lambda c: 1 - factor*(1-c)\n else:\n dimmer = lambda c: factor*c\n cdict = cmap._segmentdata.copy()\n for c_index, color in enumerate(('red', 'green', 'blue')):\n color_lst = list()\n for value, c1, c2 in cdict[color]:\n color_lst.append((value, dimmer(c1), dimmer(c2)))\n cdict[color] = color_lst\n\n return _colors.LinearSegmentedColormap(\n '%s_dimmed' % cmap.name,\n cdict,\n _cm.LUTSIZE)\n\n\ndef replace_inside(outer_cmap, inner_cmap, vmin, vmax):\n \"\"\" Replace a colormap by another inside a pair of values.\n \"\"\"\n assert vmin < vmax, ValueError('vmin must be smaller than vmax')\n assert vmin >= 0, ValueError('vmin must be larger than 0, %s was passed.' \n % vmin)\n assert vmax <= 1, ValueError('vmax must be smaller than 1, %s was passed.' \n % vmax)\n outer_cdict = outer_cmap._segmentdata.copy()\n inner_cdict = inner_cmap._segmentdata.copy()\n\n cdict = dict()\n for this_cdict, cmap in [(outer_cdict, outer_cmap),\n (inner_cdict, inner_cmap)]:\n if hasattr(this_cdict['red'], '__call__'):\n ps = _np.linspace(0, 1, 25)\n colors = cmap(ps)\n this_cdict['red'] = list()\n this_cdict['green'] = list()\n this_cdict['blue'] = list()\n for p, (r, g, b, a) in zip(ps, colors):\n this_cdict['red'].append((p, r, r))\n this_cdict['green'].append((p, g, g))\n this_cdict['blue'].append((p, b, b))\n\n\n for c_index, color in enumerate(('red', 'green', 'blue')):\n color_lst = list()\n\n for value, c1, c2 in outer_cdict[color]:\n if value >= vmin:\n break\n color_lst.append((value, c1, c2))\n\n color_lst.append((vmin, outer_cmap(vmin)[c_index], \n inner_cmap(vmin)[c_index]))\n\n for value, c1, c2 in inner_cdict[color]:\n if value <= vmin:\n continue\n if value >= vmax:\n break\n color_lst.append((value, c1, c2))\n\n color_lst.append((vmax, inner_cmap(vmax)[c_index],\n outer_cmap(vmax)[c_index]))\n\n for value, c1, c2 in outer_cdict[color]:\n if value <= vmax:\n continue\n color_lst.append((value, c1, c2))\n\n cdict[color] = color_lst\n\n return _colors.LinearSegmentedColormap(\n '%s_inside_%s' % (inner_cmap.name, outer_cmap.name),\n cdict,\n _cm.LUTSIZE)\n\n\n",
"from nose.tools import assert_equal, assert_raises\nimport numpy as np\nfrom nilearn.decoding.objective_functions import _gradient_id, _squared_loss\nfrom nilearn.decoding.space_net_solvers import (\n _tvl1_objective, _tvl1_objective_from_gradient, tvl1_solver)\n\n\ndef test_tvl1_from_gradient(size=5, n_samples=10, random_state=42):\n rng = np.random.RandomState(random_state)\n shape = [size] * 3\n n_voxels = np.prod(shape)\n X = rng.randn(n_samples, n_voxels)\n y = rng.randn(n_samples)\n w = rng.randn(*shape)\n mask = np.ones_like(w).astype(np.bool)\n for alpha in [0., 1e-1, 1e-3]:\n for l1_ratio in [0., .5, 1.]:\n gradid = _gradient_id(w, l1_ratio=l1_ratio)\n assert_equal(_tvl1_objective(\n X, y, w.copy().ravel(), alpha, l1_ratio, mask),\n _squared_loss(X, y, w.copy().ravel(),\n compute_grad=False\n ) + alpha * _tvl1_objective_from_gradient(\n gradid))\n\n\ndef test_tvl1_objective_raises_value_error_if_invalid_loss():\n assert_raises(ValueError, lambda loss: _tvl1_objective(\n None, None, None, None, None, None, loss=loss),\n \"invalidloss\")\n\n\ndef test_tvl1_solver_raises_value_error_if_invalid_loss():\n assert_raises(ValueError, lambda loss: tvl1_solver(\n np.array([[1]]), None, None, None, None, loss=loss),\n \"invalidloss\")\n"
] | [
[
"matplotlib.pyplot.imshow",
"matplotlib.pyplot.title",
"numpy.arange",
"matplotlib.pyplot.get_cmap",
"numpy.ones",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.axis",
"matplotlib.pyplot.subplots_adjust",
"matplotlib.pyplot.figure"
],
[
"numpy.nanmax",
"numpy.linspace",
"numpy.nanmin",
"matplotlib.pyplot.cm.gray_r",
"numpy.max",
"numpy.mean",
"numpy.arange",
"matplotlib.cm.ScalarMappable",
"matplotlib.pyplot.close",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.cm.get_cmap",
"numpy.min",
"numpy.median",
"matplotlib.colors.LinearSegmentedColormap.from_list",
"numpy.abs",
"matplotlib.colors.Normalize",
"numpy.ones",
"matplotlib.cm.get_cmap",
"matplotlib.colorbar.make_axes"
],
[
"numpy.logical_not",
"numpy.abs",
"numpy.arange",
"numpy.eye",
"numpy.full",
"numpy.concatenate",
"numpy.all",
"numpy.ceil",
"numpy.testing.assert_raises",
"numpy.array",
"numpy.zeros"
],
[
"matplotlib.cm.register_cmap",
"numpy.linspace",
"matplotlib.colors.LinearSegmentedColormap",
"matplotlib.colors.colorConverter.to_rgb",
"matplotlib.cm.revcmap",
"matplotlib.colors.LinearSegmentedColormap.from_list",
"numpy.array",
"matplotlib.cm.hot._segmentdata.copy"
],
[
"numpy.array",
"numpy.random.RandomState",
"numpy.ones_like",
"numpy.prod"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
raznem/sac_ppo | [
"c18e9bd32a70fcc4bc413565c6b885d7560b8b5a"
] | [
"rltoolkit/rl.py"
] | [
"import logging\nfrom pathlib import Path\nfrom typing import Any, Optional, Tuple, Union\n\nimport gym\nimport torch\nimport pickle as pkl\n\nfrom rltoolkit import config, utils\nfrom rltoolkit.buffer import Memory\nfrom rltoolkit.stats_logger import StatsLogger\nfrom rltoolkit.tensorboard_logger import TensorboardWriter\n\nlogger = logging.getLogger(__name__)\n\n\nclass MetaLearner:\n def __init__(\n self,\n env_name: str,\n use_gpu: bool,\n debug_mode: bool = config.DEBUG_MODE,\n tensorboard_dir: Union[str, None] = config.TENSORBOARD_DIR,\n tensorboard_comment: str = config.TENSORBOARD_COMMENT,\n ):\n f\"\"\"Class with parameters common for RL and other interactions with environment\n\n Args:\n env_name (str): Name of the gym environment.\n use_gpu (bool): Use CUDA.\n debug_mode (bool, optional): Log additional info.\n Defaults to { config.DEBUG_MODE }\n tensorboard_dir (Union[str, None], optional): Path to tensorboard logs.\n Defaults to { config.TENSORBOARD_DIR }.\n tensorboard_comment (str, optional): Comment for tensorboard files.\n Defaults to { config.TENSORBOARD_COMMENT }.\n \"\"\"\n self.env_name = env_name\n if use_gpu and torch.cuda.is_available():\n self.device = torch.device(\"cuda\")\n else:\n self.device = torch.device(\"cpu\")\n\n self.env = gym.make(self.env_name)\n self.discrete = isinstance(self.env.action_space, gym.spaces.Discrete)\n self.ob_dim = self.env.observation_space.shape[0]\n if self.discrete:\n self.ac_dim = self.env.action_space.n\n self.ac_lim = None\n else:\n self.ac_dim = self.env.action_space.shape[0]\n self.ac_lim = torch.tensor(self.env.action_space.high, device=self.device)\n\n self.obs_mean = torch.zeros(self.ob_dim, device=self.device)\n self.obs_std = torch.ones(self.ob_dim, device=self.device)\n\n self.iteration = 0 # used in tensorboard\n\n self.opt = torch.optim.Adam\n self.loss = {}\n\n self.debug_mode = debug_mode\n\n self.tensorboard_writer = None\n self.tensorboard_comment = (\n \"_\" + tensorboard_comment if tensorboard_comment else \"\"\n )\n self.tensorboard_dir = tensorboard_dir\n\n def run_tensorboard_if_needed(self):\n if self.tensorboard_writer is None and (self.tensorboard_dir is not None):\n self.tensorboard_writer = TensorboardWriter(\n env_name=self.env_name,\n log_dir=self.tensorboard_dir,\n filename=self.filename,\n render=self.render,\n )\n\n def log_obs_mean_std_tensorboard(self):\n \"\"\"\n Log mean and std of observations in the tensorboard.\n \"\"\"\n self.run_tensorboard_if_needed()\n self.tensorboard_writer.log_obs_mean_std(\n self.iteration, self.obs_mean, self.obs_std\n )\n\n def update_obs_mean_std(self, buffer: Memory) -> Memory:\n \"\"\"\n Update running average of mean and stds based on the buffer.\n\n Args:\n buffer (Memory)\n\n Returns:\n Memory\n \"\"\"\n buffer.update_obs_mean_std()\n self.obs_mean = buffer.obs_mean\n self.obs_std = buffer.obs_std\n\n if self.debug_mode and self.tensorboard_dir is not None:\n self.log_obs_mean_std_tensorboard()\n return buffer\n\n\nclass RL(MetaLearner):\n def __init__(\n self,\n env_name: str = config.ENV_NAME,\n gamma: float = config.GAMMA,\n stats_freq: int = config.STATS_FREQ,\n test_episodes: int = config.TEST_EPISODES,\n batch_size: int = config.BATCH_SIZE,\n iterations: int = config.ITERATIONS,\n max_frames: int = None,\n return_done: Union[int, None] = config.RETURN_DONE,\n log_dir: str = config.LOG_DIR,\n use_gpu: bool = config.USE_GPU,\n verbose: int = config.VERBOSE,\n render: bool = config.RENDER,\n *args,\n **kwargs,\n ):\n f\"\"\"Basic parent class for 
reinforcement learning algorithms.\n\n Args:\n env_name (str, optional): Name of the gym environment.\n Defaults to { config.ENV_NAME }.\n gamma (float, optional): Discount factor. Defaults to { config.GAMMA }.\n stats_freq (int, optional): Frequency of logging the progress.\n Defaults to { config.STATS_FREQ }.\n batch_size (int, optional): Number of frames used for one algorithm step\n (could be higher because batch collection stops when rollout ends).\n Defaults to { config.BATCH_SIZE }.\n iterations (int, optional): Number of algorithms iterations.\n Defaults to { config.ITERATIONS }.\n max_frames (int, optional): Limit of frames for training. Defaults to\n { None }.\n return_done (Union[int, None], optional): target return, which will stop\n training if reached. Defaults to { config.RETURN_DONE }.\n log_dir (str, optional): Path for basic logs which includes final model.\n Defaults to { config.LOG_DIR }.\n use_gpu (bool, optional): Use CUDA. Defaults to { config.USE_GPU }.\n verbose (int, optional): Verbose level. Defaults to { config.VERBOSE }.\n render (bool, optional): Render rollouts to tensorboard.\n Defaults to { config.RENDER }.\n debug_mode (bool, optional): Log additional info.\n Defaults to { config.DEBUG_MODE }\n tensorboard_dir (Union[str, None], optional): Path to tensorboard logs.\n Defaults to { config.TENSORBOARD_DIR }.\n tensorboard_comment (str, optional): Comment for tensorboard files.\n Defaults to { config.TENSORBOARD_COMMENT }.\n \"\"\"\n super().__init__(env_name, use_gpu, *args, **kwargs)\n assert iterations > 0, f\"Iteration has to be positive not {iterations}\"\n if max_frames is not None:\n assert (\n max_frames <= iterations * batch_size\n ), \"max_frames should be smaller or equal than iterations * batch_size\"\n\n self.max_frames = max_frames\n self.gamma = gamma\n self.stats_freq = stats_freq\n self.test_episodes = test_episodes\n self.batch_size = batch_size\n self.iterations = iterations\n self.return_done = return_done\n if log_dir is not None:\n self.log_dir = Path(log_dir)\n self.log_dir.mkdir(parents=True, exist_ok=True)\n else:\n self.log_dir = log_dir\n self.verbose = verbose\n self.render = render\n\n self.max_ep_len = self.env._max_episode_steps\n\n self.start_time = utils.get_time()\n\n self.hparams = {\n \"hparams/gamma\": self.gamma,\n \"hparams/batch_size\": self.batch_size,\n \"hparams/type\": utils.get_pretty_type_name(self),\n }\n self.shortnames = config.SHORTNAMES\n self.stats_logger = StatsLogger()\n\n def train(self, iterations=None):\n f\"\"\" Train RL model\n\n Args:\n iterations ([type], optional): Number of additional training iterations.\n If None performs number of iterations defined in self.iterations.\n Otherwise increase global counter by this value to run additional steps.\n Defaults to { None }.\n \"\"\"\n self.run_tensorboard_if_needed()\n if iterations:\n self.iterations += iterations\n\n while self.iteration < self.iterations:\n buffer, time_diff = self.perform_iteration()\n self.stats_logger.time_list.append(time_diff)\n running_return = self.stats_logger.calc_running_return(buffer)\n\n if self.return_done is not None and running_return >= self.return_done:\n break\n\n if self.iteration % self.stats_freq == 0:\n self.logs_after_iteration(buffer)\n\n if self.log_dir is not None:\n self.stats_logger.dump_stats(self.log_path)\n\n self.iteration += 1 # used also for logs\n if (\n self.max_frames is not None\n and self.max_frames < self.stats_logger.frames\n ):\n logger.info(f\"Reached max_frames at {self.iteration} 
iteration\") # INFO\n break\n\n self.logs_after_iteration(buffer, done=True)\n\n if self.log_dir is not None:\n self.save()\n\n def test(self, episodes=None):\n f\"\"\"Test policy\n\n Args:\n episodes (int): Number of episodes. Defaults to { None }.\n\n Returns:\n float: mean episode reward\n \"\"\"\n mean_reward = None\n return mean_reward\n\n @utils.measure_time\n def perform_iteration(self):\n raise NotImplementedError\n\n def save_model(self):\n raise NotImplementedError\n\n def check_path(self, path):\n if self.filename is None and path is None:\n raise AttributeError\n elif path is None:\n path = str(self.log_path) + \".pkl\"\n return path\n\n def collect_params_dict(self):\n params_dict = {}\n params_dict[\"actor\"] = self.actor.state_dict()\n params_dict[\"critic\"] = self.critic.state_dict()\n params_dict[\"obs_mean\"] = self.obs_mean\n params_dict[\"obs_std\"] = self.obs_std\n return params_dict\n\n def apply_params_dict(self, params_dict):\n self.actor.load_state_dict(params_dict[\"actor\"])\n self.critic.load_state_dict(params_dict[\"critic\"])\n self.obs_mean = params_dict[\"obs_mean\"]\n self.obs_std = params_dict[\"obs_std\"]\n\n def save(self, path: str = None):\n f\"\"\"Save RL object\n\n Args:\n path (str): Path to save\n \"\"\"\n path = self.check_path(path)\n with open(path, \"wb\") as f:\n params_dict = self.collect_params_dict()\n pkl.dump(params_dict, f)\n\n def load(self, path: str):\n \"\"\"Load RL object\n\n Args:\n path (str): Path to saved RL object\n \"\"\"\n path = self.check_path(path)\n with open(path, \"rb\") as f:\n params_dict = pkl.load(f)\n self.apply_params_dict(params_dict)\n\n @property\n def log_iteration(self):\n return self.iteration // self.stats_freq\n\n @property\n def filename(self):\n suffix = self.get_tensorboard_hparams_suffix()\n suffix += self.tensorboard_comment\n filename = self.start_time + suffix\n return filename\n\n @property\n def log_path(self):\n log_path = Path(self.log_dir)\n log_path = log_path / self.filename\n return log_path\n\n def logs_after_iteration(self, buffer: Memory, done: bool = False):\n f\"\"\"Logs writer\n\n Args:\n buffer (Memory): Buffer used for tensorboard\n done (bool, optional): Finalize tensorboard logging due to last iteration.\n Defaults to { False }.\n \"\"\"\n if self.test_episodes is not None:\n self.stats_logger.test_return = self.test()\n\n running_return = self.stats_logger.running_return\n if self.verbose:\n if done:\n self.stats_logger.task_done(self.iteration)\n else:\n self.stats_logger.log_stats(self.iteration)\n\n self.stats_logger.stats.append([self.iteration, running_return])\n self.stats_logger.reset_time_list()\n\n if self.tensorboard_writer is not None:\n self.add_tensorboard_logs(buffer, done)\n\n def add_tensorboard_logs(self, buffer: Memory, done: bool):\n self.tensorboard_writer.log_running_return(\n self.iteration,\n self.stats_logger.frames,\n self.stats_logger.rollouts,\n self.stats_logger.running_return,\n )\n if self.test_episodes:\n self.tensorboard_writer.log_test_return(\n self.iteration,\n self.stats_logger.frames,\n self.stats_logger.rollouts,\n self.stats_logger.test_return,\n )\n\n if (self.log_iteration % 5) == 0 or done:\n _, rendering_time = self.tensorboard_writer.record_episode(\n self, self.iteration, done\n )\n self.tensorboard_writer.log_returns(self.iteration, buffer)\n self.tensorboard_writer.log_actions(self.iteration, buffer)\n self.tensorboard_writer.log_observations(self.iteration, buffer)\n self.tensorboard_writer.log_loss(self.iteration, self.loss)\n\n 
def get_tensorboard_hparams_suffix(self):\n suffix = \"\"\n for key, val in self.hparams.items():\n if key in self.shortnames.keys():\n key = self.shortnames[key]\n else:\n key = key.split(\"/\")[1]\n if isinstance(val, float):\n val = f\"{val:.2}\"\n else:\n val = str(val)\n suffix += f\"-{key}{val}\"\n\n return suffix\n\n def _get_initial_obs_mean_std(\n self, obs_norm: Any\n ) -> Tuple[Optional[torch.tensor], Optional[torch.tensor]]:\n f\"\"\"\n Check if observations are normalized and if so return initial mean and std,\n None otherwise.\n\n Returns:\n Tuple[Optional[torch.tensor], Optional[torch.tensor]]: obs mean and std\n \"\"\"\n if obs_norm:\n obs_mean = torch.zeros(self.ob_dim, device=self.device)\n obs_std = torch.ones(self.ob_dim, device=self.device)\n else:\n obs_mean = None\n obs_std = None\n return obs_mean, obs_std\n"
] | [
[
"torch.ones",
"torch.zeros",
"torch.tensor",
"torch.cuda.is_available",
"torch.device"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
samggreenberg/pynndescent | [
"f97bc2fe01e4e59c5dad20ed23b9cb47e8182b6c"
] | [
"pynndescent/utils.py"
] | [
"# Author: Leland McInnes <[email protected]>\n#\n# License: BSD 2 clause\n\nimport time\n\nimport numba\nfrom numba.core import types\nimport numba.experimental.structref as structref\nimport numpy as np\n\n\[email protected](\"void(i8[:], i8)\", cache=True)\ndef seed(rng_state, seed):\n \"\"\"Seed the random number generator with a given seed.\"\"\"\n rng_state.fill(seed + 0xFFFF)\n\n\[email protected](\"i4(i8[:])\", cache=True)\ndef tau_rand_int(state):\n \"\"\"A fast (pseudo)-random number generator.\n\n Parameters\n ----------\n state: array of int64, shape (3,)\n The internal state of the rng\n\n Returns\n -------\n A (pseudo)-random int32 value\n \"\"\"\n state[0] = (((state[0] & 4294967294) << 12) & 0xFFFFFFFF) ^ (\n (((state[0] << 13) & 0xFFFFFFFF) ^ state[0]) >> 19\n )\n state[1] = (((state[1] & 4294967288) << 4) & 0xFFFFFFFF) ^ (\n (((state[1] << 2) & 0xFFFFFFFF) ^ state[1]) >> 25\n )\n state[2] = (((state[2] & 4294967280) << 17) & 0xFFFFFFFF) ^ (\n (((state[2] << 3) & 0xFFFFFFFF) ^ state[2]) >> 11\n )\n\n return state[0] ^ state[1] ^ state[2]\n\n\[email protected](\"f4(i8[:])\", cache=True)\ndef tau_rand(state):\n \"\"\"A fast (pseudo)-random number generator for floats in the range [0,1]\n\n Parameters\n ----------\n state: array of int64, shape (3,)\n The internal state of the rng\n\n Returns\n -------\n A (pseudo)-random float32 in the interval [0, 1]\n \"\"\"\n integer = tau_rand_int(state)\n return abs(float(integer) / 0x7FFFFFFF)\n\n\[email protected](\n [\n \"f4(f4[::1])\",\n numba.types.float32(\n numba.types.Array(numba.types.float32, 1, \"C\", readonly=True)\n ),\n ],\n locals={\n \"dim\": numba.types.intp,\n \"i\": numba.types.uint32,\n # \"result\": numba.types.float32, # This provides speed, but causes errors in corner cases\n },\n fastmath=True,\n cache=True,\n)\ndef norm(vec):\n \"\"\"Compute the (standard l2) norm of a vector.\n\n Parameters\n ----------\n vec: array of shape (dim,)\n\n Returns\n -------\n The l2 norm of vec.\n \"\"\"\n result = 0.0\n dim = vec.shape[0]\n for i in range(dim):\n result += vec[i] * vec[i]\n return np.sqrt(result)\n\n\[email protected](cache=True)\ndef rejection_sample(n_samples, pool_size, rng_state):\n \"\"\"Generate n_samples many integers from 0 to pool_size such that no\n integer is selected twice. 
The duplication constraint is achieved via\n rejection sampling.\n\n Parameters\n ----------\n n_samples: int\n The number of random samples to select from the pool\n\n pool_size: int\n The size of the total pool of candidates to sample from\n\n rng_state: array of int64, shape (3,)\n Internal state of the random number generator\n\n Returns\n -------\n sample: array of shape(n_samples,)\n The ``n_samples`` randomly selected elements from the pool.\n \"\"\"\n result = np.empty(n_samples, dtype=np.int64)\n for i in range(n_samples):\n reject_sample = True\n j = 0\n while reject_sample:\n j = tau_rand_int(rng_state) % pool_size\n for k in range(i):\n if j == result[k]:\n break\n else:\n reject_sample = False\n result[i] = j\n return result\n\n\[email protected]\nclass HeapType(types.StructRef):\n pass\n\n\nclass Heap(structref.StructRefProxy):\n @property\n def indices(self):\n return Heap_get_indices(self)\n\n @property\n def distances(self):\n return Heap_get_distances(self)\n\n @property\n def flags(self):\n return Heap_get_flags(self)\n\n\[email protected](cache=True)\ndef Heap_get_flags(self):\n return self.flags\n\n\[email protected](cache=True)\ndef Heap_get_distances(self):\n return self.distances\n\n\[email protected](cache=True)\ndef Heap_get_indices(self):\n return self.indices\n\n\nstructref.define_proxy(Heap, HeapType, [\"indices\", \"distances\", \"flags\"])\n\n# Heap = namedtuple(\"Heap\", (\"indices\", \"distances\", \"flags\"))\n\n\[email protected](cache=True)\ndef make_heap(n_points, size):\n \"\"\"Constructor for the numba enabled heap objects. The heaps are used\n for approximate nearest neighbor search, maintaining a list of potential\n neighbors sorted by their distance. We also flag if potential neighbors\n are newly added to the list or not. Internally this is stored as\n a single ndarray; the first axis determines whether we are looking at the\n array of candidate graph_indices, the array of distances, or the flag array for\n whether elements are new or not. Each of these arrays are of shape\n (``n_points``, ``size``)\n\n Parameters\n ----------\n n_points: int\n The number of graph_data points to track in the heap.\n\n size: int\n The number of items to keep on the heap for each graph_data point.\n\n Returns\n -------\n heap: An ndarray suitable for passing to other numba enabled heap functions.\n \"\"\"\n indices = np.full((int(n_points), int(size)), -1, dtype=np.int32)\n distances = np.full((int(n_points), int(size)), np.infty, dtype=np.float32)\n flags = np.zeros((int(n_points), int(size)), dtype=np.uint8)\n result = (indices, distances, flags)\n\n return result\n\n\[email protected](cache=True)\ndef siftdown(heap1, heap2, elt):\n \"\"\"Restore the heap property for a heap with an out of place element\n at position ``elt``. This works with a heap pair where heap1 carries\n the weights and heap2 holds the corresponding elements.\"\"\"\n while elt * 2 + 1 < heap1.shape[0]:\n left_child = elt * 2 + 1\n right_child = left_child + 1\n swap = elt\n\n if heap1[swap] < heap1[left_child]:\n swap = left_child\n\n if right_child < heap1.shape[0] and heap1[swap] < heap1[right_child]:\n swap = right_child\n\n if swap == elt:\n break\n else:\n heap1[elt], heap1[swap] = heap1[swap], heap1[elt]\n heap2[elt], heap2[swap] = heap2[swap], heap2[elt]\n elt = swap\n\n\[email protected](parallel=True, cache=False)\ndef deheap_sort(indices, distances):\n \"\"\"Given two arrays representing a heap (indices and distances), reorder the \n arrays by increasing distance. 
This is effectively just the second half of\n heap sort (the first half not being required since we already have the\n graph_data in a heap).\n\n Note that this is done in-place.\n\n Parameters\n ----------\n indices : array of shape (n_samples, n_neighbors)\n The graph indices to sort by distance.\n distances : array of shape (n_samples, n_neighbors)\n The corresponding edge distance.\n\n Returns\n -------\n indices, distances: arrays of shape (n_samples, n_neighbors)\n The indices and distances sorted by increasing distance.\n \"\"\"\n for i in numba.prange(indices.shape[0]):\n # starting from the end of the array and moving back\n for j in range(indices.shape[1] - 1, 0, -1):\n indices[i, 0], indices[i, j] = indices[i, j], indices[i, 0]\n distances[i, 0], distances[i, j] = distances[i, j], distances[i, 0]\n\n siftdown(distances[i, :j], indices[i, :j], 0)\n\n return indices, distances\n\n\n# @numba.njit()\n# def smallest_flagged(heap, row):\n# \"\"\"Search the heap for the smallest element that is\n# still flagged.\n#\n# Parameters\n# ----------\n# heap: array of shape (3, n_samples, n_neighbors)\n# The heaps to search\n#\n# row: int\n# Which of the heaps to search\n#\n# Returns\n# -------\n# index: int\n# The index of the smallest flagged element\n# of the ``row``th heap, or -1 if no flagged\n# elements remain in the heap.\n# \"\"\"\n# ind = heap[0][row]\n# dist = heap[1][row]\n# flag = heap[2][row]\n#\n# min_dist = np.inf\n# result_index = -1\n#\n# for i in range(ind.shape[0]):\n# if flag[i] == 1 and dist[i] < min_dist:\n# min_dist = dist[i]\n# result_index = i\n#\n# if result_index >= 0:\n# flag[result_index] = 0.0\n# return int(ind[result_index])\n# else:\n# return -1\n\n\[email protected](parallel=True, locals={\"idx\": numba.types.int64}, cache=False)\ndef new_build_candidates(current_graph, max_candidates, rng_state, n_threads):\n \"\"\"Build a heap of candidate neighbors for nearest neighbor descent. 
For\n each vertex the candidate neighbors are any current neighbors, and any\n vertices that have the vertex as one of their nearest neighbors.\n\n Parameters\n ----------\n current_graph: heap\n The current state of the graph for nearest neighbor descent.\n\n max_candidates: int\n The maximum number of new candidate neighbors.\n\n rng_state: array of int64, shape (3,)\n The internal state of the rng\n\n Returns\n -------\n candidate_neighbors: A heap with an array of (randomly sorted) candidate\n neighbors for each vertex in the graph.\n \"\"\"\n current_indices = current_graph[0]\n current_flags = current_graph[2]\n\n n_vertices = current_indices.shape[0]\n n_neighbors = current_indices.shape[1]\n\n new_candidate_indices = np.full((n_vertices, max_candidates), -1, dtype=np.int32)\n new_candidate_priority = np.full(\n (n_vertices, max_candidates), np.inf, dtype=np.float32\n )\n\n old_candidate_indices = np.full((n_vertices, max_candidates), -1, dtype=np.int32)\n old_candidate_priority = np.full(\n (n_vertices, max_candidates), np.inf, dtype=np.float32\n )\n\n for n in numba.prange(n_threads):\n local_rng_state = rng_state + n\n for i in range(n_vertices):\n for j in range(n_neighbors):\n idx = current_indices[i, j]\n isn = current_flags[i, j]\n\n if idx < 0:\n continue\n\n d = tau_rand(local_rng_state)\n\n if isn:\n if i % n_threads == n:\n checked_heap_push(\n new_candidate_priority[i], new_candidate_indices[i], d, idx\n )\n if idx % n_threads == n:\n checked_heap_push(\n new_candidate_priority[idx],\n new_candidate_indices[idx],\n d,\n i,\n )\n else:\n if i % n_threads == n:\n checked_heap_push(\n old_candidate_priority[i], old_candidate_indices[i], d, idx\n )\n if idx % n_threads == n:\n checked_heap_push(\n old_candidate_priority[idx],\n old_candidate_indices[idx],\n d,\n i,\n )\n\n indices = current_graph[0]\n flags = current_graph[2]\n\n for i in numba.prange(n_vertices):\n for j in range(n_neighbors):\n idx = indices[i, j]\n\n for k in range(max_candidates):\n if new_candidate_indices[i, k] == idx:\n flags[i, j] = 0\n break\n\n return new_candidate_indices, old_candidate_indices\n\n\[email protected](\"b1(u1[::1],i4)\", cache=True)\ndef has_been_visited(table, candidate):\n loc = candidate >> 3\n mask = 1 << (candidate & 7)\n return table[loc] & mask\n\n\[email protected](\"void(u1[::1],i4)\", cache=True)\ndef mark_visited(table, candidate):\n loc = candidate >> 3\n mask = 1 << (candidate & 7)\n table[loc] |= mask\n return\n\n\[email protected](\n \"i4(f4[::1],i4[::1],f4,i4)\",\n fastmath=True,\n locals={\n \"size\": numba.types.intp,\n \"i\": numba.types.uint16,\n \"ic1\": numba.types.uint16,\n \"ic2\": numba.types.uint16,\n \"i_swap\": numba.types.uint16,\n },\n cache=True,\n)\ndef simple_heap_push(priorities, indices, p, n):\n if p >= priorities[0]:\n return 0\n\n size = priorities.shape[0]\n\n # insert val at position zero\n priorities[0] = p\n indices[0] = n\n\n # descend the heap, swapping values until the max heap criterion is met\n i = 0\n while True:\n ic1 = 2 * i + 1\n ic2 = ic1 + 1\n\n if ic1 >= size:\n break\n elif ic2 >= size:\n if priorities[ic1] > p:\n i_swap = ic1\n else:\n break\n elif priorities[ic1] >= priorities[ic2]:\n if p < priorities[ic1]:\n i_swap = ic1\n else:\n break\n else:\n if p < priorities[ic2]:\n i_swap = ic2\n else:\n break\n\n priorities[i] = priorities[i_swap]\n indices[i] = indices[i_swap]\n\n i = i_swap\n\n priorities[i] = p\n indices[i] = n\n\n return 1\n\n\[email protected](\n \"i4(f4[::1],i4[::1],f4,i4)\",\n fastmath=True,\n locals={\n 
\"size\": numba.types.intp,\n \"i\": numba.types.uint16,\n \"ic1\": numba.types.uint16,\n \"ic2\": numba.types.uint16,\n \"i_swap\": numba.types.uint16,\n },\n cache=True,\n)\ndef checked_heap_push(priorities, indices, p, n):\n if p >= priorities[0]:\n return 0\n\n size = priorities.shape[0]\n\n # break if we already have this element.\n for i in range(size):\n if n == indices[i]:\n return 0\n\n # insert val at position zero\n priorities[0] = p\n indices[0] = n\n\n # descend the heap, swapping values until the max heap criterion is met\n i = 0\n while True:\n ic1 = 2 * i + 1\n ic2 = ic1 + 1\n\n if ic1 >= size:\n break\n elif ic2 >= size:\n if priorities[ic1] > p:\n i_swap = ic1\n else:\n break\n elif priorities[ic1] >= priorities[ic2]:\n if p < priorities[ic1]:\n i_swap = ic1\n else:\n break\n else:\n if p < priorities[ic2]:\n i_swap = ic2\n else:\n break\n\n priorities[i] = priorities[i_swap]\n indices[i] = indices[i_swap]\n\n i = i_swap\n\n priorities[i] = p\n indices[i] = n\n\n return 1\n\n\[email protected](\n \"i4(f4[::1],i4[::1],u1[::1],f4,i4,u1)\",\n fastmath=True,\n locals={\n \"size\": numba.types.intp,\n \"i\": numba.types.uint16,\n \"ic1\": numba.types.uint16,\n \"ic2\": numba.types.uint16,\n \"i_swap\": numba.types.uint16,\n },\n cache=True,\n)\ndef checked_flagged_heap_push(priorities, indices, flags, p, n, f):\n if p >= priorities[0]:\n return 0\n\n size = priorities.shape[0]\n\n # break if we already have this element.\n for i in range(size):\n if n == indices[i]:\n return 0\n\n # insert val at position zero\n priorities[0] = p\n indices[0] = n\n flags[0] = f\n\n # descend the heap, swapping values until the max heap criterion is met\n i = 0\n while True:\n ic1 = 2 * i + 1\n ic2 = ic1 + 1\n\n if ic1 >= size:\n break\n elif ic2 >= size:\n if priorities[ic1] > p:\n i_swap = ic1\n else:\n break\n elif priorities[ic1] >= priorities[ic2]:\n if p < priorities[ic1]:\n i_swap = ic1\n else:\n break\n else:\n if p < priorities[ic2]:\n i_swap = ic2\n else:\n break\n\n priorities[i] = priorities[i_swap]\n indices[i] = indices[i_swap]\n flags[i] = flags[i_swap]\n\n i = i_swap\n\n priorities[i] = p\n indices[i] = n\n flags[i] = f\n\n return 1\n\n\[email protected](\n parallel=True,\n locals={\n \"p\": numba.int32,\n \"q\": numba.int32,\n \"d\": numba.float32,\n \"added\": numba.uint8,\n \"n\": numba.uint32,\n \"i\": numba.uint32,\n \"j\": numba.uint32,\n },\n cache=False,\n)\ndef apply_graph_updates_low_memory(current_graph, updates, n_threads):\n\n n_changes = 0\n priorities = current_graph[1]\n indices = current_graph[0]\n flags = current_graph[2]\n # n_threads = numba.get_num_threads()\n\n for n in numba.prange(n_threads):\n for i in range(len(updates)):\n for j in range(len(updates[i])):\n p, q, d = updates[i][j]\n\n if p == -1 or q == -1:\n continue\n\n if p % n_threads == n:\n added = checked_flagged_heap_push(\n priorities[p], indices[p], flags[p], d, q, 1\n )\n n_changes += added\n\n if q % n_threads == n:\n added = checked_flagged_heap_push(\n priorities[q], indices[q], flags[q], d, p, 1\n )\n n_changes += added\n\n return n_changes\n\n\[email protected](locals={\"p\": numba.types.int64, \"q\": numba.types.int64}, cache=True)\ndef apply_graph_updates_high_memory(current_graph, updates, in_graph):\n\n n_changes = 0\n\n for i in range(len(updates)):\n for j in range(len(updates[i])):\n p, q, d = updates[i][j]\n\n if p == -1 or q == -1:\n continue\n\n if q in in_graph[p] and p in in_graph[q]:\n continue\n elif q in in_graph[p]:\n pass\n else:\n added = checked_flagged_heap_push(\n 
current_graph[1][p],\n current_graph[0][p],\n current_graph[2][p],\n d,\n q,\n 1,\n )\n\n if added > 0:\n in_graph[p].add(q)\n n_changes += added\n\n if p == q or p in in_graph[q]:\n pass\n else:\n added = checked_flagged_heap_push(\n current_graph[1][p],\n current_graph[0][p],\n current_graph[2][p],\n d,\n q,\n 1,\n )\n\n if added > 0:\n in_graph[q].add(p)\n n_changes += added\n\n return n_changes\n\n\[email protected](cache=True)\ndef initalize_heap_from_graph_indices(heap, graph_indices, data, metric):\n\n for i in range(graph_indices.shape[0]):\n for idx in range(graph_indices.shape[1]):\n j = graph_indices[i, idx]\n if j >= 0:\n d = metric(data[i], data[j])\n checked_flagged_heap_push(heap[1][i], heap[0][i], heap[2][i], d, j, 1)\n\n return heap\n\n\[email protected](parallel=True, cache=False)\ndef sparse_initalize_heap_from_graph_indices(\n heap, graph_indices, data_indptr, data_indices, data_vals, metric\n):\n\n for i in numba.prange(graph_indices.shape[0]):\n for idx in range(graph_indices.shape[1]):\n j = graph_indices[i, idx]\n ind1 = data_indices[data_indptr[i] : data_indptr[i + 1]]\n data1 = data_vals[data_indptr[i] : data_indptr[i + 1]]\n ind2 = data_indices[data_indptr[j] : data_indptr[j + 1]]\n data2 = data_vals[data_indptr[j] : data_indptr[j + 1]]\n d = metric(ind1, data1, ind2, data2)\n checked_flagged_heap_push(heap[1][i], heap[0][i], heap[2][i], d, j, 1)\n\n return heap\n\n\n# Generates a timestamp for use in logging messages when verbose=True\ndef ts():\n return time.ctime(time.time())\n"
] | [
[
"numpy.sqrt",
"numpy.empty",
"numpy.full"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
RonnyLV/PRNet | [
"0c2ded7042ceee2b2f9bba02bc19d91d4c3993c5"
] | [
"prnet/utils/render_app.py"
] | [
"import numpy as np\nfrom prnet.utils.render import vis_of_vertices, render_texture\nfrom scipy import ndimage\n\ndef get_visibility(vertices, triangles, h, w):\n triangles = triangles.T\n vertices_vis = vis_of_vertices(vertices.T, triangles, h, w)\n vertices_vis = vertices_vis.astype(bool)\n for k in range(2):\n tri_vis = vertices_vis[triangles[0,:]] | vertices_vis[triangles[1,:]] | vertices_vis[triangles[2,:]]\n ind = triangles[:, tri_vis]\n vertices_vis[ind] = True\n # for k in range(2):\n # tri_vis = vertices_vis[triangles[0,:]] & vertices_vis[triangles[1,:]] & vertices_vis[triangles[2,:]]\n # ind = triangles[:, tri_vis]\n # vertices_vis[ind] = True\n vertices_vis = vertices_vis.astype(np.float32) #1 for visible and 0 for non-visible\n return vertices_vis\n\ndef get_uv_mask(vertices_vis, triangles, uv_coords, h, w, resolution):\n triangles = triangles.T\n vertices_vis = vertices_vis.astype(np.float32)\n uv_mask = render_texture(uv_coords.T, vertices_vis[np.newaxis, :], triangles, resolution, resolution, 1)\n uv_mask = np.squeeze(uv_mask > 0)\n uv_mask = ndimage.binary_closing(uv_mask)\n uv_mask = ndimage.binary_erosion(uv_mask, structure = np.ones((4,4))) \n uv_mask = ndimage.binary_closing(uv_mask)\n uv_mask = ndimage.binary_erosion(uv_mask, structure = np.ones((4,4))) \n uv_mask = ndimage.binary_erosion(uv_mask, structure = np.ones((4,4))) \n uv_mask = ndimage.binary_erosion(uv_mask, structure = np.ones((4,4))) \n uv_mask = uv_mask.astype(np.float32)\n\n return np.squeeze(uv_mask)\n\ndef get_depth_image(vertices, triangles, h, w, isShow = False):\n z = vertices[:, 2:]\n if isShow:\n z = z/max(z)\n depth_image = render_texture(vertices.T, z.T, triangles.T, h, w, 1)\n return np.squeeze(depth_image)"
] | [
[
"numpy.squeeze",
"scipy.ndimage.binary_closing",
"numpy.ones"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.7",
"1.0",
"0.10",
"1.2",
"0.14",
"0.19",
"1.5",
"0.12",
"0.17",
"0.13",
"1.6",
"1.4",
"1.9",
"1.3",
"1.10",
"0.15",
"0.18",
"0.16",
"1.8"
],
"tensorflow": []
}
] |
miramirakim227/SwapNeRF_single_GT | [
"55a842ec4155fa782ca1c48b5c6863aeca8ca295"
] | [
"im2scene/camera.py"
] | [
"import numpy as np\nimport torch\nfrom scipy.spatial.transform import Rotation as Rot\nimport pdb \nimport math \n\ndef get_camera_mat(fov=49.13, invert=True):\n # fov = 2 * arctan( sensor / (2 * focal))\n # focal = (sensor / 2) * 1 / (tan(0.5 * fov))\n # in our case, sensor = 2 as pixels are in [-1, 1]\n focal = 1. / np.tan(0.5 * fov * np.pi/180.)\n focal = focal.astype(np.float32)\n mat = torch.tensor([\n [focal, 0., 0., 0.],\n [0., focal, 0., 0.],\n [0., 0., 1, 0.],\n [0., 0., 0., 1.]\n ]).reshape(1, 4, 4)\n\n if invert:\n mat = torch.inverse(mat)\n return mat\n\n\ndef get_random_pose(u, v, range_radius, batch_size=16, # batch size 유동적으로 바꿀 수 있도록!\n invert=False): \n # edit mira start \n if isinstance(u, int):\n device = 'cpu'\n u = torch.zeros(batch_size,).to(device)\n v = torch.ones(batch_size,).to(device) * 0.25\n loc = sample_on_sphere(u, v, size=(batch_size))\n radius = range_radius[0] + \\\n torch.rand(batch_size) * (range_radius[1] - range_radius[0])\n if loc.is_cuda:\n radius = radius.cuda()\n loc = loc * radius.unsqueeze(-1)\n R = look_at(loc)\n RT = torch.eye(4).reshape(1, 4, 4).repeat(batch_size, 1, 1)\n RT[:, :3, :3] = R\n RT[:, :3, -1] = loc\n\n if invert:\n RT = torch.inverse(RT)\n return radius, RT\n\n\ndef get_middle_pose(range_u, range_v, range_radius, batch_size=32,\n invert=False):\n u_m, u_v, r_v = sum(range_u) * 0.5, sum(range_v) * \\\n 0.5, sum(range_radius) * 0.5\n loc = sample_on_sphere((u_m, u_m), (u_v, u_v), size=(batch_size))\n radius = torch.ones(batch_size) * r_v\n loc = loc * radius.unsqueeze(-1)\n R = look_at(loc)\n RT = torch.eye(4).reshape(1, 4, 4).repeat(batch_size, 1, 1)\n RT[:, :3, :3] = R\n RT[:, :3, -1] = loc\n\n if invert:\n RT = torch.inverse(RT)\n return RT\n\n\ndef get_camera_pose(range_u, range_v, range_r, val_u=0.5, val_v=0.5, val_r=0.5,\n batch_size=32, invert=False):\n u0, ur = range_u[0], range_u[1] - range_u[0]\n v0, vr = range_v[0], range_v[1] - range_v[0]\n r0, rr = range_r[0], range_r[1] - range_r[0]\n u = u0 + val_u * ur\n v = v0 + val_v * vr\n r = r0 + val_r * rr\n\n loc = sample_on_sphere((u, u), (v, v), size=(batch_size))\n radius = torch.ones(batch_size) * r\n loc = loc * radius.unsqueeze(-1)\n R = look_at(loc)\n RT = torch.eye(4).reshape(1, 4, 4).repeat(batch_size, 1, 1)\n RT[:, :3, :3] = R\n RT[:, :3, -1] = loc\n\n if invert:\n RT = torch.inverse(RT)\n return RT\n\n# edit: np -> torch \ndef to_sphere(u, v):\n theta = 2 * math.pi * u\n phi = torch.arccos(1 - 2 * v)\n cx = torch.sin(phi) * torch.cos(theta)\n cy = torch.sin(phi) * torch.sin(theta)\n cz = torch.cos(phi)\n return torch.stack([cx, cy, cz], dim=-1)\n\n\ndef sample_on_sphere(u=None, v=None, size=(1,),\n to_pytorch=True): # range_u (0, 0) range_v (0.25, 0.25)\n sample = to_sphere(u, v) # sample expect to be (16, 3)\n if to_pytorch:\n sample = torch.tensor(sample).float()\n\n return sample\n\n\ndef look_at(eye, at=np.array([0, 0, 0]), up=np.array([0, 0, 1]), eps=1e-5,\n to_pytorch=True):\n at = at.reshape(1, 3)\n up = up.reshape(1, 3)\n eye = eye.reshape(-1, 3)\n if isinstance(eye, torch.Tensor):\n if eye.is_cuda:\n device=torch.device('cuda:0')\n else:\n device=torch.device('cpu') # array \n at = torch.tensor(at).to(device).float()\n up = torch.tensor(up).to(device).float()\n \n up = up.repeat(eye.shape[0] // up.shape[0], 1)\n eps = torch.tensor([eps]).reshape(1, 1).repeat(up.shape[0], 1).to(device).float()\n\n z_axis = eye - at\n z_axis = z_axis / torch.max(torch.stack([torch.norm(z_axis,\n dim=1, keepdim=True), eps]))\n\n x_axis = torch.cross(up, z_axis)\n x_axis = 
x_axis / torch.max(torch.stack([torch.norm(x_axis,\n dim=1, keepdim=True), eps]))\n\n y_axis = torch.cross(z_axis, x_axis)\n y_axis = y_axis / torch.max(torch.stack([torch.norm(y_axis,\n dim=1, keepdim=True), eps]))\n\n r_mat = torch.cat(\n (x_axis.reshape(-1, 3, 1), y_axis.reshape(-1, 3, 1), z_axis.reshape(\n -1, 3, 1)), dim=2)\n\n else:\n print('pass here? oh my gaadd....') # 여기 안들어간다 오우쨔쓰!!\n up = up.repeat(eye.shape[0] // up.shape[0], axis = 0)\n eps = np.array([eps]).reshape(1, 1).repeat(up.shape[0], axis=0)\n\n z_axis = eye - at\n z_axis /= np.max(np.stack([np.linalg.norm(z_axis,\n axis=1, keepdims=True), eps]))\n\n x_axis = np.cross(up, z_axis)\n x_axis /= np.max(np.stack([np.linalg.norm(x_axis,\n axis=1, keepdims=True), eps]))\n\n y_axis = np.cross(z_axis, x_axis)\n y_axis /= np.max(np.stack([np.linalg.norm(y_axis,\n axis=1, keepdims=True), eps]))\n\n r_mat = np.concatenate(\n (x_axis.reshape(-1, 3, 1), y_axis.reshape(-1, 3, 1), z_axis.reshape(\n -1, 3, 1)), axis=2)\n\n if to_pytorch:\n r_mat = torch.tensor(r_mat).float()\n\n return r_mat\n\n\ndef get_rotation_matrix(axis='z', value=0., batch_size=32):\n r = Rot.from_euler(axis, value * 2 * np.pi).as_dcm()\n r = torch.from_numpy(r).reshape(1, 3, 3).repeat(batch_size, 1, 1)\n return r\n"
] | [
[
"torch.sin",
"torch.zeros",
"numpy.cross",
"torch.device",
"torch.norm",
"torch.ones",
"torch.eye",
"torch.from_numpy",
"torch.inverse",
"torch.tensor",
"torch.rand",
"torch.cos",
"numpy.tan",
"torch.stack",
"scipy.spatial.transform.Rotation.from_euler",
"numpy.array",
"numpy.linalg.norm",
"torch.arccos",
"torch.cross"
]
] | [
{
"matplotlib": [],
"numpy": [
"1.10",
"1.12",
"1.11",
"1.19",
"1.24",
"1.13",
"1.16",
"1.9",
"1.18",
"1.23",
"1.21",
"1.22",
"1.20",
"1.7",
"1.15",
"1.14",
"1.17",
"1.8"
],
"pandas": [],
"scipy": [
"1.5",
"1.2",
"1.3",
"1.4"
],
"tensorflow": []
}
] |
gerritholl/typhon | [
"dbde147be12922ec730bd072dc4797c9da9a6d6b"
] | [
"typhon/retrieval/common.py"
] | [
"from ast import literal_eval\nimport copy\nfrom importlib import import_module\nimport json\n\nimport numpy as np\nimport pandas as pd\nfrom sklearn.pipeline import Pipeline\nfrom typhon.utils import to_array\n\n__all__ = [\n 'RetrievalProduct',\n]\n\n\nclass NotTrainedError(Exception):\n \"\"\"Should be raised if someone runs a non-trained retrieval product\n \"\"\"\n def __init__(self, *args):\n message = \"You must train this retrieval product before running it!\"\n Exception.__init__(self, message, *args)\n\n\nclass RetrievalProduct:\n \"\"\"Retrieval that can be trained with data and stored to json files\n\n This is basically a wrapper around the scikit-learn estimator and trainer\n classes and makes it possible to save the trained models as json file.\n\n To save this object to a json file, the additional package json_tricks is\n required.\n \"\"\"\n\n def __init__(self, verbose=False):\n \"\"\"Initialize a Retriever object\n\n Args:\n verbose: The higher this value is the more debug messages are\n printed. Default is False.\n \"\"\"\n\n # The trainer and/or model for this retriever:\n self.estimator = None\n self.verbose = verbose\n self._inputs = []\n self._outputs = []\n\n @property\n def inputs(self):\n return self._inputs\n\n @property\n def outputs(self):\n return self._outputs\n\n @staticmethod\n def _import_class(module_name, class_name):\n \"\"\"Import a class dynamically to the namespace\"\"\"\n mod = import_module(module_name)\n klass = getattr(mod, class_name)\n return klass\n\n @staticmethod\n def _encode_numpy(obj):\n def _to_dict(item):\n if isinstance(item, np.ndarray):\n return {\n \"__ndarray__\": item.tolist(),\n \"__dtype__\": str(item.dtype),\n \"__shape__\": item.shape,\n }\n else:\n return np.asscalar(item)\n\n def _is_numpy(item):\n return type(item).__module__ == np.__name__\n\n if isinstance(obj, dict):\n obj = obj.copy()\n iterator = obj.items()\n elif isinstance(obj, list):\n obj = obj.copy()\n iterator = enumerate(obj)\n else:\n return obj\n\n for key, value in iterator:\n if _is_numpy(value):\n obj[key] = _to_dict(value)\n elif isinstance(value, (list, dict)):\n obj[key] = RetrievalProduct._encode_numpy(value)\n\n return obj\n\n @staticmethod\n def _decode_numpy(obj):\n def _from_dict(item):\n try:\n return np.array(\n item[\"__ndarray__\"],\n dtype=item[\"__dtype__\"],\n )\n except TypeError:\n return np.array(\n item[\"__ndarray__\"],\n dtype=literal_eval(item[\"__dtype__\"]),\n )\n\n def _is_numpy(item):\n return isinstance(item, dict) and \"__ndarray__\" in item\n\n if isinstance(obj, dict):\n obj = obj.copy()\n iterator = obj.items()\n elif isinstance(obj, list):\n obj = obj.copy()\n iterator = enumerate(obj)\n else:\n return obj\n\n for key, value in iterator:\n if _is_numpy(value):\n obj[key] = _from_dict(value)\n elif isinstance(value, (list, tuple, dict)):\n obj[key] = RetrievalProduct._decode_numpy(value)\n\n return obj\n\n @staticmethod\n def _tree_to_dict(tree):\n return {\n \"module\": type(tree).__module__,\n \"class\": type(tree).__name__,\n \"coefs\": tree.__getstate__(),\n }\n\n @staticmethod\n def _tree_from_dict(dictionary, coefs):\n instance = RetrievalProduct._import_class(\n dictionary[\"module\"], dictionary[\"class\"]\n )\n tree = instance(\n to_array(coefs[\"n_features_\"]),\n to_array(coefs[\"n_classes_\"]),\n to_array(coefs[\"n_outputs_\"])\n )\n tree.__setstate__(dictionary[\"coefs\"])\n return tree\n\n @staticmethod\n def _model_to_dict(model):\n \"\"\"Convert a sklearn model object to a dictionary\"\"\"\n dictionary = {\n 
\"module\": type(model).__module__,\n \"class\": type(model).__name__,\n \"params\": model.get_params(deep=True),\n \"coefs\": {\n attr: copy.deepcopy(getattr(model, attr))\n for attr in model.__dir__()\n if not attr.startswith(\"__\") and attr.endswith(\"_\")\n }\n }\n\n if \"tree_\" in dictionary[\"coefs\"]:\n # Not funny. sklearn.tree objects are not directly\n # serializable to json. Hence, we must dump them by ourselves.\n dictionary[\"coefs\"][\"tree_\"] = RetrievalProduct._tree_to_dict(\n dictionary[\"coefs\"][\"tree_\"]\n )\n\n return RetrievalProduct._encode_numpy(dictionary)\n\n @staticmethod\n def _model_from_dict(dictionary):\n \"\"\"Create a sklearn model object from a dictionary\"\"\"\n dictionary = RetrievalProduct._decode_numpy(dictionary)\n instance = RetrievalProduct._import_class(\n dictionary[\"module\"], dictionary[\"class\"]\n )\n model = instance(**dictionary[\"params\"])\n for attr, value in dictionary[\"coefs\"].items():\n if attr == \"tree_\":\n # We must treat a tree specially:\n value = RetrievalProduct._tree_from_dict(\n value, dictionary[\"coefs\"]\n )\n try:\n setattr(model, attr, value)\n except AttributeError:\n # Some attributes cannot be set such as feature_importances_\n pass\n return model\n\n @staticmethod\n def _pipeline_to_dict(pipeline):\n \"\"\"Convert a pipeline object to a dictionary\"\"\"\n if pipeline is None:\n raise ValueError(\"No object trained!\")\n\n all_steps = {}\n for name, model in pipeline.steps:\n all_steps[name] = RetrievalProduct._model_to_dict(model)\n return all_steps\n\n @staticmethod\n def _pipeline_from_dict(dictionary):\n \"\"\"Create a pipeline object from a dictionary\"\"\"\n all_steps = []\n for name, step in dictionary.items():\n model = RetrievalProduct._model_from_dict(step)\n all_steps.append([name, model])\n\n return Pipeline(all_steps)\n\n def is_trained(self):\n \"\"\"Return true if RetrievalProduct is trained\"\"\"\n return self.estimator is not None\n\n @classmethod\n def from_dict(cls, parameter, *args, **kwargs):\n \"\"\"Load a retrieval product from a dictionary\n\n Args:\n parameter: A dictionary with the training parameters. 
Simply the\n output of :meth:`to_dict`.\n *args: Positional arguments allowed for :meth:`__init__`.\n **kwargs Keyword arguments allowed for :meth:`__init__`.\n\n Returns:\n A new :class:`RetrievalProduct` object.\n \"\"\"\n\n self = cls(*args, **kwargs)\n\n estimator = parameter.get(\"estimator\", None)\n if estimator is None:\n raise ValueError(\"Found no coefficients for estimator!\")\n\n is_pipeline = parameter[\"estimator_is_pipeline\"]\n\n if is_pipeline:\n self.estimator = self._pipeline_from_dict(estimator)\n else:\n self.estimator = self._model_from_dict(estimator)\n\n self._inputs = parameter[\"inputs\"]\n self._outputs = parameter[\"outputs\"]\n return self\n\n def to_dict(self):\n \"\"\"Dump this retrieval product to a dictionary\"\"\"\n parameter = {}\n if isinstance(self.estimator, Pipeline):\n parameter[\"estimator\"] = self._pipeline_to_dict(self.estimator)\n parameter[\"estimator_is_pipeline\"] = True\n else:\n parameter[\"estimator\"] = self._model_to_dict(self.estimator)\n parameter[\"estimator_is_pipeline\"] = False\n\n parameter[\"inputs\"] = self.inputs\n parameter[\"outputs\"] = self.outputs\n return parameter\n\n @classmethod\n def from_txt(cls, filename, *args, **kwargs):\n \"\"\"Load a retrieval product from a txt file\n\n Notes:\n The output format is not standard json!\n\n Training parameters are:\n * weights of the estimator\n * names of the input and target fields\n\n Args:\n filename: The name of file from where to load the training\n parameters.\n *args: Positional arguments allowed for :meth:`__init__`.\n **kwargs Keyword arguments allowed for :meth:`__init__`.\n\n Returns:\n A new :class:`RetrievalProduct` object.\n \"\"\"\n\n with open(filename, 'r') as infile:\n parameter = literal_eval(infile.read())\n return cls.from_dict(parameter, *args, **kwargs)\n\n def to_txt(self, filename):\n \"\"\"Save this retrieval product to a txt file\n\n Training parameters are:\n * configuration of the used estimator\n * names of the input, output, and target fields\n\n Args:\n filename: The name of the file where to store the training\n parameters.\n\n Returns:\n None\n \"\"\"\n\n with open(filename, 'w') as outfile:\n outfile.write(repr(self.to_dict()))\n\n def retrieve(self, inputs):\n \"\"\"Predict the target values for data coming from arrays\n\n Args:\n inputs: A pandas.DataFrame object. The keys must be the\n same labels as used in :meth:`train`.\n\n Returns:\n A pandas.DataFrame object with the retrieved data.\n\n Examples:\n\n .. :code-block:: python\n\n # TODO\n \"\"\"\n\n if self.estimator is None:\n raise NotTrainedError()\n\n # Skip empty datasets\n if inputs.empty:\n return None\n\n # Retrieve the data from the neural network:\n output_data = self.estimator.predict(inputs)\n\n return pd.DataFrame(data=output_data, columns=self.outputs)\n\n def score(self, inputs, targets):\n \"\"\"\n\n Args:\n inputs: A pandas.DataFrame with input data.\n targets: A pandas.DataFrame with target data.\n\n Returns:\n The metric score as a number\n \"\"\"\n if self.estimator is None:\n raise NotTrainedError()\n\n return self.estimator.score(inputs.squeeze(), targets.squeeze())\n\n def train(self, estimator, inputs, targets):\n \"\"\"Train this retriever with data from arrays\n\n Args:\n estimator: The object that will be trained. If it is a trainer\n object such as a GridSearchCV, the best estimator will be\n chosen after training. 
Can also be a Pipeline or a standard\n Estimator from scikit-learn.\n inputs: A pandas.DataFrame with input data.\n targets: A pandas.DataFrame with target data.\n\n Returns:\n A float number indicating the training score.\n \"\"\"\n\n # The input and target labels will be saved because to know what this\n # product retrieves and from what:\n self._inputs = inputs.columns.tolist()\n self._outputs = targets.columns.tolist()\n\n # Start to train!\n estimator.fit(inputs.squeeze(), targets.squeeze())\n\n # Let's check whether the estimator was a trainer object such as\n # GridSearchCV, etc. Then we save only the best estimator.\n if hasattr(estimator, \"best_estimator_\"):\n # Use the best estimator from now on:\n self.estimator = estimator.best_estimator_\n else:\n self.estimator = estimator\n\n return self.score(inputs, targets)\n"
] | [
[
"numpy.array",
"numpy.asscalar",
"sklearn.pipeline.Pipeline",
"pandas.DataFrame"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
BernhardRiemann/iree | [
"471349762b316f7d6b83eb5f9089255d78052758"
] | [
"integrations/tensorflow/e2e/broadcasting_test.py"
] | [
"# Lint as: python3\n# Copyright 2020 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Test broadcasting support.\"\"\"\n\nfrom absl import app\nimport numpy as np\nfrom pyiree.tf.support import tf_test_utils\nfrom pyiree.tf.support import tf_utils\nimport tensorflow.compat.v2 as tf\n\n\nclass BroadcastingModule(tf.Module):\n\n @tf.function(input_signature=[\n tf.TensorSpec([None], tf.float32),\n tf.TensorSpec([None], tf.float32),\n ])\n def add(self, lhs, rhs):\n return lhs + rhs\n\n\nclass BroadcastingTest(tf_test_utils.TracedModuleTestCase):\n\n def __init__(self, methodName=\"runTest\"):\n super(BroadcastingTest, self).__init__(methodName)\n self._modules = tf_test_utils.compile_tf_module(BroadcastingModule)\n\n def test_add_same_shape(self):\n\n def add_same_shape(module):\n lhs = tf_utils.uniform([4])\n rhs = tf_utils.uniform([4])\n module.add(lhs, rhs)\n\n self.compare_backends(add_same_shape, self._modules)\n\n def test_add_broadcast_lhs(self):\n\n def add_broadcast_lhs(module):\n lhs = tf_utils.uniform([1])\n rhs = tf_utils.uniform([4])\n module.add(lhs, rhs)\n\n self.compare_backends(add_broadcast_lhs, self._modules)\n\n def test_add_broadcast_rhs(self):\n\n def add_broadcast_rhs(module):\n lhs = tf_utils.uniform([4])\n rhs = tf_utils.uniform([1])\n module.add(lhs, rhs)\n\n self.compare_backends(add_broadcast_rhs, self._modules)\n\n\ndef main(argv):\n del argv # Unused\n if hasattr(tf, 'enable_v2_behavior'):\n tf.enable_v2_behavior()\n tf.test.main()\n\n\nif __name__ == '__main__':\n app.run(main)\n"
] | [
[
"tensorflow.compat.v2.enable_v2_behavior",
"tensorflow.compat.v2.TensorSpec",
"tensorflow.compat.v2.test.main"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
mannyray/sort | [
"f0ee0488aa4e7213d30ff50bcb848a843fedde42"
] | [
"python_implementation/example/example7.py"
] | [
"import commonExample\nimport math\nimport sys\nsys.path.insert(0,'..')\nimport generate\nimport constants\nfrom numpy import random\nimport intersection\nfrom PIL import Image, ImageDraw, ImageFont\n\ngif_file=\"example7\"\n\n\nxcoords = [constants.width,constants.width,constants.width,100,400,700,1000,1300]\nycoords = [50,350,700,constants.height, constants.height, constants.height, constants.height, constants.height]\n\ndef updateCoords(xCor,yCor,frameNumber):\n lastFrame = False\n turnBackHorizontal = False\n turnBackVertical = False\n if frameNumber > constants.width/constants.step_size:\n turnBackHorizontal = True\n if frameNumber > constants.height/constants.step_size:\n turnBackVertical = True\n if yCor[3] > constants.height + 10:\n lastFrame = True\n for i in range(0,len(xCor)):\n if i < 3:\n if turnBackHorizontal == False:\n xCor[i] = xCor[i] - constants.step_size\n else:\n xCor[i] = xCor[i] + constants.step_size\n else:\n if turnBackVertical == False:\n yCor[i] = yCor[i] - constants.step_size\n else:\n yCor[i] = yCor[i] + constants.step_size\n return lastFrame, xCor, yCor\n\ndef drawImage7(image,draw,xcoords,ycoords,index):\n if index == 0:\n original = Image.open('assets/orange.jpg')\n elif index == 1:\n original = Image.open('assets/apple.jpg')\n elif index == 2:\n original = Image.open('assets/watermellon.jpg')\n elif index == 3:\n original = Image.open('assets/orange.jpg')\n elif index == 4:\n original = Image.open('assets/apple.jpg')\n elif index == 5:\n original = Image.open('assets/watermellon.jpg')\n elif index == 6:\n original = Image.open('assets/apple.jpg')\n elif index == 7:\n original = Image.open('assets/watermellon.jpg')\n \n font = ImageFont.truetype(\"/usr/share/fonts/truetype/dejavu/DejaVuSans.ttf\", 20)\n image.paste(original, box=(xcoords[index],ycoords[index]))\n draw.text((xcoords[index]+constants.orange_width/2,ycoords[index]+constants.orange_width/2),str(index+1),fill=(0,0,0), font=font)\n\ndef boundBoxNoNoise7(x,y,index):\n center = 25\n objectType = None\n if index == 0:\n objectType = \"orange\"\n if index == 1:\n objectType = \"apple\"\n if index == 2:\n objectType = \"watermellon\"\n if index == 3:\n objectType = \"orange\"\n if index == 4:\n objectType = \"apple\"\n if index == 5:\n objectType = \"watermellon\"\n elif index == 6:\n objectType = \"apple\"\n elif index == 7:\n objectType = \"watermellon\"\n return x+center, y+center, constants.orange_width - center*2, constants.orange_width - center*2.5,objectType\n\ndef boundBoxNoise7(x,y,index):\n multiplier = 10\n x,y,w,h,objectType = boundBoxNoNoise7(x,y,index)\n arr = random.normal(size=(4,1))*multiplier\n return x+arr[0], y+arr[1], w+arr[2], h+arr[3], objectType\n\ncommonExample.common_run(updateCoords,gif_file,xcoords,ycoords,boundBoxNoise=boundBoxNoise7,boundBoxNoNoise=boundBoxNoNoise7,drawImage=drawImage7,saveData=True)\n"
] | [
[
"numpy.random.normal"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
FastSense/rosbot-ros2 | [
"063c897a16129d9aa88c2c7c52bdf6547af894e4",
"063c897a16129d9aa88c2c7c52bdf6547af894e4"
] | [
"ros2_ws/src/utils/logger/logger/logger.py",
"ros1_ws/src/hdf5_data_publisher/scripts/hdf5_pcd_publisher.py"
] | [
"import os\nimport pandas as pd\nfrom matplotlib import pyplot as plt\nimport rclpy\nimport numpy as np\n\nfrom rclpy.node import Node\nfrom geometry_msgs.msg import Twist\nfrom nav_msgs.msg import Odometry\nfrom std_srvs.srv import Empty\n\nfrom logger.utils import convert_ros2_time_to_float\nfrom logger.create_graphs import build_general_graph_for_rosbot\nfrom scipy.spatial.transform import Rotation\n\nclass Logger(Node):\n \"\"\"\n Class for logging the state of the rosbot\n Node for logging the state of the robot,\n kinematic model (optional) and neural network\n model (optional), control and time stamps\n :Attributes:\n :first_tick: (bool) srue if it is first callbcak\n :init_time: (float) node start time (time of the first callback)\n :curr_control: (list) current control [u_v, u_w]\n :output_path: (str) Absolute path to the directory\n where the logged data will be saved\n :control_topic: (str) nam of the control topic (/cmd_vel)\n :parent_frame: (str) name of the origin tf frame\n :kinetic_model_frame: (str) name of the kinematic model tf frame\n :nn_model_frame: (str) name of the NN model tf frame\n :robot_state: (pandas.DataFrame) container for rosbot state\n :kinetic_model_state: (pandas.DataFrame) container for\n kinematic model state\n :nn_model_state: (pandas.DataFrame) container for NN model state\n :robot_control: (pandas.DataFrame) container for rosbot control\n :time: (list) container for time stamps\n :odom_sub: subscriber to /odom topic\n :control_sub: subscriber to control topic\n \"\"\"\n def __init__(self):\n \"\"\"\n \"\"\"\n super().__init__('logger')\n \n self.init_parameters()\n self.get_node_parametes()\n self.init_subs()\n self.init_containers()\n \n self.first_tick = True\n self.init_time = None\n self.curr_control = list()\n self.srv = self.create_service(Empty, 'shutdown_logger', self.shutdown_logger_callback)\n rclpy.get_default_context().on_shutdown(self.on_shutdown)\n \n\n def init_parameters(self):\n \"\"\"\n Declares node parameters\n \"\"\"\n self.declare_parameter('output_path', \"\")\n self.declare_parameter('control_topic', '/cmd_vel')\n self.declare_parameter('parent_frame', 'odom')\n self.declare_parameter('kinetic_model_frame', 'model_link')\n self.declare_parameter('nn_model_frame', 'nn_model_link')\n # self.declare_parameter('tf_topic', '/tf')\n\n def get_node_parametes(self):\n \"\"\"\n Gets node parameters\n \"\"\"\n self.output_path = self.get_parameter('output_path').get_parameter_value().string_value\n self.control_topic = self.get_parameter('control_topic').get_parameter_value().string_value\n self.parent_frame = self.get_parameter('parent_frame').get_parameter_value().string_value\n self.kinetic_model_frame = self.get_parameter('kinetic_model_frame').get_parameter_value().string_value\n self.nn_model_frame = self.get_parameter('nn_model_frame').get_parameter_value().string_value\n # self.tf_topic = self.get_parameter('tf_topic').get_parameter_value().string_value\n\n def init_containers(self):\n \"\"\"\n Declares containers for logged data\n \"\"\"\n self.robot_state = pd.DataFrame(\n columns=[\n 'x', 'y', 'z', 'roll', 'pitch', 'yaw',\n 'v_x', 'v_y', 'v_z', 'w_x', 'w_y', 'w_z',\n ]\n )\n self.kinetic_model_state = pd.DataFrame(\n columns=[\n 'x', 'y', 'z', 'roll', 'pitch', 'yaw',\n 'v_x', 'v_y', 'v_z', 'w_x', 'w_y', 'w_z',\n ]\n )\n self.nn_model_state = pd.DataFrame(\n columns=[\n 'x', 'y', 'z', 'roll', 'pitch', 'yaw',\n 'v_x', 'v_y', 'v_z', 'w_x', 'w_y', 'w_z',\n ]\n )\n self.robot_control = pd.DataFrame(\n columns=[\n 'v_x', 'w_z'\n 
]\n )\n self.time = list()\n\n def init_subs(self):\n \"\"\"\n Declares node subscribers\n \"\"\"\n self.odom_sub = self.create_subscription(\n Odometry,\n '/odom',\n self.odom_callback,\n 1\n )\n self.control_sub = self.create_subscription(\n Twist,\n self.control_topic,\n self.control_callback,\n 1\n )\n\n # prevent unused variable warning\n self.control_sub\n self.odom_sub\n\n def odom_callback(self, odom_msg):\n \"\"\"\n Callback on odom message\n Robot position, current time and control are logged\n Args:\n :odom_msg: (nav_msgs.msg.Odometry): odom msg\n \"\"\"\n\n if (len(self.curr_control) == 0):\n return \n\n curr_time = convert_ros2_time_to_float(\n self.get_clock().now().seconds_nanoseconds()\n ) \n # update time container\n self.time.append(curr_time - self.init_time)\n # update control container\n self.robot_control.loc[len(self.robot_control)] = self.curr_control\n # update robot_state container\n rosbot_pose = odom_msg.pose.pose\n rosbot_velocities = odom_msg.twist.twist\n x, y, z = rosbot_pose.position.x, rosbot_pose.position.y, rosbot_pose.position.z\n rpy = Rotation.from_quat([\n np.float(rosbot_pose.orientation.x),\n np.float(rosbot_pose.orientation.y),\n np.float(rosbot_pose.orientation.z),\n np.float(rosbot_pose.orientation.w)]\n ).as_euler('xyz')\n rpy = list(rpy)\n\n v_x = rosbot_velocities.linear.x # Linear velocity\n v_y = rosbot_velocities.linear.y\n v_z = rosbot_velocities.linear.z\n\n w_x = rosbot_velocities.angular.x\n w_y = rosbot_velocities.angular.y\n w_z = rosbot_velocities.angular.z # YAW velocity\n\n last_row = len(self.robot_state)\n self.robot_state.loc[last_row] = [x,y,z] + rpy + [v_x, v_y, v_z, w_x, w_y, w_z]\n\n def control_callback(self, control):\n \"\"\"\n Updates the current control\n Args:\n :control: (geometry_msgs.msg.Twist) control msg\n \"\"\"\n if self.first_tick:\n self.first_tick = False\n self.init_time = convert_ros2_time_to_float(\n self.get_clock().now().seconds_nanoseconds()\n )\n\n self.curr_control = [control.linear.x, control.angular.z]\n\n def save_collected_data_to_csv(self):\n \"\"\"\n Saves logged data in csv format\n \"\"\"\n # if not os.path.exists(self.output_path):\n # os.makedirs(self.output_path)\n\n self.robot_state.to_csv(\n path_or_buf=os.path.join(self.output_path, \"rosbot_state.csv\"),\n sep=' ',\n index=False\n )\n\n self.kinetic_model_state.to_csv(\n path_or_buf=os.path.join(self.output_path, \"kinematic_model_state.csv\"),\n sep=' ',\n index=False\n )\n\n self.nn_model_state.to_csv(\n path_or_buf=os.path.join(self.output_path, \"nn_model_state.csv\"),\n sep=' ',\n index=False\n )\n\n self.robot_control.to_csv(\n path_or_buf= os.path.join(self.output_path,\"control.csv\"),\n sep=' ',\n index=False\n )\n\n pd.DataFrame(data=self.time, columns=['t']).to_csv(\n path_or_buf= os.path.join(self.output_path, \"time.csv\"),\n sep=' ',\n index=False\n )\n \n\n def shutdown_logger_callback(self):\n \"\"\"\n Callback for the shutdown_logger service, \n turns off the logger node\n \"\"\"\n rclpy.try_shutdown()\n\n def on_shutdown(self):\n \"\"\"\n A function that is executed when a node shutdown.\n Plots a graph of all collected data, saves it in csv format.\n \"\"\"\n\n if not os.path.exists(self.output_path):\n os.makedirs(self.output_path)\n \n data_plots = build_general_graph_for_rosbot(\n robot_state_df=self.robot_state,\n control_df=self.robot_control,\n time_list=self.time,\n save_to_png=True,\n path=self.output_path\n )\n self.save_collected_data_to_csv()\n\n self.get_logger().warn(\"Output path = 
{}\".format(self.output_path))\n\ndef main():\n \"\"\"\n Declares the logger node.\n Node works \n \"\"\"\n rclpy.init()\n logger = Logger()\n\n try:\n rclpy.spin(logger)\n except:\n pass\n\n logger.destroy_node()\n rclpy.shutdown()\n\n\nif __name__ == '__main__':\n main()\n\n\n\n",
"#! /usr/bin/env python\n\nimport rospy\nfrom sensor_msgs.msg import PointCloud2, PointField\nfrom geometry_msgs.msg import PoseWithCovarianceStamped, Point, Quaternion\nimport h5py\nimport numpy as np\nimport tf\nimport cv2\n\nrospy.init_node('hdf5_pcd_publisher')\n\n# Read data from HDF5\nhdf5_data_file = '/home/kirill/DTL_project/rosbot_ws/src/oakd_rosbags/rosbot_gazebo_msgs3.hdf5'\nwith h5py.File(hdf5_data_file, 'r') as f:\n\tpositions = np.array(f['odom_position'])\n\trotations = np.array(f['odom_rotation'])\n\tpose_stamps = np.array(f['odom_stamp'])\n\tpcd_data = np.array(f['pcd_data'])\n\tpcd_stamps = np.array(f['pcd_stamp'])\nprint(pcd_data.shape)\n\n# Synchronize poses to pointclouds\nj = 0\npositions_sync = np.zeros((pcd_data.shape[0], 3))\nrotations_sync = np.zeros((pcd_data.shape[0], 4))\nfor i in range(len(pcd_stamps)):\n\twhile j < len(pose_stamps) and pose_stamps[j] < pcd_stamps[i]:\n\t\tj += 1\n\tif j == 0:\n\t\tpositions_sync[i] = positions[j]\n\t\trotations_sync[i] = rotations[j]\n\telif j == len(pose_stamps):\n\t\tpositions_sync[i] = positions[j - 1]\n\t\trotations_sync[i] = rotations[j - 1]\n\telse:\n\t\talpha = (pcd_stamps[i] - pose_stamps[j - 1]) / (pose_stamps[j] - pose_stamps[j - 1])\n\t\tpositions_sync[i] = alpha * positions[j] + (1 - alpha) * positions[j - 1]\n\t\trotations_sync[i] = alpha * rotations[j] + (1 - alpha) * rotations[j - 1]\n\n# Initialize publishers\nrate = rospy.Rate(10)\npose_publisher = rospy.Publisher('/pose', PoseWithCovarianceStamped, latch=True, queue_size=100)\npcd_publisher = rospy.Publisher('/points', PointCloud2, latch=True, queue_size=100)\n\n# Initialize pose message\npose_msg = PoseWithCovarianceStamped()\npose_msg.header.frame_id = 'odom'\npose_msg.pose.covariance = list(np.eye(6).ravel() * 0.05)\ntf_broadcaster = tf.TransformBroadcaster()\n\n# Initialize pointcloud message\npcd_msg = PointCloud2()\npcd_msg.header.frame_id = 'points'\npcd_msg.height = 1\npcd_msg.width = pcd_data.shape[1] // 32\npcd_msg.fields = [\n\tPointField(name='x', offset=0, datatype=7, count=1),\n\tPointField(name='y', offset=4, datatype=7, count=1),\n\tPointField(name='z', offset=8, datatype=7, count=1),\n\tPointField(name='rgb', offset=16, datatype=7, count=1)\n]\npcd_msg.is_bigendian = False\npcd_msg.point_step = 32\npcd_msg.row_step = pcd_data.shape[1]\n\n# Publish pose and pcd with specified rate\nfor i in range(len(pcd_data)):\n\tif rospy.is_shutdown():\n\t\tbreak\n\n\tcur_time = rospy.Time.now()\n\tprint('Publish data at time {}'.format(cur_time.to_sec()))\n\n\t# Pose\n\tpose_msg.header.stamp = cur_time\n\tpose_position = Point()\n\tpose_position.x, pose_position.y, pose_position.z = positions_sync[i]\n\tpose_msg.pose.pose.position = pose_position\n\tpose_orientation = Quaternion()\n\tpose_orientation.x, pose_orientation.y, pose_orientation.z, pose_orientation.w = rotations_sync[i]\n\tpose_msg.pose.pose.orientation = pose_orientation\n\tpose_publisher.publish(pose_msg)\n\n\t# TF\n\ttf_broadcaster.sendTransform(positions_sync[i], rotations_sync[i],\n\t\t cur_time,\n\t\t 'base_link', 'odom')\n\n\t# Pointcloud\n\tpcd_msg.header.stamp = cur_time\n\tpcd_msg.data = list(pcd_data[i])\n\tpcd_publisher.publish(pcd_msg)\n\n\trate.sleep()"
] | [
[
"numpy.float",
"pandas.DataFrame"
],
[
"numpy.eye",
"numpy.array",
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
bgpeyton/QCElemental | [
"2e84cd686d5fff0fc79accb28ffa985de4684704"
] | [
"qcelemental/util/misc.py"
] | [
"import math\nimport re\nfrom typing import Dict, List\n\nimport numpy as np\n\nfrom ..physical_constants import constants\n\n\ndef distance_matrix(a: np.ndarray, b: np.ndarray) -> np.ndarray:\n \"\"\"Euclidean distance matrix between rows of arrays `a` and `b`. Equivalent to\n `scipy.spatial.distance.cdist(a, b, 'euclidean')`. Returns a.shape[0] x b.shape[0] array.\n\n \"\"\"\n assert a.shape[1] == b.shape[1], \"\"\"Inner dimensions do not match\"\"\"\n distm = np.zeros([a.shape[0], b.shape[0]])\n for i in range(a.shape[0]):\n distm[i] = np.linalg.norm(a[i] - b, axis=1)\n return distm\n\n\ndef update_with_error(a: Dict, b: Dict, path=None) -> Dict:\n \"\"\"Merges `b` into `a` like dict.update; however, raises KeyError if values of a\n key shared by `a` and `b` conflict.\n\n Adapted from: https://stackoverflow.com/a/7205107\n\n \"\"\"\n if path is None:\n path = []\n for key in b:\n if key in a:\n if isinstance(a[key], dict) and isinstance(b[key], dict):\n update_with_error(a[key], b[key], path + [str(key)])\n elif a[key] == b[key]:\n pass # same leaf value\n elif a[key] is None:\n a[key] = b[key]\n elif (\n isinstance(a[key], (list, tuple))\n and not isinstance(a[key], str)\n and isinstance(b[key], (list, tuple))\n and not isinstance(b[key], str)\n and len(a[key]) == len(b[key])\n and all((av is None or av == bv) for av, bv in zip(a[key], b[key]))\n ): # yapf: disable\n a[key] = b[key]\n else:\n raise KeyError(\"Conflict at {}: {} vs. {}\".format(\".\".join(path + [str(key)]), a[key], b[key]))\n else:\n a[key] = b[key]\n return a\n\n\ndef standardize_efp_angles_units(units: str, geom_hints: List[List[float]]) -> List[List[float]]:\n \"\"\"Applies to the pre-validated xyzabc or points hints in `geom_hints`\n the libefp default (1) units of [a0] and (2) radian angle range of\n (-pi, pi]. 
The latter is handy since this is how libefp returns hints\n\n \"\"\"\n\n def radrge(radang):\n \"\"\"Adjust `radang` by 2pi into (-pi, pi] range.\"\"\"\n if radang > math.pi:\n return radang - 2 * math.pi\n elif radang <= -math.pi:\n return radang + 2 * math.pi\n else:\n return radang\n\n if units == \"Angstrom\":\n iutau = 1.0 / constants.bohr2angstroms\n else:\n iutau = 1.0\n\n hints = []\n for hint in geom_hints:\n if len(hint) == 6:\n x, y, z = [i * iutau for i in hint[:3]]\n a, b, c = [radrge(i) for i in hint[3:]]\n hints.append([x, y, z, a, b, c])\n if len(hint) == 9:\n points = [i * iutau for i in hint]\n hints.append(points)\n\n return hints\n\n\ndef filter_comments(string: str) -> str:\n \"\"\"Remove from `string` any Python-style comments ('#' to end of line).\"\"\"\n\n return re.sub(r\"(^|[^\\\\])#.*\", \"\", string)\n\n\ndef unnp(dicary: Dict, _path=None, *, flat: bool = False) -> Dict:\n \"\"\"Return `dicary` with any ndarray values replaced by lists.\n\n Parameters\n ----------\n dicary: dict\n Dictionary where any internal iterables are dict or list.\n flat : bool, optional\n Whether the returned lists are flat or nested.\n\n Returns\n -------\n dict\n Input with any ndarray values replaced by lists.\n\n \"\"\"\n\n if _path is None:\n _path = []\n\n ndicary: Dict = {}\n for k, v in dicary.items():\n if isinstance(v, dict):\n ndicary[k] = unnp(v, _path + [str(k)], flat=flat)\n elif isinstance(v, list):\n # relying on Py3.6+ ordered dict here\n fakedict = {kk: vv for kk, vv in enumerate(v)}\n tolisted = unnp(fakedict, _path + [str(k)], flat=flat)\n ndicary[k] = list(tolisted.values())\n else:\n try:\n v.shape\n except AttributeError:\n ndicary[k] = v\n else:\n if flat:\n ndicary[k] = v.ravel().tolist()\n else:\n ndicary[k] = v.tolist()\n return ndicary\n\n\ndef _norm(points) -> float:\n \"\"\"\n Return the Frobenius norm across axis=-1, NumPy's internal norm is crazy slow (~4x)\n \"\"\"\n\n tmp = np.atleast_2d(points)\n return np.sqrt(np.einsum(\"ij,ij->i\", tmp, tmp))\n\n\ndef measure_coordinates(coordinates, measurements, degrees=False):\n \"\"\"\n Measures a geometry array based on 0-based indices provided, automatically detects distance, angle,\n and dihedral based on length of measurement input.\n \"\"\"\n\n coordinates = np.atleast_2d(coordinates)\n num_coords = coordinates.shape[0]\n\n single = False\n if isinstance(measurements[0], int):\n measurements = [measurements]\n single = True\n\n ret = []\n for num, m in enumerate(measurements):\n if any(x >= num_coords for x in m):\n raise ValueError(f\"An index of measurement {num} is out of bounds.\")\n\n kwargs = {}\n if len(m) == 2:\n func = compute_distance\n elif len(m) == 3:\n func = compute_angle\n kwargs = {\"degrees\": degrees}\n elif len(m) == 4:\n func = compute_dihedral\n kwargs = {\"degrees\": degrees}\n else:\n raise KeyError(f\"Unrecognized number of arguments for measurement {num}, found {len(m)}, expected 2-4.\")\n\n val = func(*[coordinates[x] for x in m], **kwargs)\n ret.append(float(val))\n\n if single:\n return ret[0]\n else:\n return ret\n\n\ndef compute_distance(points1, points2) -> np.ndarray:\n \"\"\"\n Computes the distance between the provided points on a per-row basis.\n\n Parameters\n ----------\n points1 : array-like\n The first list of points, can be 1D or 2D\n points2 : array-like\n The second list of points, can be 1D or 2D\n\n Returns\n -------\n distances : np.ndarray\n The array of distances between points1 and points2\n\n Notes\n -----\n Units are not considered inside these expressions, 
please preconvert to the same units before using.\n\n See Also\n --------\n distance_matrix\n Computes the distance between the provided points in all rows.\n compute_distance result is the diagonal of the distance_matrix result.\n\n \"\"\"\n points1 = np.atleast_2d(points1)\n points2 = np.atleast_2d(points2)\n\n return _norm(points1 - points2)\n\n\ndef compute_angle(points1, points2, points3, *, degrees: bool = False) -> np.ndarray:\n \"\"\"\n Computes the angle (p1, p2 [vertex], p3) between the provided points on a per-row basis.\n\n Parameters\n ----------\n points1 : np.ndarray\n The first list of points, can be 1D or 2D\n points2 : np.ndarray\n The second list of points, can be 1D or 2D\n points3 : np.ndarray\n The third list of points, can be 1D or 2D\n degrees : bool, options\n Returns the angle in degrees rather than radians if True\n\n Returns\n -------\n angles : np.ndarray\n The angle between the three points in radians\n\n Notes\n -----\n Units are not considered inside these expressions, please preconvert to the same units before using.\n \"\"\"\n\n points1 = np.atleast_2d(points1)\n points2 = np.atleast_2d(points2)\n points3 = np.atleast_2d(points3)\n\n v12 = points1 - points2\n v23 = points2 - points3\n\n denom = _norm(v12) * _norm(v23)\n cosine_angle = np.einsum(\"ij,ij->i\", v12, v23) / denom\n\n angle = np.pi - np.arccos(cosine_angle)\n\n if degrees:\n return np.degrees(angle)\n else:\n return angle\n\n\ndef compute_dihedral(points1, points2, points3, points4, *, degrees: bool = False) -> np.ndarray:\n \"\"\"\n Computes the dihedral angle (p1, p2, p3, p4) between the provided points on a per-row basis using the Praxeolitic formula.\n\n Parameters\n ----------\n points1 : np.ndarray\n The first list of points, can be 1D or 2D\n points2 : np.ndarray\n The second list of points, can be 1D or 2D\n points3 : np.ndarray\n The third list of points, can be 1D or 2D\n points4 : np.ndarray\n The third list of points, can be 1D or 2D\n degrees : bool, options\n Returns the dihedral angle in degrees rather than radians if True\n\n Returns\n -------\n dihedrals : np.ndarray\n The dihedral angle between the four points in radians\n\n Notes\n -----\n Units are not considered inside these expressions, please preconvert to the same units before using.\n \"\"\"\n\n # FROM: https://stackoverflow.com/questions/20305272/\n\n points1 = np.atleast_2d(points1)\n points2 = np.atleast_2d(points2)\n points3 = np.atleast_2d(points3)\n points4 = np.atleast_2d(points4)\n\n # Build the three vectors\n v1 = -1.0 * (points2 - points1)\n v2 = points3 - points2\n v3 = points4 - points3\n\n # Normalize the central vector\n v2 = v2 / _norm(v2)\n\n # v = projection of b0 onto plane perpendicular to b1\n # = b0 minus component that aligns with b1\n # w = projection of b2 onto plane perpendicular to b1\n # = b2 minus component that aligns with b1\n v = v1 - np.einsum(\"ij,ij->i\", v1, v1) * v2\n w = v3 - np.einsum(\"ij,ij->i\", v3, v2) * v2\n\n # angle between v and w in a plane is the torsion angle\n # v and w may not be normalized but that's fine since tan is y/x\n x = np.einsum(\"ij,ij->i\", v, w)\n y = np.einsum(\"ij,ij->i\", np.cross(v2, v), w)\n angle = np.arctan2(y, x)\n\n if degrees:\n return np.degrees(angle)\n else:\n return angle\n"
] | [
[
"numpy.einsum",
"numpy.degrees",
"numpy.linalg.norm",
"numpy.arccos",
"numpy.arctan2",
"numpy.atleast_2d",
"numpy.cross",
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
DavidHurst/palbolts | [
"72f9ca3f82499b532f14d0e797426e1b425d3efe"
] | [
"conduit/fair/models/gpd.py"
] | [
"\"\"\"Zhang Gradient Projection Debiasing Baseline Model.\"\"\"\nfrom __future__ import annotations\nfrom typing import NamedTuple, cast\n\nimport ethicml as em\nfrom kit import implements\nfrom kit.torch import CrossEntropyLoss, TrainingMode\nimport pandas as pd\nimport pytorch_lightning as pl\nfrom pytorch_lightning.utilities.types import EPOCH_OUTPUT\nimport torch\nfrom torch import Tensor, nn\nfrom torch.optim.optimizer import Optimizer\n\nfrom conduit.data.structures import TernarySample\nfrom conduit.models.base import CdtModel\nfrom conduit.models.utils import aggregate_over_epoch, prediction, prefix_keys\nfrom conduit.types import LRScheduler, Stage\n\n__all__ = [\"GPD\"]\n\n\ndef compute_proj_grads(*, model: nn.Module, loss_p: Tensor, loss_a: Tensor, alpha: float) -> None:\n \"\"\"Computes the adversarial-gradient projection term.\n\n :param model: Model whose parameters the gradients are to be computed w.r.t.\n :param loss_p: Prediction loss.\n :param loss_a: Adversarial loss.\n :param alpha: Pre-factor for adversarial loss.\n \"\"\"\n grad_p = torch.autograd.grad(loss_p, tuple(model.parameters()), retain_graph=True)\n grad_a = torch.autograd.grad(loss_a, tuple(model.parameters()), retain_graph=True)\n\n def _proj(a: Tensor, b: Tensor) -> Tensor:\n return b * torch.sum(a * b) / torch.sum(b * b).clamp(min=torch.finfo(b.dtype).eps)\n\n grad_p = [p - _proj(p, a) - alpha * a for p, a in zip(grad_p, grad_a)]\n\n for param, grad in zip(model.parameters(), grad_p):\n param.grad = grad\n\n\ndef compute_grad(*, model: nn.Module, loss: Tensor) -> None:\n \"\"\"Computes the adversarial gradient projection term.\n\n :param model: Model whose parameters the gradients are to be computed w.r.t.\n :param loss: Adversarial loss.\n \"\"\"\n grad_list = torch.autograd.grad(loss, tuple(model.parameters()), retain_graph=True)\n\n for param, grad in zip(model.parameters(), grad_list):\n param.grad = grad\n\n\nclass ModelOut(NamedTuple):\n s: Tensor\n y: Tensor\n\n\nclass GPD(CdtModel):\n \"\"\"Zhang Mitigating Unwanted Biases.\"\"\"\n\n def __init__(\n self,\n *,\n adv: nn.Module,\n enc: nn.Module,\n clf: nn.Module,\n lr: float = 3.0e-4,\n weight_decay: float = 0.0,\n lr_initial_restart: int = 10,\n lr_restart_mult: int = 2,\n lr_sched_interval: TrainingMode = TrainingMode.epoch,\n lr_sched_freq: int = 1,\n ) -> None:\n super().__init__(\n lr=lr,\n weight_decay=weight_decay,\n lr_initial_restart=lr_initial_restart,\n lr_restart_mult=lr_restart_mult,\n lr_sched_interval=lr_sched_interval,\n lr_sched_freq=lr_sched_freq,\n )\n\n self.adv = adv\n self.enc = enc\n self.clf = clf\n\n self._loss_adv_fn = CrossEntropyLoss()\n self._loss_clf_fn = CrossEntropyLoss()\n\n self.automatic_optimization = False # Mark for manual optimization\n\n @implements(CdtModel)\n @torch.no_grad()\n def inference_step(self, batch: TernarySample, *, stage: Stage) -> dict[str, Tensor]:\n assert isinstance(batch.x, Tensor)\n model_out = self.forward(batch.x)\n loss_adv, loss_clf, loss = self._get_losses(model_out=model_out, batch=batch)\n logging_dict = {\n \"loss\": loss.item(),\n \"loss_adv\": loss_adv.item(),\n \"loss_clf\": loss_clf.item(),\n }\n logging_dict = prefix_keys(dict_=logging_dict, prefix=str(stage), sep=\"/\")\n self.log_dict(logging_dict)\n\n return {\n \"targets\": batch.y.view(-1),\n \"subgroup_inf\": batch.s.view(-1),\n \"logits_y\": model_out.y,\n }\n\n @implements(CdtModel)\n def inference_epoch_end(self, outputs: EPOCH_OUTPUT, stage: Stage) -> dict[str, float]:\n targets_all = 
aggregate_over_epoch(outputs=outputs, metric=\"targets\")\n subgroup_inf_all = aggregate_over_epoch(outputs=outputs, metric=\"subgroup_inf\")\n logits_y_all = aggregate_over_epoch(outputs=outputs, metric=\"logits_y\")\n\n preds_y_all = prediction(logits_y_all)\n\n dt = em.DataTuple(\n x=pd.DataFrame(\n torch.rand_like(subgroup_inf_all).detach().cpu().numpy(),\n columns=[\"x0\"],\n ),\n s=pd.DataFrame(subgroup_inf_all.detach().cpu().numpy(), columns=[\"s\"]),\n y=pd.DataFrame(targets_all.detach().cpu().numpy(), columns=[\"y\"]),\n )\n\n return em.run_metrics(\n predictions=em.Prediction(hard=pd.Series(preds_y_all.detach().cpu().numpy())),\n actual=dt,\n metrics=[em.Accuracy(), em.RenyiCorrelation(), em.Yanovich()],\n per_sens_metrics=[em.Accuracy(), em.ProbPos(), em.TPR()],\n )\n\n def _get_losses(\n self, model_out: ModelOut, *, batch: TernarySample\n ) -> tuple[Tensor, Tensor, Tensor]:\n loss_adv = self._loss_adv_fn(model_out.s, target=batch.s)\n loss_clf = self._loss_clf_fn(model_out.y, target=batch.y)\n return loss_adv, loss_clf, loss_adv + loss_clf\n\n @implements(pl.LightningModule)\n def training_step(self, batch: TernarySample, batch_idx: int) -> None:\n assert isinstance(batch.x, Tensor)\n opt = cast(Optimizer, self.optimizers())\n\n opt.zero_grad()\n\n model_out: ModelOut = self.forward(batch.x)\n loss_adv, loss_clf, loss = self._get_losses(model_out=model_out, batch=batch)\n\n logging_dict = {\n \"adv_loss\": loss_adv.item(),\n \"clf_loss\": loss_clf.item(),\n \"loss\": loss.item(),\n }\n logging_dict = prefix_keys(dict_=logging_dict, prefix=\"train\", sep=\"/\")\n self.log_dict(logging_dict)\n\n compute_proj_grads(model=self.enc, loss_p=loss_clf, loss_a=loss_adv, alpha=1.0)\n compute_grad(model=self.adv, loss=loss_adv)\n compute_grad(model=self.clf, loss=loss_clf)\n\n opt.step()\n\n if (self.lr_sched_interval is TrainingMode.step) and (\n self.global_step % self.lr_sched_freq == 0\n ):\n sch = cast(LRScheduler, self.lr_schedulers())\n sch.step()\n if (self.lr_sched_interval is TrainingMode.epoch) and self.trainer.is_last_batch:\n sch = cast(LRScheduler, self.lr_schedulers())\n sch.step()\n\n @implements(nn.Module)\n def forward(self, x: Tensor) -> ModelOut:\n embedding = self.enc(x)\n y_pred = self.clf(embedding)\n s_pred = self.adv(embedding)\n return ModelOut(y=y_pred, s=s_pred)\n"
] | [
[
"torch.finfo",
"torch.sum",
"torch.no_grad",
"torch.rand_like"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
Tulioas/dfp_analyser | [
"d66ff94ba0b88a5d421a992ad27661011db36091"
] | [
"primary_info.py"
] | [
"import pandas as pd\nfrom zipfile import ZipFile\nimport numpy as np\nimport re\nimport os\n\n\ndef year_identifier(file_name):\n\n '''\n Abstrait: identify the year of the file\n '''\n\n folder_regex = re.compile(r'20\\d\\d')\n match = folder_regex.search(str(file_name))\n year = match.group()\n return year\n\n\ndef debt_correction(dataframe):\n\n debt_ident_list = ['Empréstimos e Financiamentos']\n lpa_ident_list = ['ON']\n count_debt = 1\n count_lpa = 1\n\n for row in range(len(dataframe)):\n for col in range(len(dataframe.columns)):\n if dataframe.iloc[row,col] in debt_ident_list:\n prev_name = dataframe.iloc[row,col]\n dataframe.iat[row, col] = f'{prev_name} {count_debt}'\n count_debt += 1\n if dataframe.iloc[row,col] in lpa_ident_list:\n prev_name = dataframe.iloc[row,col]\n dataframe.iat[row, col] = f'{prev_name} {count_lpa}'\n count_lpa += 1 \n\n return dataframe\n\n\ndef dataframe_filtering(folder, file_name_list, company_list, prev=False):\n\n '''\n Input: folder name, list with important files in the folder and list with companies of interest\n Output: \n '''\n\n dataframe_general = []\n\n for company in company_list:\n\n dataframe_company = []\n dataframe_list = []\n\n for file in file_name_list:\n\n # Create BPA DataFrame\n file_raw = pd.read_csv(f'raw_dfp\\\\{folder}\\\\{file}', encoding='iso-8859-1', delimiter=';', skiprows=0, low_memory=False)\n\n # Filter year and last year results\n if prev is False:\n file_1 = file_raw[~file_raw['ORDEM_EXERC'].str.startswith('P')]\n folder_year = year_identifier(file_name_list)\n else:\n file_1 = file_raw[file_raw['ORDEM_EXERC'].str.startswith('P')]\n folder_year = int(year_identifier(file_name_list)) - 1\n\n # Filter the right columns\n file_2 = file_1[['DENOM_CIA', 'CD_CONTA','DS_CONTA', 'VL_CONTA']]\n\n # Filter the right companies\n file_3 = file_2[file_2['DENOM_CIA'].isin([company])]\n\n # Filter the right data\n\n if file.find('DRE') != -1:\n interest_data = ['Receita de Venda de Bens e/ou Serviços', 'Resultado Bruto', 'Despesas com Vendas', 'Despesas com Pesquisa e Desenvolvimento',\n 'Custos com Pesquisa e Desenvolvimento', 'Despesas com pesquisas e desenvolvimento', 'Pesquisa e Desenvolvimento', 'Pesquisa', 'Despesas com Pesquisas e Desenvolvimento',\n 'Custo com Pesquisa e Desenvolvimento Tecnológico', 'Despesas com gastos com desenvolvimento', 'Despesas com desenvolvimento de tecnologia e produtos', 'Com estudos em desenvolvimento',\n 'Despesas Gerais e Administrativas', 'Despesas de Depreciação', 'Despesas/Receitas Operacionais',\n 'Resultado Antes do Resultado Financeiro e dos Tributos', 'Resultado Financeiro', 'Resultado Antes dos Tributos sobre o Lucro',\n 'Resultado Líquido das Operações Continuadas', 'Lucro Básico por Ação', 'ON']\n\n elif file.find('BPA') != -1:\n interest_data = ['Ativo Total', 'Ativo Circulante', 'Imobilizado']\n\n elif file.find('BPP') != -1:\n interest_data = ['Passivo Circulante', 'Empréstimos e Financiamentos', 'Passivo Não Circulante', 'Patrimônio Líquido Consolidado',\n 'Reservas de Lucros', 'Lucros/Prejuízos Acumulados']\n\n elif file.find('DFC_MI') != -1:\n interest_data = ['Lucro Líquido do exercício', 'Depreciação, Amortização e Impairment', 'Depreciação e amortização', 'Depreciação de arrendamento', 'Depreciação e Amortização', 'Depreciações e Amortizações', 'Amortização e Depreciação', 'Depreciação/amortização', 'Depreciações', 'Depreciação e Amortizações', 'Depreciação do imobilizado', 'Depreciação e depleção do imobilizado', 'Depreciação, exaustão e amortização', 'Depreciação, 
Amortização e Exaustão', 'Depreciação, Exaustão e Amortização',\n 'Aquisição de Imobilizado e Intangíveis', 'Adições de imobilizado', 'Compras de ativo imobilizado', 'Aquisições de imobilizado', 'Aquisições de Imobilizado',\n 'Aquisições de Imobilizado e Intangível', 'Aquisições de imobilizado e intangível', 'Aquisições de Imobilizados e Intangíveis (Exceto pelo Excedente de Cessão Onerosa)',\n 'Aquisições de imobilizados e intangíveis', 'Aquisições de imobilizado veículos frota', 'Aquisições de imobilizado de uso', 'Aquisições de Imobilizado de Uso', 'Aquisição de ativos imobilizados, intangível e propriedade para investimento', 'Aquisição de imobilizado e intangível']\n\n file_4 = file_3[file_3['DS_CONTA'].isin(interest_data)]\n\n dataframe_list.append(file_4)\n\n # Concatenate each file dataframe into one and add year column\n dataframe_company = pd.concat(dataframe_list)\n dataframe_company = dataframe_company.rename(columns={\"VL_CONTA\": f\"{folder_year}\"})\n\n # Append to general list\n dataframe_general.append(dataframe_company)\n\n return dataframe_general\n\n\ndef primary_info(companies, clear_prev_folder=False):\n\n company_frames = []\n for company in companies:\n company_frames.append(pd.DataFrame())\n\n # Identify zip year\n for file in os.listdir('raw_dfp\\\\raw_zip'):\n zip_year = year_identifier(f'raw_dfp\\\\raw_zip\\\\{file}')\n\n # Create or clear the folder of the year\n output_folder = zip_year\n directory_elements = os.listdir('raw_dfp')\n if output_folder not in directory_elements:\n os.mkdir(f'raw_dfp\\\\{output_folder}')\n elif os.listdir(f'raw_dfp\\\\{output_folder}') != [] and clear_prev_folder is True:\n output_folder_elements = os.listdir(f'raw_dfp\\\\{output_folder}')\n for element in output_folder_elements:\n os.remove(f'raw_dfp\\\\{output_folder}\\\\{element}')\n\n # Extract files from zip\n if os.listdir(f'raw_dfp\\\\{output_folder}') == []:\n with ZipFile(f'raw_dfp\\\\raw_zip\\\\{file}', 'r') as zip:\n zip.extractall(path=f'raw_dfp\\\\{output_folder}')\n else:\n print(f\"A pasta \\\"raw_dfp/{zip_year}\\\" ja tem arquivos internos. 
Confira a necessidade de descompactar o .zip.\")\n print('Prosseguindo ...')\n\n # List folders in 'raw_dfp' and remove 'raw_zip'\n raw_folders = os.listdir('raw_dfp')\n raw_folders.remove('raw_zip')\n\n # Travel around raw_dfp folders excluding \"raw_zip\"\n for folder in raw_folders:\n\n # Remove all individual reports, aiming only consolidated reports\n file_list = os.listdir(f'raw_dfp\\\\{folder}')\n for file in file_list:\n file_regex = re.compile(r'ind_20\\d\\d')\n mo = file_regex.search(str(file))\n if mo is not None:\n os.remove(f'raw_dfp\\\\{folder}\\\\{file}')\n\n # Travel around folder files\n for file in file_list:\n\n # Save DRE file name in a variable\n dre_regex = re.compile(r'DRE_con_20\\d\\d')\n mo_dre = dre_regex.search(str(file))\n if mo_dre is not None:\n dre = file\n\n # Save BPA file name in a variable\n bpa_regex = re.compile(r'BPA_con_20\\d\\d')\n mo_bpa = bpa_regex.search(str(file))\n if mo_bpa is not None:\n bpa = file\n\n # Save BPP file name in a variable\n bpp_regex = re.compile(r'BPP_con_20\\d\\d')\n mo_bpp = bpp_regex.search(str(file))\n if mo_bpp is not None:\n bpp = file\n\n # Save DFC_MI file name in a variable\n dfc_regex = re.compile(r'DFC_MI_con_20\\d\\d')\n mo_dfc = dfc_regex.search(str(file))\n if mo_dfc is not None:\n dfc = file\n\n folder_list = dataframe_filtering(folder, [dre, bpa, bpp, dfc], companies)\n\n # Create datframe for 2016 based on 2017 folder\n if int(folder) == 2017:\n folder_list_2 = dataframe_filtering(folder, [dre, bpa, bpp, dfc], companies, prev=True)\n\n for company_index in range(len(companies)):\n if len(folder_list_2[company_index]) == 0: # Do not add empty dataframe\n pass\n else:\n company_frames[company_index] = debt_correction(folder_list_2[company_index])\n \n # Construct and append a final dataframe for each company with all years information\n for company_index in range(len(companies)):\n if len(folder_list[company_index]) == 0:\n pass\n elif len(company_frames[company_index]) == 0:\n company_frames[company_index] = debt_correction(folder_list[company_index])\n\n else:\n main = company_frames[company_index]\n serie_corrected = debt_correction(folder_list[company_index][['DS_CONTA', str(folder)]])\n serie = serie_corrected.set_index('DS_CONTA')\n\n #serie_no_dups = serie\n company_frames[company_index] = pd.merge(main, serie, on=['DS_CONTA'])\n\n return company_frames\n\n\ndef worked_info(companies=['AMBEV S.A.'], clear_prev_folder=False):\n\n # Create return variable\n return_dict_list = []\n\n # Extract primary information\n prim_info = primary_info(companies, clear_prev_folder=False)\n\n print('-+-' * 20)\n print('CARREGANDO DATAFFRAME ...')\n\n # Travel throught companies\n for comp_index in range(len(companies)):\n\n # Extract list of years collected\n year_columns = []\n for column in prim_info[comp_index].columns:\n if '20' in column:\n year_columns.append(column)\n\n # Extract company frame\n primary_frame = prim_info[comp_index]\n #pd.set_option('display.expand_frame_repr', False)\n #print(primary_frame)\n #primary_frame.to_csv('primary_csv.csv',sep=' ')\n\n # Duplicate checker\n imobilizado_duplicate = 0\n desp_ga_duplicate = 0\n lucro_acumul_duplicate = 0\n dai_duplicate = 0\n ped_duplicate = 0\n vendas_duplicate = 0\n divida_curto_duplicate = 0\n divida_longo_duplicate = 0\n receita_duplicate = 0\n\n # Initialize primary variables lists\n receita_list = []\n lucro_brut_list = []\n desp_vendas_list = []\n desp_ga_list = []\n dai_list = []\n desp_oper_list = []\n financeiro_list = []\n lucropreimp_list = []\n 
lucro_liq_list = []\n lucro_oper_list = []\n lucroporacao_list = []\n\n ativo_total_list = []\n ativo_circ_list = []\n imobilizado_list = []\n passivo_circ_list = []\n divida_curto_list = []\n divida_longo_list = []\n passivo_ncirc_list = []\n patr_liq_list = []\n lucro_acumul_list = []\n\n lucro_liq_exerc_list = []\n desp_ativo_fixo_list = []\n\n # Initialize intermediate variables\n desp_vga_list = []\n desp_ped_list = []\n\n # Travel trought cells\n for row in range(len(primary_frame)):\n\n col = 'DS_CONTA'\n \n # Fill primary variable lists (DRE)\n if primary_frame.iloc[row][col] == 'Receita de Venda de Bens e/ou Serviços':\n if receita_duplicate == 0:\n receita_duplicate += 1\n for year in year_columns:\n receita_list.append(primary_frame.iloc[row][year])\n else:\n pass\n\n elif primary_frame.iloc[row][col] == 'Resultado Bruto':\n for year in year_columns:\n lucro_brut_list.append(primary_frame.iloc[row][year])\n\n elif primary_frame.iloc[row][col] == 'Despesas com Vendas':\n if vendas_duplicate == 0:\n vendas_duplicate += 1\n for year in year_columns:\n desp_vendas_list.append(primary_frame.iloc[row][year])\n else:\n pass\n\n elif primary_frame.iloc[row][col] == 'Despesas Gerais e Administrativas':\n if desp_ga_duplicate == 0:\n desp_ga_duplicate += 1\n for year in year_columns:\n desp_ga_list.append(primary_frame.iloc[row][year])\n else:\n pass\n\n elif primary_frame.iloc[row][col] in ['Despesas de Depreciação', 'Depreciação, Amortização e Impairment', 'Depreciação e amortização', 'Depreciação de arrendamento',\n 'Depreciação e Amortização', 'Depreciações e Amortizações', 'Amortização e Depreciação', 'Depreciação/amortização',\n 'Depreciações', 'Depreciação e Amortizações', 'Depreciação do imobilizado', 'Depreciação e depleção do imobilizado', 'Depreciação, exaustão e amortização',\n 'Depreciação, Amortização e Exaustão', 'Depreciação, Exaustão e Amortização']:\n if dai_duplicate == 0:\n dai_duplicate += 1\n for year in year_columns:\n dai_list.append(primary_frame.iloc[row][year])\n else:\n pass\n\n elif primary_frame.iloc[row][col] in ['Despesas com Pesquisa e Desenvolvimento',\n 'Custos com Pesquisa e Desenvolvimento', 'Despesas com pesquisas e desenvolvimento', 'Pesquisa e Desenvolvimento', 'Pesquisa', 'Despesas com Pesquisas e Desenvolvimento',\n 'Custo com Pesquisa e Desenvolvimento Tecnológico', 'Despesas com gastos com desenvolvimento', 'Despesas com desenvolvimento de tecnologia e produtos', 'Com estudos em desenvolvimento']:\n if ped_duplicate == 0:\n ped_duplicate += 1\n for year in year_columns:\n desp_ped_list.append(primary_frame.iloc[row][year])\n else:\n pass\n\n elif primary_frame.iloc[row][col] == 'Despesas/Receitas Operacionais':\n for year in year_columns:\n desp_oper_list.append(primary_frame.iloc[row][year])\n\n elif primary_frame.iloc[row][col] == 'Resultado Antes do Resultado Financeiro e dos Tributos':\n for year in year_columns:\n lucro_oper_list.append(primary_frame.iloc[row][year]) \n\n elif primary_frame.iloc[row][col] == 'Resultado Financeiro':\n for year in year_columns:\n financeiro_list.append(primary_frame.iloc[row][year])\n\n elif primary_frame.iloc[row][col] == 'Resultado Antes dos Tributos sobre o Lucro':\n for year in year_columns:\n lucropreimp_list.append(primary_frame.iloc[row][year])\n\n elif primary_frame.iloc[row][col] == 'Resultado Líquido das Operações Continuadas':\n for year in year_columns:\n lucro_liq_list.append(primary_frame.iloc[row][year])\n\n elif primary_frame.iloc[row][col] == 'ON 1':\n for year in year_columns:\n 
lucroporacao_list.append(primary_frame.iloc[row][year])\n\n # Fill primary variable lists (BPA and BPP)\n if primary_frame.iloc[row][col] == 'Ativo Total':\n for year in year_columns:\n ativo_total_list.append(primary_frame.iloc[row][year])\n\n elif primary_frame.iloc[row][col] == 'Ativo Circulante':\n for year in year_columns:\n ativo_circ_list.append(primary_frame.iloc[row][year])\n\n elif primary_frame.iloc[row][col] == 'Imobilizado':\n if imobilizado_duplicate == 0:\n imobilizado_duplicate += 1\n for year in year_columns:\n imobilizado_list.append(primary_frame.iloc[row][year])\n else:\n pass\n\n elif primary_frame.iloc[row][col] == 'Passivo Circulante':\n for year in year_columns:\n passivo_circ_list.append(primary_frame.iloc[row][year])\n\n elif primary_frame.iloc[row][col] == 'Empréstimos e Financiamentos 1':\n if divida_curto_duplicate == 0:\n divida_curto_duplicate += 1\n for year in year_columns:\n divida_curto_list.append(primary_frame.iloc[row][year])\n else:\n pass\n\n elif primary_frame.iloc[row][col] == 'Empréstimos e Financiamentos 3':\n if divida_longo_duplicate == 0:\n divida_longo_duplicate += 1\n for year in year_columns:\n divida_longo_list.append(primary_frame.iloc[row][year])\n else:\n pass\n\n elif primary_frame.iloc[row][col] == 'Passivo Não Circulante':\n for year in year_columns:\n passivo_ncirc_list.append(primary_frame.iloc[row][year])\n\n elif primary_frame.iloc[row][col] == 'Patrimônio Líquido Consolidado':\n for year in year_columns:\n patr_liq_list.append(primary_frame.iloc[row][year])\n\n elif primary_frame.iloc[row][col] == 'Reservas de Lucros' or primary_frame.iloc[row][col] == 'Lucros/Prejuízos Acumulados':\n if lucro_acumul_duplicate == 0:\n lucro_acumul_duplicate += 1\n for year in year_columns:\n lucro_acumul_list.append(primary_frame.iloc[row][year])\n else:\n pass\n\n # Fill primary variable lists (DFC)\n elif primary_frame.iloc[row][col] == 'Lucro Líquido do exercício':\n for year in year_columns:\n lucro_liq_exerc_list.append(primary_frame.iloc[row][year])\n\n elif primary_frame.iloc[row][col] in ['Aquisição de Imobilizado e Intangíveis',\n 'Adições de imobilizado', 'Compras de ativo imobilizado', 'Aquisições de imobilizado', 'Aquisições de Imobilizado',\n 'Aquisições de Imobilizado e Intangível', 'Aquisições de imobilizado e intangível', 'Aquisições de Imobilizados e Intangíveis (Exceto pelo Excedente de Cessão Onerosa)',\n 'Aquisições de imobilizados e intangíveis', 'Aquisições de imobilizado veículos frota', 'Aquisições de imobilizado de uso', 'Aquisições de Imobilizado de Uso',\n 'Aquisição de ativos imobilizados, intangível e propriedade para investimento', 'Aquisição de imobilizado e intangível']:\n for year in year_columns:\n desp_ativo_fixo_list.append(primary_frame.iloc[row][year])\n\n # Build intermediate Variables\n desp_vga_list = np.array(desp_vendas_list) + np.array(desp_ga_list)\n divida_tot_list = np.array(divida_curto_list) + np.array(divida_longo_list)\n\n if lucro_brut_list == []:\n lucro_brut_list = np.zeros(len(year_columns))\n if desp_ped_list == []:\n desp_ped_list = np.zeros(len(year_columns))\n if dai_list == []:\n dai_list = np.zeros(len(year_columns))\n if desp_ativo_fixo_list == []:\n desp_ativo_fixo_list = np.zeros(len(year_columns))\n if lucro_liq_exerc_list == []:\n lucro_liq_exerc_list = lucro_liq_list\n\n # Build worked info\n marg_brut_list = 100 * np.divide(np.array(lucro_brut_list), np.array(receita_list))\n marg_liq_list = 100 * np.divide(np.array(lucro_liq_list), np.array(receita_list))\n vga_lucro_brut_list 
= 100 * np.divide(np.array(desp_vga_list), np.array(lucro_brut_list))\n ped_lucro_brut_list = 100 * np.divide(np.array(desp_ped_list), np.array(lucro_brut_list))\n deprec_lucro_brut_list = 100 * np.divide(np.array(dai_list), np.array(lucro_brut_list))\n juros_lucro_oper_list = 100 * np.divide(np.array(financeiro_list), np.array(lucro_oper_list))\n coef_liquidez_list = np.divide(np.array(ativo_circ_list), np.array(passivo_circ_list))\n passivo_tot_patrliq_list = np.divide((np.array(passivo_circ_list) + np.array(passivo_ncirc_list)), np.array(patr_liq_list))\n roe_list = 100 * np.divide(np.array(lucro_liq_list), np.array(patr_liq_list))\n roa_list = 100 * np.divide(np.array(lucro_liq_list), np.array(ativo_total_list))\n desp_ativo_fixo_lucro_liq_exerc_list = 100 * np.divide(np.array(desp_ativo_fixo_list), np.array(lucro_liq_exerc_list))\n divida_curto_tot_list = 100 * np.divide(np.array(divida_curto_list), np.array(divida_tot_list))\n divida_tot_lucro_oper_list = np.divide(np.array(divida_tot_list), np.array(lucro_oper_list))\n\n company_dict = {\n 'year_columns': year_columns,\n 'marg_brut_list': marg_brut_list,\n 'marg_liq_list': marg_liq_list,\n 'vga_lucro_brut_list': vga_lucro_brut_list,\n 'ped_lucro_brut_list': ped_lucro_brut_list,\n 'deprec_lucro_brut_list': deprec_lucro_brut_list,\n 'juros_lucro_oper_list': juros_lucro_oper_list,\n 'lucro_brut_list': lucro_brut_list,\n 'lucro_liq_list': lucro_liq_list,\n 'lucroporacao_list':lucroporacao_list,\n 'coef_liquidez_list': coef_liquidez_list,\n 'imobilizado_list': imobilizado_list,\n 'passivo_tot_patrliq_list': passivo_tot_patrliq_list,\n 'roe_list': roe_list,\n 'roa_list': roa_list,\n 'lucro_acumul_list': lucro_acumul_list,\n 'desp_ativo_fixo_lucro_liq_exerc_list': desp_ativo_fixo_lucro_liq_exerc_list,\n 'divida_curto_tot_list': divida_curto_tot_list,\n 'divida_tot_lucro_oper_list': divida_tot_lucro_oper_list\n }\n\n return_dict_list.append(company_dict)\n\n return return_dict_list\n"
] | [
[
"pandas.concat",
"pandas.read_csv",
"pandas.merge",
"pandas.DataFrame",
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.3",
"1.1",
"1.5",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
rabbitsun2/toy_python | [
"32f84b4d15b13c4daa4fa212a40e685abc0d2a5d",
"32f84b4d15b13c4daa4fa212a40e685abc0d2a5d"
] | [
"practice/3_basic_tensorflow/Keras/Example_simpleLinearRegression.py",
"practice/4_basic_tensorflow_2/220404_Pima_indian2.py"
] | [
"import numpy as np\nimport matplotlib.pyplot as plt\n\nx = np.arange(1, 6)\n\ny = 3 * x + 2\n\nprint(x)\nprint(y)\n\n# 시각화\nplt.plot(x, y)\nplt.title('y = 3x + 2')\nplt.show()\n",
"import pandas as pd\n\ndf = pd.read_csv('dataset/pima-indians-diabetes.csv', \n names = [\"pregnant\", \"plasma\", \"pressure\", \"thickness\",\n \"insulin\", \"BMI\", \"pedigree\", \"age\", \"class\"])\n\n#print(df.head(5))\n#print(df.tail(5))\n\n#print(df.info())\n\n#print(df.describe())\nprint(df[['pregnant', 'class']])"
] | [
[
"matplotlib.pyplot.plot",
"numpy.arange",
"matplotlib.pyplot.show",
"matplotlib.pyplot.title"
],
[
"pandas.read_csv"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
manuelciosici/DeepSpeed | [
"3da841853ca07abf3a09e7bd325a576c4e642c11",
"3da841853ca07abf3a09e7bd325a576c4e642c11",
"3da841853ca07abf3a09e7bd325a576c4e642c11",
"3da841853ca07abf3a09e7bd325a576c4e642c11"
] | [
"deepspeed/runtime/zero/linear.py",
"deepspeed/module_inject/replace_module.py",
"deepspeed/runtime/utils.py",
"deepspeed/runtime/zero/utils.py"
] | [
"#Linear Module to use with ZeRO Stage 3 to allow for parameter memory release\n#after the module execution during forward\n#Instead of saving variables using save_for_backward, we save variable ids\n#Allowing us to retrieve the variable without creating pointer to it\n#Which allows for underlying tensor to be garbage collected\n#When partitioned as needed by the Zero Stage 3 optimizer\n#TODO instead of patching Linear module, we could patch the ctx.save_for_backward\n#ctx.saved_tensors so that this approach works for all nn modules that are built upon\n#torch.nn.function. However the issue is that many modules uses C++ implementations\n#which does not have pytorch implementation. Eg torch.addmm which acts as a functional\n#when implemented outside of torch.autograd.Function\n\nimport math\n\nimport torch\nfrom torch import Tensor\nfrom torch.nn.parameter import Parameter\nfrom torch.nn import init\nfrom torch.nn.modules.module import Module\nfrom deepspeed.runtime.utils import noop_decorator\n\ntensor_map = {}\n\n\ndef print_rank_0(message, debug=False, force=False):\n if torch.distributed.get_rank() == 0 and (debug or force):\n print(message)\n\n\ntry:\n autocast_custom_fwd = torch.cuda.amp.custom_fwd\n autocast_custom_bwd = torch.cuda.amp.custom_bwd\nexcept (ImportError, AttributeError) as exp:\n autocast_custom_fwd = noop_decorator\n autocast_custom_bwd = noop_decorator\n\n\nclass LinearFunctionForZeroStage3(torch.autograd.Function):\n\n # Note that both forward and backward are @staticmethods\n @staticmethod\n @autocast_custom_fwd\n # bias is an optional argument\n def forward(ctx, input, weight, bias=None):\n #print(\"In ZeRO Linear Function\")\n\n weight_id = id(weight)\n bias_id = id(bias)\n\n #ctx.save_for_backward(input, weight, bias)\n ctx.save_for_backward(input, torch.tensor(weight_id), torch.tensor(bias_id))\n\n tensor_map[weight_id] = weight\n tensor_map[bias_id] = bias\n\n if input.dim() == 2 and bias is not None:\n # fused op is marginally faster\n ret = torch.addmm(bias, input, weight.t())\n else:\n output = input.matmul(weight.t())\n if bias is not None:\n output += bias\n ret = output\n\n return ret\n\n # This function has only a single output, so it gets only one gradient\n @staticmethod\n @autocast_custom_bwd\n def backward(ctx, grad_output):\n # This is a pattern that is very convenient - at the top of backward\n # unpack saved_tensors and initialize all gradients w.r.t. inputs to\n # None. Thanks to the fact that additional trailing Nones are\n # ignored, the return statement is simple even when the function has\n # optional inputs.\n #input, weight, bias = ctx.saved_tensors\n\n input, weight_id, bias_id = ctx.saved_tensors\n weight = tensor_map[weight_id.item()]\n bias = tensor_map[bias_id.item()]\n\n grad_input = grad_weight = grad_bias = None\n\n #print(f\"backward shaped grad_output {grad_output.shape}, input {input.shape}, weight {weight.shape} and bias {bias.shape if bias is not None else None}\")\n # These needs_input_grad checks are optional and there only to\n # improve efficiency. If you want to make your code simpler, you can\n # skip them. 
Returning gradients for inputs that don't require it is\n # not an error.\n if ctx.needs_input_grad[0]:\n #print(f\"Computing grad input weight {weight.shape} grad_output {grad_output.shape}\")\n grad_input = grad_output.matmul(weight)\n #print(f\"Computed grad input {grad_input.shape}\")\n if ctx.needs_input_grad[1]:\n #print(\"Computing grad weight\")\n dim = grad_output.dim()\n if dim > 2:\n grad_weight = grad_output.reshape(-1,\n grad_output.shape[-1]).t().matmul(\n input.reshape(-1,\n input.shape[-1]))\n else:\n grad_weight = grad_output.t().matmul(input)\n #print(f\"Computed grad weight grad_weight {grad_weight.shape}\")\n if bias is not None and ctx.needs_input_grad[2]:\n #print(\"Computing grad bias\")\n grad_bias = grad_output.sum(0)\n #print(\"Done computing grad bias\")\n #print(\"needs bias\")\n #print(f\"backward shaped grad_input {grad_input.shape}, grad_weight {grad_weight.shape}, grad_bias {grad_bias.shape if grad_bias is not None else None}\")\n return grad_input, grad_weight, grad_bias\n\n\ndef zero3_linear_wrap(input, weight, bias=None):\n if bias is None:\n return LinearFunctionForZeroStage3.apply(input, weight)\n else:\n return LinearFunctionForZeroStage3.apply(input, weight, bias)\n\n\nclass LinearModuleForZeroStage3(Module):\n r\"\"\"Applies a linear transformation to the incoming data: :math:`y = xA^T + b`.\n The weights are pre-transposed and stored as A^T instead of transposing during each\n forward. Memory savings proportional to the parameter size.\n\n Args:\n in_features: size of each input sample\n out_features: size of each output sample\n bias: If set to ``False``, the layer will not learn an additive bias.\n Default: ``True``\n\n Shape:\n - Input: :math:`(N, *, H_{in})` where :math:`*` means any number of\n additional dimensions and :math:`H_{in} = \\text{in\\_features}`\n - Output: :math:`(N, *, H_{out})` where all but the last dimension\n are the same shape as the input and :math:`H_{out} = \\text{out\\_features}`.\n\n Attributes:\n weight: the learnable weights of the module of shape\n :math:`(\\text{out\\_features}, \\text{in\\_features})`. 
The values are\n initialized from :math:`\\mathcal{U}(-\\sqrt{k}, \\sqrt{k})`, where\n :math:`k = \\frac{1}{\\text{in\\_features}}`\n bias: the learnable bias of the module of shape :math:`(\\text{out\\_features})`.\n If :attr:`bias` is ``True``, the values are initialized from\n :math:`\\mathcal{U}(-\\sqrt{k}, \\sqrt{k})` where\n :math:`k = \\frac{1}{\\text{in\\_features}}`\n\n Examples::\n\n >>> m = nn.Linear(20, 30)\n >>> input = torch.randn(128, 20)\n >>> output = m(input)\n >>> print(output.size())\n torch.Size([128, 30])\n \"\"\"\n __constants__ = ['in_features', 'out_features']\n in_features: int\n out_features: int\n weight: Tensor\n\n def __init__(self, in_features: int, out_features: int, bias: bool = True) -> None:\n super(LinearModuleForZeroStage3, self).__init__()\n print(\"Building ZeRO module\")\n self.in_features = in_features\n self.out_features = out_features\n self.weight = Parameter(torch.Tensor(out_features, in_features))\n if bias:\n self.bias = Parameter(torch.Tensor(out_features))\n else:\n self.register_parameter('bias', None)\n self.reset_parameters()\n\n def reset_parameters(self) -> None:\n init.kaiming_uniform_(self.weight, a=math.sqrt(5))\n if self.bias is not None:\n fan_in, _ = init._calculate_fan_in_and_fan_out(self.weight)\n bound = 1 / math.sqrt(fan_in)\n init.uniform_(self.bias, -bound, bound)\n\n def forward(self, input: Tensor) -> Tensor:\n return LinearFunctionForZeroStage3.apply(input, self.weight, self.bias)\n\n def extra_repr(self) -> str:\n return 'in_features={}, out_features={}, bias={}'.format(\n self.in_features,\n self.out_features,\n self.bias is not None)\n",
"import copy\nimport torch\nimport deepspeed\nimport deepspeed.ops.transformer as transformer_inference\nfrom .replace_policy import HFBertLayerPolicy, HFGPT2LayerPolicy, HFGPTJLayerPolicy\nfrom .replace_policy import replace_policies\nfrom ..constants import INFERENCE_GENERIC_MODE, INFERENCE_SPECIALIZED_MODE\nfrom ..runtime.weight_quantizer import WeightQuantization\nfrom torch import nn\n\n\nclass LinearAllreduce(nn.Module):\n def __init__(self, weight, bias=None, mp_group=None):\n super(LinearAllreduce, self).__init__()\n self.weight = weight\n self.bias = bias\n self.mp_group = mp_group\n\n def forward(self, input):\n output = torch.matmul(input, self.weight)\n if self.mp_group is not None:\n torch.distributed.all_reduce(output, group=self.mp_group)\n if self.bias is not None:\n output += self.bias\n return output\n\n\nclass LinearLayer(nn.Module):\n def __init__(self, weight, bias=None):\n super(LinearLayer, self).__init__()\n self.weight = weight\n self.bias = bias\n\n def forward(self, input):\n output = torch.matmul(input, self.weight)\n if self.bias is not None:\n output += self.bias\n return output\n\n\nclass ReplaceWithTensorSlicing:\n def __init__(self, mp_group=None):\n if mp_group is not None:\n self.gpu_index = torch.distributed.get_rank(group=mp_group)\n else:\n self.gpu_index = 0\n\n def merge_assert(self, dim1, dim2):\n assert dim1 > dim2, \\\n 'Merging tensors is not allowed here! Please use deepspeed load_checkpoint\\\n for merging your checkpoints before replacing the transformer layer with\\\n inference-kernels'\n\n def qkv_copy(self, dst, src):\n if src is None:\n return src\n src_shape = src.shape\n dst_shape = dst.shape\n\n src_split = torch.split(src.data, src.shape[-1] // 3, dim=-1)\n\n if (len(src_shape) == 2 and len(dst_shape) == 2):\n if src_shape[1] == dst_shape[1]:\n return torch.nn.Parameter(src)\n\n self.merge_assert(src_shape[1], dst_shape[1])\n qkv_size = dst_shape[1] // 3\n qkv_split = [torch.split(src_s, qkv_size, dim=1) for src_s in src_split]\n\n weight_split = [\n torch.cat([qkv_s[i] for qkv_s in qkv_split],\n axis=1) for i in range(len(qkv_split[0]))\n ]\n dst.data.copy_(weight_split[self.gpu_index].to(\n torch.cuda.current_device()).contiguous())\n else:\n if src_shape[0] == dst_shape[0]:\n return torch.nn.Parameter(src)\n\n qkv_size = dst_shape[0] // 3\n qkv_split = [torch.split(src_s, qkv_size, dim=0) for src_s in src_split]\n bias_split = [\n torch.cat([qkv_s[i] for qkv_s in qkv_split],\n axis=0) for i in range(len(qkv_split[0]))\n ]\n dst.data.copy_(bias_split[self.gpu_index].to(\n torch.cuda.current_device()).contiguous())\n\n return torch.nn.Parameter(dst)\n\n def copy(self, dst, src):\n if src is None:\n return src\n\n src_shape = src.shape\n dst_shape = dst.shape\n\n if (len(src_shape) == 2 and len(dst_shape) == 2):\n\n if src_shape[0] == dst_shape[0] and src_shape[1] == dst_shape[1]:\n return torch.nn.Parameter(src)\n\n if src_shape[0] != dst_shape[0]:\n self.merge_assert(src_shape[0], dst_shape[0])\n weight_split = torch.split(src, dst_shape[0])\n else:\n self.merge_assert(src_shape[1], dst_shape[1])\n weight_split = torch.split(src.data, dst_shape[1], dim=1)\n\n dst.data.copy_(weight_split[self.gpu_index].to(\n torch.cuda.current_device()).contiguous())\n else:\n if src_shape[0] == dst_shape[0]:\n return torch.nn.Parameter(src)\n\n bias_split = torch.split(src.data, dst_shape[-1])\n dst.data.copy_(bias_split[self.gpu_index].to(\n torch.cuda.current_device()).contiguous())\n\n return torch.nn.Parameter(dst)\n\n\ndef 
replace_transformer_layer(orig_layer_impl,\n model,\n policy=None,\n micro_batch_size=-1,\n config=None,\n seed=-1,\n hidden_size=-1,\n num_attention_heads=-1,\n mp_size=1,\n training_mp_size=1,\n mp_group=None,\n ep_group=None,\n expert_mp_group=None,\n preln=True,\n fp16=True,\n local_rank=-1,\n stochastic_mode=True,\n training=True,\n quantize=False,\n quantize_settings=None,\n triangular_masking=False,\n return_tuple=True,\n replace_with_kernel_inject=False,\n linear_layer_setting=None,\n moe=False,\n moe_experts=1,\n moe_type='standard'):\n \"\"\" Replace bert-style transformer layers with DeepSpeed's transformer layer\n Arguments:\n orig_layer_impl (torch.nn.Module): the original transformer layer implementation to look for,\n e.g., transformers.modeling_bert.BertLayer.\n model (torch.nn.Module): user's nn.module representing their model\n policy: shows the policy for mapping from the orig_layer_impl to transformer parameters when\n replace_with_kernel_inject is set, otherwise, it provides the names of two linear layers as\n a tuple: (attention_output projection, transformer output projection)\n micro_batch_size (int): micro batch size per gpu used during training/eval\n config (dict): model config containing hidden size, attention heads, etc.\n seed (int): random seed value\n max_seq_length (int): max sequence length for training\n hidden_size (int): hidden dimension\n num_attention_heads (int): number of attention heads\n mp_size (int): model_parallelism degree\n mp_group : model_parallel group initialized on the modeling side\n preln (bool): does the original layer implementation do pre or post layer norm?\n fp16 (bool): fp16 or fp32\n local_rank (int): GPU rank (optional),\n stochastic_mode (bool): whether to use stochastic mode\n training (bool): specifying whether kernel-injection is done for training/inference (set to false for inference-mode injection)\n quantize_settings (tuple): this setting shows how we can quantize a model for running it through the inference kernels.\n It includes (quantization_scales, merge_count, mlp_extra_grouping, quantize_groups).\n return_tuple (bool): if set, transformer layer returns a tuple as the output.\n Note: this flag needs to be set for huggingface models.\n replace_with_kernel_inject (bool): injection_mode, if true, kernels will be add along with configuring\n Tensor-Parallelism\n linear_layer_setting (tuple of modules) [Optional]: shows which two classes are used for linear layers\n and embedding layers\n attention_params: (list of strings) [Optional]: shows the parameters in the attention part that needs to\n be adjusted based on the model-parallelism\n Returns:\n Updated nn.module with replaced transformer layers\n \"\"\"\n def replace_with_policy(child,\n policy_cls,\n triangular_masking,\n inference=False,\n preln=True,\n layer_id=0):\n preln = False if policy_cls is HFBertLayerPolicy else preln\n if policy_cls is HFBertLayerPolicy:\n policy = policy_cls(child, inference=inference, preln=preln)\n else:\n policy = policy_cls(child, inference=inference)\n\n if inference:\n hidden_size, num_attention_heads = policy.get_hidden_heads()\n assert num_attention_heads % mp_size == 0,\\\n \"To run the model parallel across the GPUs, the attention_heads require to be divisible by the world_size!\" +\\\n \"This is because the attention computation is partitioned evenly among the parallel GPUs.\"\n from deepspeed.moe.layer import MoE\n moe = False\n if hasattr(child, 'mlp') and isinstance(child.mlp, MoE):\n num_experts = child.mlp.num_experts\n 
moe = True\n\n attn_linear_layer, qkvw, qkvb, dense_w, dense_b, scale_attention, megatron_v2 = policy.attention()\n if not moe or moe_type == 'standard':\n mlp_linear_layer, _h4h_w, _h4h_b, _4hh_w, _4hh_b = policy.mlp()\n else:\n mlp_linear_layer, _h4h_w, _h4h_b, _4hh_w, _4hh_b, \\\n _res_h4h_w, _res_h4h_b, _res_4hh_w, _res_4hh_b, _res_coef = policy.mlp(moe_type)\n\n attn_nw, attn_nb, input_nw, input_nb = policy.layerNorm()\n if quantize:\n if policy_cls is not HFBertLayerPolicy:\n qkvw = qkvw.to(torch.int8)\n dense_w = dense_w.to(torch.int8)\n _h4h_w = [moe_w1.to(torch.int8)\n for moe_w1 in _h4h_w] if moe else _h4h_w.to(torch.int8)\n _4hh_w = [moe_w1.to(torch.int8)\n for moe_w1 in _4hh_w] if moe else _4hh_w.to(torch.int8)\n elif fp16:\n qkvw = qkvw.half()\n dense_w = dense_w.half()\n _h4h_w = [moe_w1.half() for moe_w1 in _h4h_w] if moe else _h4h_w.half()\n _4hh_w = [moe_w1.half() for moe_w1 in _4hh_w] if moe else _4hh_w.half()\n if quantize or fp16:\n qkvb = qkvb if qkvb is None else qkvb.half()\n dense_b = dense_b if dense_b is None else dense_b.half()\n _h4h_b = [moe_b1.half() for moe_b1 in _h4h_b] if moe else _h4h_b.half()\n _4hh_b = [moe_b1.half() for moe_b1 in _4hh_b] if moe else _4hh_b.half()\n attn_nw = attn_nw if attn_nw is None else attn_nw.half()\n attn_nb = attn_nb if attn_nb is None else attn_nb.half()\n input_nw = input_nw.half()\n input_nb = input_nb.half()\n\n if moe and moe_type == 'residual' and fp16:\n _res_h4h_b = _res_h4h_b.half()\n _res_4hh_b = _res_4hh_b.half()\n _res_h4h_w = _res_h4h_w.half()\n _res_4hh_w = _res_4hh_w.half()\n _res_coef = _res_coef.half()\n\n mp_replace = ReplaceWithTensorSlicing(mp_group=mp_group)\n #expert_mp_replace = ReplaceWithTensorSlicing(mp_group=expert_mp_group)\n\n if inference:\n if moe:\n ep_world_size = torch.distributed.get_world_size()\n local_ep_size = 1 if num_experts < ep_world_size else num_experts // ep_world_size\n\n transformer_config = transformer_inference.DeepSpeedMoEInferenceConfig(\n hidden_size=hidden_size,\n heads=num_attention_heads,\n layer_norm_eps=config.layer_norm_eps if hasattr(\n config,\n 'layer_norm_eps') else 1e-12,\n fp16=fp16,\n pre_layer_norm=preln,\n mp_size=mp_size,\n q_int8=quantize,\n moe_experts=local_ep_size,\n global_experts=num_experts,\n mlp_type=moe_type)\n else:\n rotary_dim = config.rotary_dim if hasattr(config, 'rotary_dim') else child.attention.rotary_ndims \\\n if hasattr(child, 'attention') and hasattr(child.attention,'rotary_ndims') else -1\n transformer_config = transformer_inference.DeepSpeedInferenceConfig(\n hidden_size=hidden_size,\n heads=num_attention_heads,\n layer_norm_eps=config.layer_norm_eps if hasattr(\n config,\n 'layer_norm_eps') else\n (config.layer_norm_epsilon\n if hasattr(config,\n 'layer_norm_epsilon') else config.layernorm_epsilon\n if hasattr(config,\n 'layernorm_epsilon') else 1.0e-12),\n fp16=fp16,\n pre_layer_norm=preln,\n mp_size=mp_size,\n q_int8=quantize,\n return_tuple=(return_tuple or (policy_cls is HFBertLayerPolicy)),\n triangular_masking=(policy_cls is not HFBertLayerPolicy),\n local_attention=((config.attention_layers[layer_id] == \"local\")\n if hasattr(config,\n 'attention_layers') else False),\n window_size=(config.window_size if hasattr(config,\n 'window_size') else 1),\n rotary_dim=rotary_dim,\n mlp_after_attn=(rotary_dim is None or rotary_dim < 0),\n training_mp_size=training_mp_size)\n\n if quantize and quantize_settings is not None:\n (quantization_scales,\n merge_count,\n mlp_extra_grouping,\n quantize_groups) = quantize_settings\n if moe:\n 
new_module = transformer_inference.DeepSpeedMoEInference(\n transformer_config,\n mp_group=mp_group,\n ep_group=None if ep_group is None else ep_group[num_experts],\n expert_mp_group=None\n if expert_mp_group is None else expert_mp_group[num_experts],\n quantize_scales=quantization_scales[layer_id],\n quantize_groups=quantize_groups,\n merge_count=merge_count,\n mlp_extra_grouping=mlp_extra_grouping,\n qkv_merging=(policy_cls is HFBertLayerPolicy))\n\n else:\n new_module = transformer_inference.DeepSpeedTransformerInference(\n transformer_config,\n mp_group=mp_group,\n quantize_scales=quantization_scales[layer_id],\n quantize_groups=quantize_groups,\n merge_count=merge_count,\n mlp_extra_grouping=mlp_extra_grouping,\n qkv_merging=(policy_cls is HFBertLayerPolicy))\n\n if quantize and qkvw.dtype != torch.int8:\n quantize_bits = 8\n quantizer = WeightQuantization()\n if policy_cls is HFBertLayerPolicy:\n data_quantized, _ = quantizer.quantize_data(qkvw.data, quantize_bits, quantize_groups * 3)\n else:\n data_quantized, _ = quantizer.quantize_data(qkvw.data, quantize_bits, quantize_groups)\n qkvw.data.copy_(data_quantized)\n qkvw.data = qkvw.data.to(torch.int8)\n else:\n\n if moe:\n new_module = transformer_inference.DeepSpeedMoEInference(\n transformer_config,\n mp_group=mp_group,\n ep_group=None if ep_group is None else ep_group[num_experts],\n expert_mp_group=None\n if expert_mp_group is None else expert_mp_group[num_experts],\n )\n\n else:\n new_module = transformer_inference.DeepSpeedTransformerInference(\n transformer_config,\n mp_group=mp_group,\n )\n new_module.config.scale_attention = scale_attention\n\n # we want the weights in [input, output] shape\n # linear layer is created with [input, output] shape\n # transpose it here to reduce inference cost!\n def transpose(data):\n data.reshape(-1).copy_(data.transpose(-1, -2).contiguous().reshape(-1))\n data = data.reshape(data.shape[-1], data.shape[-2])\n return data\n\n if attn_linear_layer:\n qkvw.data = transpose(qkvw.data)\n dense_w.data = transpose(dense_w.data)\n\n if megatron_v2:\n new_module.config.rotate_half = True\n new_module.config.rotate_every_two = False\n\n def _transpose(x):\n num_attention_heads_per_partition = transformer_config.heads // transformer_config.mp_size\n attention_head_size = x.shape[-1] // num_attention_heads_per_partition\n new_x_shape = x.size()[:-1] + (num_attention_heads_per_partition,\n attention_head_size)\n x_1 = x.view(*new_x_shape)\n (q,\n k,\n v) = torch.split(x_1,\n (x_1.shape[-1] // 3),\n dim=(x_1.dim() - 1))\n if len(q.shape) > 2:\n return torch.cat((q.reshape(q.shape[0],\n -1),\n k.reshape(q.shape[0],\n -1),\n v.reshape(q.shape[0],\n -1)),\n dim=-1).reshape(x.shape)\n else:\n return torch.cat((q.reshape(-1),\n k.reshape(-1),\n v.reshape(-1)),\n dim=-1).reshape(x.shape)\n\n qkvw = torch.nn.Parameter(_transpose(qkvw).contiguous())\n qkvb = torch.nn.Parameter(_transpose(qkvb).contiguous())\n\n dense_b = dense_b if dense_b is None else dense_b * (\n transformer_config.training_mp_size / transformer_config.mp_size)\n _4hh_b = _4hh_b * (transformer_config.training_mp_size /\n transformer_config.mp_size)\n\n if mlp_linear_layer:\n _h4h_w = [transpose(moe_w1.data)\n for moe_w1 in _h4h_w] if moe else transpose(_h4h_w.data)\n _4hh_w = [transpose(moe_w1.data)\n for moe_w1 in _4hh_w] if moe else transpose(_4hh_w.data)\n\n if moe and moe_type == 'residual':\n _res_h4h_w.data = transpose(_res_h4h_w.data)\n _res_4hh_w.data = transpose(_res_4hh_w.data)\n _res_coef.data = transpose(_res_coef.data)\n\n 
attn_block = new_module.attention\n attn_block.attn_qkvw = mp_replace.qkv_copy(attn_block.attn_qkvw, qkvw)\n attn_block.attn_qkvb = mp_replace.qkv_copy(attn_block.attn_qkvb, qkvb)\n\n attn_block.attn_ow = mp_replace.copy(attn_block.attn_ow, dense_w)\n attn_block.attn_ob = mp_replace.copy(attn_block.attn_ob, dense_b)\n\n mpl_block = new_module.mlp\n if moe:\n gpu_index = torch.distributed.get_rank()\n gpu_index = 0\n for ep_index in range(local_ep_size):\n mpl_block[ep_index].inter_w.data = _h4h_w[\n gpu_index * local_ep_size + ep_index].to(\n torch.cuda.current_device())\n mpl_block[ep_index].inter_b.data = _h4h_b[\n gpu_index * local_ep_size + ep_index].to(\n torch.cuda.current_device())\n mpl_block[ep_index].output_w.data = _4hh_w[\n gpu_index * local_ep_size + ep_index].to(\n torch.cuda.current_device())\n mpl_block[ep_index].output_b.data = _4hh_b[\n gpu_index * local_ep_size + ep_index].to(\n torch.cuda.current_device())\n new_module.attn_nw.data = attn_nw.to(torch.cuda.current_device())\n new_module.attn_nb.data = attn_nb.to(torch.cuda.current_device())\n if moe_type == 'residual':\n new_module.res_mlp.inter_w.data = _res_h4h_w.to(\n torch.cuda.current_device())\n new_module.res_mlp.inter_b.data = _res_h4h_b.to(\n torch.cuda.current_device())\n new_module.res_mlp.output_w.data = _res_4hh_w.to(\n torch.cuda.current_device())\n new_module.res_mlp.output_b.data = _res_4hh_b.to(\n torch.cuda.current_device())\n new_module.res_coef.data = _res_coef.to(torch.cuda.current_device())\n else:\n mpl_block.inter_w.data = mp_replace.copy(mpl_block.inter_w, _h4h_w)\n mpl_block.inter_b.data = mp_replace.copy(mpl_block.inter_b, _h4h_b)\n mpl_block.output_w.data = mp_replace.copy(mpl_block.output_w, _4hh_w)\n mpl_block.output_b.data = mp_replace.copy(mpl_block.output_b, _4hh_b)\n if attn_nw is None:\n new_module.mlp.attn_nw = attn_nw\n else:\n new_module.mlp.attn_nw.data = attn_nw.to(torch.cuda.current_device())\n if attn_nb is None:\n new_module.mlp.attn_nb = attn_nb\n else:\n new_module.mlp.attn_nb.data = attn_nb.to(torch.cuda.current_device())\n new_module.norm_w.data = input_nw.to(torch.cuda.current_device())\n new_module.norm_b.data = input_nb.to(torch.cuda.current_device())\n else:\n transformer_config = deepspeed.DeepSpeedTransformerConfig(\n batch_size=micro_batch_size,\n hidden_size=config.hidden_size,\n heads=config.num_attention_heads,\n attn_dropout_ratio=config.attention_probs_dropout_prob,\n hidden_dropout_ratio=config.hidden_dropout_prob,\n num_hidden_layers=config.num_hidden_layers,\n initializer_range=config.initializer_range,\n layer_norm_eps=config.layer_norm_eps if hasattr(\n config,\n 'layer_norm_eps') else 1e-12,\n seed=seed,\n fp16=fp16,\n pre_layer_norm=(False if policy_cls is HFBertLayerPolicy else preln),\n return_tuple=return_tuple,\n local_rank=local_rank,\n stochastic_mode=stochastic_mode,\n normalize_invertible=True,\n training=training)\n new_module = deepspeed.DeepSpeedTransformerLayer(transformer_config)\n new_module.attn_qkvw.data = qkvw\n new_module.attn_qkvb.data = qkvb\n new_module.attn_ow.data = dense_w\n new_module.attn_ob.data = dense_b\n\n new_module.attn_nw.data = attn_nw\n new_module.attn_nb.data = attn_nb\n new_module.norm_w.data = input_nw\n new_module.norm_b.data = input_nb\n\n new_module.inter_w.data = _h4h_w\n new_module.inter_b.data = _h4h_b\n new_module.output_w.data = _4hh_w\n new_module.output_b.data = _4hh_b\n return new_module\n\n def replace_wo_policy(module, all_reduce_linears):\n def _replace(child, name, conv_linear_layer):\n mp_replace = 
ReplaceWithTensorSlicing(mp_group=mp_group)\n if name in all_reduce_linears:\n new_weight = torch.empty(\n (child.weight.shape[0]\n if conv_linear_layer else child.weight.shape[1] // mp_size,\n child.weight.shape[1]\n if conv_linear_layer else child.weight.shape[0]),\n device=child.weight.device,\n dtype=torch.half if fp16 else torch.float)\n if not conv_linear_layer:\n child.weight.data.view(-1).copy_(\n child.weight.data.transpose(-1,\n -2).contiguous().view(-1))\n child.weight.data = child.weight.data.reshape(\n child.weight.data.shape[-1],\n child.weight.data.shape[-2])\n data = mp_replace.copy(new_weight,\n child.weight.data).to(torch.cuda.current_device())\n return LinearAllreduce(data, child.bias if child.bias is None else \\\n child.bias.to(torch.cuda.current_device()), mp_group)\n else:\n new_weight = torch.empty(\n (child.weight.shape[0] //\n mp_size if conv_linear_layer else child.weight.shape[1],\n child.weight.shape[1]\n if conv_linear_layer else child.weight.shape[0] // mp_size),\n device=child.weight.device,\n dtype=torch.half if fp16 else torch.float)\n if not conv_linear_layer:\n child.weight.data.view(-1).copy_(\n child.weight.data.transpose(-1,\n -2).contiguous().view(-1))\n child.weight.data = child.weight.data.reshape(\n child.weight.data.shape[-1],\n child.weight.data.shape[-2])\n data = mp_replace.copy(new_weight, child.weight.data)\n new_bias = torch.empty((child.weight.shape[1] // mp_size),\n device=child.weight.device,\n dtype=torch.half if fp16 else torch.float)\n bias_data = None if child.bias is None else mp_replace.copy(\n new_bias,\n child.bias.data).to(torch.cuda.current_device())\n return LinearLayer(data.to(torch.cuda.current_device()), bias_data)\n\n def _slice_embedding(child, name, conv_linear_layer):\n mp_replace = ReplaceWithTensorSlicing(mp_group=mp_group)\n new_weight = torch.empty((child.weight.shape[0],\n child.weight.shape[1] // mp_size),\n device=child.weight.device,\n dtype=child.weight.dtype)\n data = mp_replace.copy(new_weight, child.weight.data)\n new_embedding = nn.Embedding(child.weight.shape[0],\n child.weight.shape[1] // mp_size)\n new_embedding.weight.data.copy_(data)\n return new_embedding\n\n def update_mp_params(child):\n if hasattr(child, 'n_heads'):\n child.n_heads = child.n_heads // mp_size\n if hasattr(child, 'inner_dim'):\n child.inner_dim = child.inner_dim // mp_size\n if hasattr(child, 'num_heads'):\n child.num_heads = child.num_heads // mp_size\n if hasattr(child, 'num_attention_heads'):\n child.num_attention_heads = child.num_attention_heads // mp_size\n if hasattr(child, 'all_head_size'):\n child.all_head_size = child.all_head_size // mp_size\n if hasattr(child, 'embed_dim'):\n child.embed_dim = child.embed_dim // mp_size\n\n conv_linear_layer = False\n if linear_layer_setting is not None:\n linear_policies = {linear_layer_setting[0]: _replace}\n if len(linear_layer_setting) == 2:\n linear_policies.update({linear_layer_setting[1]: _slice_embedding})\n else:\n if orig_layer_impl is HFGPT2LayerPolicy._orig_layer_class:\n try:\n import transformers\n conv_linear_layer = True\n linear_policies = {transformers.model_utils.Conv1D: _replace}\n except ImportError:\n linear_policies = {nn.Linear: _replace}\n else:\n linear_policies = {nn.Linear: _replace, nn.Embedding: _slice_embedding}\n\n def _replace_module(r_module, prev_name=''):\n for name, child in r_module.named_children():\n if child.__class__ in linear_policies:\n setattr(\n r_module,\n name,\n linear_policies[child.__class__](child,\n prev_name + '.' 
+ name,\n conv_linear_layer))\n else:\n update_mp_params(child)\n _replace_module(child, name)\n return r_module\n\n return _replace_module(module)\n\n def replace_fn(child, _policy, layer_id=0):\n if training:\n # copy relevant state from child -> new module\n new_module = replace_with_policy(child,\n _policy,\n triangular_masking,\n preln=preln)\n\n else:\n # copy relevant state from child -> new module\n if replace_with_kernel_inject:\n new_module = replace_with_policy(child,\n _policy,\n triangular_masking,\n inference=True,\n preln=(_policy\n is not HFBertLayerPolicy),\n layer_id=layer_id)\n else:\n new_module = replace_wo_policy(child, _policy)\n\n return new_module\n\n return replace_module(model=model,\n orig_class=orig_layer_impl,\n replace_fn=replace_fn,\n _replace_policy=policy)\n\n\ndef revert_transformer_layer(orig_layer_impl, model, config, preln=False):\n \"\"\" Revert DeepSpeed's transformer layer back to original bert-style transformer layer\n Arguments:\n orig_layer_impl (torch.nn.Module): the original transformer layer implementation that was replaced,\n e.g., transformers.modeling_bert.BertLayer.\n model (torch.nn.Module): user's nn.module representing their model\n config (dict): model config containing hidden size, attention heads, etc.\n Returns:\n Updated nn.module with original bert-style transformer layers\n \"\"\"\n def replace_fn(child, _replace_policy, layer_id):\n #from turing.nvidia_modelingpreln import BertLayer\n orig_module = orig_layer_impl(config)\n\n # copy relevant state from child -> original module\n qkvw = child.attn_qkvw.data\n qkvb = child.attn_qkvb.data\n\n qw, kw, vw = torch.chunk(qkvw, 3, axis=0)\n qb, kb, vb = torch.chunk(qkvb, 3, axis=0)\n\n orig_module.attention.self.query.weight.data = qw\n orig_module.attention.self.query.bias.data = qb\n orig_module.attention.self.key.weight.data = kw\n orig_module.attention.self.key.bias.data = kb\n orig_module.attention.self.value.weight.data = vw\n orig_module.attention.self.value.bias.data = vb\n\n orig_module.attention.output.dense.weight.data = child.attn_ow.data\n orig_module.attention.output.dense.bias.data = child.attn_ob.data\n\n attn_ln_w = child.attn_nw.data\n attn_ln_b = child.attn_nb.data\n if preln:\n orig_module.PostAttentionLayerNorm.weight.data = attn_ln_w\n orig_module.PostAttentionLayerNorm.bias.data = attn_ln_b\n else:\n orig_module.attention.output.LayerNorm.weight.data = attn_ln_w\n orig_module.attention.output.LayerNorm.bias.data = attn_ln_b\n\n inter_ff_w = child.inter_w.data\n inter_ff_b = child.inter_b.data\n if preln:\n orig_module.intermediate.dense_act.weight.data = inter_ff_w\n orig_module.intermediate.dense_act.bias.data = inter_ff_b\n else:\n orig_module.intermediate.dense.weight.data = inter_ff_w\n orig_module.intermediate.dense.bias.data = inter_ff_b\n\n orig_module.output.dense.weight.data = child.output_w.data\n orig_module.output.dense.bias.data = child.output_b.data\n\n transformer_ln_w = child.norm_w.data\n transformer_ln_b = child.norm_b.data\n if preln:\n orig_module.PreAttentionLayerNorm.weight.data = transformer_ln_w\n orig_module.PreAttentionLayerNorm.bias.data = transformer_ln_b\n else:\n orig_module.output.LayerNorm.weight.data = transformer_ln_w\n orig_module.output.LayerNorm.bias.data = transformer_ln_b\n return orig_module\n\n return replace_module(model=model,\n orig_class=deepspeed.DeepSpeedTransformerLayer,\n replace_fn=replace_fn,\n _replace_policy=None)\n\n\ndef replace_module(model, orig_class, replace_fn, _replace_policy):\n \"\"\" Scan the model 
for instances of ``orig_clas:`` to replace using ``replace_fn``.\n Arguments:\n model (torch.nn.Module): the model to augment\n orig_class (torch.nn.Module): the module to search for\n replace_fn (method): a method to convert instances of ``orig_class`` to the\n desired type and return a new instance.\n Returns:\n A modified ``model``.\n \"\"\"\n policy = {}\n if orig_class is not None:\n policy.update({orig_class: (replace_fn, _replace_policy)})\n else:\n for plcy in replace_policies:\n # instantiate a throw-away policy in order to populate the _orig_layer_class\n _ = plcy(None)\n if isinstance(plcy._orig_layer_class, list):\n for orig_layer_class in plcy._orig_layer_class:\n policy.update({orig_layer_class: (replace_fn, plcy)})\n elif plcy._orig_layer_class is not None:\n policy.update({plcy._orig_layer_class: (replace_fn, plcy)})\n assert len(policy.items()) > 0,\\\n \"No default policy found! Please specify your policy injection_policy (like {BertLayer:HFBEertLayerPolicy}).\" +\\\n \"You can find some samples here: https://github.com/microsoft/DeepSpeed/blob/master/deepspeed/module_inject/replace_policy.py\"\n\n replaced_module, _ = _replace_module(model, policy)\n return replaced_module\n\n\nfrom ..pipe import PipelineModule\n\n\ndef _replace_module(model, policies, layer_id=0):\n \"\"\" Traverse model's children recursively and apply any transformations in ``policies``.\n Arguments:\n model (torch.nn.Module): model to augment\n policies (dict): Mapping of source class to replacement function.\n Returns:\n Modified ``model``.\n \"\"\"\n for name, child in model.named_children():\n if child.__class__ in policies:\n replaced_module = policies[child.__class__][0](child,\n policies[child.__class__][-1],\n layer_id)\n setattr(model, name, replaced_module)\n if isinstance(model, PipelineModule):\n assert hasattr(model, 'forward_funcs'),\\\n \"we require pipe-module to have the list of fwd_functions\"\n model.forward_funcs[model.fwd_map[name]] = replaced_module\n layer_id += 1\n else:\n _, layer_id = _replace_module(child, policies, layer_id=layer_id)\n\n return model, layer_id\n",
"'''\nCopyright 2019 The Microsoft DeepSpeed Team\n\nCopyright NVIDIA/Megatron\n\nHelper functions and classes from multiple sources.\n'''\n\nfrom collections.abc import Iterable\nfrom deepspeed.moe.utils import is_moe_param, split_params_into_shared_and_expert_params\nimport os\nimport psutil\nimport gc\nfrom math import ceil, sqrt\nfrom math import floor\nfrom bisect import bisect_left, bisect_right\n\nimport torch\nfrom torch._six import inf\nimport torch.distributed as dist\n\nfrom deepspeed.utils import groups, logger\nfrom deepspeed.runtime.constants import PIPE_REPLICATED\nfrom numpy import prod\n\n# pt-1.9 deprecations\nif hasattr(torch.cuda, \"memory_reserved\"):\n torch_memory_reserved = torch.cuda.memory_reserved\nelse:\n torch_memory_reserved = torch.cuda.memory_allocated\nif hasattr(torch.cuda, \"max_memory_reserved\"):\n torch_max_memory_reserved = torch.cuda.max_memory_reserved\nelse:\n torch_max_memory_reserved = torch.cuda.memory_cached\n\n\nclass DummyOptim():\n \"\"\"\n Dummy optimizer presents model parameters as a param group, this is\n primarily used to allow ZeRO-3 without an optimizer\n \"\"\"\n def __init__(self, params):\n self.param_groups = []\n self.param_groups.append({'params': params})\n\n\ndef noop_decorator(func):\n return func\n\n\ndef ensure_directory_exists(filename):\n \"\"\"Create the directory path to ``filename`` if it does not already exist.\n\n Args:\n filename (str): A file path.\n \"\"\"\n dirname = os.path.dirname(filename)\n os.makedirs(dirname, exist_ok=True)\n\n\ndef set_random_seed(seed):\n \"\"\"Set the random seed for common PRNGs used during training: random, numpy, and torch.\n\n Args:\n seed (int): the seed to use\n \"\"\"\n import numpy\n import random\n random.seed(seed)\n numpy.random.seed(seed)\n torch.manual_seed(seed)\n\n\ndef is_model_parallel_parameter(p) -> bool:\n if hasattr(p, 'model_parallel') and p.model_parallel:\n return True\n\n if hasattr(p, 'tensor_model_parallel') and p.tensor_model_parallel:\n return True\n\n return False\n\n\ndef bwc_tensor_model_parallel_rank(mpu=None):\n \"\"\"Backwards-compatible way of querying the tensor model parallel rank from\n an ``mpu`` object.\n\n *Tensor* model parallelism means that tensors are physically split across\n processes. This contrasts with *pipeline* model parallelism, in which the\n layers are partitioned but tensors left intact.\n\n The API for tensor model parallelism has changed across versions and this\n helper provides a best-effort implementation across versions of ``mpu``\n objects. The preferred mechanism is\n ``mpu.get_tensor_model_parallel_rank()``.\n\n This should \"just work\" with both Megatron-LM and DeepSpeed's pipeline\n parallelism.\n\n Args:\n mpu (model parallel unit, optional): The tensor model parallel rank.\n If ``mpu=None``, returns 0. 
Defaults to ``None``.\n\n Returns:\n int: the rank\n \"\"\"\n if mpu is None:\n # No model parallelism in easy :)\n return 0\n\n if hasattr(mpu, 'get_tensor_model_parallel_rank'):\n # New Megatron and DeepSpeed convention (post pipeline-parallelism release)\n return mpu.get_tensor_model_parallel_rank()\n elif hasattr(mpu, 'get_slice_parallel_rank'):\n # Some DeepSpeed + pipeline parallelism versions\n return mpu.get_slice_parallel_rank()\n else:\n # Deprecated Megatron and DeepSpeed convention\n return mpu.get_model_parallel_rank()\n\n\ndef copy_to_device(item, device, criterion_func):\n \"\"\"\n Return a copy of tensor on specified device.\n Works on individual tensors, and tensors contained/nested in lists, tuples, and dicts.\n Parameters:\n item: tensor to copy or (possibly nested) container of tensors to copy.\n device: target device\n criterion_func: Function to restrict copy operation to items meet criterion\n\n Returns:\n None\n \"\"\"\n if criterion_func(item):\n return item.to(device)\n elif isinstance(item, list):\n return [copy_to_device(v, device, criterion_func) for v in item]\n elif isinstance(item, tuple):\n return tuple([copy_to_device(v, device, criterion_func) for v in item])\n elif isinstance(item, dict):\n return {k: copy_to_device(v, device, criterion_func) for k, v in item.items()}\n else:\n return item\n\n\ndef move_to_device(item, device, criterion_func):\n \"\"\"\n Move tensor on to specified device by changing the storage.\n Works on individual tensors, and tensors contained/nested in lists, tuples, and dicts.\n Parameters:\n item: tensor to move or (possibly nested) container of tensors to move.\n device: target device\n criterion_func: Function to restrict move operation to items meet criterion\n\n Returns:\n None\n \"\"\"\n if criterion_func(item):\n device_copy = item.to(device)\n item.data = device_copy.data\n return item\n elif isinstance(item, list):\n return [move_to_device(v, device, criterion_func) for v in item]\n elif isinstance(item, tuple):\n return tuple([move_to_device(v, device, criterion_func) for v in item])\n elif isinstance(item, dict):\n return {k: move_to_device(v, device, criterion_func) for k, v in item.items()}\n else:\n return item\n\n\nclass CheckOverflow(object):\n '''Checks for overflow in gradient across parallel process'''\n def __init__(self,\n param_groups=None,\n mpu=None,\n zero_reduce_scatter=False,\n deepspeed=None):\n self.mpu = mpu\n self.params = [] if param_groups else None\n self.zero_reduce_scatter = zero_reduce_scatter\n self.deepspeed = deepspeed\n self.has_moe_params = False\n if param_groups:\n for group in param_groups:\n for param in group:\n self.params.append(param)\n if is_moe_param(param):\n self.has_moe_params = True\n\n def check_using_norm(self, norm_group, reduce_overflow=True):\n # TODO: I don't think reduce_overflow is needed if mpu is None\n overflow = -1 in norm_group\n overflow_gpu = torch.cuda.FloatTensor([overflow])\n if self.has_moe_params:\n # In this case, we need to do an all_reduce across\n # the expert_parallel_group, so that if there was\n # an overflow due to expert weights, we detect it\n\n # Only need to check groups.get_largest_expert_parallel_group()\n dist.all_reduce(overflow_gpu,\n op=dist.ReduceOp.MAX,\n group=groups._get_max_expert_parallel_group())\n if self.mpu is not None:\n torch.distributed.all_reduce(overflow_gpu,\n op=torch.distributed.ReduceOp.MAX,\n group=self.mpu.get_model_parallel_group())\n elif reduce_overflow:\n dist.all_reduce(overflow_gpu, 
op=torch.distributed.ReduceOp.MAX)\n dist.barrier()\n overflow = overflow_gpu[0].item()\n return bool(overflow)\n\n def check(self, param_groups=None):\n params = []\n has_moe_params = False\n if param_groups is None:\n params = self.params\n has_moe_params = self.has_moe_params\n else:\n assert param_groups is not None, \\\n \"self.params and param_groups both cannot be none\"\n\n for group in param_groups:\n for param in group:\n params.append(param)\n if is_moe_param(param):\n has_moe_params = True\n\n return self.has_overflow(params, has_moe_params=has_moe_params)\n\n # `params` is a list / generator of torch.Variable\n def has_overflow_serial(self, params):\n for i, p in enumerate(params):\n if p.grad is not None and self._has_inf_or_nan(p.grad.data, i):\n return True\n return False\n\n def has_overflow(self, params, has_moe_params=None):\n if has_moe_params is None:\n has_moe_params = self.has_moe_params\n overflow = self.has_overflow_serial(params)\n # Since each model parallel GPU carries only part of the model,\n # make sure overflow flag is synced across all the model parallel GPUs\n overflow_gpu = torch.cuda.ByteTensor([overflow])\n # torch.distributed.all_reduce(overflow_gpu,\n # op=torch.distributed.ReduceOp.MAX,\n # group=mpu.get_model_parallel_group())\n if has_moe_params:\n # All reduce this across expert_parallel_group, so that if an expert\n # overflows, we detect it here\n dist.all_reduce(overflow_gpu,\n op=dist.ReduceOp.MAX,\n group=groups._get_max_expert_parallel_group())\n if self.zero_reduce_scatter:\n torch.distributed.all_reduce(overflow_gpu,\n op=torch.distributed.ReduceOp.MAX,\n group=torch.distributed.group.WORLD)\n elif self.mpu is not None:\n if self.deepspeed is not None:\n using_pipeline = hasattr(self.deepspeed,\n 'pipeline_enable_backward_allreduce')\n if (using_pipeline\n and self.deepspeed.pipeline_enable_backward_allreduce is False\n ) or (not using_pipeline\n and self.deepspeed.enable_backward_allreduce is False):\n torch.distributed.all_reduce(\n overflow_gpu,\n op=torch.distributed.ReduceOp.MAX,\n group=self.mpu.get_data_parallel_group())\n torch.distributed.all_reduce(overflow_gpu,\n op=torch.distributed.ReduceOp.MAX,\n group=self.mpu.get_model_parallel_group())\n elif self.deepspeed is not None and self.deepspeed.enable_backward_allreduce is False:\n torch.distributed.all_reduce(overflow_gpu,\n op=torch.distributed.ReduceOp.MAX,\n group=torch.distributed.group.WORLD)\n\n overflow = overflow_gpu[0].item()\n return bool(overflow)\n\n # `x` is a torch.Tensor\n @staticmethod\n def _has_inf_or_nan(x, i):\n try:\n # if x is half, the .float() incurs an additional deep copy, but it's necessary if\n # Pytorch's .sum() creates a one-element tensor of the same type as x\n # (which is true for some recent version of pytorch).\n cpu_sum = float(x.float().sum())\n # More efficient version that can be used if .sum() returns a Python scalar\n # cpu_sum = float(x.sum())\n except RuntimeError as instance:\n # We want to check if inst is actually an overflow exception.\n # RuntimeError could come from a different error.\n # If so, we still want the exception to propagate.\n if \"value cannot be converted\" not in instance.args[0]:\n raise\n return True\n else:\n if cpu_sum == float('inf') or cpu_sum == -float('inf') or cpu_sum != cpu_sum:\n return True\n return False\n\n\ndef _handle_overflow(cpu_sum, x, i):\n import math\n rank = torch.distributed.get_rank()\n if rank == 0:\n t_i = -1\n for v_i, v in enumerate(x.data.contiguous().view(-1)):\n if not 
math.isfinite(float(v)):\n t_i = v_i\n break\n logger.info(\n f\"rank {rank} detected overflow {cpu_sum} in tensor {i}:{t_i} shape {x.shape}\"\n )\n\n\ndef get_global_norm(norm_list):\n \"\"\" Compute total from a list of norms\n \"\"\"\n total_norm = 0.0\n for norm in norm_list:\n total_norm += norm**2.0\n return sqrt(total_norm)\n\n\ndef clip_grad_norm_(parameters, max_norm, norm_type=2, mpu=None):\n \"\"\"Clips gradient norm of an iterable of parameters.\n\n This has been adapted from Nvidia megatron. We add norm averaging\n to consider MoE params when calculating norm as they will result\n in different norms across different ranks.\n\n This is adapted from torch.nn.utils.clip_grad.clip_grad_norm_ and\n added functionality to handle model parallel parameters. Note that\n the gradients are modified in place.\n\n Arguments:\n parameters (Iterable[Tensor] or Tensor): an iterable of Tensors or a\n single Tensor that will have gradients normalized\n max_norm (float or int): max norm of the gradients\n norm_type (float or int): type of the used p-norm. Can be ``'inf'`` for\n infinity norm.\n\n Returns:\n Total norm of the parameters (viewed as a single vector).\n \"\"\"\n if isinstance(parameters, torch.Tensor):\n parameters = [parameters]\n parameters = list(filter(lambda p: p.grad is not None, parameters))\n max_norm = float(max_norm)\n norm_type = float(norm_type)\n if norm_type == inf:\n total_norm = max(p.grad.data.abs().max() for p in parameters)\n total_norm_cuda = torch.cuda.FloatTensor([float(total_norm)])\n # Take max across all GPUs.\n if mpu is not None:\n torch.distributed.all_reduce(total_norm_cuda,\n op=torch.distributed.ReduceOp.MAX,\n group=mpu.get_model_parallel_group())\n total_norm = total_norm_cuda[0].item()\n else:\n total_norm = 0\n for p in parameters:\n if mpu is not None:\n if (mpu.get_model_parallel_rank()\n == 0) or is_model_parallel_parameter(p):\n param_norm = p.grad.data.norm(norm_type)\n total_norm += param_norm.item()**norm_type\n else:\n param_norm = p.grad.data.float().norm(norm_type)\n total_norm += param_norm.item()**norm_type\n\n # Sum across all model parallel GPUs.\n total_norm_cuda = torch.cuda.FloatTensor([float(total_norm)])\n if mpu is not None:\n torch.distributed.all_reduce(total_norm_cuda,\n op=torch.distributed.ReduceOp.SUM,\n group=mpu.get_model_parallel_group())\n total_norm = total_norm_cuda[0].item()**(1. / norm_type)\n\n # Need to average total_norm across different GPUs due to the presence of moe params\n pg = groups._get_data_parallel_group()\n scaled_norm = total_norm * 1.0 / float(dist.get_world_size(group=pg))\n\n scaled_norm_tensor = torch.cuda.FloatTensor([float(scaled_norm)])\n dist.all_reduce(scaled_norm_tensor, group=pg)\n total_norm = scaled_norm_tensor.item()\n\n clip_coef = max_norm / (total_norm + 1e-6)\n if clip_coef < 1:\n for p in parameters:\n p.grad.data.mul_(clip_coef)\n return total_norm\n\n\ndef get_grad_norm(parameters, norm_type=2, mpu=None):\n \"\"\"Get grad norm of an iterable of parameters.\n\n This is adapted from torch.nn.utils.clip_grad.clip_grad_norm_ and\n added functionality to handle model parallel parameters. Note that\n the gradients are modified in place. Taken from Nvidia Megatron.\n\n Arguments:\n parameters (Iterable[Tensor] or Tensor): an iterable of Tensors or a\n single Tensor that will have gradients normalized\n max_norm (float or int): max norm of the gradients\n norm_type (float or int): type of the used p-norm. 
Can be ``'inf'`` for\n infinity norm.\n\n Returns:\n Total norm of the parameters (viewed as a single vector).\n \"\"\"\n if isinstance(parameters, torch.Tensor):\n parameters = [parameters]\n parameters = list(filter(lambda p: p.grad is not None, parameters))\n\n norm_type = float(norm_type)\n if norm_type == inf:\n total_norm = max(p.grad.data.abs().max() for p in parameters)\n total_norm_cuda = torch.cuda.FloatTensor([float(total_norm)])\n # Take max across all GPUs.\n if mpu is not None:\n torch.distributed.all_reduce(total_norm_cuda,\n op=torch.distributed.ReduceOp.MAX,\n group=mpu.get_model_parallel_group())\n total_norm = total_norm_cuda[0].item()\n else:\n total_norm = 0.\n tensor_mp_rank = bwc_tensor_model_parallel_rank(mpu=mpu)\n for p in parameters:\n # Pipeline parallelism may replicate parameters. Avoid multi-counting.\n if hasattr(p, PIPE_REPLICATED) and p.ds_pipe_replicated:\n continue\n\n # Filter to avoid over-counting replicated tensors from tensor\n # model parallelism\n if (tensor_mp_rank > 0) and not is_model_parallel_parameter(p):\n continue\n\n param_norm = p.grad.data.float().norm(norm_type)\n total_norm += param_norm.item()**norm_type\n\n # Sum across all model parallel GPUs.\n total_norm_cuda = torch.cuda.FloatTensor([float(total_norm)])\n if mpu is not None:\n torch.distributed.all_reduce(total_norm_cuda,\n op=torch.distributed.ReduceOp.SUM,\n group=mpu.get_model_parallel_group())\n total_norm = total_norm_cuda[0].item()**(1. / norm_type)\n\n if total_norm == float(\n 'inf') or total_norm == -float('inf') or total_norm != total_norm:\n total_norm = -1\n\n return total_norm\n\n\ndef get_grad_zeros(parameters, mpu=None):\n \"\"\"Compute the number of grads with zero values.\n\n This is adapted from get_grad_norm\n\n Arguments:\n parameters (Iterable[Tensor] or Tensor): an iterable of Tensors or a\n single Tensor that will have gradients normalized\n\n Returns:\n Total number of params with zero values (viewed as a single vector).\n \"\"\"\n if isinstance(parameters, torch.Tensor):\n parameters = [parameters]\n parameters = list(filter(lambda p: p.grad is not None, parameters))\n\n total_zeros = 0.\n tensor_mp_rank = bwc_tensor_model_parallel_rank(mpu=mpu)\n for p in parameters:\n # Pipeline parallelism may replicate parameters. Avoid multi-counting.\n if hasattr(p, PIPE_REPLICATED) and p.ds_pipe_replicated:\n continue\n\n # Filter to avoid over-counting replicated tensors from tensor\n # model parallelism\n if (tensor_mp_rank > 0) and not is_model_parallel_parameter(p):\n continue\n\n count_zeros = p.grad.numel() - torch.count_nonzero(p.grad)\n total_zeros += count_zeros.item()\n\n # Sum across all model parallel GPUs.\n total_zeros_cuda = torch.cuda.FloatTensor([float(total_zeros)])\n if mpu is not None:\n torch.distributed.all_reduce(total_zeros_cuda,\n op=torch.distributed.ReduceOp.SUM,\n group=mpu.get_model_parallel_group())\n total_zeros = total_zeros_cuda[0].item()\n\n return total_zeros\n\n\ndef get_weight_norm(parameters, norm_type=2, mpu=None):\n \"\"\"Get norm of an iterable of parameters.\n\n This is adapted from torch.nn.utils.clip_grad.clip_grad_norm_ and\n added functionality to handle model parallel parameters. Note that\n the gradients are modified in place. Taken from Nvidia Megatron.\n\n Arguments:\n parameters (Iterable[Tensor] or Tensor): an iterable of Tensors or a\n single Tensor that will have gradients normalized\n max_norm (float or int): max norm of the gradients\n norm_type (float or int): type of the used p-norm. 
Can be ``'inf'`` for\n infinity norm.\n\n Returns:\n Total norm of the parameters (viewed as a single vector).\n \"\"\"\n if isinstance(parameters, torch.Tensor):\n parameters = [parameters]\n\n norm_type = float(norm_type)\n if norm_type == inf:\n total_norm = max(p.data.abs().max() for p in parameters)\n total_norm_cuda = torch.cuda.FloatTensor([float(total_norm)])\n # Take max across all GPUs.\n if mpu is not None:\n torch.distributed.all_reduce(total_norm_cuda,\n op=torch.distributed.ReduceOp.MAX,\n group=mpu.get_model_parallel_group())\n total_norm = total_norm_cuda[0].item()\n else:\n total_norm = 0.\n tensor_mp_rank = bwc_tensor_model_parallel_rank(mpu=mpu)\n for p in parameters:\n # Pipeline parallelism may replicate parameters. Avoid multi-counting.\n if hasattr(p, PIPE_REPLICATED) and p.ds_pipe_replicated:\n continue\n\n # Filter to avoid over-counting replicated tensors from tensor\n # model parallelism\n if (tensor_mp_rank > 0) and not is_model_parallel_parameter(p):\n continue\n\n param_norm = p.data.float().norm(norm_type)\n total_norm += param_norm**norm_type\n\n # Sum across all model parallel GPUs.\n total_norm_cuda = torch.cuda.FloatTensor([float(total_norm)])\n if mpu is not None:\n torch.distributed.all_reduce(total_norm_cuda,\n op=torch.distributed.ReduceOp.SUM,\n group=mpu.get_model_parallel_group())\n total_norm = total_norm_cuda[0].item()**(1. / norm_type)\n\n if total_norm == float(\n 'inf') or total_norm == -float('inf') or total_norm != total_norm:\n total_norm = -1\n\n return total_norm\n\n\ndef prefix_sum_inc(weights):\n \"\"\" Compute an inclusive prefix sum.\n\n Example:\n >>> prefix_sum_inc([3,4,5])\n [3, 7, 12]\n \"\"\"\n weights_ = [w for w in weights]\n for x in range(1, len(weights_)):\n weights_[x] += weights_[x - 1]\n return weights_\n\n\ndef partition_uniform(num_items, num_parts):\n parts = [0] * (num_parts + 1)\n # First check for the trivial edge case\n if num_items <= num_parts:\n for p in range(num_parts + 1):\n parts[p] = min(p, num_items)\n return parts\n\n chunksize = floor(num_items / num_parts)\n for p in range(num_parts):\n parts[p] = min(chunksize * p, num_items)\n parts[num_parts] = num_items\n return parts\n\n\ndef _lprobe(weights, num_parts, bottleneck):\n num_items = len(weights)\n total_weight = weights[-1]\n\n # initialize partitioning\n parts = [0] * (num_parts + 1)\n for p in range(1, num_parts + 1):\n parts[p] = num_items\n\n bsum = bottleneck # running sum of target weight for pth partition\n chunksize = num_items // num_parts\n step = chunksize\n for p in range(1, num_parts):\n # Jump to the next bucket\n while (step < num_items) and (weights[step] < bsum):\n step += chunksize\n\n # Find the end index of partition p\n parts[p] = bisect_left(weights,\n bsum,\n lo=step - chunksize,\n hi=min(step,\n num_items))\n # Nothing more to partition, return early\n if parts[p] == num_items:\n # See if the current partition is overweight.\n part_size = weights[-1] - weights[parts[p - 1]]\n return parts, part_size < bottleneck\n\n # Next partition target\n bsum = weights[parts[p] - 1] + bottleneck\n\n return parts, bsum >= total_weight\n\n\ndef _rb_partition_balanced(weights, num_parts, eps):\n total_weight = weights[-1]\n lower = total_weight / num_parts # best case heaviest partition\n upper = total_weight # worst case heaviest partition\n\n # Do a binary search for the best partitioning\n while upper > lower + eps:\n mid = lower + ((upper - lower) / 2)\n parts, success = _lprobe(weights, num_parts, mid)\n if success:\n upper = mid\n 
else:\n lower = mid + eps\n return upper\n\n\ndef partition_balanced(weights, num_parts, eps=1e-3):\n num_items = len(weights)\n # First check for the trivial edge case\n if num_items <= num_parts:\n return partition_uniform(num_items, num_parts)\n\n weights_ = prefix_sum_inc(weights)\n\n # Find the smallest bottleneck (weight of heaviest partition)\n bottleneck = _rb_partition_balanced(weights_, num_parts, eps=eps)\n\n # Now compute that partitioning\n parts, success = _lprobe(weights_, num_parts, bottleneck)\n assert success\n\n return parts\n\n\nclass PartitionedTensor:\n def __init__(self, tensor, group, partition_meta=None):\n super().__init__()\n\n self.group = group\n self.num_parts = dist.get_world_size(group=self.group)\n self.rank = dist.get_rank(group=self.group)\n\n self.orig_size = list(tensor.size())\n self.orig_device = tensor.device\n self.local_data, self.partition = self._partition_tensor(tensor)\n\n @classmethod\n def from_meta(cls, meta, local_part, group, device='cuda'):\n assert meta.dtype == torch.long\n dummy = torch.ones(dist.get_world_size(group=group))\n part_obj = cls(tensor=dummy, group=group)\n\n meta = meta.tolist()\n\n # [N, list0, ..., listN-1]\n part_obj.orig_size = meta[1:(1 + meta[0])]\n meta = meta[1 + meta[0]:]\n\n part_obj.orig_device = device\n part_obj.local_data = local_part.detach()\n\n part_obj.group = group\n\n # Partition is encoded like the rowptr of a CSR matrix:\n # [num_parts, rank, 0, part_1, ..., part_num_parts]\n # TODO: support shuffle between different partition granularities\n assert part_obj.num_parts == meta[0]\n assert part_obj.rank == meta[1]\n part_obj.partition = meta[2:] # length num_parts+1\n\n return part_obj\n\n def _partition_tensor(self, tensor):\n partition = partition_uniform(num_items=tensor.numel(), num_parts=self.num_parts)\n start = partition[self.rank]\n length = partition[self.rank + 1] - start\n tensor_part = tensor.detach().contiguous().view(-1).narrow(\n 0,\n start=start,\n length=length).clone()\n\n return tensor_part, partition\n\n def full(self, device=None):\n if device is None:\n device = self.orig_device\n\n # Allocate the full tensor as a flat buffer.\n full_numel = prod(self.full_size())\n flat_tensor = torch.zeros([full_numel],\n dtype=self.local_data.dtype,\n device=device)\n\n # Prepare all-gather buffer\n partition_tensors = []\n for part_id in range(self.num_parts):\n part_size = self.partition[part_id + 1] - self.partition[part_id]\n buf = flat_tensor.narrow(0, start=self.partition[part_id], length=part_size)\n if part_id == self.rank:\n buf.copy_(self.local_data)\n partition_tensors.append(buf)\n\n # Collect the full tensor\n dist.all_gather(partition_tensors,\n partition_tensors[self.rank],\n group=self.group)\n\n for i in range(len(partition_tensors)):\n partition_tensors[i].data = torch.zeros(1)\n partition_tensors[i] = None\n\n return flat_tensor.view(self.full_size()).clone().detach()\n\n def to_meta(self):\n \"\"\"Returns a torch.LongTensor that encodes partitioning information.\n\n Can be used along with ``data()`` to serialize a ``PartitionedTensor`` for\n communication.\n\n Returns:\n torch.LongTensor: a tensor encoding the meta-information for the partitioning\n \"\"\"\n meta = []\n meta.append(len(self.orig_size))\n meta += list(self.orig_size)\n meta.append(self.num_parts)\n meta.append(self.rank)\n meta += self.partition\n return torch.LongTensor(data=meta).to(self.orig_device)\n\n def data(self):\n return self.local_data\n\n def local_size(self):\n return self.local_data.size()\n\n 
def full_size(self):\n return self.orig_size\n\n\nmem_alloced = 0\nmem_cached = 0\n\n\ndef memory_status(msg, print_rank=-1, reset_max=False):\n global mem_alloced, mem_cached\n\n rank = dist.get_rank()\n if print_rank != -1 and rank != print_rank:\n return\n\n torch.cuda.synchronize()\n\n if reset_max:\n torch.cuda.reset_max_memory_cached()\n torch.cuda.reset_max_memory_allocated()\n\n new_alloced = torch.cuda.memory_allocated()\n new_cached = torch.cuda.memory_cached()\n\n delta_alloced = new_alloced - mem_alloced\n delta_cached = new_cached - mem_cached\n\n mem_cached = new_cached\n mem_alloced = new_alloced\n\n max_alloced = torch.cuda.max_memory_allocated()\n max_cached = torch.cuda.max_memory_cached()\n\n # convert to GB for printing\n new_alloced /= 1024**3\n new_cached /= 1024**3\n delta_alloced /= 1024**3\n delta_cached /= 1024**3\n max_alloced /= 1024**3\n max_cached /= 1024**3\n\n print(\n f'RANK={rank} MEMSTATS',\n msg,\n f'device={torch.cuda.current_device()} '\n f'current alloc={new_alloced:0.4f}GB (delta={delta_alloced:0.4f}GB max={max_alloced:0.4f}GB) '\n f'current cache={new_cached:0.4f}GB (delta={delta_cached:0.4f}GB max={max_cached:0.4f}GB)'\n )\n\n\ndef get_ma_status():\n if torch.distributed.is_initialized() and not torch.distributed.get_rank() == 0:\n return 0\n return torch.cuda.memory_allocated()\n\n\ndef see_memory_usage(message, force=False):\n if not force:\n return\n if torch.distributed.is_initialized() and not torch.distributed.get_rank() == 0:\n return\n\n # python doesn't do real-time garbage collection so do it explicitly to get the correct RAM reports\n gc.collect()\n\n # Print message except when distributed but not rank 0\n logger.info(message)\n logger.info(\n f\"MA {round(torch.cuda.memory_allocated() / (1024 * 1024 * 1024),2 )} GB \\\n Max_MA {round(torch.cuda.max_memory_allocated() / (1024 * 1024 * 1024),2)} GB \\\n CA {round(torch_memory_reserved() / (1024 * 1024 * 1024),2)} GB \\\n Max_CA {round(torch_max_memory_reserved() / (1024 * 1024 * 1024))} GB \")\n\n vm_stats = psutil.virtual_memory()\n used_GB = round(((vm_stats.total - vm_stats.available) / (1024**3)), 2)\n logger.info(\n f'CPU Virtual Memory: used = {used_GB} GB, percent = {vm_stats.percent}%')\n\n # get the peak memory to report correct data, so reset the counter for the next call\n if hasattr(torch.cuda, \"reset_peak_memory_stats\"): # pytorch 1.4+\n torch.cuda.reset_peak_memory_stats()\n\n\ndef call_to_str(base, *args, **kwargs):\n \"\"\"Construct a string representation of a call.\n\n Args:\n base (str): name of the call\n args (tuple, optional): args to ``base``\n kwargs (dict, optional): kwargs supplied to ``base``\n\n Returns:\n str: A string representation of base(*args, **kwargs)\n \"\"\"\n name = f'{base}('\n if args:\n name += ', '.join(repr(arg) for arg in args)\n if kwargs:\n name += ', '\n if kwargs:\n name += ', '.join(f'{key}={repr(arg)}' for key, arg in kwargs.items())\n name += ')'\n return name\n\n\ndef get_only_unique_item(items):\n item_set = set(items)\n if len(item_set) != 1:\n raise RuntimeError(f\"expected there to be only one unique element in {items}\")\n unique_item, = item_set\n\n return unique_item\n\n\ndef clip_gradients(parameters, max_norm=1.0, global_grad_norm=None, mpu=None, eps=1e-6):\n \"\"\"Clip the gradient of a list of parameters.\n Args:\n parameters: List of parameters whose .grad will be clipped.\n global_grad_norm (float, optional): Precomputed gradient norm. Defaults to None.\n mpu (optional): model parallelism unit. 
Defaults to None.\n eps (float, optional): epsilon value added to grad norm. Defaults to 1e-6\n Returns:\n float: the global gradient norm\n \"\"\"\n if global_grad_norm is None:\n global_grad_norm = get_grad_norm(parameters, mpu=mpu)\n clip_coef = max_norm / (global_grad_norm + eps)\n if clip_coef < 1:\n for p in parameters:\n p.grad.detach().mul_(clip_coef)\n return global_grad_norm\n\n\ndef get_global_norm_of_tensors(input_tensors, norm_type=2, mpu=None):\n \"\"\"Get norm of an iterable of tensors.\n\n This is adapted from torch.nn.utils.clip_grad.clip_grad_norm_ and\n added functionality to handle model parallel parameters. Taken from Nvidia Megatron.\n\n Arguments:\n input_tensors (Iterable[Tensor]): an iterable of Tensors will have norm computed\n norm_type (float or int): type of the used p-norm. Can be ``'inf'`` for\n infinity norm.\n\n Returns:\n Total norm of the tensors (viewed as a single vector).\n \"\"\"\n\n assert isinstance(input_tensors, Iterable), f'expected Iterable type not {type(input_tensors)}'\n assert all([torch.is_tensor(t) for t in input_tensors]), f'expected list of only tensors'\n\n norm_type = float(norm_type)\n if norm_type == inf:\n total_norm = max(t.data.abs().max() for t in input_tensors)\n total_norm_cuda = torch.cuda.FloatTensor([float(total_norm)])\n if mpu is not None:\n torch.distributed.all_reduce(total_norm_cuda,\n op=torch.distributed.ReduceOp.MAX,\n group=mpu.get_model_parallel_group())\n total_norm = total_norm_cuda[0].item()\n else:\n total_norm = sum(\n [t.data.float().norm(norm_type).item()**norm_type for t in input_tensors])\n total_norm_cuda = torch.cuda.FloatTensor([float(total_norm)])\n if mpu is not None:\n torch.distributed.all_reduce(total_norm_cuda,\n op=torch.distributed.ReduceOp.SUM,\n group=mpu.get_model_parallel_group())\n total_norm = total_norm_cuda[0].item()**(1. / norm_type)\n\n if total_norm == float(\n 'inf') or total_norm == -float('inf') or total_norm != total_norm:\n total_norm = -1\n\n return total_norm\n\n\ndef clip_tensors_by_global_norm(input_tensors,\n max_norm=1.0,\n global_norm=None,\n mpu=None,\n eps=1e-6):\n \"\"\"Clip list of tensors by global norm.\n Args:\n input_tensors: List of tensors to be clipped\n global_norm (float, optional): Precomputed norm. Defaults to None.\n mpu (optional): model parallelism unit. Defaults to None.\n eps (float, optional): epsilon value added to grad norm. 
Defaults to 1e-6\n Returns:\n float: the global norm\n \"\"\"\n if global_norm is None:\n global_norm = get_global_norm_of_tensors(input_tensors, mpu=mpu)\n\n clip_coef = max_norm / (global_norm + eps)\n\n if clip_coef < 1:\n for t in input_tensors:\n t.detach().mul_(clip_coef)\n\n return global_norm\n\n\ndef align_dense_tensors(tensor_list, alignment):\n num_elements = sum(t.numel() for t in tensor_list)\n remaining = num_elements % alignment\n\n if remaining:\n elements_to_add = alignment - remaining\n pad_tensor = torch.zeros(elements_to_add,\n device=tensor_list[0].device,\n dtype=tensor_list[0].dtype)\n padded_tensor_list = tensor_list + [pad_tensor]\n else:\n padded_tensor_list = tensor_list\n\n return padded_tensor_list\n\n\ndef all_gather_dp_groups(partitioned_param_groups,\n dp_process_group,\n start_alignment_factor,\n allgather_bucket_size):\n for group_id, partitioned_params in enumerate(partitioned_param_groups):\n # Sequential AllGather Best of both worlds\n partition_id = dist.get_rank(group=dp_process_group[group_id])\n dp_world_size = dist.get_world_size(group=dp_process_group[group_id])\n\n num_shards = max(\n 1,\n partitioned_params[partition_id].numel() * dp_world_size //\n allgather_bucket_size)\n\n shard_size = partitioned_params[partition_id].numel() // num_shards\n\n # Enforce nccl/rccl alignment of start location of each shard\n shard_size = shard_size - (shard_size % start_alignment_factor)\n\n num_elements = shard_size\n\n assert shard_size * num_shards <= partitioned_params[partition_id].numel()\n\n for shard_id in range(num_shards):\n\n if shard_id == (num_shards - 1):\n num_elements = partitioned_params[partition_id].numel(\n ) - shard_id * shard_size\n\n shard_list = []\n for dp_id in range(dp_world_size):\n curr_shard = partitioned_params[dp_id].narrow(0,\n shard_id * shard_size,\n num_elements).detach()\n shard_list.append(curr_shard)\n\n dist.all_gather(shard_list,\n shard_list[partition_id],\n dp_process_group[group_id])\n",
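Aside on the code sample above: the load-balanced pipeline partitioning it contains (prefix_sum_inc, _lprobe, _rb_partition_balanced, partition_balanced) can be exercised outside the library. The sketch below is a simplified, framework-free re-implementation of the same idea for illustration only — inclusive prefix sums plus a binary search on the heaviest-partition weight — and is not the library's code; it drops the chunk-stepping optimization and uses hypothetical helper names.

from bisect import bisect_left

def prefix_sum_inc(weights):
    # Inclusive prefix sum, e.g. [3, 4, 5] -> [3, 7, 12].
    out = list(weights)
    for i in range(1, len(out)):
        out[i] += out[i - 1]
    return out

def probe(csum, num_parts, bottleneck):
    # Greedily cut the prefix-sum array so no partition exceeds `bottleneck`.
    parts = [0]
    target = bottleneck
    for _ in range(num_parts - 1):
        cut = bisect_left(csum, target, lo=parts[-1])
        parts.append(cut)
        target = (csum[cut - 1] if cut > 0 else 0) + bottleneck
    parts.append(len(csum))
    # Feasible iff the weight left over for the last partition also fits.
    return parts, csum[-1] <= target

def partition_balanced(weights, num_parts, eps=1e-3):
    csum = prefix_sum_inc(weights)
    lower, upper = csum[-1] / num_parts, float(csum[-1])  # bounds on heaviest partition
    while upper > lower + eps:
        mid = lower + (upper - lower) / 2
        _, ok = probe(csum, num_parts, mid)
        if ok:
            upper = mid
        else:
            lower = mid + eps
    parts, _ = probe(csum, num_parts, upper)
    return parts

print(partition_balanced([1, 2, 3, 4, 5, 6], 3))  # [0, 3, 5, 6] -> per-part weights 6, 9, 6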
"import os\nfrom typing import List\n\nimport torch\nimport torch.distributed as dist\nfrom deepspeed.utils import logger\nfrom deepspeed.ops.adam import DeepSpeedCPUAdam\nfrom deepspeed.ops.adam import FusedAdam\nfrom deepspeed.utils.nvtx import instrument_w_nvtx\n\n\ndef _initialize_parameter_parallel_groups(parameter_parallel_size=None):\n data_parallel_size = int(dist.get_world_size())\n parameter_parallel_size = parameter_parallel_size or data_parallel_size\n logger.info(\"data_parallel_size: %s, parameter_parallel_size: %s\",\n data_parallel_size,\n parameter_parallel_size)\n assert data_parallel_size % parameter_parallel_size == 0, \\\n 'world size should be divisible by parameter parallel size'\n rank = dist.get_rank()\n my_group = None\n for i in range(data_parallel_size // parameter_parallel_size):\n ranks = range(i * parameter_parallel_size, (i + 1) * parameter_parallel_size)\n group = torch.distributed.new_group(ranks)\n if rank in ranks:\n my_group = group\n return my_group\n\n\nclass ZeRORuntimeException(Exception):\n pass\n\n\nZERO_SUPPORTED_OPTIMIZERS = [\n torch.optim.Adam,\n torch.optim.AdamW,\n FusedAdam,\n DeepSpeedCPUAdam\n]\n\n# Add apex FusedAdam to supported list if apex is installed\ntry:\n import apex\n if hasattr(apex, 'optimizers') and hasattr(apex.optimizers, 'FusedAdam'):\n ZERO_SUPPORTED_OPTIMIZERS.append(apex.optimizers.FusedAdam)\nexcept ImportError:\n pass\n\n\ndef is_zero_supported_optimizer(optimizer):\n if dist.get_rank() == 0:\n logger.info(\n f'Checking ZeRO support for optimizer={optimizer.__class__.__name__} type={type(optimizer)}'\n )\n return type(optimizer) in ZERO_SUPPORTED_OPTIMIZERS\n\n\ndef get_lst_from_rank0(lst: List[int]) -> None:\n \"\"\"\n NOTE: creates both communication and synchronization overhead so should be used\n sparingly\n \"\"\"\n lst_tensor = torch.tensor(\n lst if dist.get_rank() == 0 else [-1] * len(lst),\n dtype=int,\n # device=torch.cuda.current_device(),\n device=torch.device('cuda:{}'.format(os.environ[\"LOCAL_RANK\"])),\n requires_grad=False,\n )\n dist.broadcast(lst_tensor, src=0, async_op=False)\n\n return list(lst_tensor.cpu().numpy())\n\n\n@instrument_w_nvtx\ndef assert_ints_same_as_other_ranks(ints: List[int]) -> None:\n \"\"\"\n NOTE: creates both communication and synchronization overhead so should be\n used sparingly\n\n takes a list of ints from each rank and ensures that they are the same\n across ranks, throwing an exception if they are not.\n \"\"\"\n rank0_ints = get_lst_from_rank0(ints)\n if ints != rank0_ints:\n raise RuntimeError(f\"disagreement between rank0 and rank{dist.get_rank()}: \"\n f\"rank0: {rank0_ints}, rank{dist.get_rank()}: {ints}\")\n"
] | [
[
"torch.nn.init.uniform_",
"torch.Tensor",
"torch.distributed.get_rank",
"torch.tensor",
"torch.nn.init._calculate_fan_in_and_fan_out"
],
[
"torch.nn.Parameter",
"torch.empty",
"torch.cat",
"torch.cuda.current_device",
"torch.nn.Embedding",
"torch.matmul",
"torch.split",
"torch.chunk",
"torch.distributed.get_rank",
"torch.distributed.get_world_size",
"torch.distributed.all_reduce"
],
[
"torch.zeros",
"torch.distributed.get_rank",
"torch.cuda.synchronize",
"torch.distributed.barrier",
"torch.cuda.memory_cached",
"torch.LongTensor",
"torch.cuda.current_device",
"torch.cuda.ByteTensor",
"torch.cuda.reset_max_memory_cached",
"torch.distributed.is_initialized",
"torch.is_tensor",
"torch.cuda.FloatTensor",
"torch.cuda.max_memory_cached",
"torch.distributed.get_world_size",
"torch.count_nonzero",
"numpy.random.seed",
"torch.manual_seed",
"torch.distributed.all_gather",
"torch.cuda.max_memory_allocated",
"torch.cuda.reset_peak_memory_stats",
"torch.cuda.reset_max_memory_allocated",
"torch.distributed.all_reduce",
"torch.cuda.memory_allocated"
],
[
"torch.distributed.get_rank",
"torch.distributed.broadcast",
"torch.distributed.get_world_size",
"torch.distributed.new_group"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
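Before the next record, a note on the clipping arithmetic used by clip_gradients and clip_tensors_by_global_norm in the utility code of the record above: the global norm is the p-norm of the concatenated tensors, and everything is scaled by max_norm / (norm + eps) whenever that coefficient falls below 1. The sketch below reproduces only that arithmetic on plain tensors, with no model-parallel reduction; the function names here are illustrative, not the library's API.

import torch

def global_norm(tensors, norm_type=2.0):
    # (sum_i ||t_i||_p^p)^(1/p), matching the single-process branch above.
    return sum(t.float().norm(norm_type).item() ** norm_type for t in tensors) ** (1.0 / norm_type)

def clip_by_global_norm(tensors, max_norm=1.0, eps=1e-6):
    norm = global_norm(tensors)
    clip_coef = max_norm / (norm + eps)
    if clip_coef < 1:
        for t in tensors:
            t.mul_(clip_coef)
    return norm

grads = [torch.full((4,), 3.0), torch.full((3,), 4.0)]
print(global_norm(grads))           # sqrt(4*9 + 3*16) = sqrt(84) ~= 9.165
clip_by_global_norm(grads, max_norm=1.0)
print(global_norm(grads))           # ~= 1.0 after scaling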
daniel-falk/nnabla | [
"3fe132ea52dc10521cc029a5d6ba8f565cf65ccf",
"3fe132ea52dc10521cc029a5d6ba8f565cf65ccf",
"3fe132ea52dc10521cc029a5d6ba8f565cf65ccf",
"3fe132ea52dc10521cc029a5d6ba8f565cf65ccf",
"3fe132ea52dc10521cc029a5d6ba8f565cf65ccf"
] | [
"python/test/function/refs.py",
"python/test/experimental/test_tb_graph_writer.py",
"python/test/function/test_gru.py",
"python/test/function/test_squared_error.py",
"python/test/solver/test_momentum.py"
] | [
"# Copyright 2017,2018,2019,2020,2021 Sony Corporation.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import division\nfrom six.moves import range\n\nimport itertools\nimport numpy as np\n\n\ndef get_conv_out_size(w, k, p, s, d=1):\n return (w + 2 * p - (d * (k - 1) + 1)) // s + 1\n\n\ndef get_deconv_out_size(w, k, p, s, d):\n return s * (w - 1) - 2 * p + (d * (k - 1) + 1)\n\n\ndef get_pool_out_size(w, k, p, s, ignore_border):\n return (w + p - ((k - p) if ignore_border else 1)) // s + 1\n\n\nclass ChannelLastToFirstTranspose(object):\n\n def __init__(self, dim, kdim):\n base_axis = dim - kdim - 1\n up_to_base = tuple(range(0, base_axis))\n self.axes = up_to_base + (dim - 1,) + tuple(range(base_axis, dim - 1))\n self.inv_axes = up_to_base + \\\n tuple(range(base_axis + 1, dim)) + (base_axis,)\n\n def __call__(self, x):\n return x.transpose(self.axes).copy()\n\n def inv(self, x):\n return x.transpose(self.inv_axes).copy()\n\n\ndef convolution_1d(x, w, b, pad, stride, dilation, group, dtype=np.float32):\n \"\"\"\n \"\"\"\n C, H = x.shape\n K, Cg, M = w.shape\n\n Ho = get_conv_out_size(H, M, pad[0], stride[0], dilation[0])\n x_pad = np.zeros((C, H + pad[0] * 2), dtype=dtype)\n x_pad[:, pad[0]:pad[0] + H] = x\n y = np.zeros((K, Ho), dtype=dtype)\n for k in range(K):\n g = int(k // (K // group))\n for ho in range(Ho):\n hi = ho * stride[0] + np.arange(0, M) * dilation[0]\n ci = np.arange(g * Cg, (g + 1) * Cg)\n y[k, ho] = (w[k] * x_pad[np.ix_(ci, hi)]).sum()\n if b is not None:\n y += b[..., np.newaxis]\n return y\n\n\ndef convolution_2d(x, w, b, pad, stride, dilation, group, dtype=np.float32):\n \"\"\"\n \"\"\"\n C, H, W = x.shape\n K, Cg, M, N = w.shape\n\n Ho = get_conv_out_size(H, M, pad[0], stride[0], dilation[0])\n Wo = get_conv_out_size(W, N, pad[1], stride[1], dilation[1])\n x_pad = np.zeros((C, H + pad[0] * 2, W + pad[1] * 2), dtype=dtype)\n x_pad[:, pad[0]:pad[0] + H, pad[1]:pad[1] + W] = x\n y = np.zeros((K, Ho, Wo), dtype=dtype)\n for k in range(K):\n g = int(k // (K // group))\n for ho in range(Ho):\n for wo in range(Wo):\n hi = ho * stride[0] + np.arange(0, M) * dilation[0]\n wi = wo * stride[1] + np.arange(0, N) * dilation[1]\n ci = np.arange(g * Cg, (g + 1) * Cg)\n y[k, ho, wo] = (w[k] * x_pad[np.ix_(ci, hi, wi)]).sum()\n if b is not None:\n y += b[..., np.newaxis, np.newaxis]\n return y\n\n\ndef convolution_nd(x, w, b, pad, stride, dilation, group, dtype=np.float32):\n \"\"\"\n \"\"\"\n C = x.shape[0]\n inshape = x.shape[1:]\n ndim = len(inshape)\n assert w.ndim == ndim + 2\n K, Cg = w.shape[:2]\n kshape = w.shape[2:]\n\n def get_conv_out_size_recursive(d, ndim):\n if d == ndim:\n return []\n s = get_conv_out_size(\n inshape[d], kshape[d], pad[d], stride[d], dilation[d])\n return [s] + get_conv_out_size_recursive(d + 1, ndim)\n\n outshape = get_conv_out_size_recursive(0, ndim)\n inshape_pad = [C] + [inshape[d] + 2 * pad[d] for d in range(ndim)]\n x_pad = np.zeros(inshape_pad, dtype=dtype)\n x_pad[[slice(None,)] + [slice(pad[d], pad[d] + inshape[d])\n for d 
in range(ndim)]] = x\n y = np.zeros([K] + outshape, dtype=dtype)\n for k in range(K):\n g = int(k // (K // group))\n for outindex in itertools.product(*map(range, outshape)):\n inindex = [outindex[d] * stride[d] +\n np.arange(0, kshape[d]) * dilation[d] for d in range(ndim)]\n ci = np.arange(g * Cg, (g + 1) * Cg)\n y[(k,) + tuple(outindex)] = (w[k] *\n x_pad[np.ix_(ci, *inindex)]).sum()\n if b is not None:\n y += b[[Ellipsis] + [np.newaxis for d in range(ndim)]]\n return y\n\n\ndef deconvolution_1d(x, w, b, pad, stride, dilation, group, dtype=np.float32,\n output_padding=(0,)):\n y = x\n K, Ho = y.shape\n K, Cg, M = w.shape\n C = Cg * group\n\n H = (get_deconv_out_size(Ho, M, pad[0], stride[0], dilation[0])\n + output_padding[0])\n x_pad = np.zeros((C, H + pad[0] * 2), dtype=dtype)\n for k in range(K):\n g = int(k // (K // group))\n for ho in range(Ho):\n hi = ho * stride[0] + np.arange(0, M) * dilation[0]\n ci = np.arange(g * Cg, (g + 1) * Cg)\n x_pad[np.ix_(ci, hi)] += w[k] * y[k, ho]\n x = x_pad[:, pad[0]:pad[0] + H]\n if b is not None:\n x += b[..., np.newaxis]\n return x\n\n\ndef deconvolution_2d(x, w, b, pad, stride, dilation, group, dtype=np.float32,\n output_padding=(0, 0)):\n y = x\n K, Ho, Wo = y.shape\n K, Cg, M, N = w.shape\n C = Cg * group\n\n H = (get_deconv_out_size(Ho, M, pad[0], stride[0], dilation[0])\n + output_padding[0])\n W = (get_deconv_out_size(Wo, N, pad[1], stride[1], dilation[1])\n + output_padding[1])\n x_pad = np.zeros((C, H + pad[0] * 2, W + pad[1] * 2), dtype=dtype)\n for k in range(K):\n g = int(k // (K // group))\n for ho in range(Ho):\n for wo in range(Wo):\n hi = ho * stride[0] + np.arange(0, M) * dilation[0]\n wi = wo * stride[1] + np.arange(0, N) * dilation[1]\n ci = np.arange(g * Cg, (g + 1) * Cg)\n x_pad[np.ix_(ci, hi, wi)] += w[k] * y[k, ho, wo]\n x = x_pad[:, pad[0]:pad[0] + H, pad[1]:pad[1] + W]\n if b is not None:\n x += b[..., np.newaxis, np.newaxis]\n return x\n\n\ndef deformable_convolution_2d(x, w, offset, mask, b, pad, stride,\n dilation, group, deformable_group,\n channel_last, dtype=np.float32):\n \"\"\"\n Deformable convlution 2D for a single batch data\n \"\"\"\n C, H, W = x.shape # without batch dimension\n K, Cg, M, N = w.shape\n\n assert C == Cg * \\\n group, \"Wrong shape, x: {}, w: {}\".format(x.shape, w.shape)\n assert offset.shape[0] == 2 * deformable_group * M * N, \\\n \"Wrong shape offset: {}, 2 * deformable_group * Kw * Kh: {}\".format(\n offset.shape, 2 * deformable_group * M * N)\n assert offset.shape[1:] == (\n H, W), \"Wrong shape, offset: {}, w: {}\".format(offset.shape, w.shape)\n assert mask.shape[0] == deformable_group * M * N, \\\n \"Wrong shape mask: {}, deformable_group * Kw * Kh: {}\".format(\n mask.shape, deformable_group * M * N)\n assert mask.shape[1:] == (\n H, W), \"Wrong shape, mask: {}, w: {}\".format(mask.shape, w.shape)\n assert pad[0] < (w.shape[2] + 1)//2 and pad[1] < (w.shape[3] +\n 1)//2, \"Wrong shape, kernel: {}, pad: {}\".format(w.shape[2:], pad)\n\n # Zero padding\n x_pad = np.zeros((C, H + pad[0] * 2, W + pad[1] * 2), dtype=dtype)\n x_pad[:, pad[0]:pad[0] + H, pad[1]:pad[1] + W] = x\n\n # Create and initialize output variable\n Ho = get_conv_out_size(H, M, pad[0], stride[0], dilation[0])\n Wo = get_conv_out_size(W, N, pad[1], stride[1], dilation[1])\n y = np.zeros((K, Ho, Wo), dtype=dtype)\n\n _, Hp, Wp = x_pad.shape\n\n # Deformable Convolution\n for k in range(K):\n for c in range(C//group):\n g = k // (K//group)\n ci = Cg * g + c\n dg = ci // (C // deformable_group)\n\n for ho in range(Ho):\n 
for wo in range(Wo):\n # Get the input coordinates {(hi, wi)} which are\n # mapped to the output coordinate (ho, wo) by the kernel.\n hi = ho * stride[0] + np.arange(0, M) * dilation[0]\n wi = wo * stride[1] + np.arange(0, N) * dilation[1]\n\n # Apply the kernel\n modulated_x = np.zeros((M, N), dtype=dtype)\n\n for m in range(M):\n for n in range(N):\n # Shift (hi, wi) to (ph, pw) by using offset\n ph = hi[m] + offset[2*((dg*M*N) + (m * N) + n),\n ho * stride[0], wo * stride[1]]\n pw = wi[n] + offset[2*((dg*M*N) + (m * N) + n) + 1,\n ho * stride[0], wo * stride[1]]\n\n # Bilinear interpolation\n h_low = int(np.floor(ph))\n w_low = int(np.floor(pw))\n h_high = h_low + 1\n w_high = w_low + 1\n\n if h_low >= Hp or w_low >= Wp or \\\n h_high < 0 or w_high < 0:\n # Out of bounds.\n # Interpolation cannot be perform.\n val = 0\n else:\n v1 = 0 # (h_low, w_low)\n v2 = 0 # (h_low, w_high)\n v3 = 0 # (h_high, w_low)\n v4 = 0 # (h_high, w_high)\n if h_low >= 0 and w_low >= 0:\n v1 = x_pad[ci, h_low, w_low]\n if h_low >= 0 and w_high < Wp:\n v2 = x_pad[ci, h_low, w_high]\n if h_high < Hp and w_low >= 0:\n v3 = x_pad[ci, h_high, w_low]\n if h_high < Hp and w_high < Wp:\n v4 = x_pad[ci, h_high, w_high]\n\n lh = ph - h_low\n lw = pw - w_low\n hh = 1 - lh\n hw = 1 - lw\n w1 = hh * hw\n w2 = hh * lw\n w3 = lh * hw\n w4 = lh * lw\n val = w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4\n\n # Apply mask\n val *= mask[(dg*M*N) + (m * N) + n,\n ho * stride[0], wo * stride[1]]\n\n modulated_x[m, n] = val\n\n y[k, ho, wo] += (w[k, c] * modulated_x).sum()\n\n if b is not None:\n y += b[..., np.newaxis, np.newaxis]\n\n return y\n\n\ndef pooling_2d(x, mode, kernel, stride, pad, ignore_border=True,\n including_pad=True, dtype=np.float32):\n \"\"\"\n \"\"\"\n assert mode in ['average', 'sum', 'max']\n\n C, H, W = x.shape\n Ho = get_pool_out_size(H, kernel[0], pad[0], stride[0], ignore_border)\n Wo = get_pool_out_size(W, kernel[1], pad[1], stride[1], ignore_border)\n Hi = H + pad[0] + (pad[0] if ignore_border else kernel[0] - 1)\n Wi = W + pad[1] + (pad[1] if ignore_border else kernel[1] - 1)\n\n x_pad = np.ones((C, Hi, Wi), dtype=dtype)\n x_pad *= x.min() if mode == 'max' else 0\n x_pad[:, pad[0]:pad[0] + H, pad[1]:pad[1] + W] = x\n\n if mode == 'average':\n b_pad = np.zeros((C, Hi, Wi), dtype=np.uint)\n h_beg = int(not including_pad) * pad[0]\n w_beg = int(not including_pad) * pad[1]\n h_end = H + (1 + int(including_pad)) * pad[0]\n w_end = W + (1 + int(including_pad)) * pad[1]\n b_pad[:, h_beg:h_end, w_beg:w_end] = 1\n\n y = np.zeros((C, Ho, Wo), dtype=dtype)\n\n for c in range(C):\n for ho in range(Ho):\n for wo in range(Wo):\n hi = ho * stride[0] + np.arange(0, kernel[0])\n wi = wo * stride[1] + np.arange(0, kernel[1])\n yy = y[c]\n xx = x_pad[c]\n if mode == \"max\":\n yy[ho, wo] = xx[np.ix_(hi, wi)].max()\n elif mode == \"sum\":\n yy[ho, wo] = xx[np.ix_(hi, wi)].sum()\n elif mode == \"average\":\n pad_sum = xx[np.ix_(hi, wi)].sum()\n pad_cnt = b_pad[c][np.ix_(hi, wi)].sum()\n yy[ho, wo] = pad_sum / pad_cnt\n return y\n\n\ndef pooling_3d(x, mode, kernel, stride, pad, ignore_border=True,\n including_pad=True, dtype=np.float32):\n \"\"\"\n \"\"\"\n assert mode in ['average', 'sum', 'max']\n\n C, Z, H, W = x.shape\n Zo = get_pool_out_size(Z, kernel[0], pad[0], stride[0], ignore_border)\n Ho = get_pool_out_size(H, kernel[1], pad[1], stride[1], ignore_border)\n Wo = get_pool_out_size(W, kernel[2], pad[2], stride[2], ignore_border)\n Zi = Z + pad[0] + (pad[0] if ignore_border else kernel[0] - 1)\n Hi = H + pad[1] + (pad[1] if 
ignore_border else kernel[1] - 1)\n Wi = W + pad[2] + (pad[2] if ignore_border else kernel[2] - 1)\n\n x_pad = np.ones((C, Zi, Hi, Wi), dtype=dtype)\n x_pad *= x.min() if mode == 'max' else 0\n x_pad[:, pad[0]:pad[0] + Z, pad[1]:pad[1] + H, pad[2]:pad[2] + W] = x\n\n if mode == 'average':\n b_pad = np.zeros((C, Zi, Hi, Wi), dtype=np.uint)\n z_beg = int(not including_pad) * pad[0]\n h_beg = int(not including_pad) * pad[1]\n w_beg = int(not including_pad) * pad[2]\n z_end = Z + (1 + int(including_pad)) * pad[0]\n h_end = H + (1 + int(including_pad)) * pad[1]\n w_end = W + (1 + int(including_pad)) * pad[2]\n b_pad[:, z_beg:z_end, h_beg:h_end, w_beg:w_end] = 1\n #b_pad[:, pad[0]:pad[0] + Z, pad[1]:pad[1] + H, pad[2]:pad[2] + W] = 1\n\n y = np.zeros((C, Zo, Ho, Wo), dtype=dtype)\n\n for c in range(C):\n for zo in range(Zo):\n for ho in range(Ho):\n for wo in range(Wo):\n zi = zo * stride[0] + np.arange(0, kernel[0])\n hi = ho * stride[1] + np.arange(0, kernel[1])\n wi = wo * stride[2] + np.arange(0, kernel[2])\n yy = y[c]\n xx = x_pad[c]\n if mode == \"max\":\n yy[zo, ho, wo] = xx[np.ix_(zi, hi, wi)].max()\n elif mode == \"sum\":\n yy[zo, ho, wo] = xx[np.ix_(zi, hi, wi)].sum()\n elif mode == \"average\":\n pool_sum = xx[np.ix_(zi, hi, wi)].sum()\n pool_cnt = b_pad[c][np.ix_(zi, hi, wi)].sum()\n yy[zo, ho, wo] = pool_sum / pool_cnt\n return y\n\n\ndef generate_rotation_2d(rng, B):\n rotates = []\n for i in range(B):\n degree = 2 * np.pi * (2.0 * rng.rand() - 1.0)\n c, s = np.cos(degree), np.sin(degree)\n rotate = np.asarray([[c, -s],\n [s, c]])\n rotates.append(rotate)\n return np.asarray(rotates)\n\n\ndef generate_rotation_3d(rng, B):\n rotates = []\n for i in range(B):\n alpha = np.pi * (2.0 * rng.rand() - 1.0)\n beta = np.pi / 2.0 * (2.0 * rng.rand() - 1.0)\n gamma = np.pi * (2.0 * rng.rand() - 1.0)\n\n c, s = np.cos(alpha), np.sin(alpha)\n Ra = np.asarray([[c, -s, 0],\n [s, c, 0],\n [0, 0, 1]])\n c, s = np.cos(beta), np.sin(beta)\n Rb = np.asarray([[c, 0, s],\n [0, 1, 0],\n [-s, 0, c]])\n c, s = np.cos(gamma), np.sin(gamma)\n Rg = np.asarray([[1, 0, 0],\n [0, c, -s],\n [0, s, c]])\n rotate = Ra.dot(Rb).dot(Rg)\n rotates.append(rotate)\n return np.asarray(rotates)\n\n\ndef generate_transformation_2d(rng, batch_size):\n rotate = generate_rotation_2d(rng, batch_size)\n translate = (2.0 * rng.rand(batch_size, 2, 1) - 1.0) * 0.001\n theta = np.concatenate([rotate, translate], axis=2)\n return theta.astype(np.float32)\n\n\ndef generate_transformation_3d(rng, batch_size):\n rotate = generate_rotation_3d(rng, batch_size)\n translate = (2.0 * rng.rand(batch_size, 3, 1) - 1.0) * 0.001\n theta = np.concatenate([rotate, translate], axis=2)\n return theta.astype(np.float32)\n\n\ndef generate_normalized_grid_2d(B, size, align_corners):\n H, W = size\n hgrid = np.linspace(-1.0, 1.0, H)\n wgrid = np.linspace(-1.0, 1.0, W)\n hgrid = hgrid if align_corners else hgrid * (H - 1) / H\n wgrid = wgrid if align_corners else wgrid * (W - 1) / W\n w, h = np.meshgrid(wgrid, hgrid)\n\n x = w.reshape(-1)\n y = h.reshape(-1)\n t = np.ones(len(x))\n normalized_grid = np.stack((x, y, t), axis=1)\n normalized_grid = normalized_grid.reshape(H, W, 3)\n normalized_grid = np.repeat(\n normalized_grid[np.newaxis, :, :, :], B, axis=0)\n return normalized_grid.astype(np.float32)\n\n\ndef generate_normalized_grid_3d(B, size, align_corners):\n D, H, W = size\n dgrid = np.linspace(-1.0, 1.0, D)\n hgrid = np.linspace(-1.0, 1.0, H)\n wgrid = np.linspace(-1.0, 1.0, W)\n dgrid = dgrid if align_corners else dgrid * (D - 1) / D\n hgrid = 
hgrid if align_corners else hgrid * (H - 1) / H\n wgrid = wgrid if align_corners else wgrid * (W - 1) / W\n h, d, w = np.meshgrid(hgrid, dgrid, wgrid)\n\n x = w.reshape(-1)\n y = h.reshape(-1)\n z = d.reshape(-1)\n t = np.ones(len(x))\n normalized_grid = np.stack((x, y, z, t), axis=1)\n normalized_grid = normalized_grid.reshape(D, H, W, 4)\n normalized_grid = np.repeat(\n normalized_grid[np.newaxis, :, :, :, :], B, axis=0)\n return normalized_grid.astype(np.float32)\n\n\ndef affine_grid_2d(affine, size, align_corners):\n B = affine.shape[0]\n H, W = size\n grid_t = generate_normalized_grid_2d(B, size, align_corners)\n grid_s = np.matmul(grid_t.reshape(B, H * W, 3),\n affine.transpose((0, 2, 1)))\n grid_s = grid_s.reshape(B, H, W, 2)\n return grid_s.astype(np.float32)\n\n\ndef affine_grid_3d(affine, size, align_corners):\n B = affine.shape[0]\n D, H, W = size\n grid_t = generate_normalized_grid_3d(B, size, align_corners)\n grid_s = np.matmul(grid_t.reshape(B, D * H * W, 4),\n affine.transpose((0, 2, 1)))\n grid_s = grid_s.reshape(B, D, H, W, 3)\n return grid_s.astype(np.float32)\n\n\ndef pad_sequence(sequences, batch_first):\n # sequences: list of nparray\n # sequences[i]: (T_i, D_1, ..., D_M)\n Ds = () if len(sequences[0].shape) == 1 else sequences[0].shape[1:]\n B = len(sequences)\n T = max([seq.shape[0] for seq in sequences])\n data = np.zeros((B, T) + Ds) if batch_first else np.zeros((T, B) + Ds)\n for b, seq in enumerate(sequences):\n l = seq.shape[0]\n if batch_first:\n data[b, :l] = seq\n else:\n data[:l, b] = seq\n return data\n",
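The reference implementations in the string above all rely on the closed-form output sizes defined at the top of that file: get_conv_out_size computes (w + 2p - (d*(k-1)+1)) // s + 1 and get_deconv_out_size inverts it. A quick worked example follows; it is standalone, with the two formulas copied rather than imported.

def get_conv_out_size(w, k, p, s, d=1):
    return (w + 2 * p - (d * (k - 1) + 1)) // s + 1

def get_deconv_out_size(w, k, p, s, d):
    return s * (w - 1) - 2 * p + (d * (k - 1) + 1)

# 32-wide input, 3-wide kernel, padding 1, stride 1: spatial size is preserved.
print(get_conv_out_size(32, 3, 1, 1))       # 32
# Stride 2 roughly halves it: (32 + 2 - 3) // 2 + 1 = 16.
print(get_conv_out_size(32, 3, 1, 2))       # 16
# Deconvolution maps back toward the original size (up to output_padding).
print(get_deconv_out_size(16, 3, 1, 2, 1))  # 31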
"# Copyright 2021 Sony Corporation.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport pytest\n\nimport numpy as np\nimport nnabla as nn\nimport nnabla.functions as F\nimport nnabla.parametric_functions as PF\n\n\ndef test_show_graph():\n try:\n from nnabla.experimental.tb_graph_writer import TBGraphWriter\n except:\n pytest.skip(\n 'Skip because tensorboardX and tensorflow is not installed.')\n\n nn.clear_parameters()\n x = nn.Variable((2, 3, 4, 4))\n with nn.parameter_scope('c1'):\n h = PF.convolution(x, 8, (3, 3), pad=(1, 1))\n h = F.relu(PF.batch_normalization(h))\n with nn.parameter_scope('f1'):\n y = PF.affine(h, 10)\n\n with TBGraphWriter(log_dir='log_out') as tb:\n tb.from_variable(y, output_name=\"y\")\n\n\ndef test_show_curve():\n try:\n from nnabla.experimental.tb_graph_writer import TBGraphWriter\n except:\n pytest.skip(\n 'Skip because tensorboardX and tensorflow is not installed.')\n\n with TBGraphWriter(log_dir='log_out') as tb:\n values = []\n for i in range(360):\n s = np.sin(i / 180.0 * np.pi)\n tb.add_scalar(\"show_curve/sin\", s, i)\n values.append(s)\n\n nd_values = np.array(values)\n for i in range(10):\n tb.add_histogram(\"histogram\", nd_values, i)\n nd_values += 0.05\n",
"# Copyright 2019,2020,2021 Sony Corporation.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport pytest\nimport numpy as np\nimport nnabla as nn\nimport nnabla.functions as F\nfrom nnabla.utils.rnn import _create_fixed_length_gru\nfrom nbla_test_utils import list_context\n\nctxs = list_context('GRU')\n\n\ndef execute_fixed_length_gru(xs_np, h0_np, w0_np, w_np, b_np, num_layers=1, dropout=0.0, bidirectional=False, training=True):\n # Inputs are numpy arrays\n num_directions = 2 if bidirectional else 1\n seq_len = xs_np.shape[0]\n batch_size = xs_np.shape[1]\n hidden_size = h0_np.shape[3]\n\n xs = nn.Variable.from_numpy_array(xs_np)\n h0 = nn.Variable.from_numpy_array(h0_np)\n w0 = nn.Variable.from_numpy_array(w0_np)\n w = None\n b = None\n with_bias = False\n if num_layers > 1:\n w = nn.Variable.from_numpy_array(w_np)\n if type(b_np) is np.ndarray:\n b = nn.Variable.from_numpy_array(b_np)\n with_bias = True\n\n ys, hn = _create_fixed_length_gru(\n xs, h0, w0, w, b, num_layers, num_directions, with_bias) # returns Variables\n\n dummy = F.sink(ys, hn)\n dummy.forward()\n\n # returns numpy arrays\n ys = F.reshape(ys, (seq_len, batch_size, num_directions * hidden_size))\n ys.forward()\n return ys.d, hn.d\n\n\ndef get_gru_grad(xs_np, h0_np, w0_np, w_np, b_np, dy, dh, num_layers=1, dropout=0.0, bidirectional=False, training=True, **kw):\n # Inputs are numpy arrays\n num_directions = 2 if bidirectional else 1\n seq_len = xs_np.shape[0]\n batch_size = xs_np.shape[1]\n hidden_size = h0_np.shape[3]\n\n xs = nn.Variable.from_numpy_array(xs_np, need_grad=True)\n h0 = nn.Variable.from_numpy_array(h0_np, need_grad=True)\n w0 = nn.Variable.from_numpy_array(w0_np, need_grad=True)\n w = None\n b = None\n with_bias = False\n if num_layers > 1:\n w = nn.Variable.from_numpy_array(w_np, need_grad=True)\n if type(b_np) == np.ndarray:\n b = nn.Variable.from_numpy_array(b_np, need_grad=True)\n with_bias = True\n xs.grad.zero()\n h0.grad.zero()\n w0.grad.zero()\n if num_layers > 1:\n w.grad.zero()\n if with_bias:\n b.grad.zero()\n\n ys, hn = _create_fixed_length_gru(\n xs, h0, w0, w, b, num_layers, num_directions, with_bias) # returns Variables\n\n dummy = F.sink(ys, hn, one_input_grad=False)\n dummy.forward()\n ys.g = np.reshape(dy, ys.shape)\n hn.g = dh\n dummy.backward()\n\n if num_layers > 1 and with_bias:\n return np.concatenate((xs.g.flat, h0.g.flat, w0.g.flat, w.g.flat, b.g.flat))\n elif num_layers > 1 and not with_bias:\n return np.concatenate((xs.g.flat, h0.g.flat, w0.g.flat, w.g.flat))\n elif num_layers == 1 and with_bias:\n return np.concatenate((xs.g.flat, h0.g.flat, w0.g.flat, b.g.flat))\n else:\n return np.concatenate((xs.g.flat, h0.g.flat, w0.g.flat))\n\n\[email protected](\"ctx, func_name\", ctxs)\[email protected](\"seed\", [100])\[email protected](\"num_layers\", [1, 2])\[email protected](\"dropout\", [0.0])\[email protected](\"bidirectional\", [True, False])\[email protected](\"training\", [True, False])\[email protected](\"seq_len\", [2, 5])\[email protected](\"batch_size\", [3])\[email 
protected](\"input_size\", [2])\[email protected](\"hidden_size\", [3])\[email protected](\"with_bias\", [True, False])\ndef test_gru(seed, num_layers, dropout, bidirectional, training, seq_len, batch_size, input_size, hidden_size, with_bias, ctx, func_name):\n from nbla_test_utils import function_tester\n\n with nn.context_scope(ctx):\n rng = np.random.RandomState(seed)\n num_directions = 1\n if bidirectional:\n num_directions = 2\n inputs = [rng.randn(seq_len, batch_size,\n input_size).astype(np.float32)]\n inputs += [rng.randn(num_layers, num_directions,\n batch_size, hidden_size).astype(np.float32)]\n inputs += [rng.randn(num_directions, 3, hidden_size,\n input_size + hidden_size)]\n if num_layers > 1:\n inputs += [rng.randn(max(1, num_layers-1), num_directions, 3, hidden_size,\n num_directions*hidden_size + hidden_size).astype(np.float32)]\n else:\n inputs += [None]\n if with_bias:\n inputs += [rng.randn(num_layers, num_directions,\n 4, hidden_size).astype(np.float32)]\n else:\n inputs += [None]\n\n backward = [False for _ in inputs]\n if training:\n backward = [True for _ in inputs]\n\n function_tester(rng, F.gru, execute_fixed_length_gru, inputs, func_kwargs=dict(\n num_layers=num_layers, dropout=dropout, bidirectional=bidirectional, training=training), atol_f=2e-1, atol_b=2e-2, dstep=1e-3, backward=backward, ctx=ctx, func_name=func_name, ref_grad=get_gru_grad, disable_half_test=True)\n\n\[email protected](\"ctx, func_name\", ctxs)\[email protected](\"seed\", [100])\[email protected](\"num_layers\", [1, 2])\[email protected](\"dropout\", [0.0])\[email protected](\"bidirectional\", [True, False])\[email protected](\"training\", [True])\[email protected](\"seq_len\", [2, 5])\[email protected](\"batch_size\", [3])\[email protected](\"input_size\", [2])\[email protected](\"hidden_size\", [3])\[email protected](\"with_bias\", [True, False])\ndef test_gru_double_backward(seed, num_layers, dropout, bidirectional, training,\n seq_len, batch_size, input_size, hidden_size, with_bias, ctx, func_name):\n from nbla_test_utils import backward_function_tester\n\n with nn.context_scope(ctx):\n rng = np.random.RandomState(seed)\n num_directions = 1\n if bidirectional:\n num_directions = 2\n inputs = [rng.randn(seq_len, batch_size,\n input_size).astype(np.float32) * 0.1]\n inputs += [rng.randn(num_layers, num_directions,\n batch_size, hidden_size).astype(np.float32)]\n inputs += [rng.randn(num_directions, 3, hidden_size,\n input_size + hidden_size)]\n if num_layers > 1:\n inputs += [rng.randn(max(1, num_layers-1), num_directions, 3, hidden_size,\n num_directions*hidden_size + hidden_size).astype(np.float32)]\n else:\n inputs += [None]\n if with_bias:\n inputs += [rng.randn(num_layers, num_directions,\n 4, hidden_size).astype(np.float32)]\n else:\n inputs += [None]\n\n backward = [False for _ in inputs]\n if training:\n backward = [True for _ in inputs]\n\n backward_function_tester(rng, F.gru, inputs, func_kwargs=dict(\n num_layers=num_layers, dropout=dropout, bidirectional=bidirectional,\n training=training), atol_f=1e-6, dstep=1e-3, backward=backward,\n ctx=ctx, skip_backward_check=True)\n\n\[email protected](\"num_layers\", [2])\[email protected](\"bidirectional\", [False])\[email protected](\"seq_len\", [2, 5])\[email protected](\"batch_size\", [3])\[email protected](\"input_size\", [2])\[email protected](\"hidden_size\", [3])\[email protected](\"ctx, func_name\", ctxs)\ndef test_inference_backward(num_layers, bidirectional, seq_len, batch_size, input_size, hidden_size, ctx, func_name):\n with 
nn.context_scope(ctx):\n num_directions = 1\n if bidirectional:\n num_directions = 2\n\n x = nn.Variable((seq_len, batch_size, input_size), need_grad=True)\n h = nn.Variable((num_layers, num_directions,\n batch_size, hidden_size), need_grad=True)\n w0 = nn.Variable((num_directions, 3, hidden_size,\n input_size + hidden_size), need_grad=True)\n w = nn.Variable((max(1, num_layers-1), num_directions, 3, hidden_size,\n num_directions*hidden_size + hidden_size), need_grad=True)\n b = nn.Variable((num_layers, num_directions, 4,\n hidden_size), need_grad=True)\n y, hn = F.gru(x, h, w0, w, b, num_layers=num_layers, training=False)\n y.forward()\n with pytest.raises(RuntimeError) as e_info:\n y.backward()\n",
"# Copyright 2017,2018,2019,2020,2021 Sony Corporation.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport pytest\nimport numpy as np\nimport nnabla.functions as F\nfrom nbla_test_utils import list_context\n\nctxs = list_context('SquaredError')\n\n\[email protected](\"ctx, func_name\", ctxs)\[email protected](\"seed\", [313])\ndef test_squared_error_forward_backward(seed, ctx, func_name):\n from nbla_test_utils import function_tester\n rng = np.random.RandomState(seed)\n inputs = [rng.randn(2, 3, 4).astype(np.float32) * 2 for _ in range(2)]\n function_tester(rng, F.squared_error, lambda x, y: (x - y)**2, inputs,\n atol_b=2e-2, ctx=ctx, func_name=func_name)\n\n\[email protected](\"ctx, func_name\", ctxs)\[email protected](\"seed\", [313])\ndef test_squared_error_double_backward(seed, ctx, func_name):\n from nbla_test_utils import backward_function_tester\n rng = np.random.RandomState(seed)\n inputs = [rng.randn(2, 3, 4).astype(np.float32) * 2 for _ in range(2)]\n backward_function_tester(rng, F.squared_error, inputs,\n atol_accum=2e-1, ctx=ctx)\n",
"# Copyright 2017,2018,2019,2020,2021 Sony Corporation.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport pytest\nimport nnabla.solvers as S\nimport numpy as np\nfrom solver_test_utils import solver_tester, RefSolver\nfrom nbla_test_utils import list_context\n\nctxs = list_context('Momentum')\n\n\nclass RefMomentum(RefSolver):\n\n def __init__(self, lr, momentum):\n self.lr = lr\n self.momentum = momentum\n self.v = {}\n\n def _set_state_impl(self, key, param):\n self.v[key] = np.zeros_like(param)\n\n def _update_impl(self, key, p, g):\n _update_momentum(p, g, self.v[key], self.lr, self.momentum)\n\n\ndef _update_momentum(p, g, v, lr, momentum):\n v[...] = v * momentum + lr * g\n p[...] = p - v\n\n\[email protected](\"ctx, solver_name\", ctxs)\[email protected](\"decay\", [1e-4])\[email protected](\"lr\", [1e-1, 1e-3])\[email protected](\"momentum\", [0.9, 0.5])\[email protected](\"seed\", [313])\ndef test_momentum(seed, lr, momentum, decay, ctx, solver_name):\n rng = np.random.RandomState(seed)\n solver_tester(\n rng, S.Momentum, RefMomentum, [lr, momentum], atol=1e-6, ctx=ctx, solver_name=solver_name)\n"
] | [
[
"numpy.ix_",
"numpy.linspace",
"numpy.asarray",
"numpy.arange",
"numpy.cos",
"numpy.stack",
"numpy.ones",
"numpy.concatenate",
"numpy.sin",
"numpy.floor",
"numpy.repeat",
"numpy.meshgrid",
"numpy.zeros"
],
[
"numpy.array",
"numpy.sin"
],
[
"numpy.reshape",
"numpy.random.RandomState",
"numpy.concatenate"
],
[
"numpy.random.RandomState"
],
[
"numpy.random.RandomState",
"numpy.zeros_like"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
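Closing out the nnabla record, the pad_sequence helper in its reference code zero-pads variable-length sequences into a single (B, T, ...) or (T, B, ...) array. A short usage sketch, with the helper reproduced so it runs standalone and toy shapes chosen here:

import numpy as np

def pad_sequence(sequences, batch_first):
    # sequences[i] has shape (T_i, D_1, ..., D_M); pad along the time axis with zeros.
    Ds = () if len(sequences[0].shape) == 1 else sequences[0].shape[1:]
    B = len(sequences)
    T = max(seq.shape[0] for seq in sequences)
    data = np.zeros((B, T) + Ds) if batch_first else np.zeros((T, B) + Ds)
    for b, seq in enumerate(sequences):
        l = seq.shape[0]
        if batch_first:
            data[b, :l] = seq
        else:
            data[:l, b] = seq
    return data

seqs = [np.ones((2, 3)), np.ones((4, 3))]
print(pad_sequence(seqs, batch_first=True).shape)   # (2, 4, 3)
print(pad_sequence(seqs, batch_first=False).shape)  # (4, 2, 3)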
prannayk/MSRASI17 | [
"f7277d90ffdd062c1ba94391b7f82c621e619743"
] | [
"models/wc3.py"
] | [
"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nimport operator\nimport collections\nimport math\nimport time\nimport os\nimport random\nimport zipfile\nimport time\nimport numpy as np\nimport sys\nfrom six.moves import urllib\nfrom six.moves import xrange # pylint: disable=redefined-builtin\nimport tensorflow as tf\nsys.path.append( '../util/')\nfrom generators import *\nfrom loader import *\nfrom print_tweets import *\nfrom similar_tokens import * \nfrom training import *\nfrom similar_tokens import *\nfrom expand_query import *\nfrom argument_loader import *\nfrom setup import *\nfrom LSTM import *\n\ndataset, query_type, filename, num_steps, num_steps_roll, num_steps_train, expand_flag,lr_, matchname = import_arguments(sys.argv)\n\nchar_batch_dict, word_batch_dict,data, count, dictionary, reverse_dictionary, word_max_len, char_max_len, vocabulary_size, char_dictionary, reverse_char_dictionary, data_index, char_data_index, buffer_index, batch_list, char_batch_list, word_batch_list, char_data = build_everything(dataset)\n\ndata_index, batch, labels = generate_batch(data, data_index, batch_size=8, num_skips=2, skip_window=1,)\nfor i in range(8):\n print(batch[i], reverse_dictionary[batch[i]],\n '->', labels[i, 0], reverse_dictionary[labels[i, 0]])\nchar_data_index, batch, labels = generate_batch_char(char_data, char_data_index, batch_size=8, num_skips=2, skip_window=1)\nfor i in range(8):\n print(batch[i], reverse_char_dictionary[batch[i]],\n '->', labels[i, 0], reverse_char_dictionary[labels[i, 0]])\n\nlambda_1, tweet_batch_size, expand_start_count, query_name, query_tokens, query_tokens_alternate, char_batch_size, num_sampled, valid_examples, valid_window, valid_size, skip_window, num_skips, embedding_size, char_vocabulary_size, batch_size, num_char_skips, skip_char_window = setup(char_dictionary, dictionary, query_type)\nlearning_rate = lr_\n\ngraph = tf.Graph()\nwith graph.as_default():\n\n # Input data.\n need_constant = tf.constant(query_tokens,dtype=tf.int32)\n avail_constant = tf.constant(query_tokens_alternate, dtype=tf.int32)\n train_inputs = tf.placeholder(tf.int32, shape=[batch_size])\n train_input_chars = tf.placeholder(tf.int32, shape=[char_batch_size])\n train_labels = tf.placeholder(tf.int32, shape=[batch_size, 1])\n train_char_labels = tf.placeholder(tf.int32, shape=[char_batch_size, 1])\n word_char_embeddings = tf.placeholder(tf.int32, shape=[batch_size, char_max_len])\n valid_dataset = tf.constant(valid_examples[0], dtype=tf.int32)\n valid_char_dataset = tf.constant(valid_examples[1], dtype=tf.int32)\n query_ints = tf.placeholder(tf.int32, shape=len(query_tokens))\n expanded_query_ints = tf.placeholder(tf.int32, shape=(len(query_tokens)+3))\n tquery_word_holder = tf.placeholder(tf.int32, shape=[word_max_len],name=\"tweet_query_word_holder\")\n tquery_char_holder = tf.placeholder(tf.int32, shape=[word_max_len, char_max_len],name=\"tweet_query_char_holder\")\n # Ops and variables pinned to the CPU because of missing GPU implementation\n tweet_char_holder = tf.placeholder(tf.int32, shape=[tweet_batch_size,word_max_len,char_max_len],name=\"tweet_char_holder\")\n tweet_word_holder = tf.placeholder(tf.int32, shape=[tweet_batch_size, word_max_len],name=\"tweet_word_holder\")\n with tf.device('/gpu:0'):\n # Look up embeddings for inputs.\n embeddings = tf.Variable(tf.random_uniform([vocabulary_size, embedding_size], -1.0, 1.0))\n char_embeddings = tf.Variable(tf.random_uniform([char_vocabulary_size, embedding_size 
// 2],-1.0,1.0))\n embed = tf.nn.embedding_lookup(embeddings, train_inputs)\n char_embed = tf.nn.embedding_lookup(char_embeddings,train_input_chars)\n lambda_2 = tf.Variable(tf.random_normal([1],stddev=1.0))\n\n # weight variables\n w1 = tf.Variable(tf.random_normal([embedding_size,embedding_size // 4],stddev=1.0/math.sqrt(embedding_size)))\n w2 = tf.Variable(tf.random_normal([embedding_size // 4,1],stddev=1.0/math.sqrt(embedding_size)))\n weights = tf.stack([w1]*batch_size)\n vvector = tf.stack([w2]*batch_size)\n weights_tweet = tf.stack([w1]*tweet_batch_size*word_max_len)\n vvector_tweet = tf.stack([w2]*tweet_batch_size*word_max_len)\n\n # Construct the variables for the NCE loss\n nce_weights = tf.Variable(\n tf.truncated_normal([vocabulary_size, embedding_size],\n stddev=1.0 / math.sqrt(embedding_size)))\n nce_biases = tf.Variable(tf.zeros([vocabulary_size]))\n # character weights\n nce_char_weights = tf.Variable(\n tf.truncated_normal([vocabulary_size, embedding_size // 2],\n stddev=1.0 / math.sqrt(embedding_size // 2)))\n nce_char_biases = tf.Variable(tf.zeros([vocabulary_size]))\n\n nce_train_weights = tf.Variable(\n tf.truncated_normal([vocabulary_size, embedding_size],\n stddev=1.0 / math.sqrt(embedding_size)))\n nce_train_biases = tf.Variable(tf.zeros([vocabulary_size]))\n \n loss = tf.reduce_mean(\n tf.nn.nce_loss(weights=nce_weights,\n biases=nce_biases,\n labels=train_labels,\n inputs=embed,\n num_sampled=num_sampled,\n num_classes=vocabulary_size))\n\n loss_char = tf.reduce_mean(\n tf.nn.nce_loss(weights=nce_char_weights,\n biases=nce_char_biases,\n labels=train_char_labels,\n inputs=char_embed,\n num_sampled=10,\n num_classes=char_vocabulary_size))\n\n # Construct the SGD optimizer using a learning rate of 1.0.\n optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss)\n optimizer_char = tf.train.AdamOptimizer(learning_rate /5).minimize(loss_char)\n\n # Compute the cosine similarity between minibatch examples and all embeddings.\n norm = tf.sqrt(tf.reduce_sum(tf.square(embeddings), 1, keep_dims=True))\n normalized_embeddings = embeddings / norm\n valid_embeddings = tf.nn.embedding_lookup(\n normalized_embeddings, valid_dataset)\n similarity = tf.matmul(\n valid_embeddings, normalized_embeddings, transpose_b=True)\n query_embedding_token = tf.reshape(tf.reduce_mean(tf.nn.embedding_lookup(normalized_embeddings,query_ints),axis=0),shape=[1,embedding_size])\n expanded_query_embedding_token = tf.reshape(tf.reduce_mean(tf.nn.embedding_lookup(normalized_embeddings,expanded_query_ints),axis=0),shape=[1,embedding_size])\n similarity_query = tf.reshape(tf.matmul(\n query_embedding_token, normalized_embeddings, transpose_b=True),shape=[int(normalized_embeddings.shape[0])])\n similarity_expanded_query = tf.reshape(tf.matmul(\n expanded_query_embedding_token, normalized_embeddings, transpose_b=True),shape=[int(normalized_embeddings.shape[0])])\n\n norm_char = tf.sqrt(tf.reduce_sum(tf.square(char_embeddings), 1, keep_dims=True))\n normalized_char_embeddings = char_embeddings / norm_char\n valid_embeddings_char = tf.nn.embedding_lookup(\n normalized_char_embeddings, valid_char_dataset)\n similarity_char = tf.matmul(\n valid_embeddings_char, normalized_char_embeddings, transpose_b=True)\n \n bilstm = biLSTM_setup(embedding_size)\n character_word_embeddings = tf.nn.embedding_lookup(normalized_char_embeddings, word_char_embeddings)\n intermediate = biLSTM_implementation(character_word_embeddings, bilstm, False)\n output = attention(w1, w2, intermediate)\n\n word_embeddings 
= tf.nn.embedding_lookup(normalized_embeddings, train_inputs)\n final_embedding = lambda_2*word_embeddings + (1-lambda_2)*output\n with tf.variable_scope(tf.get_variable_scope(), reuse=None):\n\n loss_char_train = tf.reduce_mean(\n tf.nn.nce_loss(weights=nce_train_weights,\n biases=nce_train_biases,\n labels=train_labels,\n inputs=final_embedding,\n num_sampled=64,\n num_classes=vocabulary_size))\n\n optimizer_train = tf.train.AdamOptimizer(learning_rate/5).minimize(loss_char_train)\n\n tweet_word_embed = tf.nn.embedding_lookup(normalized_embeddings, tweet_word_holder)\n tweet_char_embeddings = tf.reshape(tf.nn.embedding_lookup(normalized_char_embeddings, tweet_char_holder),shape=[tweet_batch_size*word_max_len, char_max_len, embedding_size//2])\n intermediate = biLSTM_implementation(tweet_char_embeddings, bilstm)\n tweet_char_embed = tf.reshape(attention(w1,w2,intermediate),shape=[tweet_batch_size, word_max_len, embedding_size])\n tweet_embedding = tf.reduce_mean(lambda_1*tweet_word_embed + (1-lambda_1)*tweet_char_embed,axis=1)\n # query embeddings\n query_embedding = tf.reshape(tf.reduce_mean(tf.nn.embedding_lookup(normalized_embeddings,query_ints),axis=0),shape=[1,embedding_size])\n expanded_query_embedding = tf.reshape(tf.reduce_mean(tf.nn.embedding_lookup(normalized_embeddings,expanded_query_ints),axis=0),shape=[1,embedding_size],name=\"similarity_normal\")\n query_similarity = tf.reshape(tf.matmul(tweet_embedding, query_embedding, transpose_b=True),shape=[tweet_batch_size])\n expanded_query_similarity = tf.reshape(tf.matmul(tweet_embedding, expanded_query_embedding, transpose_b=True),shape=[tweet_batch_size],name=\"similarity_expanded\")\n # tweet level query : for matching / extraction\n tquery_word_embed = tf.nn.embedding_lookup(normalized_embeddings, tquery_word_holder)\n tquery_char_embeddings = tf.reshape(tf.nn.embedding_lookup(normalized_char_embeddings, tquery_char_holder),shape=[word_max_len, char_max_len, embedding_size//2])\n intermediate = biLSTM_implementation(tquery_char_embeddings, bilstm)\n tquery_char_embed = attention(w1, w2, intermediate)\n tquery_embedding = tf.reshape(tf.reduce_mean(lambda_1*tquery_word_embed + (1-lambda_1)*tquery_char_embed,axis=0),shape=[1,embedding_size])\n\n norm_query = tf.sqrt(tf.reduce_sum(tf.square(tquery_embedding), 1, keep_dims=True))\n tquery_embedding_norm = tquery_embedding / norm_query\n cosine = tf.matmul(tweet_embedding, tquery_embedding_norm, transpose_b=True)\n tweet_query_similarity = tf.reshape(cosine, shape=[tweet_batch_size], name=\"tweet_query_similarity\")\n \n tquery_embedding_norm_dim = tf.reshape(tquery_embedding_norm, shape=[1,embedding_size])\n query_need_embedding = tf.reshape(tf.reduce_mean(tf.nn.embedding_lookup(normalized_embeddings, need_constant),axis=0),shape=[1,embedding_size])\n cosine_need = tf.matmul(tquery_embedding_norm_dim, query_need_embedding, transpose_b=True)\n tquery_embedding_reqd = tf.reshape(tquery_embedding_norm_dim - (cosine_need*tquery_embedding_norm_dim),shape=[1,embedding_size])\n # we have the need vector without the need vector\n query_avail_embedding = tf.reshape(tf.reduce_mean(tf.nn.embedding_lookup(normalized_embeddings,avail_constant),axis=0),shape=[1,embedding_size])\n query_norm = tf.sqrt(tf.reduce_sum(tf.square(query_avail_embedding),1,keep_dims=True))\n query_avail_embedding_norm = query_embedding / query_norm\n cosine_avail = tf.matmul(tweet_embedding, query_avail_embedding_norm, transpose_b=True)\n reduced_tweet_embedding = tweet_embedding - (tweet_embedding*cosine_avail)\n 
match_similarity = tf.reshape(tf.matmul(reduced_tweet_embedding, tquery_embedding_reqd, transpose_b=True),shape=[tweet_batch_size],name=\"match_similarity\")\n # Add variable initializer.\n init = tf.global_variables_initializer()\n saver = tf.train.Saver()\n\n# Step 5: Begin training.\n# loading tweet list in integer marking form\n# load more data\nexpand_count = 3\nwith tf.Session(graph=graph) as session:\n # We must initialize all variables before we use them.\n init.run()\n count = 0\n print(\"Initialized\")\n\n generators = [generate_batch, generate_batch_char]\n similarities = [similarity, similarity_char]\n placeholders = [[train_inputs,train_labels],[train_input_chars,train_char_labels]]\n losses = [loss, loss_char]\n optimizers = [optimizer, optimizer_char]\n interval1 = 800\n interval2 = 8000\n datas = [data,char_data]\n data_index = [data_index, char_data_index, buffer_index]\n reverse_dictionaries = [reverse_dictionary, reverse_char_dictionary]\n if query_type == 0:\n query_name = 'Need'\n else :\n query_name = 'Avail'\n print(query_tokens)\n print(query_name)\n count_ = train_model(session, dataset,query_similarity, query_tokens, query_ints, query_name, word_batch_list, char_batch_list, tweet_word_holder, tweet_char_holder, generators, similarities, num_steps, placeholders,losses, optimizers, interval1, interval2, valid_size, valid_examples, reverse_dictionaries, batch_size, num_skips, skip_window, filename, datas, data_index, tweet_batch_size)\n placeholders += [[train_inputs, word_char_embeddings, train_labels]]\n losses += [loss_char_train]\n optimizers += [optimizer_train]\n datas += [[word_batch_list, char_batch_list]]\n count_ = train_model(session, dataset,query_similarity, query_tokens ,query_ints, query_name, word_batch_list, char_batch_list, tweet_word_holder, tweet_char_holder, generators, similarities, num_steps_roll, placeholders,losses, optimizers, interval1, interval2, valid_size, valid_examples, reverse_dictionaries, batch_size, num_skips, skip_window, filename, datas, data_index, tweet_batch_size, count_)\n \n expanded_query_tokens, expanded_query_holder, final_query_similarity= expand_query(expand_flag, session,query_ints, np.array(query_tokens),dataset ,similarity_query, word_batch_dict, 100, query_ints, expanded_query_ints, query_similarity, expanded_query_similarity, expand_start_count, expand_count)\n expanded_query_tokens = query_tokens + expanded_query_tokens\n print(expanded_query_tokens)\n \n count_ = train_model(session, dataset, final_query_similarity, expanded_query_tokens, expanded_query_holder, query_name, word_batch_list, char_batch_list, tweet_word_holder, tweet_char_holder, generators, similarities, num_steps_train , placeholders,losses, optimizers, interval1, interval2, valid_size, valid_examples, reverse_dictionaries, batch_size, num_skips, skip_window, filename, datas, data_index, tweet_batch_size, count_)\n folder_name = './%s/%s/'%(dataset, query_type)\n final_embeddings = normalized_embeddings.eval()\n final_char_embedding = normalized_char_embeddings.eval()\n np.save('../results/%s/%s/%s_word_embeddings.npy'%(dataset, query_name, filename), final_embeddings)\n np.save('../results/%s/%s/%s_char_embeddings.npy'%(dataset, query_name, filename), final_char_embedding)\n saver.save(session, '../results/%s/%s/%s_model.ckpt'%(dataset, query_name, filename))\n"
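The retrieval logic in the TensorFlow script above ranks tweets by the inner product between L2-normalized embeddings, with the query embedding taken as the mean of its token embeddings. The numpy sketch below mirrors only that scoring step; it is independent of the TF 1.x graph code, and the vocabulary size, token ids, and tweet vectors are made-up toy values.

import numpy as np

rng = np.random.RandomState(0)
vocab_size, embedding_size = 50, 8

embeddings = rng.randn(vocab_size, embedding_size)
normalized = embeddings / np.linalg.norm(embeddings, axis=1, keepdims=True)

query_tokens = [3, 17, 25]                                 # toy token ids for the query
query_vec = normalized[query_tokens].mean(axis=0, keepdims=True)       # (1, D)

tweet_embeddings = normalized[rng.randint(0, vocab_size, size=(5,))]   # 5 toy "tweets"
similarity = tweet_embeddings @ query_vec.T                # cosine-style scores, shape (5, 1)
ranking = np.argsort(-similarity.ravel())                  # most similar tweet first
print(similarity.ravel(), ranking)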
] | [
[
"tensorflow.device",
"tensorflow.zeros",
"tensorflow.stack",
"tensorflow.train.AdamOptimizer",
"tensorflow.nn.nce_loss",
"tensorflow.Graph",
"numpy.save",
"tensorflow.Session",
"tensorflow.square",
"tensorflow.train.Saver",
"tensorflow.matmul",
"tensorflow.placeholder",
"tensorflow.global_variables_initializer",
"tensorflow.train.GradientDescentOptimizer",
"numpy.array",
"tensorflow.nn.embedding_lookup",
"tensorflow.constant",
"tensorflow.reduce_mean",
"tensorflow.reshape",
"tensorflow.get_variable_scope",
"tensorflow.random_uniform",
"tensorflow.random_normal"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
}
] |
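The record above trains word- and character-level embeddings with a sampled noise-contrastive estimation (NCE) objective via `tf.nn.nce_loss` before computing tweet/query similarities. The following is a minimal, self-contained sketch of that NCE training pattern in TensorFlow 1.x (the version pinned for this record); the vocabulary size, embedding width, batch size and variable names below are illustrative placeholders, not values taken from the original script.

```python
import math
import numpy as np
import tensorflow as tf

vocabulary_size = 10000  # illustrative; the record builds this from its own corpus
embedding_size = 128     # illustrative embedding width
batch_size = 64
num_sampled = 64         # negative samples per batch, as passed to tf.nn.nce_loss above

graph = tf.Graph()
with graph.as_default():
    train_inputs = tf.placeholder(tf.int32, shape=[batch_size])
    train_labels = tf.placeholder(tf.int32, shape=[batch_size, 1])

    embeddings = tf.Variable(
        tf.random_uniform([vocabulary_size, embedding_size], -1.0, 1.0))
    embed = tf.nn.embedding_lookup(embeddings, train_inputs)

    nce_weights = tf.Variable(
        tf.truncated_normal([vocabulary_size, embedding_size],
                            stddev=1.0 / math.sqrt(embedding_size)))
    nce_biases = tf.Variable(tf.zeros([vocabulary_size]))

    # Sampled noise-contrastive estimation loss over the full vocabulary.
    loss = tf.reduce_mean(
        tf.nn.nce_loss(weights=nce_weights, biases=nce_biases,
                       labels=train_labels, inputs=embed,
                       num_sampled=num_sampled, num_classes=vocabulary_size))
    optimizer = tf.train.AdamOptimizer(1e-3).minimize(loss)
    init = tf.global_variables_initializer()

with tf.Session(graph=graph) as session:
    init.run()
    # One toy step on random (centre word, context word) index pairs; the real
    # script feeds batches from its skip-gram generators instead.
    feed = {
        train_inputs: np.random.randint(vocabulary_size, size=batch_size).astype(np.int32),
        train_labels: np.random.randint(vocabulary_size, size=(batch_size, 1)).astype(np.int32),
    }
    _, loss_val = session.run([optimizer, loss], feed_dict=feed)
    print('NCE loss after one step:', loss_val)
```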
ruohoruotsi/pyro | [
"b54a4b42b9474eb3ecee11505e45fde85b1cdc54"
] | [
"pyro/distributions/relaxed_straight_through.py"
] | [
"from __future__ import absolute_import, division, print_function\n\nimport torch\n\nfrom pyro.distributions.torch import RelaxedOneHotCategorical, RelaxedBernoulli\nfrom pyro.distributions.util import copy_docs_from\nfrom torch.distributions.utils import clamp_probs\n\n\n@copy_docs_from(RelaxedOneHotCategorical)\nclass RelaxedOneHotCategoricalStraightThrough(RelaxedOneHotCategorical):\n \"\"\"\n An implementation of\n :class:`~torch.distributions.relaxed_categorical.RelaxedOneHotCategorical`\n with a straight-through gradient estimator.\n\n This distribution has the following properties:\n\n - The samples returned by the :meth:`rsample` method are discrete/quantized.\n - The :meth:`log_prob` method returns the log probability of the\n relaxed/unquantized sample using the GumbelSoftmax distribution.\n - In the backward pass the gradient of the sample with respect to the\n parameters of the distribution uses the relaxed/unquantized sample.\n\n References:\n\n [1] The Concrete Distribution: A Continuous Relaxation of Discrete Random Variables,\n Chris J. Maddison, Andriy Mnih, Yee Whye Teh\n [2] Categorical Reparameterization with Gumbel-Softmax,\n Eric Jang, Shixiang Gu, Ben Poole\n \"\"\"\n def rsample(self, sample_shape=torch.Size()):\n soft_sample = super(RelaxedOneHotCategoricalStraightThrough, self).rsample(sample_shape)\n soft_sample = clamp_probs(soft_sample)\n hard_sample = QuantizeCategorical.apply(soft_sample)\n return hard_sample\n\n def log_prob(self, value):\n value = getattr(value, '_unquantize', value)\n return super(RelaxedOneHotCategoricalStraightThrough, self).log_prob(value)\n\n\nclass QuantizeCategorical(torch.autograd.Function):\n @staticmethod\n def forward(ctx, soft_value):\n argmax = soft_value.max(-1)[1]\n hard_value = torch.zeros_like(soft_value)\n hard_value._unquantize = soft_value\n if argmax.dim() < hard_value.dim():\n argmax = argmax.unsqueeze(-1)\n return hard_value.scatter_(-1, argmax, 1)\n\n @staticmethod\n def backward(ctx, grad):\n return grad\n\n\n@copy_docs_from(RelaxedBernoulli)\nclass RelaxedBernoulliStraightThrough(RelaxedBernoulli):\n \"\"\"\n An implementation of\n :class:`~torch.distributions.relaxed_bernoulli.RelaxedBernoulli`\n with a straight-through gradient estimator.\n\n This distribution has the following properties:\n\n - The samples returned by the :meth:`rsample` method are discrete/quantized.\n - The :meth:`log_prob` method returns the log probability of the\n relaxed/unquantized sample using the GumbelSoftmax distribution.\n - In the backward pass the gradient of the sample with respect to the\n parameters of the distribution uses the relaxed/unquantized sample.\n\n References:\n\n [1] The Concrete Distribution: A Continuous Relaxation of Discrete Random Variables,\n Chris J. 
Maddison, Andriy Mnih, Yee Whye Teh\n [2] Categorical Reparameterization with Gumbel-Softmax,\n Eric Jang, Shixiang Gu, Ben Poole\n \"\"\"\n def rsample(self, sample_shape=torch.Size()):\n soft_sample = super(RelaxedBernoulliStraightThrough, self).rsample(sample_shape)\n soft_sample = clamp_probs(soft_sample)\n hard_sample = QuantizeBernoulli.apply(soft_sample)\n return hard_sample\n\n def log_prob(self, value):\n value = getattr(value, '_unquantize', value)\n return super(RelaxedBernoulliStraightThrough, self).log_prob(value)\n\n\nclass QuantizeBernoulli(torch.autograd.Function):\n @staticmethod\n def forward(ctx, soft_value):\n hard_value = soft_value.round()\n hard_value._unquantize = soft_value\n return hard_value\n\n @staticmethod\n def backward(ctx, grad):\n return grad\n"
] | [
[
"torch.Size",
"torch.zeros_like",
"torch.distributions.utils.clamp_probs"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
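The file above defines straight-through variants of the relaxed (Gumbel-Softmax) distributions: `rsample()` returns a quantized one-hot sample while gradients flow through the underlying relaxed sample. Below is a minimal usage sketch, assuming a Pyro version that exports `RelaxedOneHotCategoricalStraightThrough` from `pyro.distributions`; the shapes and temperature value are illustrative, not taken from the repository.

```python
import torch
from pyro.distributions import RelaxedOneHotCategoricalStraightThrough

temperature = torch.tensor(0.5)                  # illustrative relaxation temperature
logits = torch.randn(4, 3, requires_grad=True)   # batch of 4 samples, 3 categories

dist = RelaxedOneHotCategoricalStraightThrough(temperature, logits=logits)
sample = dist.rsample()        # discrete one-hot values in the forward pass
log_p = dist.log_prob(sample)  # evaluated on the relaxed sample stored by rsample()

# Gradients reach `logits` through the relaxed (soft) sample, which is the
# straight-through behaviour described in the class docstring above.
log_p.sum().backward()
print(sample)
print(logits.grad.shape)
```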
tsingqguo/ABA | [
"c32edbbe5705b0332a08951b5ee436b5f58c2e70",
"c32edbbe5705b0332a08951b5ee436b5f58c2e70",
"c32edbbe5705b0332a08951b5ee436b5f58c2e70"
] | [
"ltr/dataset/lasot.py",
"utils/neuron/data/datasets/vot.py",
"OSABA/pix2pix/models/copy_network.py"
] | [
"import os\nimport os.path\nimport torch\nimport numpy as np\nimport pandas\nimport csv\nimport random\nfrom collections import OrderedDict\nfrom .base_video_dataset import BaseVideoDataset\nfrom ltr.data.image_loader import jpeg4py_loader\nfrom ltr.admin.environment import env_settings\n\n\nclass Lasot(BaseVideoDataset):\n \"\"\" LaSOT dataset.\n\n Publication:\n LaSOT: A High-quality Benchmark for Large-scale Single Object Tracking\n Heng Fan, Liting Lin, Fan Yang, Peng Chu, Ge Deng, Sijia Yu, Hexin Bai, Yong Xu, Chunyuan Liao and Haibin Ling\n CVPR, 2019\n https://arxiv.org/pdf/1809.07845.pdf\n\n Download the dataset from https://cis.temple.edu/lasot/download.html\n \"\"\"\n\n def __init__(self, root=None, image_loader=jpeg4py_loader, vid_ids=None, split=None, data_fraction=None):\n \"\"\"\n args:\n root - path to the lasot dataset.\n image_loader (jpeg4py_loader) - The function to read the images. jpeg4py (https://github.com/ajkxyz/jpeg4py)\n is used by default.\n vid_ids - List containing the ids of the videos (1 - 20) used for training. If vid_ids = [1, 3, 5], then the\n videos with subscripts -1, -3, and -5 from each class will be used for training.\n split - If split='train', the official train split (protocol-II) is used for training. Note: Only one of\n vid_ids or split option can be used at a time.\n data_fraction - Fraction of dataset to be used. The complete dataset is used by default\n \"\"\"\n root = env_settings().lasot_dir if root is None else root\n super().__init__('LaSOT', root, image_loader)\n\n # Keep a list of all classes\n self.class_list = [f for f in os.listdir(self.root)]\n self.class_to_id = {cls_name: cls_id for cls_id, cls_name in enumerate(self.class_list)}\n\n self.sequence_list = self._build_sequence_list(vid_ids, split)\n\n if data_fraction is not None:\n self.sequence_list = random.sample(self.sequence_list, int(len(self.sequence_list)*data_fraction))\n\n self.seq_per_class = self._build_class_list()\n\n def _build_sequence_list(self, vid_ids=None, split=None):\n if split is not None:\n if vid_ids is not None:\n raise ValueError('Cannot set both split_name and vid_ids.')\n ltr_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), '..')\n if split == 'train':\n file_path = os.path.join(ltr_path, 'data_specs', 'lasot_train_split.txt')\n else:\n raise ValueError('Unknown split name.')\n sequence_list = pandas.read_csv(file_path, header=None, squeeze=True).values.tolist()\n elif vid_ids is not None:\n sequence_list = [c+'-'+str(v) for c in self.class_list for v in vid_ids]\n else:\n raise ValueError('Set either split_name or vid_ids.')\n\n return sequence_list\n\n def _build_class_list(self):\n seq_per_class = {}\n for seq_id, seq_name in enumerate(self.sequence_list):\n class_name = seq_name.split('-')[0]\n if class_name in seq_per_class:\n seq_per_class[class_name].append(seq_id)\n else:\n seq_per_class[class_name] = [seq_id]\n\n return seq_per_class\n\n def get_name(self):\n return 'lasot'\n\n def has_class_info(self):\n return True\n\n def has_occlusion_info(self):\n return True\n\n def get_num_sequences(self):\n return len(self.sequence_list)\n\n def get_num_classes(self):\n return len(self.class_list)\n\n def get_sequences_in_class(self, class_name):\n return self.seq_per_class[class_name]\n\n def _read_bb_anno(self, seq_path):\n bb_anno_file = os.path.join(seq_path, \"groundtruth.txt\")\n gt = pandas.read_csv(bb_anno_file, delimiter=',', header=None, dtype=np.float32, na_filter=False, low_memory=False).values\n return torch.tensor(gt)\n\n 
def _read_target_visible(self, seq_path):\n # Read full occlusion and out_of_view\n occlusion_file = os.path.join(seq_path, \"full_occlusion.txt\")\n out_of_view_file = os.path.join(seq_path, \"out_of_view.txt\")\n\n with open(occlusion_file, 'r', newline='') as f:\n occlusion = torch.ByteTensor([int(v) for v in list(csv.reader(f))[0]])\n with open(out_of_view_file, 'r') as f:\n out_of_view = torch.ByteTensor([int(v) for v in list(csv.reader(f))[0]])\n\n target_visible = ~occlusion & ~out_of_view\n\n return target_visible\n\n def _get_sequence_path(self, seq_id):\n seq_name = self.sequence_list[seq_id]\n class_name = seq_name.split('-')[0]\n vid_id = seq_name.split('-')[1]\n\n return os.path.join(self.root, class_name, class_name + '-' + vid_id)\n\n def get_sequence_info(self, seq_id):\n seq_path = self._get_sequence_path(seq_id)\n bbox = self._read_bb_anno(seq_path)\n\n valid = (bbox[:, 2] > 0) & (bbox[:, 3] > 0)\n visible = self._read_target_visible(seq_path) & valid.byte()\n\n return {'bbox': bbox, 'valid': valid, 'visible': visible}\n\n def _get_frame_path(self, seq_path, frame_id):\n return os.path.join(seq_path, 'img', '{:08}.jpg'.format(frame_id+1)) # frames start from 1\n\n def _get_frame(self, seq_path, frame_id):\n return self.image_loader(self._get_frame_path(seq_path, frame_id))\n\n def _get_class(self, seq_path):\n raw_class = seq_path.split('/')[-2]\n return raw_class\n\n def get_class_name(self, seq_id):\n seq_path = self._get_sequence_path(seq_id)\n obj_class = self._get_class(seq_path)\n\n return obj_class\n\n def get_frames(self, seq_id, frame_ids, anno=None):\n seq_path = self._get_sequence_path(seq_id)\n #print(seq_path)\n obj_class = self._get_class(seq_path)\n frame_list = [self._get_frame(seq_path, f_id) for f_id in frame_ids]\n\n if anno is None:\n anno = self.get_sequence_info(seq_id)\n\n anno_frames = {}\n for key, value in anno.items():\n anno_frames[key] = [value[f_id, ...].clone() for f_id in frame_ids]\n\n object_meta = OrderedDict({'object_class_name': obj_class,\n 'motion_class': None,\n 'major_class': None,\n 'root_class': None,\n 'motion_adverb': None})\n\n return frame_list, anno_frames, object_meta\n",
"import os\nimport os.path as osp\nimport glob\nimport numpy as np\nimport json\nimport hashlib\n\nimport neuron.ops as ops\nfrom neuron.config import registry\nfrom .dataset import SeqDataset\n\n\n__all__ = ['VOT']\n\n\[email protected]_module\nclass VOT(SeqDataset):\n r\"\"\"`VOT <http://www.votchallenge.net/>`_ Datasets.\n\n Publication:\n ``The Visual Object Tracking VOT2017 challenge results``, M. Kristan, A. Leonardis\n and J. Matas, etc. 2017.\n \n Args:\n root_dir (string): Root directory of dataset where sequence\n folders exist.\n version (integer, optional): Specify the benchmark version. Specify as\n one of 2013~2018. Default is 2017.\n anno_type (string, optional): Returned annotation types, chosen as one of\n ``rect`` and ``corner``. Default is ``rect``.\n download (boolean, optional): If True, downloads the dataset from the internet\n and puts it in root directory. If dataset is downloaded, it is not\n downloaded again.\n list_file (string, optional): If provided, only read sequences\n specified by the file.\n \"\"\"\n __valid_versions = [2013, 2014, 2015, 2016, 2017, 2018, 'LT2018',\n 2019, 'LT2019', 'RGBD2019', 'RGBT2019']\n\n def __init__(self, root_dir=None, version=2019, anno_type='rect',\n download=True, list_file=None):\n assert version in self.__valid_versions, 'Unsupport VOT version.'\n assert anno_type in ['default', 'rect', 'inner_rect'], \\\n 'Unknown annotation type.'\n if root_dir is None:\n root_dir = osp.expanduser('~/data/vot{}'.format(version))\n self.root_dir = root_dir\n self.version = version\n self.anno_type = anno_type\n if download:\n self._download(root_dir, version)\n if list_file is None:\n list_file = osp.join(root_dir, 'list.txt')\n \n # initialize the dataset\n super(VOT, self).__init__(\n name='VOT-{}'.format(self.version),\n root_dir=self.root_dir,\n list_file=list_file)\n\n def _construct_seq_dict(self, root_dir, list_file):\n # image and annotation paths\n with open(list_file, 'r') as f:\n seq_names = f.read().strip().split('\\n')\n seq_dirs = [osp.join(root_dir, s) for s in seq_names]\n anno_files = [osp.join(s, 'groundtruth.txt')\n for s in seq_dirs]\n \n # construct seq_dict\n seq_dict = {}\n for s, seq_name in enumerate(seq_names):\n img_files = sorted(glob.glob(\n osp.join(seq_dirs[s], '*.jpg')))\n anno = np.loadtxt(anno_files[s], delimiter=',')\n anno = self._format(anno)\n\n # meta information\n seq_len = len(img_files)\n img0 = ops.read_image(img_files[0])\n meta = self._fetch_meta(seq_dirs[s], seq_len)\n meta.update({\n 'width': img0.shape[1],\n 'height': img0.shape[0],\n 'frame_num': seq_len,\n 'target_num': 1,\n 'total_instances': seq_len})\n\n # update seq_dict\n seq_dict[seq_name] = {\n 'img_files': img_files,\n 'target': {\n 'anno': anno,\n 'meta': meta}}\n \n return seq_dict\n\n def _download(self, root_dir, version):\n assert version in self.__valid_versions\n\n if not osp.isdir(root_dir):\n os.makedirs(root_dir)\n elif osp.isfile(osp.join(root_dir, 'list.txt')):\n with open(osp.join(root_dir, 'list.txt')) as f:\n seq_names = f.read().strip().split('\\n')\n if all([osp.isdir(osp.join(root_dir, s)) for s in seq_names]):\n ops.sys_print('Files already downloaded.')\n return\n\n url = 'http://data.votchallenge.net/'\n if version in range(2013, 2015 + 1):\n # main challenge (2013~2015)\n homepage = url + 'vot{}/dataset/'.format(version)\n elif version in range(2015, 2019 + 1):\n # main challenge (2016~2019)\n homepage = url + 'vot{}/main/'.format(version)\n elif version.startswith('LT'):\n # long-term tracking challenge\n year = 
int(version[2:])\n homepage = url + 'vot{}/longterm/'.format(year)\n elif version.startswith('RGBD'):\n # RGBD tracking challenge\n year = int(version[4:])\n homepage = url + 'vot{}/rgbd/'.format(year)\n elif version.startswith('RGBT'):\n # RGBT tracking challenge\n year = int(version[4:])\n url = url + 'vot{}/rgbtir/'.format(year)\n homepage = url + 'meta/'\n \n # download description file\n bundle_url = homepage + 'description.json'\n bundle_file = osp.join(root_dir, 'description.json')\n if not osp.isfile(bundle_file):\n ops.sys_print('Downloading description file...')\n ops.download(bundle_url, bundle_file)\n\n # read description file\n ops.sys_print('\\nParsing description file...')\n with open(bundle_file) as f:\n bundle = json.load(f)\n\n # md5 generator\n def md5(filename):\n hash_md5 = hashlib.md5()\n with open(filename, 'rb') as f:\n for chunk in iter(lambda: f.read(4096), b\"\"):\n hash_md5.update(chunk)\n return hash_md5.hexdigest()\n \n # download all sequences\n seq_names = []\n for seq in bundle['sequences']:\n seq_name = seq['name']\n seq_names.append(seq_name)\n\n # download channel (color/depth/ir) files\n channels = seq['channels'].keys()\n seq_files = []\n for cn in channels:\n seq_url = seq['channels'][cn]['url']\n if not seq_url.startswith(('http', 'https')):\n seq_url = url + seq_url[seq_url.find('sequence'):]\n seq_file = osp.join(\n root_dir,\n '{}_{}.zip'.format(seq_name, cn))\n if not osp.isfile(seq_file) or \\\n md5(seq_file) != seq['channels'][cn]['checksum']:\n ops.sys_print('\\nDownloading %s...' % seq_name)\n ops.download(seq_url, seq_file)\n seq_files.append(seq_file)\n\n # download annotations\n anno_url = homepage + '%s.zip' % seq_name\n anno_file = osp.join(root_dir, seq_name + '_anno.zip')\n if not osp.isfile(anno_file) or \\\n md5(anno_file) != seq['annotations']['checksum']:\n ops.download(anno_url, anno_file)\n\n # unzip compressed files\n seq_dir = osp.join(root_dir, seq_name)\n if not osp.isfile(seq_dir) or len(os.listdir(seq_dir)) < 10:\n ops.sys_print('\\nExtracting %s...' 
% seq_name)\n os.makedirs(seq_dir)\n for seq_file in seq_files:\n ops.extract(seq_file, seq_dir)\n ops.extract(anno_file, seq_dir)\n\n # save list.txt\n list_file = osp.join(root_dir, 'list.txt')\n with open(list_file, 'w') as f:\n f.write(str.join('\\n', seq_names))\n\n return root_dir\n \n def _format(self, anno):\n if anno.shape[1] == 8:\n if self.anno_type == 'rect':\n anno = self._corner2rect(anno)\n elif self.anno_type == 'inner_rect':\n anno = self._corner2rect_inner(anno)\n \n if anno.shape[1] == 4:\n anno[:, 2:] = anno[:, :2] + anno[:, 2:] - 1\n \n return anno\n\n def _corner2rect(self, corners, center=False):\n x1 = np.min(corners[:, 0::2], axis=1)\n x2 = np.max(corners[:, 0::2], axis=1)\n y1 = np.min(corners[:, 1::2], axis=1)\n y2 = np.max(corners[:, 1::2], axis=1)\n\n w = x2 - x1\n h = y2 - y1\n\n if center:\n cx = np.mean(corners[:, 0::2], axis=1)\n cy = np.mean(corners[:, 1::2], axis=1)\n return np.array([cx, cy, w, h]).T\n else:\n return np.array([x1, y1, w, h]).T\n\n def _corner2rect_inner(self, corners, center=False):\n cx = np.mean(corners[:, 0::2], axis=1)\n cy = np.mean(corners[:, 1::2], axis=1)\n\n x1 = np.min(corners[:, 0::2], axis=1)\n x2 = np.max(corners[:, 0::2], axis=1)\n y1 = np.min(corners[:, 1::2], axis=1)\n y2 = np.max(corners[:, 1::2], axis=1)\n\n area1 = np.linalg.norm(corners[:, 0:2] - corners[:, 2:4], axis=1) * \\\n np.linalg.norm(corners[:, 2:4] - corners[:, 4:6], axis=1)\n area2 = (x2 - x1) * (y2 - y1)\n scale = np.sqrt(area1 / area2)\n w = scale * (x2 - x1) + 1\n h = scale * (y2 - y1) + 1\n\n if center:\n return np.array([cx, cy, w, h]).T\n else:\n return np.array([cx - w / 2, cy - h / 2, w, h]).T\n\n def _fetch_meta(self, seq_dir, frame_num):\n meta = {}\n\n # attributes\n tag_files = glob.glob(osp.join(seq_dir, '*.label')) + \\\n glob.glob(osp.join(seq_dir, '*.tag'))\n for f in tag_files:\n if not osp.exists(f):\n continue\n tag = osp.basename(f)\n tag = tag[:tag.rfind('.')]\n meta[tag] = np.loadtxt(f)\n \n # practical\n practical_file = osp.join(seq_dir, 'practical')\n if osp.isfile(practical_file + '.value'):\n meta['practical'] = np.loadtxt(practical_file + '.value')\n if osp.isfile(practical_file + '.txt'):\n meta['practical_txt'] = np.loadtxt(practical_file + '.txt')\n\n # pad zeros if necessary\n for tag, val in meta.items():\n if len(val) < frame_num:\n meta[tag] = np.pad(\n val, (0, frame_num - len(val)), 'constant')\n\n return meta\n",
"import torch\nimport torch.nn as nn\nfrom torch.nn import init\nimport functools\nfrom torch.optim import lr_scheduler\nfrom torch.nn import functional as F\n\n\n\n\nclass cheng_UnetGenerator(nn.Module):\n def __init__(self,input_nc,output_nc,num_downs,ngf=64,norm_layer=nn.BatchNorm2d,use_dropout=False):\n super(cheng_UnetGenerator,self).__init__()\n #降采样部分\n self.first_con=nn.Conv2d(input_nc,ngf,kernel_size=4,stride=2,padding=1,bias=False)\n self.down1=self.block_d(ngf,ngf*2,True)\n self.down2=self.block_d(ngf*2,ngf*4,True)\n self.down3=self.block_d(ngf*4,ngf*8,True)\n self.down4=self.block_d(ngf*8,ngf*8,True)\n self.midrelu1=nn.LeakyReLU(0.2,False)\n self.midcon=nn.Conv2d(ngf*8,ngf*8,kernel_size=4,stride=2,padding=1,bias=False)\n self.midrelu2=nn.ReLU(False)\n\n #W上采样\n self.up1=nn.ConvTranspose2d(ngf*8,ngf*8,kernel_size=4,stride=2,padding=1)\n self.up2=nn.BatchNorm2d(ngf*8)\n\n self.upw1=self.block_w(ngf*16,ngf*8,False)\n self.upw2=self.block_w(ngf*16,ngf*4,False)\n self.upw3=self.block_w(ngf*8,ngf*2,False)\n self.upw4=self.block_w(ngf*4,ngf,False)\n self.upw5=nn.ReLU(True)\n self.upw6=nn.ConvTranspose2d(ngf*2,output_nc,kernel_size=4,stride=2,padding=1)\n self.upw7=nn.Tanh()\n\n #对抗性光流拆分权重\n self.upf1=nn.ConvTranspose2d(ngf*8,ngf*8,kernel_size=4,stride=2,padding=1)\n self.upf2=nn.BatchNorm2d(ngf*8)\n\n self.upwf1=self.block_w(ngf*16,ngf*8,False)\n self.upwf2=self.block_w(ngf*16,ngf*4,False)\n self.upwf3=self.block_w(ngf*8,ngf*2,False)\n self.upwf4=self.block_w(ngf*4,ngf,False)\n self.upwf5=nn.ReLU(True)\n self.upwf6=nn.ConvTranspose2d(ngf*2,34,kernel_size=4,stride=2,padding=1)\n self.upwf7=nn.Tanh()\n\n #光流上采样\n '''\n self.upf1 = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True)\n self.upf2 = nn.BatchNorm2d(ngf * 8)\n\n self.upf3 = self.block_f(ngf * 16 , ngf * 8)\n self.upf4 = self.block_f(ngf * 16 , ngf * 4)\n self.upf5 = self.block_f(ngf * 8 , ngf * 2)\n self.upf6 = self.block_f(ngf * 4 , ngf)\n self.upf7 = nn.LeakyReLU(0.2, False)\n self.upf8 = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True)\n self.upf9 = nn.Conv2d( ngf*2 ,ngf , kernel_size=3,stride=1, padding=1, bias= False)\n self.upf10 = nn.Conv2d( ngf ,ngf//2 , kernel_size=3,stride=1, padding=1, bias= False)\n self.upf11_1 = nn.Conv2d( ngf//2 , 2 , kernel_size=1,stride=1, padding=0, bias= False)\n self.upf11_2 = nn.Conv2d( ngf//2 , 2 , kernel_size=1,stride=1, padding=0, bias= False)\n self.upf12 = nn.LeakyReLU(0.2, False)\n '''\n def forward(self,x):\n #down\n x1=self.first_con(x)\n x2=self.down1(x1)\n x3=self.down2(x2)\n x4=self.down3(x3)\n x5=self.down4(x4)\n x6=self.midrelu1(x5)\n x7=self.midcon(x6)\n x8=self.midrelu2(x7)\n\n #分支w\n x9=self.up1(x8)\n x10=self.up2(x9)\n #print(x10.size(),x5.size()) \n x10=torch.cat((x10,x5),1)\n x11=self.upw1(x10)\n x11=torch.cat((x11,x4),1)\n x12=self.upw2(x11)\n x12=torch.cat((x12,x3),1)\n x13=self.upw3(x12)\n x13=torch.cat((x13,x2),1)\n x14=self.upw4(x13)\n x15=self.upw5(x14)\n x15=torch.cat((x15,x1),1)\n x16=self.upw6(x15)\n x17=self.upw7(x16)\n\n #光流拆分\n xf9=self.upf1(x8)\n xf10=self.upf2(xf9)\n #print(x10.size(),x5.size()) \n xf10=torch.cat((xf10,x5),1)\n xf11=self.upwf1(xf10)\n xf11=torch.cat((xf11,x4),1)\n xf12=self.upwf2(xf11)\n xf12=torch.cat((xf12,x3),1)\n xf13=self.upwf3(xf12)\n xf13=torch.cat((xf13,x2),1)\n xf14=self.upwf4(xf13)\n xf15=self.upwf5(xf14)\n xf15=torch.cat((xf15,x1),1)\n xf16=self.upwf6(xf15)\n xf17=self.upwf7(xf16)\n sx = xf17[:,0:17,:,:]\n sy = xf17[:,17:34,:,:]\n sx = F.softmax(sx,1)\n sy = F.softmax(sy,1)\n\n return sx,sy, x17\n\n 
def block_d(self,inc,ouc,use_bias):\n downconv = nn.Conv2d(inc , ouc , kernel_size=4,\n stride=2, padding=1, bias=use_bias)\n downrelu = nn.LeakyReLU(0.2, False)\n morm = nn.BatchNorm2d(ouc)\n\n block = [downrelu,downconv,morm]\n\n return nn.Sequential(*block)\n\n def block_w(self,inc,ouc,use_bias):\n upconv = nn.ConvTranspose2d(inc,ouc,kernel_size=4, stride=2,padding=1)\n uprelu = nn.ReLU(False)\n morm = nn.BatchNorm2d(ouc)\n\n block = [uprelu ,upconv ,morm]\n return nn.Sequential(*block)\n\n def block_f(self,inc,ouc):\n upsize = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True)\n upconv1 = nn.Conv2d( inc,ouc , kernel_size=3,stride=1, padding=1)\n upconv2 = nn.Conv2d( ouc,ouc , kernel_size=3,stride=1, padding=1)\n uprelu = nn.LeakyReLU(inplace=True)\n morm = nn.BatchNorm2d(ouc)\n\n block = [uprelu ,upsize ,upconv1, morm ,uprelu, upconv2,morm]\n return nn.Sequential(*block)"
] | [
[
"pandas.read_csv",
"torch.tensor"
],
[
"numpy.sqrt",
"numpy.min",
"numpy.linalg.norm",
"numpy.max",
"numpy.mean",
"numpy.array",
"numpy.loadtxt"
],
[
"torch.nn.Sequential",
"torch.nn.functional.softmax",
"torch.nn.ConvTranspose2d",
"torch.cat",
"torch.nn.Conv2d",
"torch.nn.Tanh",
"torch.nn.Upsample",
"torch.nn.LeakyReLU",
"torch.nn.BatchNorm2d",
"torch.nn.ReLU"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
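Among the files in the record above, utils/neuron/data/datasets/vot.py converts 8-value polygon annotations into axis-aligned [x, y, w, h] boxes (`_corner2rect`). The standalone NumPy sketch below reproduces just that conversion, using a hypothetical helper name and a made-up example polygon, to make the geometry explicit.

```python
import numpy as np

def corner2rect(corners):
    """Map N x 8 VOT-style polygons (x1, y1, ..., x4, y4) to N x 4
    axis-aligned [x, y, w, h] boxes, mirroring VOT._corner2rect above."""
    x1 = np.min(corners[:, 0::2], axis=1)
    x2 = np.max(corners[:, 0::2], axis=1)
    y1 = np.min(corners[:, 1::2], axis=1)
    y2 = np.max(corners[:, 1::2], axis=1)
    return np.stack([x1, y1, x2 - x1, y2 - y1], axis=1)

# Illustrative rectangle given as its four corners.
poly = np.array([[10.0, 5.0, 30.0, 5.0, 30.0, 25.0, 10.0, 25.0]])
print(corner2rect(poly))  # -> [[10.  5. 20. 20.]]
```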
anton-potapov/openvino | [
"84119afe9a8c965e0a0cd920fff53aee67b05108",
"84119afe9a8c965e0a0cd920fff53aee67b05108",
"84119afe9a8c965e0a0cd920fff53aee67b05108",
"84119afe9a8c965e0a0cd920fff53aee67b05108",
"84119afe9a8c965e0a0cd920fff53aee67b05108",
"84119afe9a8c965e0a0cd920fff53aee67b05108"
] | [
"model-optimizer/mo/middle/passes/fusing/decomposition_test.py",
"model-optimizer/extensions/back/InterpolateReshape_test.py",
"model-optimizer/extensions/middle/ConvToBinaryConv.py",
"tools/python_api_reproducer.py",
"ngraph/python/test/ngraph/test_ops_unary.py",
"model-optimizer/mo/front/onnx/extractors/reshape.py"
] | [
"\"\"\"\n Copyright (C) 2018-2020 Intel Corporation\n\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n\"\"\"\n\nimport unittest\n\nimport numpy as np\n\nfrom mo.middle.passes.fusing.decomposition import convert_scale_shift_to_mul_add, convert_batch_norm\nfrom mo.utils.ir_engine.compare_graphs import compare_graphs\nfrom mo.utils.unittest.graph import build_graph\n\nnodes_attributes = {\n 'placeholder_1': {'shape': None, 'type': 'Parameter', 'kind': 'op', 'op': 'Parameter'},\n 'placeholder_1_data': {'value': None, 'shape': None, 'kind': 'data', 'data_type': None},\n 'placeholder_2': {'shape': None, 'type': 'Parameter', 'kind': 'op', 'op': 'Parameter'},\n 'placeholder_2_data': {'value': None, 'shape': None, 'kind': 'data', 'data_type': None},\n # ScaleShift layer\n 'scaleshift_1': {'type': 'ScaleShift', 'kind': 'op', 'op': 'ScaleShift', 'axis': 0},\n 'const_scaleshift_1_w': {'value': None, 'shape': None, 'kind': 'op'},\n 'scaleshift_1_w': {'value': None, 'shape': None, 'kind': 'data'},\n 'const_scaleshift_1_b': {'value': None, 'shape': None, 'kind': 'op'},\n 'scaleshift_1_b': {'value': None, 'shape': None, 'kind': 'data'},\n 'scaleshift_1_data': {'value': None, 'shape': None, 'kind': 'data'},\n # Mul and Add operations\n 'mul_1': {'type': None, 'value': None, 'kind': 'op', 'op': 'Mul'},\n 'const_mul_1_w': {'value': None, 'shape': None, 'kind': 'op'},\n 'mul_1_w': {'value': None, 'shape': None, 'kind': 'data'},\n 'mul_1_data': {'value': None, 'shape': None, 'kind': 'data'},\n 'add_1': {'type': None, 'kind': 'op', 'op': 'Add'},\n 'const_add_1_w': {'value': None, 'shape': None, 'kind': 'op'},\n 'add_1_w': {'value': None, 'shape': None, 'kind': 'data'},\n 'add_1_data': {'value': None, 'shape': None, 'kind': 'data'},\n # Mul and Add operations\n 'mul_2': {'type': None, 'kind': 'op', 'op': 'Mul'},\n 'const_mul_2_w': {'value': None, 'shape': None, 'kind': 'op'},\n 'mul_2_w': {'value': None, 'shape': None, 'kind': 'data'},\n 'mul_2_data': {'value': None, 'shape': None, 'kind': 'data'},\n 'add_2': {'type': None, 'kind': 'op', 'op': 'Add'},\n 'const_add_2_w': {'value': None, 'shape': None, 'kind': 'op'},\n 'add_2_w': {'value': None, 'shape': None, 'kind': 'data'},\n 'add_2_data': {'value': None, 'shape': None, 'kind': 'data'},\n # Reshape\n 'placeholder_2/Reshape_': {'type': 'Reshape', 'kind': 'op', 'op': 'Reshape'},\n 'placeholder_2/Reshape_data': {'value': None, 'shape': None, 'kind': 'data'},\n 'placeholder_2/Reshape_const': {'type': 'Const', 'kind': 'op', 'op': 'Const', 'value': None},\n 'placeholder_2/Reshape_const_data': {'kind': 'data', 'value': None, 'shape': None},\n # BatchNorm operation\n 'bn_op': {'type': None, 'kind': 'op', 'op': 'BatchNorm', 'can_be_fused': True},\n 'const_bn_const': {'value': None, 'shape': None, 'kind': 'op'},\n 'bn_const': {'value': None, 'shape': None, 'kind': 'data'},\n 'const_bn_beta': {'value': None, 'shape': None, 'kind': 'op'},\n 'bn_beta': {'value': None, 'shape': None, 'kind': 'data'},\n 'const_bn_mean': {'value': None, 'shape': None, 'kind': 'op'},\n 'bn_mean': {'value': 
None, 'shape': None, 'kind': 'data'},\n 'const_bn_var': {'value': None, 'shape': None, 'kind': 'op'},\n 'bn_var': {'value': None, 'shape': None, 'kind': 'data'},\n 'bn_data': {'value': None, 'shape': None, 'kind': 'data'},\n # Concat1 operation\n 'concat': {'type': 'Concat', 'kind': 'op', 'op': 'Concat'},\n 'concat_data': {'value': None, 'shape': None, 'kind': 'data'},\n 'op_output': {'kind': 'op', 'op': 'Result'}\n}\n\n\nclass ScaleShiftToMulAdd(unittest.TestCase):\n # ScaleShift -> Mul\n def test_scaleshift_to_mul_1(self):\n graph = build_graph(nodes_attributes,\n [('placeholder_1', 'placeholder_1_data'),\n ('placeholder_1_data', 'scaleshift_1'),\n ('const_scaleshift_1_w', 'scaleshift_1_w'),\n ('scaleshift_1_w', 'scaleshift_1'),\n ('scaleshift_1', 'scaleshift_1_data'),\n ('scaleshift_1_data', 'op_output')\n ],\n {'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])},\n 'scaleshift_1_w': {'shape': np.array([3]), 'value': np.array([1, 2, 3])},\n 'scaleshift_1_data': {}\n })\n\n graph_ref = build_graph(nodes_attributes,\n [('placeholder_1', 'placeholder_1_data'),\n ('placeholder_1_data', 'mul_1'),\n ('const_mul_1_w', 'mul_1_w'),\n ('mul_1_w', 'mul_1'),\n ('mul_1', 'scaleshift_1_data'),\n ('scaleshift_1_data', 'op_output')\n ],\n {'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])},\n 'const_mul_1_w': {'shape': np.array([3]), 'value': np.array([1, 2, 3])},\n 'mul_1_w': {'shape': np.array([3]), 'value': np.array([1, 2, 3])},\n 'mul_1': {'can_be_fused': True},\n 'scaleshift_1_data': {}\n })\n\n graph.graph['layout'] = 'NHWC'\n convert_scale_shift_to_mul_add(graph)\n graph.clean_up()\n (flag, resp) = compare_graphs(graph, graph_ref, 'placeholder_1')\n self.assertTrue(flag, resp)\n\n # ScaleShift 2 inputs-> Mul\n def test_scaleshift2_to_mul(self):\n graph = build_graph(nodes_attributes,\n [('placeholder_1', 'placeholder_1_data'),\n ('placeholder_2', 'placeholder_2_data'),\n ('placeholder_1_data', 'scaleshift_1'),\n ('placeholder_2_data', 'scaleshift_1'),\n ('scaleshift_1', 'scaleshift_1_data'),\n ('scaleshift_1_data', 'op_output')\n ],\n {'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])},\n 'placeholder_2_data': {'shape': np.array([1, 227])},\n 'scaleshift_1_data': {}\n })\n\n graph_ref = build_graph(nodes_attributes,\n [('placeholder_1', 'placeholder_1_data'),\n ('placeholder_2', 'placeholder_2_data'),\n ('placeholder_2_data', 'placeholder_2/Reshape_'),\n ('placeholder_2/Reshape_const', 'placeholder_2/Reshape_const_data'),\n ('placeholder_2/Reshape_const_data', 'placeholder_2/Reshape_'),\n ('placeholder_2/Reshape_', 'placeholder_2/Reshape_data'),\n ('placeholder_1_data', 'mul_1'),\n ('placeholder_2/Reshape_data', 'mul_1'),\n ('mul_1', 'scaleshift_1_data'),\n ('scaleshift_1_data', 'op_output')\n ],\n {'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])},\n 'placeholder_2_data': {'shape': np.array([1, 227])},\n 'placeholder_2/Reshape_const': {'value': np.array([1, 227, 1, 1]), 'shape': [4]},\n 'placeholder_2/Reshape_const_data': {'value': np.array([1, 227, 1, 1]), 'shape': [4]},\n 'placeholder_2/Reshape_data': {'shape': np.array([1, 227, 1, 1])},\n 'mul_1': {'can_be_fused': True},\n 'scaleshift_1_data': {}\n })\n\n graph.graph['layout'] = 'NHWC'\n convert_scale_shift_to_mul_add(graph)\n graph.clean_up()\n (flag, resp) = compare_graphs(graph, graph_ref, 'placeholder_1')\n self.assertTrue(flag, resp)\n\n # ScaleShift 2 inputs-> Mul (axis = 1)\n def test_scaleshift2_axis1_to_mul(self):\n graph = build_graph(nodes_attributes,\n [('placeholder_1', 
'placeholder_1_data'),\n ('placeholder_2', 'placeholder_2_data'),\n ('placeholder_1_data', 'scaleshift_1'),\n ('placeholder_2_data', 'scaleshift_1'),\n ('scaleshift_1', 'scaleshift_1_data'),\n ('scaleshift_1_data', 'op_output')\n ],\n {'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])},\n 'placeholder_2_data': {'shape': np.array([227])},\n 'scaleshift_1': {'axis': 1},\n 'scaleshift_1_data': {}\n })\n\n graph_ref = build_graph(nodes_attributes,\n [('placeholder_1', 'placeholder_1_data'),\n ('placeholder_2', 'placeholder_2_data'),\n ('placeholder_2_data', 'placeholder_2/Reshape_'),\n ('placeholder_2/Reshape_const', 'placeholder_2/Reshape_const_data'),\n ('placeholder_2/Reshape_const_data', 'placeholder_2/Reshape_'),\n ('placeholder_2/Reshape_', 'placeholder_2/Reshape_data'),\n ('placeholder_1_data', 'mul_1'),\n ('placeholder_2/Reshape_data', 'mul_1'),\n ('mul_1', 'scaleshift_1_data'),\n ('scaleshift_1_data', 'op_output')\n ],\n {'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])},\n 'placeholder_2_data': {'shape': np.array([227])},\n 'placeholder_2/Reshape_const': {'value': np.array([1, 227, 1, 1]), 'shape': [4]},\n 'placeholder_2/Reshape_const_data': {'value': np.array([1, 227, 1, 1]), 'shape': [4]},\n 'placeholder_2/Reshape_data': {'shape': np.array([1, 227, 1, 1])},\n 'mul_1': {'can_be_fused': True},\n 'scaleshift_1_data': {}\n })\n\n graph.graph['layout'] = 'NHWC'\n convert_scale_shift_to_mul_add(graph)\n graph.clean_up()\n (flag, resp) = compare_graphs(graph, graph_ref, 'placeholder_1')\n self.assertTrue(flag, resp)\n\n # ScaleShift -> Mul (Zero biases)\n def test_scaleshift_to_mul_2(self):\n graph = build_graph(nodes_attributes,\n [('placeholder_1', 'placeholder_1_data'),\n ('placeholder_1_data', 'scaleshift_1'),\n ('const_scaleshift_1_w', 'scaleshift_1_w'),\n ('const_scaleshift_1_b', 'scaleshift_1_b'),\n ('scaleshift_1_w', 'scaleshift_1'),\n ('scaleshift_1_b', 'scaleshift_1'),\n ('scaleshift_1', 'scaleshift_1_data'),\n ('scaleshift_1_data', 'op_output')\n ],\n {'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])},\n 'scaleshift_1_w': {'shape': np.array([3]), 'value': np.array([1, 2, 3])},\n 'scaleshift_1_b': {'shape': np.array([3]), 'value': np.array([0, 0, 0])},\n 'scaleshift_1_data': {}\n })\n\n graph_ref = build_graph(nodes_attributes,\n [('placeholder_1', 'placeholder_1_data'),\n ('placeholder_1_data', 'mul_1'),\n ('const_mul_1_w', 'mul_1_w'),\n ('mul_1_w', 'mul_1'),\n ('mul_1', 'scaleshift_1_data'),\n ('scaleshift_1_data', 'op_output')\n ],\n {'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])},\n 'const_mul_1_w': {'shape': np.array([3]), 'value': np.array([1, 2, 3])},\n 'mul_1_w': {'shape': np.array([3]), 'value': np.array([1, 2, 3])},\n 'mul_1': {'can_be_fused': True},\n 'scaleshift_1_data': {}\n })\n\n graph.graph['layout'] = 'NHWC'\n convert_scale_shift_to_mul_add(graph)\n graph.clean_up()\n (flag, resp) = compare_graphs(graph, graph_ref, 'placeholder_1')\n self.assertTrue(flag, resp)\n\n # ScaleShift -> Mul->Add\n def test_scaleshift_to_mul_add(self):\n graph = build_graph(nodes_attributes,\n [('placeholder_1', 'placeholder_1_data'),\n ('placeholder_1_data', 'scaleshift_1'),\n ('const_scaleshift_1_w', 'scaleshift_1_w'),\n ('const_scaleshift_1_b', 'scaleshift_1_b'),\n ('scaleshift_1_w', 'scaleshift_1'),\n ('scaleshift_1_b', 'scaleshift_1'),\n ('scaleshift_1', 'scaleshift_1_data'),\n ('scaleshift_1_data', 'op_output')\n ],\n {'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])},\n 'scaleshift_1_w': {'shape': np.array([3]), 'value': 
np.array([1, 2, 3])},\n 'scaleshift_1_b': {'shape': np.array([3]), 'value': np.array([3, 2, 1])},\n 'scaleshift_1_data': {}\n })\n\n graph_ref = build_graph(nodes_attributes,\n [('placeholder_1', 'placeholder_1_data'),\n ('placeholder_1_data', 'mul_1'),\n ('const_mul_1_w', 'mul_1_w'),\n ('mul_1_w', 'mul_1'),\n ('mul_1', 'mul_1_data'),\n ('mul_1_data', 'add_1'),\n ('const_add_1_w', 'add_1_w'),\n ('add_1_w', 'add_1'),\n ('add_1', 'scaleshift_1_data'),\n ('scaleshift_1_data', 'op_output')\n ],\n {'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])},\n 'const_mul_1_w': {'shape': np.array([3]), 'value': np.array([1, 2, 3])},\n 'mul_1_w': {'shape': np.array([3]), 'value': np.array([1, 2, 3])},\n 'const_add_1_w': {'shape': np.array([3]), 'value': np.array([3, 2, 1])},\n 'add_1_w': {'shape': np.array([3]), 'value': np.array([3, 2, 1])},\n 'mul_1_data': {'shape': np.array([1, 227, 227, 3])},\n 'add_1': {'can_be_fused': True},\n 'mul_1': {'can_be_fused': True},\n 'scaleshift_1_data': {}\n })\n\n graph.graph['layout'] = 'NHWC'\n convert_scale_shift_to_mul_add(graph)\n graph.clean_up()\n (flag, resp) = compare_graphs(graph, graph_ref, 'placeholder_1')\n self.assertTrue(flag, resp)\n\n # ScaleShift -> None (Zero weights and biases)\n def test_scaleshift_to_nothing(self):\n graph = build_graph(nodes_attributes,\n [('placeholder_1', 'placeholder_1_data'),\n ('placeholder_1_data', 'scaleshift_1'),\n ('const_scaleshift_1_w', 'scaleshift_1_w'),\n ('const_scaleshift_1_b', 'scaleshift_1_b'),\n ('scaleshift_1_w', 'scaleshift_1'),\n ('scaleshift_1_b', 'scaleshift_1'),\n ('scaleshift_1', 'scaleshift_1_data'),\n ('scaleshift_1_data', 'op_output')\n ],\n {'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])},\n 'scaleshift_1_w': {'shape': np.array([3]), 'value': np.array([1, 1, 1])},\n 'scaleshift_1_b': {'shape': np.array([3]), 'value': np.array([0, 0, 0])},\n 'scaleshift_1_data': {'shape': np.array([1, 227, 227, 3])}\n }, nodes_with_edges_only=True)\n\n graph_ref = build_graph(nodes_attributes,\n [('placeholder_1', 'placeholder_1_data'),\n ('placeholder_1_data', 'op_output')\n ],\n {'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])}}\n ,nodes_with_edges_only=True)\n\n graph.graph['layout'] = 'NHWC'\n convert_scale_shift_to_mul_add(graph)\n graph.clean_up()\n (flag, resp) = compare_graphs(graph, graph_ref, 'placeholder_1')\n self.assertTrue(flag, resp)\n\n # ScaleShift -> ScaleShift (can_be_fused=False)\n def test_scaleshift_can_be_fused(self):\n graph = build_graph(nodes_attributes,\n [('placeholder_1', 'placeholder_1_data'),\n ('placeholder_1_data', 'scaleshift_1'),\n ('const_scaleshift_1_w', 'scaleshift_1_w'),\n ('const_scaleshift_1_b', 'scaleshift_1_b'),\n ('scaleshift_1_w', 'scaleshift_1'),\n ('scaleshift_1_b', 'scaleshift_1'),\n ('scaleshift_1', 'scaleshift_1_data'),\n ('scaleshift_1_data', 'op_output')\n ],\n {'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])},\n 'scaleshift_1_w': {'shape': np.array([3]), 'value': np.array([1, 1, 1])},\n 'scaleshift_1_b': {'shape': np.array([3]), 'value': np.array([0, 0, 0])},\n 'scaleshift_1': {'can_be_fused': False},\n 'scaleshift_1_data': {'shape': np.array([1, 227, 227, 3])}\n })\n\n graph_ref = build_graph(nodes_attributes,\n [('placeholder_1', 'placeholder_1_data'),\n ('placeholder_1_data', 'scaleshift_1'),\n ('const_scaleshift_1_w', 'scaleshift_1_w'),\n ('const_scaleshift_1_b', 'scaleshift_1_b'),\n ('scaleshift_1_w', 'scaleshift_1'),\n ('scaleshift_1_b', 'scaleshift_1'),\n ('scaleshift_1', 'scaleshift_1_data'),\n ('scaleshift_1_data', 
'op_output')\n ],\n {'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])},\n 'const_scaleshift_1_w': {'shape': np.array([3]), 'value': np.array([1, 1, 1])},\n 'scaleshift_1_w': {'shape': np.array([3]), 'value': np.array([1, 1, 1])},\n 'const_scaleshift_1_b': {'shape': np.array([3]), 'value': np.array([0, 0, 0])},\n 'scaleshift_1_b': {'shape': np.array([3]), 'value': np.array([0, 0, 0])},\n 'scaleshift_1': {'can_be_fused': False},\n 'scaleshift_1_data': {'shape': np.array([1, 227, 227, 3])}\n })\n\n convert_scale_shift_to_mul_add(graph)\n graph.clean_up()\n\n (flag, resp) = compare_graphs(graph, graph_ref, 'scaleshift_1_data')\n self.assertTrue(flag, resp)\n\n\nclass BatchNormDecomposition(unittest.TestCase):\n def test_bn_decomposition_1(self):\n graph = build_graph(nodes_attributes,\n [('placeholder_1', 'placeholder_1_data'),\n ('placeholder_1_data', 'bn_op'),\n ('const_bn_const', 'bn_const'),\n ('const_bn_beta', 'bn_beta'),\n ('const_bn_mean', 'bn_mean'),\n ('const_bn_var', 'bn_var'),\n ('bn_const', 'bn_op'),\n ('bn_beta', 'bn_op'),\n ('bn_mean', 'bn_op'),\n ('bn_var', 'bn_op'),\n ('bn_op', 'bn_data'),\n ('concat', 'concat_data'),\n ('bn_data', 'concat'),\n ('concat_data', 'op_output')\n ],\n {'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])},\n 'bn_op': {'eps': 1.2},\n 'bn_const': {'shape': np.array([3]), 'value': np.array([1, 2, 3])},\n 'bn_beta': {'shape': np.array([3]), 'value': np.array([1, 2, 3])},\n 'bn_mean': {'shape': np.array([3]), 'value': np.array([1, 2, 3])},\n 'bn_var': {'shape': np.array([3]), 'value': np.array([1, 2, 3])},\n 'bn_data': {'shape': np.array([1, 227, 227, 3])},\n 'concat_data': {}\n }, nodes_with_edges_only=True)\n\n graph_ref = build_graph(nodes_attributes,\n [('placeholder_1', 'placeholder_1_data'),\n ('placeholder_1_data', 'mul_1'),\n ('const_mul_1_w', 'mul_1_w'),\n ('mul_1_w', 'mul_1'),\n ('mul_1', 'mul_1_data'),\n ('mul_1_data', 'add_1'),\n ('const_add_1_w', 'add_1_w'),\n ('add_1_w', 'add_1'),\n ('add_1', 'add_1_data'),\n ('add_1_data', 'mul_2'),\n ('const_mul_2_w', 'mul_2_w'),\n ('mul_2_w', 'mul_2'),\n ('mul_2', 'mul_2_data'),\n ('mul_2_data', 'add_2'),\n ('const_add_2_w', 'add_2_w'),\n ('add_2_w', 'add_2'),\n ('add_2', 'add_2_data'),\n ('concat', 'concat_data'),\n ('add_2_data', 'concat'),\n ('concat_data', 'op_output')\n ],\n {'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])},\n 'const_mul_1_w': {'shape': np.array([3]),\n 'value': np.array([0.67419986, 0.55901699, 0.48795004])},\n 'mul_1_w': {'shape': np.array([3]),\n 'value': np.array([0.67419986, 0.55901699, 0.48795004])},\n 'const_mul_2_w': {'shape': np.array([3]), 'value': np.array([1, 2, 3])},\n 'mul_2_w': {'shape': np.array([3]), 'value': np.array([1, 2, 3])},\n 'const_add_1_w': {'shape': np.array([3]),\n 'value': np.array([-0.67419986, -1.11803399, -1.46385011])},\n 'add_1_w': {'shape': np.array([3]),\n 'value': np.array([-0.67419986, -1.11803399, -1.46385011])},\n 'const_add_2_w': {'shape': np.array([3]), 'value': np.array([1, 2, 3])},\n 'add_2_w': {'shape': np.array([3]), 'value': np.array([1, 2, 3])},\n 'add_2_data': {'shape': np.array([1, 227, 227, 3])},\n 'mul_1': {'can_be_fused': True},\n 'mul_2': {'can_be_fused': True},\n 'add_1': {'can_be_fused': True},\n 'add_2': {'can_be_fused': True},\n 'concat_data': {}\n }, nodes_with_edges_only=True)\n\n graph.graph['layout'] = 'NHWC'\n convert_batch_norm(graph)\n graph.clean_up()\n\n (flag, resp) = compare_graphs(graph, graph_ref, 'concat_data')\n self.assertTrue(flag, resp)\n\n # 'can_be_fused': False for BatchNorm\n 
def test_bn_decomposition_2(self):\n graph = build_graph(nodes_attributes,\n [('placeholder_1', 'placeholder_1_data'),\n ('placeholder_1_data', 'bn_op'),\n ('const_bn_const', 'bn_const'),\n ('const_bn_beta', 'bn_beta'),\n ('const_bn_mean', 'bn_mean'),\n ('const_bn_var', 'bn_var'),\n ('bn_const', 'bn_op'),\n ('bn_beta', 'bn_op'),\n ('bn_mean', 'bn_op'),\n ('bn_var', 'bn_op'),\n ('bn_op', 'bn_data'),\n ('concat', 'concat_data'),\n ('bn_data', 'concat'),\n ('concat_data', 'op_output')\n ],\n {'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])},\n 'bn_op': {'eps': 1.2, 'can_be_fused': False},\n 'bn_const': {'shape': np.array([3]), 'value': np.array([1, 2, 3])},\n 'bn_beta': {'shape': np.array([3]), 'value': np.array([1, 2, 3])},\n 'bn_mean': {'shape': np.array([3]), 'value': np.array([1, 2, 3])},\n 'bn_var': {'shape': np.array([3]), 'value': np.array([1, 2, 3])},\n 'bn_data': {'shape': np.array([1, 227, 227, 3])},\n 'concat_data': {}\n })\n\n graph_ref = build_graph(nodes_attributes,\n [('placeholder_1', 'placeholder_1_data'),\n ('placeholder_1_data', 'mul_1'),\n ('const_mul_1_w', 'mul_1_w'),\n ('mul_1_w', 'mul_1'),\n ('mul_1', 'mul_1_data'),\n ('mul_1_data', 'add_1'),\n ('const_add_1_w', 'add_1_w'),\n ('add_1_w', 'add_1'),\n ('add_1', 'add_1_data'),\n ('add_1_data', 'mul_2'),\n ('const_mul_2_w', 'mul_2_w'),\n ('mul_2_w', 'mul_2'),\n ('mul_2', 'mul_2_data'),\n ('mul_2_data', 'add_2'),\n ('const_add_2_w', 'add_2_w'),\n ('add_2_w', 'add_2'),\n ('add_2', 'add_2_data'),\n ('concat', 'concat_data'),\n ('add_2_data', 'concat'),\n ('concat_data', 'op_output')\n ],\n {'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])},\n 'const_mul_1_w': {'shape': np.array([3]),\n 'value': np.array([0.67419986, 0.55901699, 0.48795004])},\n 'mul_1_w': {'shape': np.array([3]),\n 'value': np.array([0.67419986, 0.55901699, 0.48795004])},\n 'const_mul_2_w': {'shape': np.array([3]), 'value': np.array([1, 2, 3])},\n 'mul_2_w': {'shape': np.array([3]), 'value': np.array([1, 2, 3])},\n 'const_add_1_w': {'shape': np.array([3]),\n 'value': np.array([-0.67419986, -1.11803399, -1.46385011])},\n 'add_1_w': {'shape': np.array([3]),\n 'value': np.array([-0.67419986, -1.11803399, -1.46385011])},\n 'const_add_2_w': {'shape': np.array([3]), 'value': np.array([1, 2, 3])},\n 'add_2_w': {'shape': np.array([3]), 'value': np.array([1, 2, 3])},\n 'add_2_data': {'shape': np.array([1, 227, 227, 3])},\n 'mul_1': {'can_be_fused': False},\n 'mul_2': {'can_be_fused': False},\n 'add_1': {'can_be_fused': False},\n 'add_2': {'can_be_fused': False},\n 'concat_data': {}\n })\n\n graph.graph['layout'] = 'NHWC'\n convert_batch_norm(graph)\n graph.clean_up()\n\n (flag, resp) = compare_graphs(graph, graph_ref, 'concat_data')\n self.assertTrue(flag, resp)",
"\"\"\"\n Copyright (C) 2018-2020 Intel Corporation\n\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n\"\"\"\n\nimport unittest\nfrom argparse import Namespace\n\nimport numpy as np\n\nfrom extensions.back.InterpolateReshape import InterpolateReshapeWA, InterpolateConcat\nfrom mo.utils.ir_engine.compare_graphs import compare_graphs\nfrom mo.utils.unittest.graph import build_graph, result, regular_op_with_shaped_data, valued_const_with_data, connect, \\\n connect_data\n\nnodes = {\n **regular_op_with_shaped_data('placeholder', [1, 3, 30, 40], {'type': 'Parameter'}),\n **valued_const_with_data('out_shape', np.array([60, 160])),\n\n **regular_op_with_shaped_data('interpolate', [1, 3, 60, 160], {'type': 'Interpolate', 'axes': [2, 3]}),\n\n **regular_op_with_shaped_data('shape', [4], {'type': 'ShapeOf'}),\n **valued_const_with_data('indices', np.array([2, 3])),\n **valued_const_with_data('axis', np.array(0)),\n **regular_op_with_shaped_data('gather', [2], {'type': 'Gather'}),\n\n **valued_const_with_data('multiplier', np.array([2, 4])),\n **regular_op_with_shaped_data('mul', [2], {'type': 'Multiply'}),\n\n **regular_op_with_shaped_data('placeholder_1', [1, 3, 60, 160], {'type': 'Parameter'}),\n **regular_op_with_shaped_data('concat', [1, 7, 60, 160], {'type': 'Concat', 'axis': 1}),\n\n **result(),\n}\n\n\nclass TestInterpolateReshapeWA(unittest.TestCase):\n def test_interpolate_reshape_graph_comparison(self):\n graph = build_graph(nodes, [\n *connect('placeholder', '0:interpolate'),\n *connect('out_shape', '1:interpolate'),\n *connect('interpolate', 'output'),\n ], nodes_with_edges_only=True)\n InterpolateReshapeWA().find_and_replace_pattern(graph)\n graph.graph['cmd_params'] = Namespace(keep_shape_ops=True)\n graph.clean_up()\n graph_ref = build_graph(nodes, [\n *connect('placeholder', '0:interpolate'),\n *connect_data('placeholder', 'shape'),\n *connect('shape', '0:gather'),\n *connect('indices', '1:gather'),\n *connect('axis', '2:gather'),\n *connect('gather', '0:mul'),\n *connect('multiplier', '1:mul'),\n *connect('mul', '1:interpolate'),\n *connect('interpolate', 'output'),\n ], nodes_with_edges_only=True)\n (flag, resp) = compare_graphs(graph, graph_ref, 'output', check_op_attrs=True)\n self.assertTrue(flag, resp)\n\n\nclass TestInterpolateConcat(unittest.TestCase):\n def test_interpolate_concat_reshape_graph_comparison(self):\n graph = build_graph(nodes, [\n *connect('placeholder', '0:interpolate'),\n *connect('out_shape', '1:interpolate'),\n *connect('interpolate', '0:concat'),\n *connect('placeholder_1', '1:concat'),\n *connect('concat', 'output'),\n ], nodes_with_edges_only=True)\n InterpolateConcat().find_and_replace_pattern(graph)\n graph.graph['cmd_params'] = Namespace(keep_shape_ops=True)\n graph.clean_up()\n graph_ref = build_graph(nodes, [\n *connect('placeholder', '0:interpolate'),\n *connect('placeholder_1', 'shape'),\n *connect('shape', '0:gather'),\n *connect('indices', '1:gather'),\n *connect('axis', '2:gather'),\n *connect('gather', '1:interpolate'),\n *connect('interpolate', 
'0:concat'),\n *connect_data('placeholder_1', '1:concat'),\n *connect('concat', 'output'),\n ], nodes_with_edges_only=True)\n (flag, resp) = compare_graphs(graph, graph_ref, 'output', check_op_attrs=True)\n self.assertTrue(flag, resp)\n",
"\"\"\"\n Copyright (C) 2018-2020 Intel Corporation\n\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n\"\"\"\n\nimport logging as log\n\nimport numpy as np\n\nfrom extensions.ops.elementwise import Mul, Add\nfrom mo.graph.graph import Graph\nfrom mo.middle.replacement import MiddleReplacementPattern\nfrom mo.ops.const import Const\n\n\nclass ConvToBinaryConv(MiddleReplacementPattern):\n \"\"\" Transform usual convolution with [0,+1] input and [-1,+1] to BinaryConvolution\n\n Modifies output terms after the Convolution to be able to apply BinaryConvolution\n operation instead that accepts [-1,1] input and [-1,1] weights. It requires modification\n channel-wise addition with weights reduced along all axis except output channel dimension.\n \"\"\"\n enabled = True\n force_clean_up = True\n\n def pattern(self):\n return dict(\n nodes=[\n # This pass is applicable for binarization only. Other intX variants are not relevant.\n ('quantize', dict(kind='op', op='FakeQuantize', levels=2)),\n ('quantized', dict()), # input tensor, not weights\n ('operator', dict(kind='op', type='Convolution')),\n ],\n edges=[\n ('quantize', 'quantized'),\n ('quantized', 'operator', {'in':0}),\n ]\n )\n\n def replace_pattern(self, graph: Graph, match: dict):\n assert match['operator'].has('multiplication_transparent_ports')\n\n quantize = match['quantize']\n\n port = match['operator'].input_ports_with(match['quantized'])\n assert len(port) >= 1\n if len(port) > 1:\n log.debug('BinarizeWeightsM1P1 cannot apply transformation for data {} because it consumed more'\n ' than once'.format(match['quantized'].name))\n return\n\n assert len(port) == 1\n port = port[0]\n applicable = [pair for pair in match['operator'].multiplication_transparent_ports if pair[0] == port]\n if len(applicable) == 0:\n return\n\n # Look at 3-rd and 4-th inputs of FakeQuantize -- they have constants that should be passed through.\n # Assume that the constant that should be passed through is a scalar.\n output_low = quantize.in_node(3)\n output_high = quantize.in_node(4)\n assert len(output_low.out_nodes()) == 1\n assert len(output_high.out_nodes()) == 1\n\n if not output_low.has_valid('value') and not output_high.has_valid('value'):\n return\n\n output_low = output_low.value\n output_high = output_high.value\n\n operator = match['operator']\n\n weights = operator.in_node(1).value\n weights_rounded = np.round(weights)\n weights_consistent = np.all(np.isclose(weights, weights_rounded)) and \\\n set(np.unique(weights_rounded)).issubset({-1, 1})\n\n if weights_consistent and np.all(np.isclose(output_low, 0)) and np.all(np.isclose(output_high, 1)):\n reduction_indices = set(range(len(weights.shape))) - set([operator.output_feature_channel])\n weights_reduced = np.add.reduce(weights, axis=tuple(reduction_indices))\n weights_reduced = weights_reduced.reshape([len(weights_reduced), 1, 1]) # FIXME: works for NCHW only\n\n add_term = Const(graph, {'value': weights_reduced}).create_node()\n add = Add(graph, {}).create_node()\n 
add.in_port(1).connect(add_term.out_port(0))\n mul_term = Const(graph, {'value': np.array(0.5)}).create_node()\n mul = Mul(graph, {}).create_node()\n mul.in_port(1).connect(mul_term.out_port(0))\n add.out_port(0).connect(mul.in_port(0))\n\n operator.out_port(0).get_connection().set_source(mul.out_port(0))\n add.in_port(0).connect(operator.out_port(0))\n\n operator['pad_value'] = float(-1.0)\n elif weights_consistent and np.all(np.isclose(output_low, -1)) and np.all(np.isclose(output_high, +1)):\n pass\n else:\n log.debug('ConvToBinaryConv: cannot apply transformation because input range is neither in [0, +1] nor '\n 'in [-1, +1].')\n return\n\n operator['type'] = 'BinaryConvolution'\n operator['mode'] = 'xnor-popcount'\n operator['pad_value'] = operator.soft_get('pad_value', float(0))\n operator['input'] = operator.in_node(0).shape[1]\n # Weights are not bit-packed yet; there should be a separate transformation to do that\n\n assert output_low.size == 1\n assert output_high.size == 1\n\n output_low = quantize.in_node(3)\n output_high = quantize.in_node(4)\n\n # Make sure that low/high values are exactly 0/1\n output_low.value = np.zeros(output_low.shape)\n output_high.value = np.ones(output_high.shape)\n",
"import argparse\nimport logging as log\nimport sys\n\nlog.basicConfig(format=\"[ %(levelname)s ] %(message)s\", level=log.INFO, stream=sys.stdout)\nimport os\n\nimport numpy as np\nfrom openvino.inference_engine import IENetwork\n\n\ndef python_api_infer(net, feed_dict, device, lib, api, nireq, outputs_to_add: list = None):\n \"\"\"\n Function to perform IE inference using python API \"in place\"\n :param net: IENetwork instance\n :param feed_dict: Dict which contains mapping between input blob and input data\n :param device: Device name for inference\n :param lib: Absolute path to custom kernel lib\n :param outputs_to_add: Layer names list to take output from\n :param api: Defines use synchronous infer or asynchronous\n :param nireq: Number of infer requests to create for asynchronous infer\n :return: Dict containing out blob name and out data\n \"\"\"\n\n from openvino.inference_engine import IECore\n ie = IECore()\n\n if outputs_to_add:\n net.add_outputs(outputs_to_add)\n\n exec_net = ie.load_network(net, device, num_requests=nireq)\n\n if api == \"async\":\n res = []\n for i in range(nireq):\n reqest_handler = exec_net.start_async(request_id=i, inputs=feed_dict)\n reqest_handler.wait()\n res.append(reqest_handler.outputs)\n else:\n res = exec_net.infer(inputs=feed_dict)\n del net\n # It's important to delete executable network first to avoid double free in plugin offloading.\n # Issue relates ony for hetero and Myriad plugins\n del exec_net\n del ie\n return res\n\n\ndef cli_parser():\n parser = argparse.ArgumentParser(description='Python_api reproducer')\n parser.add_argument('-i', dest='feed_dict', required=True, help='Path to input data in .npz format')\n parser.add_argument('-m', dest='ir_path', required=True, help='Path to XML file of IR')\n parser.add_argument('-d', dest='device', required=True, help='Target device to infer on')\n parser.add_argument('-api', dest='api', default='sync', help='')\n parser.add_argument('-nireq', dest='nireq', default=1, help='')\n parser.add_argument('-r', dest='out_path', default=None,\n help='Dumps results to the output folder')\n parser.add_argument('--out_layers', dest='out_layers', default=[],\n help='Names of layers to dump inference results. Example: \"input,conv3d\"')\n parser.add_argument('--dump_all_layers', dest='dump_all_layers', default=False, action=\"store_true\",\n help='Bool value to dump inference results from all layers')\n\n args = parser.parse_args()\n feed_dict = args.feed_dict\n ir_path = args.ir_path\n device = args.device\n lib = args.lib\n api = args.api\n nireq = int(args.nireq)\n out_path = args.out_path\n if out_path and not os.path.exists(out_path):\n os.makedirs(out_path)\n out_layers = args.out_layers.split(\",\") if args.out_layers else args.out_layers\n dump_all_layers = args.dump_all_layers\n if out_layers and dump_all_layers:\n raise AttributeError('CMD arguments \"out_layers\" and \"dump_all_layers\" were specified together. 
'\n 'Please, specify only one argument')\n return feed_dict, ir_path, device, lib, api, nireq, out_path, out_layers, dump_all_layers\n\n\nif __name__ == \"__main__\":\n feed_dict, ir_path, device, lib, api, nireq, out_path, out_layers, dump_all_layers = cli_parser()\n\n bin_path = os.path.splitext(ir_path)[0] + '.bin'\n feed_dict = dict(np.load(feed_dict))\n network = IENetwork(model=ir_path, weights=bin_path)\n if dump_all_layers:\n out_layers = list(network.layers.keys())\n results = python_api_infer(net=network, feed_dict=feed_dict, device=device, lib=lib, api=api, nireq=nireq,\n outputs_to_add=out_layers)\n if out_path:\n if api == \"async\":\n for i, result in enumerate(results):\n dump_path = os.path.join(out_path, \"dump_req{}.npz\".format(str(i)))\n np.savez(dump_path, **result)\n log.info(\"Path for inference results for {} request: {}\".format(str(i), dump_path))\n else:\n dump_path = os.path.join(out_path, \"dump.npz\")\n np.savez(os.path.join(out_path, \"dump.npz\"), **results)\n log.info(\"Path for inference results: {}\".format(dump_path))\n else:\n log.info(\"Inference results won't be saved in the file. \"\n \"To do it need to specify '-r' option.\")\n log.info(\"Inference results:\")\n log.info(results)\n log.info(\"SUCCESS!\")\n",
"# ******************************************************************************\n# Copyright 2017-2020 Intel Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ******************************************************************************\nimport numpy as np\nimport pytest\n\nimport ngraph as ng\nfrom test.ngraph.util import run_op_numeric_data, run_op_node\n\n\[email protected](\n \"ng_api_fn, numpy_fn, range_start, range_end\",\n [\n (ng.absolute, np.abs, -1, 1),\n (ng.abs, np.abs, -1, 1),\n (ng.acos, np.arccos, -1, 1),\n (ng.asin, np.arcsin, -1, 1),\n (ng.atan, np.arctan, -100.0, 100.0),\n (ng.ceiling, np.ceil, -100.0, 100.0),\n (ng.ceil, np.ceil, -100.0, 100.0),\n (ng.cos, np.cos, -100.0, 100.0),\n (ng.cosh, np.cosh, -100.0, 100.0),\n (ng.exp, np.exp, -100.0, 100.0),\n (ng.floor, np.floor, -100.0, 100.0),\n (ng.log, np.log, 0, 100.0),\n (ng.relu, lambda x: np.maximum(0, x), -100.0, 100.0),\n (ng.sign, np.sign, -100.0, 100.0),\n (ng.sin, np.sin, -100.0, 100.0),\n (ng.sinh, np.sinh, -100.0, 100.0),\n (ng.sqrt, np.sqrt, 0.0, 100.0),\n (ng.tan, np.tan, -1.0, 1.0),\n (ng.tanh, np.tanh, -100.0, 100.0),\n ],\n)\ndef test_unary_op_array(ng_api_fn, numpy_fn, range_start, range_end):\n np.random.seed(133391)\n input_data = range_start + np.random.rand(2, 3, 4) * (range_end - range_start)\n expected = numpy_fn(input_data)\n\n result = run_op_node([input_data], ng_api_fn)\n assert np.allclose(result, expected, rtol=0.001)\n\n result = run_op_numeric_data(input_data, ng_api_fn)\n assert np.allclose(result, expected, rtol=0.001)\n\n\[email protected](\n \"ng_api_fn, numpy_fn, input_data\",\n [\n (ng.absolute, np.abs, np.float32(-3)),\n (ng.abs, np.abs, np.float32(-3)),\n (ng.acos, np.arccos, np.float32(-0.5)),\n (ng.asin, np.arcsin, np.float32(-0.5)),\n (ng.atan, np.arctan, np.float32(-0.5)),\n (ng.ceiling, np.ceil, np.float32(1.5)),\n (ng.ceil, np.ceil, np.float32(1.5)),\n (ng.cos, np.cos, np.float32(np.pi / 4.0)),\n (ng.cosh, np.cosh, np.float32(np.pi / 4.0)),\n (ng.exp, np.exp, np.float32(1.5)),\n (ng.floor, np.floor, np.float32(1.5)),\n (ng.log, np.log, np.float32(1.5)),\n (ng.relu, lambda x: np.maximum(0, x), np.float32(-0.125)),\n (ng.sign, np.sign, np.float32(0.0)),\n (ng.sin, np.sin, np.float32(np.pi / 4.0)),\n (ng.sinh, np.sinh, np.float32(0.0)),\n (ng.sqrt, np.sqrt, np.float32(3.5)),\n (ng.tan, np.tan, np.float32(np.pi / 4.0)),\n (ng.tanh, np.tanh, np.float32(0.1234)),\n ],\n)\ndef test_unary_op_scalar(ng_api_fn, numpy_fn, input_data):\n expected = numpy_fn(input_data)\n\n result = run_op_node([input_data], ng_api_fn)\n assert np.allclose(result, expected)\n\n result = run_op_numeric_data(input_data, ng_api_fn)\n assert np.allclose(result, expected)\n\n\[email protected](\n \"input_data\", [(np.array([True, False, True, False])), (np.array(True)), (np.array(False))]\n)\ndef test_logical_not(input_data):\n expected = np.logical_not(input_data)\n\n result = run_op_node([input_data], ng.logical_not)\n\n assert np.allclose(result, expected)\n result = 
run_op_numeric_data(input_data, ng.logical_not)\n assert np.allclose(result, expected)\n\n\ndef test_sigmoid():\n input_data = np.array([-3.14, -1.0, 0.0, 2.71001, 1000.0], dtype=np.float32)\n result = run_op_node([input_data], ng.sigmoid)\n\n def sigmoid(x):\n return 1.0 / (1.0 + np.exp(-x))\n\n expected = np.array(list(map(sigmoid, input_data)))\n\n assert np.allclose(result, expected)\n\n\ndef test_softmax():\n axis = 0\n input_tensor = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.float32)\n\n result = run_op_node([input_tensor], ng.ops.softmax, axis)\n\n expected = [[0.00426978, 0.01160646, 0.03154963], [0.08576079, 0.23312202, 0.6336913]]\n\n assert np.allclose(result, expected)\n\n\ndef test_erf():\n input_tensor = np.array([-1.0, 0.0, 1.0, 2.5, 3.14, 4.0], dtype=np.float32)\n expected = [-0.842701, 0.0, 0.842701, 0.999593, 0.999991, 1.0]\n\n result = run_op_node([input_tensor], ng.erf)\n assert np.allclose(result, expected)\n\n result = run_op_numeric_data(input_tensor, ng.erf)\n assert np.allclose(result, expected)\n",
"\"\"\"\n Copyright (C) 2018-2020 Intel Corporation\n\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n\"\"\"\n\nimport numpy as np\n\nfrom mo.front.onnx.extractors.utils import onnx_attr\nfrom mo.ops.reshape import Reshape\n\n\ndef onnx_reshape_ext(node):\n ''' Extract ONNX Reshape op of different versions.\n Support both latest Reshape and Reshape-1.\n The first one has 2 arguments, Reshape-1 has one input and shape is coded in attribute.\n '''\n dim = onnx_attr(node, 'shape', 'ints', None)\n if dim is not None:\n dim = np.array(dim, dtype=np.int64)\n Reshape.update_node_stat(node, {'dim': dim})\n else:\n Reshape.update_node_stat(node)\n return node.graph.node[node.id]\n"
] | [
[
"numpy.array"
],
[
"numpy.array"
],
[
"numpy.unique",
"numpy.ones",
"numpy.round",
"numpy.array",
"numpy.zeros",
"numpy.isclose"
],
[
"numpy.load",
"numpy.savez"
],
[
"numpy.logical_not",
"numpy.maximum",
"numpy.allclose",
"numpy.random.seed",
"numpy.random.rand",
"numpy.float32",
"numpy.exp",
"numpy.array"
],
[
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
DottD/pynger | [
"9a24b43a2170234e5059a54ed20329e036260b0a"
] | [
"pynger/fingerprint/FVC_utilities.py"
] | [
"import os\nimport re\nimport io\nimport numpy as np\nimport PIL.Image\nimport typing\nfrom pynger.types import Image, Mask, Field\nfrom pynger.fingerprint.tuning_lro import LROEstimator\nfrom pynger.fingerprint.sampling import convert_to_full, subsample\nfrom pynger.field.manipulation import polar2cart\nfrom pynger.misc import recursively_scan_dir_gen, recursively_scan_dir, random_combination\nfrom itertools import combinations, starmap\n\n\nclass Proxy:\n def write(self, path: str):\n raise NotImplementedError(\"Derived classes must reimplement this method\")\n\n def read(self, path: str):\n raise NotImplementedError(\"Derived classes must reimplement this method\")\n\nclass MaskProxy(Proxy):\n def __init__(self, *args):\n if len(args) == 1:\n if isinstance(args[0], np.ndarray):\n self.mask = args[0]\n elif isinstance(args[0], str):\n self.read(args[0])\n else:\n raise TypeError(\"Arguments not recognized\")\n else:\n self.mask = None\n\n def read(self, path: str, full: bool = True):\n \"\"\" Reads the mask, according to FVC-OnGoing specs.\n\n Args:\n path: The input file path (generally with .fg extension)\n full: Whether the full output should be returned (not implemented yet)\n\n Return:\n The boolean mask represented in the given file.\n \"\"\"\n if not os.path.exists(path):\n raise RuntimeError(\"The input file does not exist\")\n with open(path, 'r') as f:\n shape = tuple([int(n) for n in f.readline().split()])\n mask = np.empty(shape, dtype=bool)\n for row_n, line in enumerate(f):\n mask[row_n,:] = [bool(int(n)) for n in line.split()]\n self.mask = mask\n return mask\n\n def write(self, path: str):\n \"\"\" Writes the mask, according to FVC-OnGoing specs.\n\n Args:\n path: The output file path (generally with .fg extension)\n \"\"\"\n with open(path, 'w') as f:\n print(self.mask.shape, file=f)\n for line in self.mask.astype(int):\n print(line, file=f)\n\nclass FieldProxy(Proxy):\n def __init__(self, *args):\n if len(args) == 2 and isinstance(args[0], np.ndarray) and isinstance(args[1], np.ndarray):\n self.angle, self.mask = args[0].copy(), args[1].copy()\n elif len(args) == 1 and isinstance(args[0], str):\n self.read(args[0])\n else:\n self.angle, self.mask = None, None\n\n def read(self, path: str, full: bool = True):\n \"\"\" Reads the field, according to FVC-OnGoing specs.\n\n Args:\n path: The input file path (generally with .gt extension)\n full: Whether the full output should be returned\n\n Return:\n The field represented in the given file.\n \"\"\"\n if not os.path.exists(path):\n raise RuntimeError(\"The input file does not exist\")\n with open(path, 'rb') as f:\n # Read and discard the header. 
To visualize -> print(f.read(8).decode('ascii'))\n f.read(8)\n # Read the field specifications\n get_next_int = lambda: int.from_bytes(f.read(4), byteorder='little', signed=True)\n self.border_x = get_next_int()\n self.border_y = get_next_int()\n self.step_x = get_next_int()\n self.step_y = get_next_int()\n cols = get_next_int()\n rows = get_next_int()\n # Read the values\n get_next_uint8 = lambda: int.from_bytes(f.read(1), byteorder='little', signed=False)\n content = [(get_next_uint8(), get_next_uint8()) for _ in range(cols*rows)]\n angle, mask = zip(*content)\n angle = np.array(angle, dtype=float).reshape((rows, cols))\n angle *= np.pi / 255.0\n mask = np.array(mask, dtype=bool).reshape((rows, cols))\n # Optionally convert to full matrix\n if full:\n self.angle = convert_to_full(angle, border_x=self.border_x, border_y=self.border_y, step_x=self.step_x, step_y=self.step_y, mode='constant')\n self.mask = convert_to_full(mask, border_x=self.border_x, border_y=self.border_y, step_x=self.step_x, step_y=self.step_y, mode='constant')\n else:\n self.angle = angle\n self.mask = mask\n return self.angle, self.mask\n\n def write(self, path: str, **kwargs):\n \"\"\" Writes the field, according to FVC-OnGoing specs.\n\n Args:\n path: The output file path (generally with .gt extension)\n\n Keyword Args:\n border_x (int): Horizontal border used to sample the field (defaults to 14)\n border_y (int): Vertical border used to sample the field (defaults to 14)\n step_x (int): Horizontal distance between two conscutive sample points (defaults to 8)\n step_y (int): Vertical distance between two conscutive sample points (defaults to 8)\n subsample (bool): Whether the input shall be sub-sampled before saving it\n\n Note:\n The field is subsampled in the process. To avoid this behaviour, set border parameters to 0 and step parameters to 1.\n \"\"\"\n # Read parameters\n bx = kwargs.get('border_x', 14)\n by = kwargs.get('border_y', 14)\n sx = kwargs.get('step_x', 8)\n sy = kwargs.get('step_y', 8)\n needSubsample = kwargs.pop('subsample', True)\n # Sample the field\n if self.angle.shape != self.mask.shape:\n raise RuntimeError('angle and mask sizes mismatch')\n if needSubsample:\n angle = subsample(self.angle, is_field=False, smooth=False, **kwargs)\n mask = subsample(self.mask, is_field=False, smooth=False, **kwargs)\n else:\n angle = self.angle\n mask = self.mask\n with open(path, 'wb') as f:\n f.write(\"DIRIMG00\".encode('ascii'))\n # Read the field specifications\n put_int = lambda n: f.write(int(n).to_bytes(4, byteorder='little', signed=True))\n put_int(bx)\n put_int(by)\n put_int(sx)\n put_int(sy)\n rows, cols = angle.shape\n put_int(cols)\n put_int(rows)\n # Values conversion\n angle *= 255.0 / np.pi\n angle = angle.astype(int)\n mask = mask.astype(int)\n mask *= int(255 / mask.max())\n # Write the values\n put_uint8 = lambda n: f.write(int(n).to_bytes(1, byteorder='little', signed=False))\n for a, m in zip(angle.ravel(), mask.ravel()):\n put_uint8(a)\n put_uint8(m)\n\ndef loadDataset(path: str, loadGT: bool = True):\n \"\"\" Loads the FVC-TEST dataset.\n\n Args:\n path: Directory with the FVC-TEST dataset.\n loadGT: whether to load the ground truth information or not.\n\n Return:\n A generator of pairs (X, y) where X has the original image, its mask and its border specifications, and y is the corresponding orientation field ground truth.\n \"\"\"\n with open(path, 'r') as f:\n _ = int(f.readline())\n for line in f:\n name, step, bd = line.split()\n step = int(step)\n bd = int(bd)\n # Load image\n 
image_path = os.path.join(os.path.dirname(path), name)\n image = np.array(PIL.Image.open(image_path).convert('L')).astype(float)\n # Load mask\n mask_path = os.path.splitext(image_path)[0]+'.fg'\n mask = MaskProxy().read(mask_path)\n # Set specifications\n specs = [bd, bd, step, step]\n # Adjust image shape\n _mask = convert_to_full(mask, border_x=bd, border_y=bd, step_x=step, step_y=step, mode='constant')\n image = image[:_mask.shape[0], :_mask.shape[1]]\n # Load the ground truth field\n if loadGT:\n field_path = os.path.splitext(image_path)[0]+'.gt'\n lro, _ = FieldProxy().read(field_path, full=False)\n field = polar2cart(lro, 1, retField=True)\n # Serialize input data and append to X and the ground truth information\n yield (LROEstimator.serialize_Xrow(image, mask, specs), LROEstimator.serialize_yrow(field))\n else:\n yield (LROEstimator.serialize_Xrow(image, mask, specs), image_path)\n\ndef countDatasetElements(path):\n with open(path, 'r') as f:\n return int(f.readline())\n\ndef loadSegmentationDataset(sdir: str, odir: str):\n \"\"\" Loads the dataset for segmentation evaluation.\n\n Args:\n sdir: Path to the segmented images; all the images shall be direct children of this directory.\n odir: Path to the original images; this folder shall contain as direct children the folder of the databases FVC2000, FVC2002, FVC2004 (from DB1a, DB1b, to DB4a, DB4b) - e.g. the main root of the DVD shipped with Handbook of Fingerprint Recognition.\n\n Note:\n If some DB is not available a warning will be issued, but the other images will be loaded anyway.\n\n Return:\n A generator of pairs (X, y) where X is the original image, and y the corresponding ground truth segmentation image.\n \"\"\"\n pattern = re.compile('(FVC\\\\d+)_(\\\\w+)_\\\\w+_(\\\\d+)_(\\\\d+)')\n sfiles = recursively_scan_dir_gen(sdir, '.png')\n for sfile in sfiles:\n basename = os.path.basename(sfile)\n match = pattern.match(basename)\n if match:\n ofile = os.path.join(\n odir,\n match[1], # FVCxxxx\n 'Dbs',\n # converts DB1 to Db1, them appends an 'a' for the first 100 images, and a 'b' otherwise\n match[2].title() + '_' + ('a' if int(match[3])<=100 else 'b'),\n '{}_{}.tif'.format(match[3], match[4]) # append the filename\n )\n yield (ofile, sfile)\n\ndef loadMatchingDatasetFVC(path: str):\n \"\"\" Loads the FVC-TEST dataset.\n\n Args:\n path: Directory with the FVC-TEST dataset.\n\n Return:\n A dictionary whose keys are pairs of:\n - tuples containing a reference to the database and competition where the images belong, and values are lists of pairs (X, y) where X has the pair of image filenames, and y is the corresponding ground truth label, i.e. 
a 0 for reject or 1 for accept;\n - the list of all images found in the given folder.\n \"\"\"\n _, all_image_files = recursively_scan_dir(path, '.tif')\n\n _, index_files = recursively_scan_dir(path, '.MFA')\n comp_pattern = re.compile('(FVC\\\\d+)')\n \n competitions = {}\n # Loop over the four possible databases\n for db_n in range(1, 5):\n for MFA in index_files:\n # Get index for false matches\n MFR = MFA[:-1]+'R'\n # Retrieve competition\n match = comp_pattern.search(MFA)\n if match:\n competition = match[1]\n else:\n competition = 'NULL'\n # Retrieve database type (a or b)\n db_type = MFA[-5].lower()\n # Create a new key for this competition\n comp_key = (competition, db_n, db_type)\n competitions[comp_key] = []\n # Generate database name\n db_name = 'Db{}_{}'.format(db_n, db_type)\n # Take the subset of images related to this dataset\n image_files = [name for name in all_image_files if os.path.basename(os.path.dirname(name)) == db_name]\n # Load all the pairs that will be matched\n challenge_pairs = []\n for ifile, gt in zip([MFA, MFR], [0, 1]):\n dir_ = os.path.dirname(ifile)\n with open(ifile, 'r') as file_:\n for line in file_:\n file1, file2 = line.split()\n path1 = os.path.join(dir_, db_name, file1)\n path2 = os.path.join(dir_, db_name, file2)\n challenge_pairs.append( ((path1, path2), gt) )\n # Update the competition dictionary\n competitions[comp_key] = (challenge_pairs, image_files)\n return competitions\n\ndef loadMatchingDatasetNIST(path: str, ratio: float = 2.0, verbose: bool = True):\n \"\"\" Load NIST SD04 for matching.\n \n Args: \n path: Path to the folder containing the images.\n ratio: Ratio between the number of impostor and genuine matches.\n verbose: whether to print some basic information about the dataset.\n\n Return:\n A tuple (X, y, lenX) where X yields pairs of images, y generates 0 for a non-match and 1 for a match, lenX is the total number of elements.\n \"\"\"\n # Load all images\n _, image_files = recursively_scan_dir(path, ['.jpg', '.jpeg', '.png', '.bmp', '.tif', '.tiff'])\n # Split between first and second impression\n f_image_files = list(filter(lambda s: os.path.basename(s)[0]=='f', image_files))\n\n # Collect the genuine matches\n challenge_pairs = []\n for ffile in f_image_files:\n basename = os.path.basename(ffile)\n basename = 's'+basename[1:]\n sfile = os.path.join( os.path.dirname(ffile), basename )\n challenge_pairs.append( ((ffile, sfile), 1) )\n\n # Get the total number of impostor and genuine matches\n genuine_matches = len(challenge_pairs)\n impostor_matches = int(genuine_matches * ratio)\n total_matches = genuine_matches + impostor_matches\n if verbose:\n print('{} genuine matches and {} impostor matches will be selected'.format(genuine_matches, impostor_matches))\n\n # Collect the impostor matches:\n while True:\n pair = random_combination(image_files, 2)\n left_bname = os.path.basename(pair[0])\n right_bname = os.path.basename(pair[1])\n if left_bname[1:] == right_bname[1:]:\n continue # genuine or the same image\n else:\n challenge_pairs.append( (pair, 0) )\n if len(challenge_pairs) >= total_matches:\n break\n\n competitions = {\n ('NIST', 'SD04', '_'): (challenge_pairs, image_files)\n }\n return competitions\n"
] | [
[
"numpy.array",
"numpy.empty"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
rhwhite/rhwhitepackages3 | [
"91d5677ea57d7cc9a3643708cd8c82a74fb6188d"
] | [
"SSWs.py"
] | [
"# Module to search for and get data on SSWs\n# Using the definition of Charlton and Polvani (2007):\n\n# Author [email protected]\n\n# Created July 2017\n\nimport numpy as np\nimport xarray as xr\nimport math\nimport sys\n\ndef adddays(U,itime,ndays):\n # Find ndays consecutive days with easterlies\n numcons = 0\n torun = True\n while itime < len(U.time):\n if U[itime] > 0:\n numcons += 1\n else:\n numcons = 0\n if numcons >= ndays:\n return(itime,numcons,False)\n itime += 1\n return(itime,numcons,True)\n\ndef meanSE(N,in0,in1,in2,in3=0):\n # Calculate mean and standard error of number of SSWs\n # a la Charlton and Polvani (2007)\n p0 = float(in0)/float(N)\n p1 = float(in1)/float(N)\n p2 = float(in2)/float(N)\n p3 = float(in3)/float(N)\n\n calcmean = p1 + (2 * p2) + (3 * p3)\n calcSE = ((math.sqrt(((0-calcmean)**2 * p0) +\n ((1-calcmean)**2 * p1) +\n ((2-calcmean)**2 * p2) + \n ((3-calcmean)**2 * p3)))\n /math.sqrt(N))\n return calcmean,calcSE\n\ndef findyearSSWs(U,times,count,thresh,lastdate,startdate,toprint,SSWdates):\n # find all SSWs in a single year\n\n finalwarmingstart = -1\n yearcount = 0\n itime = 0\n # if U starts below 0, iterate until it isn't!\n while U[itime]<0:\n itime +=1\n while itime < len(U.time):\n if U[itime] < 0:\n central,end,itime = findend(U,itime,thresh)\n if end == -1:\n finalwarmingstart = ((times[central]+1) % 365)\n else:\n SSWdates.append(int(times[central]))\n if toprint: print ('SSW, day of year ' +\n str((times[central]) % 365))\n if lastdate < ((times[central] +1) % 365) < startdate :\n # it counts as a final warming\n finalwarmingstart = ((times[central]+1) % 365)\n else:\n count +=1\n yearcount +=1\n\n itime +=1\n return count,yearcount, finalwarmingstart, SSWdates\n\ndef findend(U,itime,thresh):\n # find final SSW\n centraltime,endtime = -1,-1\n if U[itime] < 0:\n centraltime = itime\n\n # Find end date\n while U[itime] < 0:\n itime = itime + 1\n if itime >= len(U.time): return (centraltime,-1,itime)\n endtime = itime\n\n # Check for final warming: ends after April 30th but started before July\n # Add 10 consective easterly days - must occur before April 30th for event\n # to count \n\n newtime,numcons,end = adddays(U,itime,thresh)\n\n if end:\n return(itime,-1,newtime)\n else:\n # Event counts. 
Now add 20 consecutive days\n itime,ndays,end = adddays(U,itime,20)\n return(centraltime,endtime,itime)\n\n\n\ndef findSSWs(U,thresh,Obs=False,startyr = 0):\n # Find SSWs, print the mean number, the standard error, and \n # return the dates\n # Created for WACCM daily data\n\n SSWdates = []\n toprint = False\n SSWyears = []\n startdate = 303 # beginning of November\n lastdate = 119 # end of April \n enddate = 119 # 30th April\n\n count = 0\n yearcount = 0\n singleyear = 0\n doubleyear = 0\n tripleyear = 0\n final = []\n nyears = len(U.time)//365\n times = U.time\n\n # Select first year\n if Obs:\n yearU = U.sel(time=slice(str(startyr) + '-01',str(startyr) + '-04'))\n yeartime = times.sel(time=slice(str(startyr) + '-01',\n str(startyr) +'-04'))\n yeartime = (yeartime.values - np.datetime64('1980-01-01'))/ np.timedelta64(1, 'D')\n else:\n yearU = U.isel(time=slice(0,120))\n yeartime = times[0:120].values\n\n count,yearcount,finalW,SSWdates = findyearSSWs(yearU,yeartime,count,thresh,\n lastdate,startdate,\n toprint, SSWdates)\n if yearcount == 1:\n singleyear +=1\n #if toprint: print('year 0 1 SSW \\n')\n SSWyears.append(0)\n elif yearcount ==2:\n doubleyear +=1\n #if toprint: print('year 0 2 SSWs \\n')\n SSWyears.append(0)\n elif yearcount ==3:\n tripleyear +=1\n SSWyears.append(0)\n final.append(finalW)\n\n for iyear in range(0,nyears):\n if Obs:\n yearU = U.sel(time=slice(str(startyr+iyear) +'-11',\n str(startyr+iyear+1) + '-04'))\n yeartime = times.sel(time=slice(str(startyr+iyear) + '-11',\n str(startyr+iyear+1) +'-04'))\n yeartime = ((yeartime.values - np.datetime64('1980-01-01'))/\n np.timedelta64(1, 'D'))\n\n else:\n yearU = U.isel(time=slice(startdate+(iyear*365),\n enddate + ((iyear + 1) * 365)))\n yeartime = (times[startdate+(iyear*365):\n enddate+((iyear+1)*365)].values)\n\n count,yearcount,finalW,SSWdates = findyearSSWs(\n yearU,yeartime,\n count,thresh,lastdate,startdate,\n toprint,SSWdates)\n if yearcount == 1:\n singleyear +=1\n SSWyears.append(iyear + 1)\n #if toprint: print('year ' + str(iyear +1) + ' 1 SSW \\n')\n elif yearcount ==2:\n doubleyear +=1\n #if toprint: print('year ' + str(iyear +1) + ' 2 SSWs \\n')\n SSWyears.append(iyear + 1)\n elif yearcount ==3:\n tripleyear +=1\n SSWyears.append(iyear + 1)\n final.append(finalW)\n\n if singleyear + 2 * doubleyear +3 * tripleyear != count:\n print(count)\n print(singleyear + 2 * doubleyear +3 * tripleyear)\n sys.exit(\"problem with counting, maybe a year with more than 3 SSWs?!\")\n\n mean,SE = meanSE(nyears,nyears - singleyear - doubleyear,singleyear,doubleyear)\n print ('mean: ' + str(mean) + ' ; s.e.: ' + str(SE) )\n\n return(SSWdates)\n\n"
] | [
[
"numpy.timedelta64",
"numpy.datetime64"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
haihabi/GenerativeCRB | [
"d53c01bec7214bb087fbe17dba241e12eb60858e",
"d53c01bec7214bb087fbe17dba241e12eb60858e",
"d53c01bec7214bb087fbe17dba241e12eb60858e"
] | [
"experiments/analysis/edge_bound/training_nlf/camera_nlf_training.py",
"experiments/models_architecture/simple_normalzing_flow.py",
"experiments/data_model/doa/doa_signal_generator.py"
] | [
"import torch\nimport numpy as np\nfrom tqdm import tqdm\nfrom torch.utils.data import DataLoader\nfrom experiments.data_model.image_denoising.noise_dataset import NoiseDataSet\nfrom experiments.models_architecture.camera_nlf_flow import generate_nlf_flow\n\n\ndef train_step(in_noise, in_cond_vector):\n opt.zero_grad()\n loss = flow.nll_mean(in_noise, in_cond_vector)\n loss.backward()\n loss_list.append(loss.item())\n opt.step()\n\n\nif __name__ == '__main__':\n\n lr = 1e-4\n patch_size = 32\n n_epochs = 5\n batch_size = 32\n n_iter_per_epoch = 1000\n input_shape = [4, patch_size, patch_size]\n trained_alpha = True\n\n flow = generate_nlf_flow(input_shape, trained_alpha)\n opt = torch.optim.Adam(flow.parameters(), lr=lr)\n nds = NoiseDataSet(\"/data/datasets/SIDD_Medium_Raw/Data\", n_pat_per_im=5000)\n\n nds_dl = DataLoader(nds, batch_size=batch_size, shuffle=True)\n loss_best = np.inf\n for n in range(n_epochs):\n loss_list = []\n for noise, clean, cam, iso in tqdm(nds_dl):\n noise, clean, cam, iso = noise.cuda(), clean.cuda(), cam.long().cuda(), iso.cuda()\n clean = torch.permute(clean, (0, 3, 1, 2)).float()\n noise = torch.permute(noise, (0, 3, 1, 2)).float()\n cond_vector = [clean, iso, cam]\n train_step(noise, cond_vector)\n\n loss_current = sum(loss_list) / len(loss_list)\n print(loss_current)\n if loss_current < loss_best:\n flow_name = \"flow_nlf_best.pt\" if trained_alpha else \"flow_gaussian_best.pt\"\n torch.save(flow.state_dict(), f\"./{flow_name}\")\n loss_best = loss_current\n print(f\"Update Best To:{loss_current}\")\n\n flow_name = \"flow_nlf.pt\" if trained_alpha else \"flow_gaussian.pt\"\n torch.save(flow.state_dict(), f\"./{flow_name}\")\n",
"import torch\nimport normflowpy as nfp\nfrom experiments import constants\nfrom torch import nn\nfrom torch.distributions import MultivariateNormal\nfrom experiments.models_architecture.sine_flow_layer import SineFlowLayer\n\n\ndef append_k_blocks(flows, n_flow_blocks, affine_coupling, spline_flow, generate_nl, input_vector_shape, dim,\n condition_embedding_size, hidden_size_cond, n_layer_cond,\n affine_inject_scale, bias,\n spline_k, spline_b, act_norm=True, affine_inject=True, invertible_fully_connected=True,\n neighbor_splitting=False, affine_coupling_scale=True):\n for i in range(n_flow_blocks): # TODO: make this a function\n if act_norm:\n flows.append(nfp.flows.ActNorm(x_shape=input_vector_shape))\n if invertible_fully_connected:\n flows.append(\n nfp.flows.InvertibleFullyConnected(dim=dim))\n if affine_inject:\n flows.append(\n nfp.flows.AffineInjector(x_shape=input_vector_shape,\n condition_vector_size=condition_embedding_size, n_hidden=hidden_size_cond,\n net_class=nfp.base_nets.generate_mlp_class(n_layer=n_layer_cond,\n non_linear_function=generate_nl,\n bias=bias),\n scale=affine_inject_scale))\n\n if affine_coupling:\n flows.append(\n nfp.flows.AffineCoupling(x_shape=input_vector_shape, parity=i % 2,\n net_class=nfp.base_nets.generate_mlp_class(non_linear_function=generate_nl),\n scale=affine_coupling_scale,\n neighbor_splitting=neighbor_splitting))\n if spline_flow:\n flows.append(nfp.flows.CSF_CL(dim=dim, K=spline_k, B=spline_b,\n base_network=nfp.base_nets.generate_mlp_class(\n non_linear_function=generate_nl)))\n\n\ndef generate_flow_model(dim, theta_dim, n_flow_blocks, spline_flow, affine_coupling, n_layer_cond=4,\n hidden_size_cond=24, spline_b=3,\n spline_k=8, bias=True, affine_scale=True, sine_layer=True, dual_flow=False,\n neighbor_splitting=False):\n flows = []\n condition_embedding_size = theta_dim\n\n def generate_nl():\n return nn.SiLU()\n\n input_vector_shape = [dim]\n if dual_flow:\n append_k_blocks(flows, n_flow_blocks, affine_coupling, spline_flow, generate_nl, input_vector_shape, dim,\n condition_embedding_size, hidden_size_cond, n_layer_cond,\n affine_scale, bias,\n spline_k, spline_b, act_norm=False, affine_inject=False, neighbor_splitting=neighbor_splitting,\n affine_coupling_scale=False)\n if sine_layer:\n flows.append(SineFlowLayer(x_shape=input_vector_shape))\n append_k_blocks(flows, n_flow_blocks, affine_coupling, spline_flow, generate_nl, input_vector_shape, dim,\n condition_embedding_size, hidden_size_cond, n_layer_cond,\n affine_scale, bias,\n spline_k, spline_b, neighbor_splitting=neighbor_splitting)\n\n return nfp.NormalizingFlowModel(MultivariateNormal(torch.zeros(dim, device=constants.DEVICE),\n torch.eye(dim, device=constants.DEVICE)), flows,\n condition_network=None).to(\n constants.DEVICE)\n",
"import math\nfrom experiments import constants\nimport torch\nimport numpy as np\nfrom enum import Enum\nfrom torch import nn\n\n\nclass SensorsArrangement(Enum):\n ULA = 0\n UCA = 1\n RANDOM = 2\n\n\ndef complex_exp(z):\n return torch.stack([torch.cos(z), torch.sin(z)], dim=0)\n\n\ndef complex_matmul(a, b):\n a_real = a[0, :]\n a_imag = a[1, :]\n\n b_real = b[0, :]\n b_imag = b[1, :]\n real_part = torch.matmul(a_real, b_real) - torch.matmul(a_imag, b_imag)\n imag_part = torch.matmul(a_imag, b_real) + torch.matmul(a_real, b_imag)\n return torch.stack([real_part, imag_part], dim=0)\n\n\ndef conjugate_transpose(a):\n a_real = a[0, :]\n a_imag = a[1, :]\n a_real = a_real.transpose(dim0=0, dim1=1)\n a_imag = -a_imag.transpose(dim0=0, dim1=1)\n return torch.stack([a_real, a_imag])\n\n\ndef get_nominal_position(m_sensor, sensors_arrangement, distance):\n sensors_index = torch.linspace(0, m_sensor - 1, m_sensor, device=constants.DEVICE).float()\n if sensors_arrangement == SensorsArrangement.UCA:\n angle = np.pi / (m_sensor - 1)\n r = distance / (2 * np.sin(angle))\n angle_vector = 2 * np.pi * sensors_index / (m_sensor)\n x = r * torch.cos(angle_vector)\n y = r * torch.sin(angle_vector)\n elif sensors_arrangement == SensorsArrangement.ULA:\n x = torch.zeros([m_sensor], device=constants.DEVICE)\n y = (sensors_index - m_sensor / 2 + 0.5) * distance\n elif sensors_arrangement == SensorsArrangement.RANDOM:\n d_max = 4 * distance\n x = 2 * d_max * (torch.rand([m_sensor], device=constants.DEVICE) - 0.5)\n y = 2 * d_max * (torch.rand([m_sensor], device=constants.DEVICE) - 0.5)\n else:\n raise Exception('Unknown sensor arrangement')\n return torch.stack([x, y], dim=0)\n\n\ndef gaussian_source_sampler(k_samples, in_n_source, mu_source=0, sigma_source=1):\n return mu_source + sigma_source * torch.randn([2, in_n_source, k_samples], device=constants.DEVICE)\n\n\ndef generate_steering_matrix(source_theta, nominal_position, location_perturbation, wavelength: float = 1):\n \"\"\"\n\n :param source_theta: a vector of size N\n :param nominal_position: a vector of size Mx2 (two is for the x and y position)\n :param location_perturbation: a vec\n :return:\n \"\"\"\n sensor_x = nominal_position[0, :]\n sensor_y = nominal_position[1, :]\n if location_perturbation is not None:\n m_sensor_pertubation = int(location_perturbation.shape[1] / 2)\n sensor_x[:, :m_sensor_pertubation, 0] += location_perturbation[:, :m_sensor_pertubation]\n sensor_y[:, :m_sensor_pertubation, 0] += location_perturbation[:, m_sensor_pertubation:]\n\n phi_i = torch.atan(sensor_y / sensor_x) + math.pi * (sensor_x < 0) + 2 * math.pi * (sensor_x > 0) * (sensor_y < 0)\n r = torch.sqrt(torch.pow(sensor_x, 2.0) + torch.pow(sensor_y, 2.0))\n d_nm = torch.unsqueeze(r, dim=-1) * torch.cos(\n torch.unsqueeze(phi_i, dim=-1) - torch.unsqueeze(source_theta, dim=0)) # [M,N]\n return complex_exp(-d_nm / wavelength)\n\n\nclass DOASignalGenerator(nn.Module):\n def __init__(self, m_sensor, k_samples, sensors_arrangement, d_sensors, location_perturbation_scale=0):\n super().__init__()\n\n self.nominal_position = nn.Parameter(get_nominal_position(m_sensor, sensors_arrangement, d_sensors),\n requires_grad=False)\n self.k_samples = k_samples\n self.location_perturbation_scale = location_perturbation_scale\n\n @property\n def m_sensors(self):\n return self.nominal_position.shape[1]\n\n def forward(self, source_theta):\n in_n_source = source_theta.shape[0]\n source_tensor = gaussian_source_sampler(self.k_samples, in_n_source)\n a = generate_steering_matrix(source_theta, 
self.nominal_position, None)\n return complex_matmul(a, source_tensor)\n"
] | [
[
"torch.permute",
"torch.utils.data.DataLoader"
],
[
"torch.nn.SiLU",
"torch.eye",
"torch.zeros"
],
[
"torch.linspace",
"torch.sin",
"torch.zeros",
"torch.randn",
"torch.unsqueeze",
"numpy.sin",
"torch.matmul",
"torch.pow",
"torch.rand",
"torch.stack",
"torch.atan",
"torch.cos"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
RelationRx/pyrelational | [
"41ededeff84158bd88b76d39006764de3388c821"
] | [
"pyrelational/models/mcdropout_model.py"
] | [
"import copy\nimport logging\nfrom abc import ABC\nfrom typing import Dict, Optional, Type, Union\n\nimport torch\nfrom pytorch_lightning import LightningModule\nfrom torch.nn.modules import Module\nfrom torch.utils.data import DataLoader\n\nfrom .generic_model import GenericModel\nfrom .lightning_model import LightningModel\n\nlogger = logging.getLogger()\n\n\nclass GenericMCDropoutModel(GenericModel, ABC):\n \"\"\"\n Generic model wrapper for mcdropout uncertainty estimator\n \"\"\"\n\n def __init__(\n self,\n model_class: Type[Module],\n model_config: Union[str, Dict],\n trainer_config: Union[str, Dict],\n n_estimators: int = 10,\n eval_dropout_prob: float = 0.2,\n ):\n super(GenericMCDropoutModel, self).__init__(model_class, model_config, trainer_config)\n _check_mc_dropout_model(model_class, model_config)\n self.n_estimators = n_estimators\n self.eval_dropout_prob = eval_dropout_prob\n\n def __call__(self, loader: DataLoader) -> torch.Tensor:\n \"\"\"\n\n :param loader: pytorch dataloader\n :return: model predictions\n \"\"\"\n if self.current_model is None:\n raise ValueError(\"No current model, call 'train(train_loader, valid_loader)' to train the model first\")\n predictions = []\n model = self.current_model\n model.eval()\n\n with torch.no_grad():\n _enable_only_dropout_layers(model, self.eval_dropout_prob)\n for _ in range(self.n_estimators):\n model_prediction = []\n for x, _ in loader:\n model_prediction.append(model(x).detach().cpu())\n predictions.append(torch.cat(model_prediction, 0))\n predictions = torch.stack(predictions)\n return predictions\n\n\nclass LightningMCDropoutModel(GenericMCDropoutModel, LightningModel):\n r\"\"\"\n Wrapper for MC Dropout estimator with pytorch lightning trainer\n\n Example:\n\n .. code-block:: python\n\n import torch\n import pytorch_lightning as pl\n\n class PyLModel(pl.LightningModule):\n def __init__(self, in_dim, out_dim):\n super(PyLModel, self).()\n self.linear = torch.nn.Linear(in_dim, out_dim)\n # need to define other train/test steps and optimizers methods required\n # by pytorch-lightning to run this example\n\n wrapper = LightningMCDropoutModel(\n PyLModel,\n model_config={\"in_dim\":10, \"out_dim\":1},\n trainer_config={\"epochs\":100},\n n_estimators=10,\n eval_dropout_prob=0.2,\n )\n wrapper.train(train_loader, valid_loader)\n predictions = wrapper(loader)\n assert predictions.size(0) == 10\n\n \"\"\"\n\n def __init__(\n self,\n model_class: Type[LightningModule],\n model_config: Union[Dict, str],\n trainer_config: Union[Dict, str],\n n_estimators: int = 10,\n eval_dropout_prob: float = 0.2,\n ):\n super(LightningMCDropoutModel, self).__init__(\n model_class,\n model_config,\n trainer_config,\n n_estimators=n_estimators,\n eval_dropout_prob=eval_dropout_prob,\n )\n\n\ndef _enable_only_dropout_layers(model: Module, p: Optional[float] = None) -> None:\n def enable_dropout_on_module(m):\n if m.__class__.__name__.startswith(\"Dropout\"):\n if isinstance(p, float) and (0 <= p <= 1):\n m.p = p\n elif isinstance(p, float) and (p < 0 or p > 1):\n logger.warning(f\"Evaluation dropout probability should be a float between 0 and 1, got {p}\")\n m.train()\n\n model.apply(enable_dropout_on_module)\n\n\ndef _check_mc_dropout_model(model_class: Type[Module], model_config: Dict) -> None:\n model = model_class(**model_config)\n\n def has_dropout_module(model):\n is_dropout = []\n for m in model.children():\n if m.__class__.__name__.startswith(\"Dropout\"):\n is_dropout.append(True)\n else:\n is_dropout += has_dropout_module(m)\n return 
is_dropout\n\n if not any(has_dropout_module(model)):\n raise ValueError(\"The provided model does not contain any torch.nn.Dropout modules, cannot apply MC Dropout\")\n"
] | [
[
"torch.stack",
"torch.no_grad",
"torch.cat"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
ravish0007/fairml | [
"bdfb707ff9554c1a789dc8de3926c1ef3cfb1fc8",
"bdfb707ff9554c1a789dc8de3926c1ef3cfb1fc8"
] | [
"fairml/tests/test_orthogonal_projection.py",
"fairml/utils.py"
] | [
"from __future__ import division\n\n\nimport pytest\nimport numpy as np\nfrom random import randint\n\nfrom fairml.orthogonal_projection import audit_model\nfrom fairml.orthogonal_projection import get_orthogonal_vector\n\nfrom fairml.utils import mse\nfrom fairml.utils import accuracy\nfrom fairml.utils import detect_feature_sign\n\nfrom fairml.perturbation_strategies import constant_zero\n\n\n# let's define a black-box function\ndef black_box_function(input_data):\n if not (input_data.shape[1] == weights.shape[0]):\n raise Exception(\"problem, misaligned dimensions\")\n output = np.dot(input_data, weights)\n return output\n\n\ndef test_orthogonal_projection(number_of_tries=20, size=10000):\n \"\"\"Orthogonal projection function. \"\"\"\n for i in range(number_of_tries):\n\n a = np.random.normal(0, 1, size)\n b = np.random.normal(0, 1, size)\n c = np.random.binomial(10, 0.1, size)\n d = np.random.uniform(0, 10, size)\n\n # normal-normal check\n orth_b = get_orthogonal_vector(a, b)\n assert np.dot(orth_b, a) < 1e-8\n\n # normal- normal check\n ortho_c = get_orthogonal_vector(a, c)\n assert np.dot(ortho_c, a) < 1e-8\n\n # normal - uniform check\n ortho_d = get_orthogonal_vector(a, d)\n assert np.dot(ortho_d, a) < 1e-8\n\n\ndef test_mse():\n y_true = [3, -0.5, 2, 7]\n y_pred = [2.5, 0.0, 2, 8]\n\n test_mse = mse(y_true, y_pred)\n assert test_mse == 0.375\n\n\ndef test_accuracy():\n y_pred = [0, 2, 1, 3]\n y_true = [0, 1, 2, 3]\n\n test_acc = accuracy(y_pred, y_true)\n print(test_acc)\n assert test_acc == 0.5\n",
"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\n\n# import dictionary with perturbation strategies.\nfrom .perturbation_strategies import perturbation_strategy_dictionary\n\n\ndef mse(y, y_hat):\n \"\"\" function to calculate mse between to numpy vectors \"\"\"\n\n y = np.array(y)\n y_hat = np.array(y_hat)\n\n y_hat = np.reshape(y_hat, (y_hat.shape[0],))\n y = np.reshape(y, (y.shape[0],))\n\n diff = y - y_hat\n diff_squared = np.square(diff)\n mse = np.mean(diff_squared)\n\n return mse\n\n\ndef accuracy(y, y_hat):\n \"\"\" function to calculate accuracy of y_hat given y\"\"\"\n y = np.array(y)\n y_hat = np.array(y_hat)\n\n y = y.astype(int)\n y_hat = y_hat.astype(int)\n\n y_hat = np.reshape(y_hat, (y_hat.shape[0],))\n y = np.reshape(y, (y.shape[0],))\n\n equal = (y == y_hat)\n accuracy = np.sum(equal) / y.shape[0]\n\n return accuracy\n\n\ndef replace_column_of_matrix(X, col_num, random_sample,\n ptb_strategy):\n \"\"\"\n Arguments: data matrix, n X k\n random sample: row of data matrix, 1 X k\n column number: 0 <-> k-1\n\n replace all elements of X[column number] X\n with random_sample[column_number]\n \"\"\"\n\n # need to implement random permutation.\n # need to implement perturbation strategy as a function\n # need a distance metrics file.\n # this probably does not work right now, I need to go through to fix.\n if col_num >= random_sample.shape[0]:\n raise ValueError(\"column {} entered. Column # should be\"\n \"less than {}\".format(col_num,\n random_sample.shape[0]))\n\n # select the specific perturbation function chosen\n # obtain value from that function\n val_chosen = perturbation_strategy_dictionary[ptb_strategy](X,\n col_num,\n random_sample)\n constant_array = np.repeat(val_chosen, X.shape[0])\n X[:, col_num] = constant_array\n\n return X\n\n\ndef detect_feature_sign(predict_function, X, col_num):\n\n normal_output = predict_function(X)\n column_range = X[:, col_num].max() - X[:, col_num].min()\n\n X[:, col_num] = X[:, col_num] + np.repeat(column_range, X.shape[0])\n new_output = predict_function(X)\n\n diff = new_output - normal_output\n total_diff = np.mean(diff)\n\n if total_diff >= 0:\n return 1\n else:\n return -1\n\n\ndef main():\n pass\n\n\nif __name__ == '__main__':\n main()\n"
] | [
[
"numpy.random.binomial",
"numpy.dot",
"numpy.random.normal",
"numpy.random.uniform"
],
[
"numpy.square",
"numpy.reshape",
"numpy.mean",
"numpy.repeat",
"numpy.array",
"numpy.sum"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
mattboggess/pandas | [
"5551bcf9d297ea8a0aeffb70b17ae6730e8abf89",
"5551bcf9d297ea8a0aeffb70b17ae6730e8abf89",
"5551bcf9d297ea8a0aeffb70b17ae6730e8abf89",
"5551bcf9d297ea8a0aeffb70b17ae6730e8abf89",
"5551bcf9d297ea8a0aeffb70b17ae6730e8abf89"
] | [
"pandas/core/indexes/interval.py",
"pandas/core/arrays/base.py",
"scripts/validate_docstrings.py",
"pandas/tests/extension/decimal/array.py",
"pandas/tests/reshape/test_pivot.py"
] | [
"\"\"\" define the IntervalIndex \"\"\"\nimport textwrap\nimport warnings\n\nimport numpy as np\n\nfrom pandas.compat import add_metaclass\nfrom pandas.core.dtypes.missing import isna\nfrom pandas.core.dtypes.cast import find_common_type, maybe_downcast_to_dtype\nfrom pandas.core.dtypes.common import (\n ensure_platform_int,\n is_list_like,\n is_datetime_or_timedelta_dtype,\n is_datetime64tz_dtype,\n is_integer_dtype,\n is_float_dtype,\n is_interval_dtype,\n is_object_dtype,\n is_scalar,\n is_float,\n is_number,\n is_integer)\nfrom pandas.core.indexes.base import (\n Index, ensure_index,\n default_pprint, _index_shared_docs)\n\nfrom pandas._libs import Timestamp, Timedelta\nfrom pandas._libs.interval import (\n Interval, IntervalMixin, IntervalTree,\n)\n\nfrom pandas.core.indexes.datetimes import date_range\nfrom pandas.core.indexes.timedeltas import timedelta_range\nfrom pandas.core.indexes.multi import MultiIndex\nimport pandas.core.common as com\nfrom pandas.util._decorators import cache_readonly, Appender\nfrom pandas.util._doctools import _WritableDoc\nfrom pandas.util._exceptions import rewrite_exception\nfrom pandas.core.config import get_option\nfrom pandas.tseries.frequencies import to_offset\nfrom pandas.tseries.offsets import DateOffset\n\nimport pandas.core.indexes.base as ibase\nfrom pandas.core.arrays.interval import (IntervalArray,\n _interval_shared_docs)\n\n_VALID_CLOSED = {'left', 'right', 'both', 'neither'}\n_index_doc_kwargs = dict(ibase._index_doc_kwargs)\n_index_doc_kwargs.update(\n dict(klass='IntervalIndex',\n target_klass='IntervalIndex or list of Intervals',\n name=textwrap.dedent(\"\"\"\\\n name : object, optional\n to be stored in the index.\n \"\"\"),\n ))\n\n\ndef _get_next_label(label):\n dtype = getattr(label, 'dtype', type(label))\n if isinstance(label, (Timestamp, Timedelta)):\n dtype = 'datetime64'\n if is_datetime_or_timedelta_dtype(dtype) or is_datetime64tz_dtype(dtype):\n return label + np.timedelta64(1, 'ns')\n elif is_integer_dtype(dtype):\n return label + 1\n elif is_float_dtype(dtype):\n return np.nextafter(label, np.infty)\n else:\n raise TypeError('cannot determine next label for type {typ!r}'\n .format(typ=type(label)))\n\n\ndef _get_prev_label(label):\n dtype = getattr(label, 'dtype', type(label))\n if isinstance(label, (Timestamp, Timedelta)):\n dtype = 'datetime64'\n if is_datetime_or_timedelta_dtype(dtype) or is_datetime64tz_dtype(dtype):\n return label - np.timedelta64(1, 'ns')\n elif is_integer_dtype(dtype):\n return label - 1\n elif is_float_dtype(dtype):\n return np.nextafter(label, -np.infty)\n else:\n raise TypeError('cannot determine next label for type {typ!r}'\n .format(typ=type(label)))\n\n\ndef _get_interval_closed_bounds(interval):\n \"\"\"\n Given an Interval or IntervalIndex, return the corresponding interval with\n closed bounds.\n \"\"\"\n left, right = interval.left, interval.right\n if interval.open_left:\n left = _get_next_label(left)\n if interval.open_right:\n right = _get_prev_label(right)\n return left, right\n\n\ndef _new_IntervalIndex(cls, d):\n \"\"\"\n This is called upon unpickling, rather than the default which doesn't have\n arguments and breaks __new__\n \"\"\"\n return cls.from_arrays(**d)\n\n\n@Appender(_interval_shared_docs['class'] % dict(\n klass=\"IntervalIndex\",\n summary=\"Immutable index of intervals that are closed on the same side.\",\n name=_index_doc_kwargs['name'],\n versionadded=\"0.20.0\",\n extra_methods=\"contains\\n\",\n examples=textwrap.dedent(\"\"\"\\\n\n Examples\n --------\n A new 
``IntervalIndex`` is typically constructed using\n :func:`interval_range`:\n\n >>> pd.interval_range(start=0, end=5)\n IntervalIndex([(0, 1], (1, 2], (2, 3], (3, 4], (4, 5]]\n closed='right', dtype='interval[int64]')\n\n It may also be constructed using one of the constructor\n methods: :meth:`IntervalIndex.from_arrays`,\n :meth:`IntervalIndex.from_breaks`, and :meth:`IntervalIndex.from_tuples`.\n\n See further examples in the doc strings of ``interval_range`` and the\n mentioned constructor methods.\n \"\"\"),\n\n))\n@add_metaclass(_WritableDoc)\nclass IntervalIndex(IntervalMixin, Index):\n _typ = 'intervalindex'\n _comparables = ['name']\n _attributes = ['name', 'closed']\n\n # we would like our indexing holder to defer to us\n _defer_to_indexing = True\n\n # Immutable, so we are able to cache computations like isna in '_mask'\n _mask = None\n\n def __new__(cls, data, closed=None, dtype=None, copy=False,\n name=None, verify_integrity=True):\n\n if name is None and hasattr(data, 'name'):\n name = data.name\n\n with rewrite_exception(\"IntervalArray\", cls.__name__):\n array = IntervalArray(data, closed=closed, copy=copy, dtype=dtype,\n verify_integrity=verify_integrity)\n\n return cls._simple_new(array, name)\n\n @classmethod\n def _simple_new(cls, array, name, closed=None):\n \"\"\"\n Construct from an IntervalArray\n\n Parameters\n ----------\n array : IntervalArray\n name : str\n Attached as result.name\n closed : Any\n Ignored.\n \"\"\"\n result = IntervalMixin.__new__(cls)\n result._data = array\n result.name = name\n result._reset_identity()\n return result\n\n @Appender(_index_shared_docs['_shallow_copy'])\n def _shallow_copy(self, left=None, right=None, **kwargs):\n result = self._data._shallow_copy(left=left, right=right)\n attributes = self._get_attributes_dict()\n attributes.update(kwargs)\n return self._simple_new(result, **attributes)\n\n @cache_readonly\n def _isnan(self):\n \"\"\"Return a mask indicating if each value is NA\"\"\"\n if self._mask is None:\n self._mask = isna(self.left)\n return self._mask\n\n @cache_readonly\n def _engine(self):\n return IntervalTree(self.left, self.right, closed=self.closed)\n\n def __contains__(self, key):\n \"\"\"\n return a boolean if this key is IN the index\n We *only* accept an Interval\n\n Parameters\n ----------\n key : Interval\n\n Returns\n -------\n boolean\n \"\"\"\n if not isinstance(key, Interval):\n return False\n\n try:\n self.get_loc(key)\n return True\n except KeyError:\n return False\n\n def contains(self, key):\n \"\"\"\n Return a boolean indicating if the key is IN the index\n\n We accept / allow keys to be not *just* actual\n objects.\n\n Parameters\n ----------\n key : int, float, Interval\n\n Returns\n -------\n boolean\n \"\"\"\n try:\n self.get_loc(key)\n return True\n except KeyError:\n return False\n\n @classmethod\n @Appender(_interval_shared_docs['from_breaks'] % _index_doc_kwargs)\n def from_breaks(cls, breaks, closed='right', name=None, copy=False,\n dtype=None):\n with rewrite_exception(\"IntervalArray\", cls.__name__):\n array = IntervalArray.from_breaks(breaks, closed=closed, copy=copy,\n dtype=dtype)\n return cls._simple_new(array, name=name)\n\n @classmethod\n @Appender(_interval_shared_docs['from_arrays'] % _index_doc_kwargs)\n def from_arrays(cls, left, right, closed='right', name=None, copy=False,\n dtype=None):\n with rewrite_exception(\"IntervalArray\", cls.__name__):\n array = IntervalArray.from_arrays(left, right, closed, copy=copy,\n dtype=dtype)\n return cls._simple_new(array, name=name)\n\n 
@classmethod\n @Appender(_interval_shared_docs['from_intervals'] % _index_doc_kwargs)\n def from_intervals(cls, data, closed=None, name=None, copy=False,\n dtype=None):\n msg = ('IntervalIndex.from_intervals is deprecated and will be '\n 'removed in a future version; Use IntervalIndex(...) instead')\n warnings.warn(msg, FutureWarning, stacklevel=2)\n with rewrite_exception(\"IntervalArray\", cls.__name__):\n array = IntervalArray(data, closed=closed, copy=copy, dtype=dtype)\n\n if name is None and isinstance(data, cls):\n name = data.name\n\n return cls._simple_new(array, name=name)\n\n @classmethod\n @Appender(_interval_shared_docs['from_tuples'] % _index_doc_kwargs)\n def from_tuples(cls, data, closed='right', name=None, copy=False,\n dtype=None):\n with rewrite_exception(\"IntervalArray\", cls.__name__):\n arr = IntervalArray.from_tuples(data, closed=closed, copy=copy,\n dtype=dtype)\n return cls._simple_new(arr, name=name)\n\n @Appender(_interval_shared_docs['to_tuples'] % dict(\n return_type=\"Index\",\n examples=\"\"\"\n Examples\n --------\n >>> idx = pd.IntervalIndex.from_arrays([0, np.nan, 2], [1, np.nan, 3])\n >>> idx.to_tuples()\n Index([(0.0, 1.0), (nan, nan), (2.0, 3.0)], dtype='object')\n >>> idx.to_tuples(na_tuple=False)\n Index([(0.0, 1.0), nan, (2.0, 3.0)], dtype='object')\"\"\",\n ))\n def to_tuples(self, na_tuple=True):\n tuples = self._data.to_tuples(na_tuple=na_tuple)\n return Index(tuples)\n\n @cache_readonly\n def _multiindex(self):\n return MultiIndex.from_arrays([self.left, self.right],\n names=['left', 'right'])\n\n @property\n def left(self):\n \"\"\"\n Return the left endpoints of each Interval in the IntervalIndex as\n an Index\n \"\"\"\n return self._data._left\n\n @property\n def right(self):\n \"\"\"\n Return the right endpoints of each Interval in the IntervalIndex as\n an Index\n \"\"\"\n return self._data._right\n\n @property\n def closed(self):\n \"\"\"\n Whether the intervals are closed on the left-side, right-side, both or\n neither\n \"\"\"\n return self._data._closed\n\n @Appender(_interval_shared_docs['set_closed'] % _index_doc_kwargs)\n def set_closed(self, closed):\n if closed not in _VALID_CLOSED:\n msg = \"invalid option for 'closed': {closed}\"\n raise ValueError(msg.format(closed=closed))\n\n # return self._shallow_copy(closed=closed)\n array = self._data.set_closed(closed)\n return self._simple_new(array, self.name)\n\n @property\n def length(self):\n \"\"\"\n Return an Index with entries denoting the length of each Interval in\n the IntervalIndex\n \"\"\"\n return self._data.length\n\n @property\n def size(self):\n # Avoid materializing ndarray[Interval]\n return self._data.size\n\n @property\n def shape(self):\n # Avoid materializing ndarray[Interval]\n return self._data.shape\n\n @property\n def itemsize(self):\n msg = ('IntervalIndex.itemsize is deprecated and will be removed in '\n 'a future version')\n warnings.warn(msg, FutureWarning, stacklevel=2)\n\n # supress the warning from the underlying left/right itemsize\n with warnings.catch_warnings():\n warnings.simplefilter('ignore')\n return self.left.itemsize + self.right.itemsize\n\n def __len__(self):\n return len(self.left)\n\n @cache_readonly\n def values(self):\n \"\"\"\n Return the IntervalIndex's data as an IntervalArray.\n \"\"\"\n return self._data\n\n @cache_readonly\n def _values(self):\n return self._data\n\n @cache_readonly\n def _ndarray_values(self):\n return np.array(self._data)\n\n def __array__(self, result=None):\n \"\"\" the array interface, return my values \"\"\"\n 
return self._ndarray_values\n\n def __array_wrap__(self, result, context=None):\n # we don't want the superclass implementation\n return result\n\n def __reduce__(self):\n d = dict(left=self.left,\n right=self.right)\n d.update(self._get_attributes_dict())\n return _new_IntervalIndex, (self.__class__, d), None\n\n @Appender(_index_shared_docs['copy'])\n def copy(self, deep=False, name=None):\n array = self._data.copy(deep=deep)\n attributes = self._get_attributes_dict()\n if name is not None:\n attributes.update(name=name)\n\n return self._simple_new(array, **attributes)\n\n @Appender(_index_shared_docs['astype'])\n def astype(self, dtype, copy=True):\n with rewrite_exception('IntervalArray', self.__class__.__name__):\n new_values = self.values.astype(dtype, copy=copy)\n if is_interval_dtype(new_values):\n return self._shallow_copy(new_values.left, new_values.right)\n return super(IntervalIndex, self).astype(dtype, copy=copy)\n\n @cache_readonly\n def dtype(self):\n \"\"\"Return the dtype object of the underlying data\"\"\"\n return self._data.dtype\n\n @property\n def inferred_type(self):\n \"\"\"Return a string of the type inferred from the values\"\"\"\n return 'interval'\n\n @Appender(Index.memory_usage.__doc__)\n def memory_usage(self, deep=False):\n # we don't use an explicit engine\n # so return the bytes here\n return (self.left.memory_usage(deep=deep) +\n self.right.memory_usage(deep=deep))\n\n @cache_readonly\n def mid(self):\n \"\"\"\n Return the midpoint of each Interval in the IntervalIndex as an Index\n \"\"\"\n return self._data.mid\n\n @cache_readonly\n def is_monotonic(self):\n \"\"\"\n Return True if the IntervalIndex is monotonic increasing (only equal or\n increasing values), else False\n \"\"\"\n return self._multiindex.is_monotonic\n\n @cache_readonly\n def is_monotonic_increasing(self):\n \"\"\"\n Return True if the IntervalIndex is monotonic increasing (only equal or\n increasing values), else False\n \"\"\"\n return self._multiindex.is_monotonic_increasing\n\n @cache_readonly\n def is_monotonic_decreasing(self):\n \"\"\"\n Return True if the IntervalIndex is monotonic decreasing (only equal or\n decreasing values), else False\n \"\"\"\n return self._multiindex.is_monotonic_decreasing\n\n @cache_readonly\n def is_unique(self):\n \"\"\"\n Return True if the IntervalIndex contains unique elements, else False\n \"\"\"\n return self._multiindex.is_unique\n\n @cache_readonly\n def is_non_overlapping_monotonic(self):\n return self._data.is_non_overlapping_monotonic\n\n @Appender(_index_shared_docs['_convert_scalar_indexer'])\n def _convert_scalar_indexer(self, key, kind=None):\n if kind == 'iloc':\n return super(IntervalIndex, self)._convert_scalar_indexer(\n key, kind=kind)\n return key\n\n def _maybe_cast_slice_bound(self, label, side, kind):\n return getattr(self, side)._maybe_cast_slice_bound(label, side, kind)\n\n @Appender(_index_shared_docs['_convert_list_indexer'])\n def _convert_list_indexer(self, keyarr, kind=None):\n \"\"\"\n we are passed a list-like indexer. 
Return the\n indexer for matching intervals.\n \"\"\"\n locs = self.get_indexer_for(keyarr)\n\n # we have missing values\n if (locs == -1).any():\n raise KeyError\n\n return locs\n\n def _maybe_cast_indexed(self, key):\n \"\"\"\n we need to cast the key, which could be a scalar\n or an array-like to the type of our subtype\n \"\"\"\n if isinstance(key, IntervalIndex):\n return key\n\n subtype = self.dtype.subtype\n if is_float_dtype(subtype):\n if is_integer(key):\n key = float(key)\n elif isinstance(key, (np.ndarray, Index)):\n key = key.astype('float64')\n elif is_integer_dtype(subtype):\n if is_integer(key):\n key = int(key)\n\n return key\n\n def _check_method(self, method):\n if method is None:\n return\n\n if method in ['bfill', 'backfill', 'pad', 'ffill', 'nearest']:\n msg = 'method {method} not yet implemented for IntervalIndex'\n raise NotImplementedError(msg.format(method=method))\n\n raise ValueError(\"Invalid fill method\")\n\n def _searchsorted_monotonic(self, label, side, exclude_label=False):\n if not self.is_non_overlapping_monotonic:\n raise KeyError('can only get slices from an IntervalIndex if '\n 'bounds are non-overlapping and all monotonic '\n 'increasing or decreasing')\n\n if isinstance(label, IntervalMixin):\n raise NotImplementedError\n\n # GH 20921: \"not is_monotonic_increasing\" for the second condition\n # instead of \"is_monotonic_decreasing\" to account for single element\n # indexes being both increasing and decreasing\n if ((side == 'left' and self.left.is_monotonic_increasing) or\n (side == 'right' and not self.left.is_monotonic_increasing)):\n sub_idx = self.right\n if self.open_right or exclude_label:\n label = _get_next_label(label)\n else:\n sub_idx = self.left\n if self.open_left or exclude_label:\n label = _get_prev_label(label)\n\n return sub_idx._searchsorted_monotonic(label, side)\n\n def _get_loc_only_exact_matches(self, key):\n if isinstance(key, Interval):\n\n if not self.is_unique:\n raise ValueError(\"cannot index with a slice Interval\"\n \" and a non-unique index\")\n\n # TODO: this expands to a tuple index, see if we can\n # do better\n return Index(self._multiindex.values).get_loc(key)\n raise KeyError\n\n def _find_non_overlapping_monotonic_bounds(self, key):\n if isinstance(key, IntervalMixin):\n start = self._searchsorted_monotonic(\n key.left, 'left', exclude_label=key.open_left)\n stop = self._searchsorted_monotonic(\n key.right, 'right', exclude_label=key.open_right)\n elif isinstance(key, slice):\n # slice\n start, stop = key.start, key.stop\n if (key.step or 1) != 1:\n raise NotImplementedError(\"cannot slice with a slice step\")\n if start is None:\n start = 0\n else:\n start = self._searchsorted_monotonic(start, 'left')\n if stop is None:\n stop = len(self)\n else:\n stop = self._searchsorted_monotonic(stop, 'right')\n else:\n # scalar or index-like\n\n start = self._searchsorted_monotonic(key, 'left')\n stop = self._searchsorted_monotonic(key, 'right')\n return start, stop\n\n def get_loc(self, key, method=None):\n \"\"\"Get integer location, slice or boolean mask for requested label.\n\n Parameters\n ----------\n key : label\n method : {None}, optional\n * default: matches where the label is within an interval only.\n\n Returns\n -------\n loc : int if unique index, slice if monotonic index, else mask\n\n Examples\n ---------\n >>> i1, i2 = pd.Interval(0, 1), pd.Interval(1, 2)\n >>> index = pd.IntervalIndex([i1, i2])\n >>> index.get_loc(1)\n 0\n\n You can also supply an interval or an location for a point inside an\n 
interval.\n\n >>> index.get_loc(pd.Interval(0, 2))\n array([0, 1], dtype=int64)\n >>> index.get_loc(1.5)\n 1\n\n If a label is in several intervals, you get the locations of all the\n relevant intervals.\n\n >>> i3 = pd.Interval(0, 2)\n >>> overlapping_index = pd.IntervalIndex([i2, i3])\n >>> overlapping_index.get_loc(1.5)\n array([0, 1], dtype=int64)\n \"\"\"\n self._check_method(method)\n\n original_key = key\n key = self._maybe_cast_indexed(key)\n\n if self.is_non_overlapping_monotonic:\n if isinstance(key, Interval):\n left = self._maybe_cast_slice_bound(key.left, 'left', None)\n right = self._maybe_cast_slice_bound(key.right, 'right', None)\n key = Interval(left, right, key.closed)\n else:\n key = self._maybe_cast_slice_bound(key, 'left', None)\n\n start, stop = self._find_non_overlapping_monotonic_bounds(key)\n\n if start is None or stop is None:\n return slice(start, stop)\n elif start + 1 == stop:\n return start\n elif start < stop:\n return slice(start, stop)\n else:\n raise KeyError(original_key)\n\n else:\n # use the interval tree\n if isinstance(key, Interval):\n left, right = _get_interval_closed_bounds(key)\n return self._engine.get_loc_interval(left, right)\n else:\n return self._engine.get_loc(key)\n\n def get_value(self, series, key):\n if com.is_bool_indexer(key):\n loc = key\n elif is_list_like(key):\n loc = self.get_indexer(key)\n elif isinstance(key, slice):\n\n if not (key.step is None or key.step == 1):\n raise ValueError(\"cannot support not-default step in a slice\")\n\n try:\n loc = self.get_loc(key)\n except TypeError:\n # we didn't find exact intervals or are non-unique\n msg = \"unable to slice with this key: {key}\".format(key=key)\n raise ValueError(msg)\n\n else:\n loc = self.get_loc(key)\n return series.iloc[loc]\n\n @Appender(_index_shared_docs['get_indexer'] % _index_doc_kwargs)\n def get_indexer(self, target, method=None, limit=None, tolerance=None):\n\n self._check_method(method)\n target = ensure_index(target)\n target = self._maybe_cast_indexed(target)\n\n if self.equals(target):\n return np.arange(len(self), dtype='intp')\n\n if self.is_non_overlapping_monotonic:\n start, stop = self._find_non_overlapping_monotonic_bounds(target)\n\n start_plus_one = start + 1\n if not ((start_plus_one < stop).any()):\n return np.where(start_plus_one == stop, start, -1)\n\n if not self.is_unique:\n raise ValueError(\"cannot handle non-unique indices\")\n\n # IntervalIndex\n if isinstance(target, IntervalIndex):\n indexer = self._get_reindexer(target)\n\n # non IntervalIndex\n else:\n indexer = np.concatenate([self.get_loc(i) for i in target])\n\n return ensure_platform_int(indexer)\n\n def _get_reindexer(self, target):\n \"\"\"\n Return an indexer for a target IntervalIndex with self\n \"\"\"\n\n # find the left and right indexers\n lindexer = self._engine.get_indexer(target.left.values)\n rindexer = self._engine.get_indexer(target.right.values)\n\n # we want to return an indexer on the intervals\n # however, our keys could provide overlapping of multiple\n # intervals, so we iterate thru the indexers and construct\n # a set of indexers\n\n indexer = []\n n = len(self)\n\n for i, (lhs, rhs) in enumerate(zip(lindexer, rindexer)):\n\n target_value = target[i]\n\n # matching on the lhs bound\n if (lhs != -1 and\n self.closed == 'right' and\n target_value.left == self[lhs].right):\n lhs += 1\n\n # matching on the lhs bound\n if (rhs != -1 and\n self.closed == 'left' and\n target_value.right == self[rhs].left):\n rhs -= 1\n\n # not found\n if lhs == -1 and rhs == -1:\n 
indexer.append(np.array([-1]))\n\n elif rhs == -1:\n\n indexer.append(np.arange(lhs, n))\n\n elif lhs == -1:\n\n # care about left/right closed here\n value = self[i]\n\n # target.closed same as self.closed\n if self.closed == target.closed:\n if target_value.left < value.left:\n indexer.append(np.array([-1]))\n continue\n\n # target.closed == 'left'\n elif self.closed == 'right':\n if target_value.left <= value.left:\n indexer.append(np.array([-1]))\n continue\n\n # target.closed == 'right'\n elif self.closed == 'left':\n if target_value.left <= value.left:\n indexer.append(np.array([-1]))\n continue\n\n indexer.append(np.arange(0, rhs + 1))\n\n else:\n indexer.append(np.arange(lhs, rhs + 1))\n\n return np.concatenate(indexer)\n\n @Appender(_index_shared_docs['get_indexer_non_unique'] % _index_doc_kwargs)\n def get_indexer_non_unique(self, target):\n target = self._maybe_cast_indexed(ensure_index(target))\n return super(IntervalIndex, self).get_indexer_non_unique(target)\n\n @Appender(_index_shared_docs['where'])\n def where(self, cond, other=None):\n if other is None:\n other = self._na_value\n values = np.where(cond, self.values, other)\n return self._shallow_copy(values)\n\n def delete(self, loc):\n \"\"\"\n Return a new IntervalIndex with passed location(-s) deleted\n\n Returns\n -------\n new_index : IntervalIndex\n \"\"\"\n new_left = self.left.delete(loc)\n new_right = self.right.delete(loc)\n return self._shallow_copy(new_left, new_right)\n\n def insert(self, loc, item):\n \"\"\"\n Return a new IntervalIndex inserting new item at location. Follows\n Python list.append semantics for negative values. Only Interval\n objects and NA can be inserted into an IntervalIndex\n\n Parameters\n ----------\n loc : int\n item : object\n\n Returns\n -------\n new_index : IntervalIndex\n \"\"\"\n if isinstance(item, Interval):\n if item.closed != self.closed:\n raise ValueError('inserted item must be closed on the same '\n 'side as the index')\n left_insert = item.left\n right_insert = item.right\n elif is_scalar(item) and isna(item):\n # GH 18295\n left_insert = right_insert = item\n else:\n raise ValueError('can only insert Interval objects and NA into '\n 'an IntervalIndex')\n\n new_left = self.left.insert(loc, left_insert)\n new_right = self.right.insert(loc, right_insert)\n return self._shallow_copy(new_left, new_right)\n\n def _as_like_interval_index(self, other):\n self._assert_can_do_setop(other)\n other = ensure_index(other)\n if not isinstance(other, IntervalIndex):\n msg = ('the other index needs to be an IntervalIndex too, but '\n 'was type {}').format(other.__class__.__name__)\n raise TypeError(msg)\n elif self.closed != other.closed:\n msg = ('can only do set operations between two IntervalIndex '\n 'objects that are closed on the same side')\n raise ValueError(msg)\n return other\n\n def _concat_same_dtype(self, to_concat, name):\n \"\"\"\n assert that we all have the same .closed\n we allow a 0-len index here as well\n \"\"\"\n if not len({i.closed for i in to_concat if len(i)}) == 1:\n msg = ('can only append two IntervalIndex objects '\n 'that are closed on the same side')\n raise ValueError(msg)\n return super(IntervalIndex, self)._concat_same_dtype(to_concat, name)\n\n @Appender(_index_shared_docs['take'] % _index_doc_kwargs)\n def take(self, indices, axis=0, allow_fill=True,\n fill_value=None, **kwargs):\n result = self._data.take(indices, axis=axis, allow_fill=allow_fill,\n fill_value=fill_value, **kwargs)\n attributes = self._get_attributes_dict()\n return 
self._simple_new(result, **attributes)\n\n def __getitem__(self, value):\n result = self._data[value]\n if isinstance(result, IntervalArray):\n return self._shallow_copy(result)\n else:\n # scalar\n return result\n\n # __repr__ associated methods are based on MultiIndex\n\n def _format_with_header(self, header, **kwargs):\n return header + list(self._format_native_types(**kwargs))\n\n def _format_native_types(self, na_rep='', quoting=None, **kwargs):\n \"\"\" actually format my specific types \"\"\"\n from pandas.io.formats.format import IntervalArrayFormatter\n return IntervalArrayFormatter(values=self,\n na_rep=na_rep,\n justify='all').get_result()\n\n def _format_data(self, name=None):\n\n # TODO: integrate with categorical and make generic\n # name argument is unused here; just for compat with base / categorical\n n = len(self)\n max_seq_items = min((get_option(\n 'display.max_seq_items') or n) // 10, 10)\n\n formatter = str\n\n if n == 0:\n summary = '[]'\n elif n == 1:\n first = formatter(self[0])\n summary = '[{first}]'.format(first=first)\n elif n == 2:\n first = formatter(self[0])\n last = formatter(self[-1])\n summary = '[{first}, {last}]'.format(first=first, last=last)\n else:\n\n if n > max_seq_items:\n n = min(max_seq_items // 2, 10)\n head = [formatter(x) for x in self[:n]]\n tail = [formatter(x) for x in self[-n:]]\n summary = '[{head} ... {tail}]'.format(\n head=', '.join(head), tail=', '.join(tail))\n else:\n tail = [formatter(x) for x in self]\n summary = '[{tail}]'.format(tail=', '.join(tail))\n\n return summary + ',' + self._format_space()\n\n def _format_attrs(self):\n attrs = [('closed', repr(self.closed))]\n if self.name is not None:\n attrs.append(('name', default_pprint(self.name)))\n attrs.append(('dtype', \"'{dtype}'\".format(dtype=self.dtype)))\n return attrs\n\n def _format_space(self):\n space = ' ' * (len(self.__class__.__name__) + 1)\n return \"\\n{space}\".format(space=space)\n\n def argsort(self, *args, **kwargs):\n return np.lexsort((self.right, self.left))\n\n def equals(self, other):\n \"\"\"\n Determines if two IntervalIndex objects contain the same elements\n \"\"\"\n if self.is_(other):\n return True\n\n # if we can coerce to an II\n # then we can compare\n if not isinstance(other, IntervalIndex):\n if not is_interval_dtype(other):\n return False\n other = Index(getattr(other, '.values', other))\n\n return (self.left.equals(other.left) and\n self.right.equals(other.right) and\n self.closed == other.closed)\n\n def _setop(op_name):\n def func(self, other):\n other = self._as_like_interval_index(other)\n\n # GH 19016: ensure set op will not return a prohibited dtype\n subtypes = [self.dtype.subtype, other.dtype.subtype]\n common_subtype = find_common_type(subtypes)\n if is_object_dtype(common_subtype):\n msg = ('can only do {op} between two IntervalIndex '\n 'objects that have compatible dtypes')\n raise TypeError(msg.format(op=op_name))\n\n result = getattr(self._multiindex, op_name)(other._multiindex)\n result_name = self.name if self.name == other.name else None\n\n # GH 19101: ensure empty results have correct dtype\n if result.empty:\n result = result.values.astype(self.dtype.subtype)\n else:\n result = result.values\n\n return type(self).from_tuples(result, closed=self.closed,\n name=result_name)\n return func\n\n union = _setop('union')\n intersection = _setop('intersection')\n difference = _setop('difference')\n symmetric_difference = _setop('symmetric_difference')\n\n # TODO: arithmetic 
operations\n\n\nIntervalIndex._add_logical_methods_disabled()\n\n\ndef _is_valid_endpoint(endpoint):\n \"\"\"helper for interval_range to check if start/end are valid types\"\"\"\n return any([is_number(endpoint),\n isinstance(endpoint, Timestamp),\n isinstance(endpoint, Timedelta),\n endpoint is None])\n\n\ndef _is_type_compatible(a, b):\n \"\"\"helper for interval_range to check type compat of start/end/freq\"\"\"\n is_ts_compat = lambda x: isinstance(x, (Timestamp, DateOffset))\n is_td_compat = lambda x: isinstance(x, (Timedelta, DateOffset))\n return ((is_number(a) and is_number(b)) or\n (is_ts_compat(a) and is_ts_compat(b)) or\n (is_td_compat(a) and is_td_compat(b)) or\n com._any_none(a, b))\n\n\ndef interval_range(start=None, end=None, periods=None, freq=None,\n name=None, closed='right'):\n \"\"\"\n Return a fixed frequency IntervalIndex\n\n Parameters\n ----------\n start : numeric or datetime-like, default None\n Left bound for generating intervals\n end : numeric or datetime-like, default None\n Right bound for generating intervals\n periods : integer, default None\n Number of periods to generate\n freq : numeric, string, or DateOffset, default None\n The length of each interval. Must be consistent with the type of start\n and end, e.g. 2 for numeric, or '5H' for datetime-like. Default is 1\n for numeric and 'D' for datetime-like.\n name : string, default None\n Name of the resulting IntervalIndex\n closed : {'left', 'right', 'both', 'neither'}, default 'right'\n Whether the intervals are closed on the left-side, right-side, both\n or neither.\n\n Notes\n -----\n Of the four parameters ``start``, ``end``, ``periods``, and ``freq``,\n exactly three must be specified. If ``freq`` is omitted, the resulting\n ``IntervalIndex`` will have ``periods`` linearly spaced elements between\n ``start`` and ``end``, inclusively.\n\n To learn more about datetime-like frequency strings, please see `this link\n <http://pandas.pydata.org/pandas-docs/stable/timeseries.html#offset-aliases>`__.\n\n Returns\n -------\n rng : IntervalIndex\n\n Examples\n --------\n Numeric ``start`` and ``end`` is supported.\n\n >>> pd.interval_range(start=0, end=5)\n IntervalIndex([(0, 1], (1, 2], (2, 3], (3, 4], (4, 5]]\n closed='right', dtype='interval[int64]')\n\n Additionally, datetime-like input is also supported.\n\n >>> pd.interval_range(start=pd.Timestamp('2017-01-01'),\n end=pd.Timestamp('2017-01-04'))\n IntervalIndex([(2017-01-01, 2017-01-02], (2017-01-02, 2017-01-03],\n (2017-01-03, 2017-01-04]]\n closed='right', dtype='interval[datetime64[ns]]')\n\n The ``freq`` parameter specifies the frequency between the left and right.\n endpoints of the individual intervals within the ``IntervalIndex``. 
For\n numeric ``start`` and ``end``, the frequency must also be numeric.\n\n >>> pd.interval_range(start=0, periods=4, freq=1.5)\n IntervalIndex([(0.0, 1.5], (1.5, 3.0], (3.0, 4.5], (4.5, 6.0]]\n closed='right', dtype='interval[float64]')\n\n Similarly, for datetime-like ``start`` and ``end``, the frequency must be\n convertible to a DateOffset.\n\n >>> pd.interval_range(start=pd.Timestamp('2017-01-01'),\n periods=3, freq='MS')\n IntervalIndex([(2017-01-01, 2017-02-01], (2017-02-01, 2017-03-01],\n (2017-03-01, 2017-04-01]]\n closed='right', dtype='interval[datetime64[ns]]')\n\n Specify ``start``, ``end``, and ``periods``; the frequency is generated\n automatically (linearly spaced).\n\n >>> pd.interval_range(start=0, end=6, periods=4)\n IntervalIndex([(0.0, 1.5], (1.5, 3.0], (3.0, 4.5], (4.5, 6.0]]\n closed='right',\n dtype='interval[float64]')\n\n The ``closed`` parameter specifies which endpoints of the individual\n intervals within the ``IntervalIndex`` are closed.\n\n >>> pd.interval_range(end=5, periods=4, closed='both')\n IntervalIndex([[1, 2], [2, 3], [3, 4], [4, 5]]\n closed='both', dtype='interval[int64]')\n\n See Also\n --------\n IntervalIndex : an Index of intervals that are all closed on the same side.\n \"\"\"\n start = com.maybe_box_datetimelike(start)\n end = com.maybe_box_datetimelike(end)\n endpoint = start if start is not None else end\n\n if freq is None and com._any_none(periods, start, end):\n freq = 1 if is_number(endpoint) else 'D'\n\n if com.count_not_none(start, end, periods, freq) != 3:\n raise ValueError('Of the four parameters: start, end, periods, and '\n 'freq, exactly three must be specified')\n\n if not _is_valid_endpoint(start):\n msg = 'start must be numeric or datetime-like, got {start}'\n raise ValueError(msg.format(start=start))\n elif not _is_valid_endpoint(end):\n msg = 'end must be numeric or datetime-like, got {end}'\n raise ValueError(msg.format(end=end))\n\n if is_float(periods):\n periods = int(periods)\n elif not is_integer(periods) and periods is not None:\n msg = 'periods must be a number, got {periods}'\n raise TypeError(msg.format(periods=periods))\n\n if freq is not None and not is_number(freq):\n try:\n freq = to_offset(freq)\n except ValueError:\n raise ValueError('freq must be numeric or convertible to '\n 'DateOffset, got {freq}'.format(freq=freq))\n\n # verify type compatibility\n if not all([_is_type_compatible(start, end),\n _is_type_compatible(start, freq),\n _is_type_compatible(end, freq)]):\n raise TypeError(\"start, end, freq need to be type compatible\")\n\n # +1 to convert interval count to breaks count (n breaks = n-1 intervals)\n if periods is not None:\n periods += 1\n\n if is_number(endpoint):\n # force consistency between start/end/freq (lower end if freq skips it)\n if com._all_not_none(start, end, freq):\n end -= (end - start) % freq\n\n # compute the period/start/end if unspecified (at most one)\n if periods is None:\n periods = int((end - start) // freq) + 1\n elif start is None:\n start = end - (periods - 1) * freq\n elif end is None:\n end = start + (periods - 1) * freq\n\n breaks = np.linspace(start, end, periods)\n if all(is_integer(x) for x in com._not_none(start, end, freq)):\n # np.linspace always produces float output\n breaks = maybe_downcast_to_dtype(breaks, 'int64')\n else:\n # delegate to the appropriate range function\n if isinstance(endpoint, Timestamp):\n range_func = date_range\n else:\n range_func = timedelta_range\n\n breaks = range_func(start=start, end=end, periods=periods, freq=freq)\n\n return 
IntervalIndex.from_breaks(breaks, name=name, closed=closed)\n",
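The interval.py module serialized above documents `interval_range` and `IntervalIndex.get_loc`; the following is a minimal usage sketch of that behaviour, assuming a pandas version matching this source (0.23/0.24-era API). It only re-exercises the examples given in the docstrings above; exact reprs may vary by version.

import pandas as pd

# Exactly three of start/end/periods/freq may be specified; for numeric
# endpoints freq defaults to 1, so this yields (0, 1], (1, 2], ..., (4, 5].
idx = pd.interval_range(start=0, end=5)

# get_loc matches points falling inside an interval: 1.5 lies in (1, 2].
idx.get_loc(1.5)            # -> 1

# With start, end and periods given, the breaks are linearly spaced:
# (0.0, 1.5], (1.5, 3.0], (3.0, 4.5], (4.5, 6.0].
pd.interval_range(start=0, end=6, periods=4)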
"\"\"\"An interface for extending pandas with custom arrays.\n\n.. warning::\n\n This is an experimental API and subject to breaking changes\n without warning.\n\"\"\"\nimport numpy as np\n\nimport operator\n\nfrom pandas.errors import AbstractMethodError\nfrom pandas.compat.numpy import function as nv\nfrom pandas.compat import set_function_name, PY3\nfrom pandas.core import ops\nfrom pandas.core.dtypes.common import is_list_like\n\n_not_implemented_message = \"{} does not implement {}.\"\n\n\nclass ExtensionArray(object):\n \"\"\"Abstract base class for custom 1-D array types.\n\n pandas will recognize instances of this class as proper arrays\n with a custom type and will not attempt to coerce them to objects. They\n may be stored directly inside a :class:`DataFrame` or :class:`Series`.\n\n .. versionadded:: 0.23.0\n\n Notes\n -----\n The interface includes the following abstract methods that must be\n implemented by subclasses:\n\n * _from_sequence\n * _from_factorized\n * __getitem__\n * __len__\n * dtype\n * nbytes\n * isna\n * take\n * copy\n * _concat_same_type\n\n An additional method is available to satisfy pandas' internal,\n private block API.\n\n * _formatting_values\n\n Some methods require casting the ExtensionArray to an ndarray of Python\n objects with ``self.astype(object)``, which may be expensive. When\n performance is a concern, we highly recommend overriding the following\n methods:\n\n * fillna\n * dropna\n * unique\n * factorize / _values_for_factorize\n * argsort / _values_for_argsort\n\n The remaining methods implemented on this class should be performant,\n as they only compose abstract methods. Still, a more efficient\n implementation may be available, and these methods can be overridden.\n\n This class does not inherit from 'abc.ABCMeta' for performance reasons.\n Methods and properties required by the interface raise\n ``pandas.errors.AbstractMethodError`` and no ``register`` method is\n provided for registering virtual subclasses.\n\n ExtensionArrays are limited to 1 dimension.\n\n They may be backed by none, one, or many NumPy arrays. For example,\n ``pandas.Categorical`` is an extension array backed by two arrays,\n one for codes and one for categories. An array of IPv6 address may\n be backed by a NumPy structured array with two fields, one for the\n lower 64 bits and one for the upper 64 bits. Or they may be backed\n by some other storage type, like Python lists. Pandas makes no\n assumptions on how the data are stored, just that it can be converted\n to a NumPy array.\n The ExtensionArray interface does not impose any rules on how this data\n is stored. However, currently, the backing data cannot be stored in\n attributes called ``.values`` or ``._values`` to ensure full compatibility\n with pandas internals. But other names as ``.data``, ``._data``,\n ``._items``, ... can be freely used.\n \"\"\"\n # '_typ' is for pandas.core.dtypes.generic.ABCExtensionArray.\n # Don't override this.\n _typ = 'extension'\n\n # ------------------------------------------------------------------------\n # Constructors\n # ------------------------------------------------------------------------\n @classmethod\n def _from_sequence(cls, scalars, dtype=None, copy=False):\n \"\"\"Construct a new ExtensionArray from a sequence of scalars.\n\n Parameters\n ----------\n scalars : Sequence\n Each element will be an instance of the scalar type for this\n array, ``cls.dtype.type``.\n dtype : dtype, optional\n Construct for this particular dtype. 
This should be a Dtype\n compatible with the ExtensionArray.\n copy : boolean, default False\n If True, copy the underlying data.\n Returns\n -------\n ExtensionArray\n \"\"\"\n raise AbstractMethodError(cls)\n\n @classmethod\n def _from_factorized(cls, values, original):\n \"\"\"Reconstruct an ExtensionArray after factorization.\n\n Parameters\n ----------\n values : ndarray\n An integer ndarray with the factorized values.\n original : ExtensionArray\n The original ExtensionArray that factorize was called on.\n\n See Also\n --------\n pandas.factorize\n ExtensionArray.factorize\n \"\"\"\n raise AbstractMethodError(cls)\n\n # ------------------------------------------------------------------------\n # Must be a Sequence\n # ------------------------------------------------------------------------\n\n def __getitem__(self, item):\n # type (Any) -> Any\n \"\"\"Select a subset of self.\n\n Parameters\n ----------\n item : int, slice, or ndarray\n * int: The position in 'self' to get.\n\n * slice: A slice object, where 'start', 'stop', and 'step' are\n integers or None\n\n * ndarray: A 1-d boolean NumPy ndarray the same length as 'self'\n\n Returns\n -------\n item : scalar or ExtensionArray\n\n Notes\n -----\n For scalar ``item``, return a scalar value suitable for the array's\n type. This should be an instance of ``self.dtype.type``.\n\n For slice ``key``, return an instance of ``ExtensionArray``, even\n if the slice is length 0 or 1.\n\n For a boolean mask, return an instance of ``ExtensionArray``, filtered\n to the values where ``item`` is True.\n \"\"\"\n raise AbstractMethodError(self)\n\n def __setitem__(self, key, value):\n # type: (Union[int, np.ndarray], Any) -> None\n \"\"\"Set one or more values inplace.\n\n This method is not required to satisfy the pandas extension array\n interface.\n\n Parameters\n ----------\n key : int, ndarray, or slice\n When called from, e.g. ``Series.__setitem__``, ``key`` will be\n one of\n\n * scalar int\n * ndarray of integers.\n * boolean ndarray\n * slice object\n\n value : ExtensionDtype.type, Sequence[ExtensionDtype.type], or object\n value or values to be set of ``key``.\n\n Returns\n -------\n None\n \"\"\"\n # Some notes to the ExtensionArray implementor who may have ended up\n # here. While this method is not required for the interface, if you\n # *do* choose to implement __setitem__, then some semantics should be\n # observed:\n #\n # * Setting multiple values : ExtensionArrays should support setting\n # multiple values at once, 'key' will be a sequence of integers and\n # 'value' will be a same-length sequence.\n #\n # * Broadcasting : For a sequence 'key' and a scalar 'value',\n # each position in 'key' should be set to 'value'.\n #\n # * Coercion : Most users will expect basic coercion to work. For\n # example, a string like '2018-01-01' is coerced to a datetime\n # when setting on a datetime64ns array. In general, if the\n # __init__ method coerces that value, then so should __setitem__\n raise NotImplementedError(_not_implemented_message.format(\n type(self), '__setitem__')\n )\n\n def __len__(self):\n # type: () -> int\n \"\"\"Length of this array\n\n Returns\n -------\n length : int\n \"\"\"\n raise AbstractMethodError(self)\n\n def __iter__(self):\n \"\"\"Iterate over elements of the array.\n\n \"\"\"\n # This needs to be implemented so that pandas recognizes extension\n # arrays as list-like. 
The default implementation makes successive\n # calls to ``__getitem__``, which may be slower than necessary.\n for i in range(len(self)):\n yield self[i]\n\n # ------------------------------------------------------------------------\n # Required attributes\n # ------------------------------------------------------------------------\n @property\n def dtype(self):\n # type: () -> ExtensionDtype\n \"\"\"An instance of 'ExtensionDtype'.\"\"\"\n raise AbstractMethodError(self)\n\n @property\n def shape(self):\n # type: () -> Tuple[int, ...]\n \"\"\"Return a tuple of the array dimensions.\"\"\"\n return (len(self),)\n\n @property\n def ndim(self):\n # type: () -> int\n \"\"\"Extension Arrays are only allowed to be 1-dimensional.\"\"\"\n return 1\n\n @property\n def nbytes(self):\n # type: () -> int\n \"\"\"The number of bytes needed to store this object in memory.\n\n \"\"\"\n # If this is expensive to compute, return an approximate lower bound\n # on the number of bytes needed.\n raise AbstractMethodError(self)\n\n # ------------------------------------------------------------------------\n # Additional Methods\n # ------------------------------------------------------------------------\n def astype(self, dtype, copy=True):\n \"\"\"Cast to a NumPy array with 'dtype'.\n\n Parameters\n ----------\n dtype : str or dtype\n Typecode or data-type to which the array is cast.\n copy : bool, default True\n Whether to copy the data, even if not necessary. If False,\n a copy is made only if the old dtype does not match the\n new dtype.\n\n Returns\n -------\n array : ndarray\n NumPy ndarray with 'dtype' for its dtype.\n \"\"\"\n return np.array(self, dtype=dtype, copy=copy)\n\n def isna(self):\n # type: () -> np.ndarray\n \"\"\"Boolean NumPy array indicating if each value is missing.\n\n This should return a 1-D array the same length as 'self'.\n \"\"\"\n raise AbstractMethodError(self)\n\n def _values_for_argsort(self):\n # type: () -> ndarray\n \"\"\"Return values for sorting.\n\n Returns\n -------\n ndarray\n The transformed values should maintain the ordering between values\n within the array.\n\n See Also\n --------\n ExtensionArray.argsort\n \"\"\"\n # Note: this is used in `ExtensionArray.argsort`.\n return np.array(self)\n\n def argsort(self, ascending=True, kind='quicksort', *args, **kwargs):\n \"\"\"\n Return the indices that would sort this array.\n\n Parameters\n ----------\n ascending : bool, default True\n Whether the indices should result in an ascending\n or descending sort.\n kind : {'quicksort', 'mergesort', 'heapsort'}, optional\n Sorting algorithm.\n *args, **kwargs:\n passed through to :func:`numpy.argsort`.\n\n Returns\n -------\n index_array : ndarray\n Array of indices that sort ``self``.\n\n See Also\n --------\n numpy.argsort : Sorting implementation used internally.\n \"\"\"\n # Implementor note: You have two places to override the behavior of\n # argsort.\n # 1. _values_for_argsort : construct the values passed to np.argsort\n # 2. argsort : total control over sorting.\n ascending = nv.validate_argsort_with_ascending(ascending, args, kwargs)\n values = self._values_for_argsort()\n result = np.argsort(values, kind=kind, **kwargs)\n if not ascending:\n result = result[::-1]\n return result\n\n def fillna(self, value=None, method=None, limit=None):\n \"\"\" Fill NA/NaN values using the specified method.\n\n Parameters\n ----------\n value : scalar, array-like\n If a scalar value is passed it is used to fill all missing values.\n Alternatively, an array-like 'value' can be given. 
It's expected\n that the array-like have the same length as 'self'.\n method : {'backfill', 'bfill', 'pad', 'ffill', None}, default None\n Method to use for filling holes in reindexed Series\n pad / ffill: propagate last valid observation forward to next valid\n backfill / bfill: use NEXT valid observation to fill gap\n limit : int, default None\n If method is specified, this is the maximum number of consecutive\n NaN values to forward/backward fill. In other words, if there is\n a gap with more than this number of consecutive NaNs, it will only\n be partially filled. If method is not specified, this is the\n maximum number of entries along the entire axis where NaNs will be\n filled.\n\n Returns\n -------\n filled : ExtensionArray with NA/NaN filled\n \"\"\"\n from pandas.api.types import is_array_like\n from pandas.util._validators import validate_fillna_kwargs\n from pandas.core.missing import pad_1d, backfill_1d\n\n value, method = validate_fillna_kwargs(value, method)\n\n mask = self.isna()\n\n if is_array_like(value):\n if len(value) != len(self):\n raise ValueError(\"Length of 'value' does not match. Got ({}) \"\n \" expected {}\".format(len(value), len(self)))\n value = value[mask]\n\n if mask.any():\n if method is not None:\n func = pad_1d if method == 'pad' else backfill_1d\n new_values = func(self.astype(object), limit=limit,\n mask=mask)\n new_values = self._from_sequence(new_values, dtype=self.dtype)\n else:\n # fill with value\n new_values = self.copy()\n new_values[mask] = value\n else:\n new_values = self.copy()\n return new_values\n\n def dropna(self):\n \"\"\" Return ExtensionArray without NA values\n\n Returns\n -------\n valid : ExtensionArray\n \"\"\"\n\n return self[~self.isna()]\n\n def shift(self, periods=1):\n # type: (int) -> ExtensionArray\n \"\"\"\n Shift values by desired number.\n\n Newly introduced missing values are filled with\n ``self.dtype.na_value``.\n\n .. versionadded:: 0.24.0\n\n Parameters\n ----------\n periods : int, default 1\n The number of periods to shift. Negative values are allowed\n for shifting backwards.\n\n Returns\n -------\n shifted : ExtensionArray\n \"\"\"\n # Note: this implementation assumes that `self.dtype.na_value` can be\n # stored in an instance of your ExtensionArray with `self.dtype`.\n if periods == 0:\n return self.copy()\n empty = self._from_sequence([self.dtype.na_value] * abs(periods),\n dtype=self.dtype)\n if periods > 0:\n a = empty\n b = self[:-periods]\n else:\n a = self[abs(periods):]\n b = empty\n return self._concat_same_type([a, b])\n\n def unique(self):\n \"\"\"Compute the ExtensionArray of unique values.\n\n Returns\n -------\n uniques : ExtensionArray\n \"\"\"\n from pandas import unique\n\n uniques = unique(self.astype(object))\n return self._from_sequence(uniques, dtype=self.dtype)\n\n def _values_for_factorize(self):\n # type: () -> Tuple[ndarray, Any]\n \"\"\"Return an array and missing value suitable for factorization.\n\n Returns\n -------\n values : ndarray\n\n An array suitable for factorization. This should maintain order\n and be a supported dtype (Float64, Int64, UInt64, String, Object).\n By default, the extension array is cast to object dtype.\n na_value : object\n The value in `values` to consider missing. This will be treated\n as NA in the factorization routines, so it will be coded as\n `na_sentinal` and not included in `uniques`. 
By default,\n ``np.nan`` is used.\n \"\"\"\n return self.astype(object), np.nan\n\n def factorize(self, na_sentinel=-1):\n # type: (int) -> Tuple[ndarray, ExtensionArray]\n \"\"\"Encode the extension array as an enumerated type.\n\n Parameters\n ----------\n na_sentinel : int, default -1\n Value to use in the `labels` array to indicate missing values.\n\n Returns\n -------\n labels : ndarray\n An integer NumPy array that's an indexer into the original\n ExtensionArray.\n uniques : ExtensionArray\n An ExtensionArray containing the unique values of `self`.\n\n .. note::\n\n uniques will *not* contain an entry for the NA value of\n the ExtensionArray if there are any missing values present\n in `self`.\n\n See Also\n --------\n pandas.factorize : Top-level factorize method that dispatches here.\n\n Notes\n -----\n :meth:`pandas.factorize` offers a `sort` keyword as well.\n \"\"\"\n # Impelmentor note: There are two ways to override the behavior of\n # pandas.factorize\n # 1. _values_for_factorize and _from_factorize.\n # Specify the values passed to pandas' internal factorization\n # routines, and how to convert from those values back to the\n # original ExtensionArray.\n # 2. ExtensionArray.factorize.\n # Complete control over factorization.\n from pandas.core.algorithms import _factorize_array\n\n arr, na_value = self._values_for_factorize()\n\n labels, uniques = _factorize_array(arr, na_sentinel=na_sentinel,\n na_value=na_value)\n\n uniques = self._from_factorized(uniques, self)\n return labels, uniques\n\n # ------------------------------------------------------------------------\n # Indexing methods\n # ------------------------------------------------------------------------\n\n def take(self, indices, allow_fill=False, fill_value=None):\n # type: (Sequence[int], bool, Optional[Any]) -> ExtensionArray\n \"\"\"Take elements from an array.\n\n Parameters\n ----------\n indices : sequence of integers\n Indices to be taken.\n allow_fill : bool, default False\n How to handle negative values in `indices`.\n\n * False: negative values in `indices` indicate positional indices\n from the right (the default). This is similar to\n :func:`numpy.take`.\n\n * True: negative values in `indices` indicate\n missing values. These values are set to `fill_value`. Any other\n other negative values raise a ``ValueError``.\n\n fill_value : any, optional\n Fill value to use for NA-indices when `allow_fill` is True.\n This may be ``None``, in which case the default NA value for\n the type, ``self.dtype.na_value``, is used.\n\n For many ExtensionArrays, there will be two representations of\n `fill_value`: a user-facing \"boxed\" scalar, and a low-level\n physical NA value. `fill_value` should be the user-facing version,\n and the implementation should handle translating that to the\n physical version for processing the take if necessary.\n\n Returns\n -------\n ExtensionArray\n\n Raises\n ------\n IndexError\n When the indices are out of bounds for the array.\n ValueError\n When `indices` contains negative values other than ``-1``\n and `allow_fill` is True.\n\n Notes\n -----\n ExtensionArray.take is called by ``Series.__getitem__``, ``.loc``,\n ``iloc``, when `indices` is a sequence of values. Additionally,\n it's called by :meth:`Series.reindex`, or any other method\n that causes realignment, with a `fill_value`.\n\n See Also\n --------\n numpy.take\n pandas.api.extensions.take\n\n Examples\n --------\n Here's an example implementation, which relies on casting the\n extension array to object dtype. 
This uses the helper method\n :func:`pandas.api.extensions.take`.\n\n .. code-block:: python\n\n def take(self, indices, allow_fill=False, fill_value=None):\n from pandas.core.algorithms import take\n\n # If the ExtensionArray is backed by an ndarray, then\n # just pass that here instead of coercing to object.\n data = self.astype(object)\n\n if allow_fill and fill_value is None:\n fill_value = self.dtype.na_value\n\n # fill value should always be translated from the scalar\n # type for the array, to the physical storage type for\n # the data, before passing to take.\n\n result = take(data, indices, fill_value=fill_value,\n allow_fill=allow_fill)\n return self._from_sequence(result, dtype=self.dtype)\n \"\"\"\n # Implementer note: The `fill_value` parameter should be a user-facing\n # value, an instance of self.dtype.type. When passed `fill_value=None`,\n # the default of `self.dtype.na_value` should be used.\n # This may differ from the physical storage type your ExtensionArray\n # uses. In this case, your implementation is responsible for casting\n # the user-facing type to the storage type, before using\n # pandas.api.extensions.take\n raise AbstractMethodError(self)\n\n def copy(self, deep=False):\n # type: (bool) -> ExtensionArray\n \"\"\"Return a copy of the array.\n\n Parameters\n ----------\n deep : bool, default False\n Also copy the underlying data backing this array.\n\n Returns\n -------\n ExtensionArray\n \"\"\"\n raise AbstractMethodError(self)\n\n # ------------------------------------------------------------------------\n # Block-related methods\n # ------------------------------------------------------------------------\n\n def _formatting_values(self):\n # type: () -> np.ndarray\n # At the moment, this has to be an array since we use result.dtype\n \"\"\"An array of values to be printed in, e.g. the Series repr\"\"\"\n return np.array(self)\n\n @classmethod\n def _concat_same_type(cls, to_concat):\n # type: (Sequence[ExtensionArray]) -> ExtensionArray\n \"\"\"Concatenate multiple array\n\n Parameters\n ----------\n to_concat : sequence of this type\n\n Returns\n -------\n ExtensionArray\n \"\"\"\n raise AbstractMethodError(cls)\n\n # The _can_hold_na attribute is set to True so that pandas internals\n # will use the ExtensionDtype.na_value as the NA value in operations\n # such as take(), reindex(), shift(), etc. 
In addition, those results\n # will then be of the ExtensionArray subclass rather than an array\n # of objects\n _can_hold_na = True\n\n @property\n def _ndarray_values(self):\n # type: () -> np.ndarray\n \"\"\"Internal pandas method for lossy conversion to a NumPy ndarray.\n\n This method is not part of the pandas interface.\n\n The expectation is that this is cheap to compute, and is primarily\n used for interacting with our indexers.\n \"\"\"\n return np.array(self)\n\n\nclass ExtensionOpsMixin(object):\n \"\"\"\n A base class for linking the operators to their dunder names\n \"\"\"\n\n @classmethod\n def _add_arithmetic_ops(cls):\n cls.__add__ = cls._create_arithmetic_method(operator.add)\n cls.__radd__ = cls._create_arithmetic_method(ops.radd)\n cls.__sub__ = cls._create_arithmetic_method(operator.sub)\n cls.__rsub__ = cls._create_arithmetic_method(ops.rsub)\n cls.__mul__ = cls._create_arithmetic_method(operator.mul)\n cls.__rmul__ = cls._create_arithmetic_method(ops.rmul)\n cls.__pow__ = cls._create_arithmetic_method(operator.pow)\n cls.__rpow__ = cls._create_arithmetic_method(ops.rpow)\n cls.__mod__ = cls._create_arithmetic_method(operator.mod)\n cls.__rmod__ = cls._create_arithmetic_method(ops.rmod)\n cls.__floordiv__ = cls._create_arithmetic_method(operator.floordiv)\n cls.__rfloordiv__ = cls._create_arithmetic_method(ops.rfloordiv)\n cls.__truediv__ = cls._create_arithmetic_method(operator.truediv)\n cls.__rtruediv__ = cls._create_arithmetic_method(ops.rtruediv)\n if not PY3:\n cls.__div__ = cls._create_arithmetic_method(operator.div)\n cls.__rdiv__ = cls._create_arithmetic_method(ops.rdiv)\n\n cls.__divmod__ = cls._create_arithmetic_method(divmod)\n cls.__rdivmod__ = cls._create_arithmetic_method(ops.rdivmod)\n\n @classmethod\n def _add_comparison_ops(cls):\n cls.__eq__ = cls._create_comparison_method(operator.eq)\n cls.__ne__ = cls._create_comparison_method(operator.ne)\n cls.__lt__ = cls._create_comparison_method(operator.lt)\n cls.__gt__ = cls._create_comparison_method(operator.gt)\n cls.__le__ = cls._create_comparison_method(operator.le)\n cls.__ge__ = cls._create_comparison_method(operator.ge)\n\n\nclass ExtensionScalarOpsMixin(ExtensionOpsMixin):\n \"\"\"A mixin for defining the arithmetic and logical operations on\n an ExtensionArray class, where it is assumed that the underlying objects\n have the operators already defined.\n\n Usage\n ------\n If you have defined a subclass MyExtensionArray(ExtensionArray), then\n use MyExtensionArray(ExtensionArray, ExtensionScalarOpsMixin) to\n get the arithmetic operators. After the definition of MyExtensionArray,\n insert the lines\n\n MyExtensionArray._add_arithmetic_ops()\n MyExtensionArray._add_comparison_ops()\n\n to link the operators to your class.\n \"\"\"\n\n @classmethod\n def _create_method(cls, op, coerce_to_dtype=True):\n \"\"\"\n A class method that returns a method that will correspond to an\n operator for an ExtensionArray subclass, by dispatching to the\n relevant operator defined on the individual elements of the\n ExtensionArray.\n\n Parameters\n ----------\n op : function\n An operator that takes arguments op(a, b)\n coerce_to_dtype : bool, default True\n boolean indicating whether to attempt to convert\n the result to the underlying ExtensionArray dtype.\n If it's not possible to create a new ExtensionArray with the\n values, an ndarray is returned instead.\n\n Returns\n -------\n Callable[[Any, Any], Union[ndarray, ExtensionArray]]\n A method that can be bound to a class. 
When used, the method\n receives the two arguments, one of which is the instance of\n this class, and should return an ExtensionArray or an ndarray.\n\n Returning an ndarray may be necessary when the result of the\n `op` cannot be stored in the ExtensionArray. The dtype of the\n ndarray uses NumPy's normal inference rules.\n\n Example\n -------\n Given an ExtensionArray subclass called MyExtensionArray, use\n\n >>> __add__ = cls._create_method(operator.add)\n\n in the class definition of MyExtensionArray to create the operator\n for addition, that will be based on the operator implementation\n of the underlying elements of the ExtensionArray\n \"\"\"\n\n def _binop(self, other):\n def convert_values(param):\n if isinstance(param, ExtensionArray) or is_list_like(param):\n ovalues = param\n else: # Assume its an object\n ovalues = [param] * len(self)\n return ovalues\n lvalues = self\n rvalues = convert_values(other)\n\n # If the operator is not defined for the underlying objects,\n # a TypeError should be raised\n res = [op(a, b) for (a, b) in zip(lvalues, rvalues)]\n\n if coerce_to_dtype:\n try:\n res = self._from_sequence(res)\n except Exception:\n # https://github.com/pandas-dev/pandas/issues/22850\n # We catch all regular exceptions here, and fall back\n # to an ndarray.\n res = np.asarray(res)\n else:\n res = np.asarray(res)\n\n return res\n\n op_name = ops._get_op_name(op, True)\n return set_function_name(_binop, op_name, cls)\n\n @classmethod\n def _create_arithmetic_method(cls, op):\n return cls._create_method(op)\n\n @classmethod\n def _create_comparison_method(cls, op):\n return cls._create_method(op, coerce_to_dtype=False)\n",
"#!/usr/bin/env python\n\"\"\"\nAnalyze docstrings to detect errors.\n\nIf no argument is provided, it does a quick check of docstrings and returns\na csv with all API functions and results of basic checks.\n\nIf a function or method is provided in the form \"pandas.function\",\n\"pandas.module.class.method\", etc. a list of all errors in the docstring for\nthe specified function or method.\n\nUsage::\n $ ./validate_docstrings.py\n $ ./validate_docstrings.py pandas.DataFrame.head\n\"\"\"\nimport os\nimport sys\nimport csv\nimport re\nimport functools\nimport collections\nimport argparse\nimport pydoc\nimport inspect\nimport importlib\nimport doctest\ntry:\n from io import StringIO\nexcept ImportError:\n from cStringIO import StringIO\nimport numpy\n\nBASE_PATH = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n\nsys.path.insert(0, os.path.join(BASE_PATH))\nimport pandas\nfrom pandas.compat import signature\n\nsys.path.insert(1, os.path.join(BASE_PATH, 'doc', 'sphinxext'))\nfrom numpydoc.docscrape import NumpyDocString\nfrom pandas.io.formats.printing import pprint_thing\n\n\nPRIVATE_CLASSES = ['NDFrame', 'IndexOpsMixin']\nDIRECTIVES = ['versionadded', 'versionchanged', 'deprecated']\n\n\ndef _load_obj(obj_name):\n for maxsplit in range(1, obj_name.count('.') + 1):\n # TODO when py3 only replace by: module, *func_parts = ...\n func_name_split = obj_name.rsplit('.', maxsplit)\n module = func_name_split[0]\n func_parts = func_name_split[1:]\n try:\n obj = importlib.import_module(module)\n except ImportError:\n pass\n else:\n continue\n\n if 'module' not in locals():\n raise ImportError('No module can be imported '\n 'from \"{}\"'.format(obj_name))\n\n for part in func_parts:\n obj = getattr(obj, part)\n return obj\n\n\ndef _to_original_callable(obj):\n while True:\n if inspect.isfunction(obj) or inspect.isclass(obj):\n f = inspect.getfile(obj)\n if f.startswith('<') and f.endswith('>'):\n return None\n return obj\n if inspect.ismethod(obj):\n obj = obj.__func__\n elif isinstance(obj, functools.partial):\n obj = obj.func\n elif isinstance(obj, property):\n obj = obj.fget\n else:\n return None\n\n\ndef _output_header(title, width=80, char='#'):\n full_line = char * width\n side_len = (width - len(title) - 2) // 2\n adj = '' if len(title) % 2 == 0 else ' '\n title_line = '{side} {title}{adj} {side}'.format(side=char * side_len,\n title=title,\n adj=adj)\n\n return '\\n{full_line}\\n{title_line}\\n{full_line}\\n\\n'.format(\n full_line=full_line, title_line=title_line)\n\n\nclass Docstring(object):\n def __init__(self, method_name, method_obj):\n self.method_name = method_name\n self.method_obj = method_obj\n self.raw_doc = method_obj.__doc__ or ''\n self.clean_doc = pydoc.getdoc(self.method_obj)\n self.doc = NumpyDocString(self.clean_doc)\n\n def __len__(self):\n return len(self.raw_doc)\n\n @property\n def is_function_or_method(self):\n # TODO(py27): remove ismethod\n return (inspect.isfunction(self.method_obj)\n or inspect.ismethod(self.method_obj))\n\n @property\n def source_file_name(self):\n fname = inspect.getsourcefile(self.method_obj)\n if fname:\n fname = os.path.relpath(fname, BASE_PATH)\n return fname\n\n @property\n def source_file_def_line(self):\n try:\n return inspect.getsourcelines(self.method_obj)[-1]\n except OSError:\n pass\n\n @property\n def github_url(self):\n url = 'https://github.com/pandas-dev/pandas/blob/master/'\n url += '{}#L{}'.format(self.source_file_name,\n self.source_file_def_line)\n return url\n\n @property\n def start_blank_lines(self):\n i = None\n 
if self.raw_doc:\n for i, row in enumerate(self.raw_doc.split('\\n')):\n if row.strip():\n break\n return i\n\n @property\n def end_blank_lines(self):\n i = None\n if self.raw_doc:\n for i, row in enumerate(reversed(self.raw_doc.split('\\n'))):\n if row.strip():\n break\n return i\n\n @property\n def double_blank_lines(self):\n prev = True\n for row in self.raw_doc.split('\\n'):\n if not prev and not row.strip():\n return True\n prev = row.strip()\n return False\n\n @property\n def summary(self):\n return ' '.join(self.doc['Summary'])\n\n @property\n def num_summary_lines(self):\n return len(self.doc['Summary'])\n\n @property\n def extended_summary(self):\n if not self.doc['Extended Summary'] and len(self.doc['Summary']) > 1:\n return ' '.join(self.doc['Summary'])\n return ' '.join(self.doc['Extended Summary'])\n\n @property\n def needs_summary(self):\n return not (bool(self.summary) and bool(self.extended_summary))\n\n @property\n def doc_parameters(self):\n return collections.OrderedDict((name, (type_, ''.join(desc)))\n for name, type_, desc\n in self.doc['Parameters'])\n\n @property\n def signature_parameters(self):\n if inspect.isclass(self.method_obj):\n if hasattr(self.method_obj, '_accessors') and (\n self.method_name.split('.')[-1] in\n self.method_obj._accessors):\n # accessor classes have a signature but don't want to show this\n return tuple()\n try:\n sig = signature(self.method_obj)\n except (TypeError, ValueError):\n # Some objects, mainly in C extensions do not support introspection\n # of the signature\n return tuple()\n params = sig.args\n if sig.varargs:\n params.append(\"*\" + sig.varargs)\n if sig.keywords:\n params.append(\"**\" + sig.keywords)\n params = tuple(params)\n if params and params[0] in ('self', 'cls'):\n return params[1:]\n return params\n\n @property\n def parameter_mismatches(self):\n errs = []\n signature_params = self.signature_parameters\n doc_params = tuple(self.doc_parameters)\n missing = set(signature_params) - set(doc_params)\n if missing:\n errs.append(\n 'Parameters {} not documented'.format(pprint_thing(missing)))\n extra = set(doc_params) - set(signature_params)\n if extra:\n errs.append('Unknown parameters {}'.format(pprint_thing(extra)))\n if (not missing and not extra and signature_params != doc_params\n and not (not signature_params and not doc_params)):\n errs.append('Wrong parameters order. ' +\n 'Actual: {!r}. '.format(signature_params) +\n 'Documented: {!r}'.format(doc_params))\n\n return errs\n\n @property\n def correct_parameters(self):\n return not bool(self.parameter_mismatches)\n\n def parameter_type(self, param):\n return self.doc_parameters[param][0]\n\n def parameter_desc(self, param):\n desc = self.doc_parameters[param][1]\n # Find and strip out any sphinx directives\n for directive in DIRECTIVES:\n full_directive = '.. 
{}'.format(directive)\n if full_directive in desc:\n # Only retain any description before the directive\n desc = desc[:desc.index(full_directive)]\n return desc\n\n @property\n def see_also(self):\n return collections.OrderedDict((name, ''.join(desc))\n for name, desc, _\n in self.doc['See Also'])\n\n @property\n def examples(self):\n return self.doc['Examples']\n\n @property\n def returns(self):\n return self.doc['Returns']\n\n @property\n def yields(self):\n return self.doc['Yields']\n\n @property\n def method_source(self):\n return inspect.getsource(self.method_obj)\n\n @property\n def first_line_ends_in_dot(self):\n if self.doc:\n return self.doc.split('\\n')[0][-1] == '.'\n\n @property\n def deprecated(self):\n pattern = re.compile('.. deprecated:: ')\n return (self.method_name.startswith('pandas.Panel') or\n bool(pattern.search(self.summary)) or\n bool(pattern.search(self.extended_summary)))\n\n @property\n def mentioned_private_classes(self):\n return [klass for klass in PRIVATE_CLASSES if klass in self.raw_doc]\n\n @property\n def examples_errors(self):\n flags = doctest.NORMALIZE_WHITESPACE | doctest.IGNORE_EXCEPTION_DETAIL\n finder = doctest.DocTestFinder()\n runner = doctest.DocTestRunner(optionflags=flags)\n context = {'np': numpy, 'pd': pandas}\n error_msgs = ''\n for test in finder.find(self.raw_doc, self.method_name, globs=context):\n f = StringIO()\n runner.run(test, out=f.write)\n error_msgs += f.getvalue()\n return error_msgs\n\n\ndef get_api_items():\n api_fname = os.path.join(BASE_PATH, 'doc', 'source', 'api.rst')\n\n previous_line = current_section = current_subsection = ''\n position = None\n with open(api_fname) as f:\n for line in f:\n line = line.strip()\n if len(line) == len(previous_line):\n if set(line) == set('-'):\n current_section = previous_line\n continue\n if set(line) == set('~'):\n current_subsection = previous_line\n continue\n\n if line.startswith('.. currentmodule::'):\n current_module = line.replace('.. currentmodule::', '').strip()\n continue\n\n if line == '.. 
autosummary::':\n position = 'autosummary'\n continue\n\n if position == 'autosummary':\n if line == '':\n position = 'items'\n continue\n\n if position == 'items':\n if line == '':\n position = None\n continue\n item = line.strip()\n func = importlib.import_module(current_module)\n for part in item.split('.'):\n func = getattr(func, part)\n\n yield ('.'.join([current_module, item]), func,\n current_section, current_subsection)\n\n previous_line = line\n\n\ndef _csv_row(func_name, func_obj, section, subsection, in_api, seen={}):\n obj_type = type(func_obj).__name__\n original_callable = _to_original_callable(func_obj)\n if original_callable is None:\n return [func_name, obj_type] + [''] * 12, ''\n else:\n doc = Docstring(func_name, original_callable)\n key = doc.source_file_name, doc.source_file_def_line\n shared_code = seen.get(key, '')\n return [func_name,\n obj_type,\n in_api,\n int(doc.deprecated),\n section,\n subsection,\n doc.source_file_name,\n doc.source_file_def_line,\n doc.github_url,\n int(bool(doc.summary)),\n int(bool(doc.extended_summary)),\n int(doc.correct_parameters),\n int(bool(doc.examples)),\n shared_code], key\n\n\ndef validate_all():\n writer = csv.writer(sys.stdout)\n cols = ('Function or method',\n 'Type',\n 'In API doc',\n 'Is deprecated',\n 'Section',\n 'Subsection',\n 'File',\n 'Code line',\n 'GitHub link',\n 'Has summary',\n 'Has extended summary',\n 'Parameters ok',\n 'Has examples',\n 'Shared code with')\n writer.writerow(cols)\n seen = {}\n api_items = list(get_api_items())\n for func_name, func, section, subsection in api_items:\n row, key = _csv_row(func_name, func, section, subsection,\n in_api=1, seen=seen)\n seen[key] = func_name\n writer.writerow(row)\n\n api_item_names = set(list(zip(*api_items))[0])\n for class_ in (pandas.Series, pandas.DataFrame, pandas.Panel):\n for member in inspect.getmembers(class_):\n func_name = 'pandas.{}.{}'.format(class_.__name__, member[0])\n if (not member[0].startswith('_') and\n func_name not in api_item_names):\n func = _load_obj(func_name)\n row, key = _csv_row(func_name, func, section='', subsection='',\n in_api=0)\n writer.writerow(row)\n\n return 0\n\n\ndef validate_one(func_name):\n \"\"\"\n Validate the docstring for the given func_name\n\n Parameters\n ----------\n func_name : function\n Function whose docstring will be evaluated\n\n Returns\n -------\n int\n The number of errors found in the `func_name` docstring\n \"\"\"\n func_obj = _load_obj(func_name)\n doc = Docstring(func_name, func_obj)\n\n sys.stderr.write(_output_header('Docstring ({})'.format(func_name)))\n sys.stderr.write('{}\\n'.format(doc.clean_doc))\n\n errs = []\n wrns = []\n if doc.start_blank_lines != 1:\n errs.append('Docstring text (summary) should start in the line '\n 'immediately after the opening quotes (not in the same '\n 'line, or leaving a blank line in between)')\n if doc.end_blank_lines != 1:\n errs.append('Closing quotes should be placed in the line after '\n 'the last text in the docstring (do not close the '\n 'quotes in the same line as the text, or leave a '\n 'blank line between the last text and the quotes)')\n if doc.double_blank_lines:\n errs.append('Use only one blank line to separate sections or '\n 'paragraphs')\n\n if not doc.summary:\n errs.append('No summary found (a short summary in a single line '\n 'should be present at the beginning of the docstring)')\n else:\n if not doc.summary[0].isupper():\n errs.append('Summary does not start with a capital letter')\n if doc.summary[-1] != '.':\n errs.append('Summary does 
not end with a period')\n if (doc.is_function_or_method and\n doc.summary.split(' ')[0][-1] == 's'):\n errs.append('Summary must start with infinitive verb, '\n 'not third person (e.g. use \"Generate\" instead of '\n '\"Generates\")')\n if doc.num_summary_lines > 1:\n errs.append(\"Summary should fit in a single line.\")\n if not doc.extended_summary:\n wrns.append('No extended summary found')\n\n param_errs = doc.parameter_mismatches\n for param in doc.doc_parameters:\n if not param.startswith(\"*\"): # Check can ignore var / kwargs\n if not doc.parameter_type(param):\n param_errs.append('Parameter \"{}\" has no type'.format(param))\n else:\n if doc.parameter_type(param)[-1] == '.':\n param_errs.append('Parameter \"{}\" type should '\n 'not finish with \".\"'.format(param))\n\n if not doc.parameter_desc(param):\n param_errs.append('Parameter \"{}\" '\n 'has no description'.format(param))\n else:\n if not doc.parameter_desc(param)[0].isupper():\n param_errs.append('Parameter \"{}\" description '\n 'should start with a '\n 'capital letter'.format(param))\n if doc.parameter_desc(param)[-1] != '.':\n param_errs.append('Parameter \"{}\" description '\n 'should finish with \".\"'.format(param))\n if param_errs:\n errs.append('Errors in parameters section')\n for param_err in param_errs:\n errs.append('\\t{}'.format(param_err))\n\n if doc.is_function_or_method:\n if not doc.returns and \"return\" in doc.method_source:\n errs.append('No Returns section found')\n if not doc.yields and \"yield\" in doc.method_source:\n errs.append('No Yields section found')\n\n mentioned_errs = doc.mentioned_private_classes\n if mentioned_errs:\n errs.append('Private classes ({}) should not be mentioned in public '\n 'docstring.'.format(mentioned_errs))\n\n if not doc.see_also:\n wrns.append('See Also section not found')\n else:\n for rel_name, rel_desc in doc.see_also.items():\n if not rel_desc:\n errs.append('Missing description for '\n 'See Also \"{}\" reference'.format(rel_name))\n\n for line in doc.raw_doc.splitlines():\n if re.match(\"^ *\\t\", line):\n errs.append('Tabs found at the start of line \"{}\", '\n 'please use whitespace only'.format(line.lstrip()))\n\n examples_errs = ''\n if not doc.examples:\n wrns.append('No examples section found')\n else:\n examples_errs = doc.examples_errors\n if examples_errs:\n errs.append('Examples do not pass tests')\n\n sys.stderr.write(_output_header('Validation'))\n if errs:\n sys.stderr.write('Errors found:\\n')\n for err in errs:\n sys.stderr.write('\\t{}\\n'.format(err))\n if wrns:\n sys.stderr.write('Warnings found:\\n')\n for wrn in wrns:\n sys.stderr.write('\\t{}\\n'.format(wrn))\n\n if not errs:\n sys.stderr.write('Docstring for \"{}\" correct. :)\\n'.format(func_name))\n\n if examples_errs:\n sys.stderr.write(_output_header('Doctests'))\n sys.stderr.write(examples_errs)\n\n return len(errs)\n\n\ndef main(function):\n if function is None:\n return validate_all()\n else:\n return validate_one(function)\n\n\nif __name__ == '__main__':\n argparser = argparse.ArgumentParser(\n description='validate pandas docstrings')\n argparser.add_argument('function',\n nargs='?',\n default=None,\n help=('function or method to validate '\n '(e.g. pandas.DataFrame.head) '\n 'if not provided, all docstrings '\n 'are validated'))\n args = argparser.parse_args()\n sys.exit(main(args.function))\n",
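The validate_docstrings.py script above can also be driven programmatically rather than via the CLI shown in its module docstring. A hedged sketch follows, assuming the script is importable as a module named validate_docstrings from the pandas scripts directory (an assumption about the repo layout, not stated in the source).

import validate_docstrings  # assumed module name for the script above

# validate_one() prints the docstring and its validation report to stderr
# and returns the number of errors found (0 means the docstring passes).
n_errors = validate_docstrings.validate_one('pandas.DataFrame.head')
print('errors found:', n_errors)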
"import decimal\nimport numbers\nimport random\nimport sys\n\nimport numpy as np\n\nimport pandas as pd\nfrom pandas.core.arrays import (ExtensionArray,\n ExtensionScalarOpsMixin)\nfrom pandas.core.dtypes.base import ExtensionDtype\n\n\nclass DecimalDtype(ExtensionDtype):\n type = decimal.Decimal\n name = 'decimal'\n na_value = decimal.Decimal('NaN')\n\n def __init__(self, context=None):\n self.context = context or decimal.getcontext()\n\n def __eq__(self, other):\n if isinstance(other, type(self)):\n return self.context == other.context\n return super(DecimalDtype, self).__eq__(other)\n\n def __repr__(self):\n return 'DecimalDtype(context={})'.format(self.context)\n\n @classmethod\n def construct_array_type(cls):\n \"\"\"Return the array type associated with this dtype\n\n Returns\n -------\n type\n \"\"\"\n return DecimalArray\n\n @classmethod\n def construct_from_string(cls, string):\n if string == cls.name:\n return cls()\n else:\n raise TypeError(\"Cannot construct a '{}' from \"\n \"'{}'\".format(cls, string))\n\n @property\n def _is_numeric(self):\n return True\n\n\nclass DecimalArray(ExtensionArray, ExtensionScalarOpsMixin):\n\n def __init__(self, values, dtype=None, copy=False, context=None):\n for val in values:\n if not isinstance(val, decimal.Decimal):\n raise TypeError(\"All values must be of type \" +\n str(decimal.Decimal))\n values = np.asarray(values, dtype=object)\n\n self._data = values\n # Some aliases for common attribute names to ensure pandas supports\n # these\n self._items = self.data = self._data\n # those aliases are currently not working due to assumptions\n # in internal code (GH-20735)\n # self._values = self.values = self.data\n self._dtype = DecimalDtype(context)\n\n @property\n def dtype(self):\n return self._dtype\n\n @classmethod\n def _from_sequence(cls, scalars, dtype=None, copy=False):\n return cls(scalars)\n\n @classmethod\n def _from_factorized(cls, values, original):\n return cls(values)\n\n def __getitem__(self, item):\n if isinstance(item, numbers.Integral):\n return self._data[item]\n else:\n return type(self)(self._data[item])\n\n def take(self, indexer, allow_fill=False, fill_value=None):\n from pandas.api.extensions import take\n\n data = self._data\n if allow_fill and fill_value is None:\n fill_value = self.dtype.na_value\n\n result = take(data, indexer, fill_value=fill_value,\n allow_fill=allow_fill)\n return self._from_sequence(result)\n\n def copy(self, deep=False):\n if deep:\n return type(self)(self._data.copy())\n return type(self)(self)\n\n def astype(self, dtype, copy=True):\n if isinstance(dtype, type(self.dtype)):\n return type(self)(self._data, context=dtype.context)\n return super(DecimalArray, self).astype(dtype, copy)\n\n def __setitem__(self, key, value):\n if pd.api.types.is_list_like(value):\n value = [decimal.Decimal(v) for v in value]\n else:\n value = decimal.Decimal(value)\n self._data[key] = value\n\n def __len__(self):\n return len(self._data)\n\n def __repr__(self):\n return 'DecimalArray({!r})'.format(self._data)\n\n @property\n def nbytes(self):\n n = len(self)\n if n:\n return n * sys.getsizeof(self[0])\n return 0\n\n def isna(self):\n return np.array([x.is_nan() for x in self._data], dtype=bool)\n\n @property\n def _na_value(self):\n return decimal.Decimal('NaN')\n\n @classmethod\n def _concat_same_type(cls, to_concat):\n return cls(np.concatenate([x._data for x in to_concat]))\n\n\ndef to_decimal(values, context=None):\n return DecimalArray([decimal.Decimal(x) for x in values], context=context)\n\n\ndef 
make_data():\n return [decimal.Decimal(random.random()) for _ in range(100)]\n\n\nDecimalArray._add_arithmetic_ops()\nDecimalArray._add_comparison_ops()\n",
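The entry above is a self-contained decimal-backed pandas extension type: DecimalDtype, DecimalArray, the to_decimal() constructor and a make_data() helper, with arithmetic and comparison operators added via ExtensionScalarOpsMixin. A short usage sketch, assuming the code is importable as a module named decimal_array (a hypothetical name, since the dump omits file paths) under a pandas version with extension-array support:

import pandas as pd

from decimal_array import to_decimal  # hypothetical module name for the code above

arr = to_decimal(['1.5', '2.5', 'NaN'])  # each value is coerced to decimal.Decimal
s = pd.Series(arr)                       # a Series backed by the extension array

print(s.dtype)         # the DecimalDtype defined above (name 'decimal')
print(arr.isna())      # NaN detected through Decimal.is_nan()
print((arr + arr)[0])  # element-wise ops supplied by ExtensionScalarOpsMixin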
"# -*- coding: utf-8 -*-\n\nfrom datetime import datetime, date, timedelta\n\nimport pytest\n\n\nimport numpy as np\n\nfrom collections import OrderedDict\nimport pandas as pd\nfrom pandas import (DataFrame, Series, Index, MultiIndex,\n Grouper, date_range, concat, Categorical)\nfrom pandas.core.reshape.pivot import pivot_table, crosstab\nfrom pandas.compat import range, product\nimport pandas.util.testing as tm\nfrom pandas.api.types import CategoricalDtype as CDT\n\n\[email protected](params=[True, False])\ndef dropna(request):\n return request.param\n\n\nclass TestPivotTable(object):\n\n def setup_method(self, method):\n self.data = DataFrame({'A': ['foo', 'foo', 'foo', 'foo',\n 'bar', 'bar', 'bar', 'bar',\n 'foo', 'foo', 'foo'],\n 'B': ['one', 'one', 'one', 'two',\n 'one', 'one', 'one', 'two',\n 'two', 'two', 'one'],\n 'C': ['dull', 'dull', 'shiny', 'dull',\n 'dull', 'shiny', 'shiny', 'dull',\n 'shiny', 'shiny', 'shiny'],\n 'D': np.random.randn(11),\n 'E': np.random.randn(11),\n 'F': np.random.randn(11)})\n\n def test_pivot_table(self):\n index = ['A', 'B']\n columns = 'C'\n table = pivot_table(self.data, values='D',\n index=index, columns=columns)\n\n table2 = self.data.pivot_table(\n values='D', index=index, columns=columns)\n tm.assert_frame_equal(table, table2)\n\n # this works\n pivot_table(self.data, values='D', index=index)\n\n if len(index) > 1:\n assert table.index.names == tuple(index)\n else:\n assert table.index.name == index[0]\n\n if len(columns) > 1:\n assert table.columns.names == columns\n else:\n assert table.columns.name == columns[0]\n\n expected = self.data.groupby(\n index + [columns])['D'].agg(np.mean).unstack()\n tm.assert_frame_equal(table, expected)\n\n def test_pivot_table_nocols(self):\n df = DataFrame({'rows': ['a', 'b', 'c'],\n 'cols': ['x', 'y', 'z'],\n 'values': [1, 2, 3]})\n rs = df.pivot_table(columns='cols', aggfunc=np.sum)\n xp = df.pivot_table(index='cols', aggfunc=np.sum).T\n tm.assert_frame_equal(rs, xp)\n\n rs = df.pivot_table(columns='cols', aggfunc={'values': 'mean'})\n xp = df.pivot_table(index='cols', aggfunc={'values': 'mean'}).T\n tm.assert_frame_equal(rs, xp)\n\n def test_pivot_table_dropna(self):\n df = DataFrame({'amount': {0: 60000, 1: 100000, 2: 50000, 3: 30000},\n 'customer': {0: 'A', 1: 'A', 2: 'B', 3: 'C'},\n 'month': {0: 201307, 1: 201309, 2: 201308, 3: 201310},\n 'product': {0: 'a', 1: 'b', 2: 'c', 3: 'd'},\n 'quantity': {0: 2000000, 1: 500000,\n 2: 1000000, 3: 1000000}})\n pv_col = df.pivot_table('quantity', 'month', [\n 'customer', 'product'], dropna=False)\n pv_ind = df.pivot_table(\n 'quantity', ['customer', 'product'], 'month', dropna=False)\n\n m = MultiIndex.from_tuples([('A', 'a'), ('A', 'b'), ('A', 'c'),\n ('A', 'd'), ('B', 'a'), ('B', 'b'),\n ('B', 'c'), ('B', 'd'), ('C', 'a'),\n ('C', 'b'), ('C', 'c'), ('C', 'd')],\n names=['customer', 'product'])\n tm.assert_index_equal(pv_col.columns, m)\n tm.assert_index_equal(pv_ind.index, m)\n\n def test_pivot_table_categorical(self):\n\n cat1 = Categorical([\"a\", \"a\", \"b\", \"b\"],\n categories=[\"a\", \"b\", \"z\"], ordered=True)\n cat2 = Categorical([\"c\", \"d\", \"c\", \"d\"],\n categories=[\"c\", \"d\", \"y\"], ordered=True)\n df = DataFrame({\"A\": cat1, \"B\": cat2, \"values\": [1, 2, 3, 4]})\n result = pd.pivot_table(df, values='values', index=['A', 'B'],\n dropna=True)\n\n exp_index = pd.MultiIndex.from_arrays(\n [cat1, cat2],\n names=['A', 'B'])\n expected = DataFrame(\n {'values': [1, 2, 3, 4]},\n index=exp_index)\n tm.assert_frame_equal(result, expected)\n\n def 
test_pivot_table_dropna_categoricals(self, dropna):\n # GH 15193\n categories = ['a', 'b', 'c', 'd']\n\n df = DataFrame({'A': ['a', 'a', 'a', 'b', 'b', 'b', 'c', 'c', 'c'],\n 'B': [1, 2, 3, 1, 2, 3, 1, 2, 3],\n 'C': range(0, 9)})\n\n df['A'] = df['A'].astype(CDT(categories, ordered=False))\n result = df.pivot_table(index='B', columns='A', values='C',\n dropna=dropna)\n expected_columns = Series(['a', 'b', 'c'], name='A')\n expected_columns = expected_columns.astype(\n CDT(categories, ordered=False))\n expected_index = Series([1, 2, 3], name='B')\n expected = DataFrame([[0, 3, 6],\n [1, 4, 7],\n [2, 5, 8]],\n index=expected_index,\n columns=expected_columns,)\n if not dropna:\n # add back the non observed to compare\n expected = expected.reindex(\n columns=Categorical(categories)).astype('float')\n\n tm.assert_frame_equal(result, expected)\n\n def test_pivot_with_non_observable_dropna(self, dropna):\n # gh-21133\n df = pd.DataFrame(\n {'A': pd.Categorical([np.nan, 'low', 'high', 'low', 'high'],\n categories=['low', 'high'],\n ordered=True),\n 'B': range(5)})\n\n result = df.pivot_table(index='A', values='B', dropna=dropna)\n expected = pd.DataFrame(\n {'B': [2, 3]},\n index=pd.Index(\n pd.Categorical.from_codes([0, 1],\n categories=['low', 'high'],\n ordered=True),\n name='A'))\n\n tm.assert_frame_equal(result, expected)\n\n # gh-21378\n df = pd.DataFrame(\n {'A': pd.Categorical(['left', 'low', 'high', 'low', 'high'],\n categories=['low', 'high', 'left'],\n ordered=True),\n 'B': range(5)})\n\n result = df.pivot_table(index='A', values='B', dropna=dropna)\n expected = pd.DataFrame(\n {'B': [2, 3, 0]},\n index=pd.Index(\n pd.Categorical.from_codes([0, 1, 2],\n categories=['low', 'high', 'left'],\n ordered=True),\n name='A'))\n\n tm.assert_frame_equal(result, expected)\n\n def test_pass_array(self):\n result = self.data.pivot_table(\n 'D', index=self.data.A, columns=self.data.C)\n expected = self.data.pivot_table('D', index='A', columns='C')\n tm.assert_frame_equal(result, expected)\n\n def test_pass_function(self):\n result = self.data.pivot_table('D', index=lambda x: x // 5,\n columns=self.data.C)\n expected = self.data.pivot_table('D', index=self.data.index // 5,\n columns='C')\n tm.assert_frame_equal(result, expected)\n\n def test_pivot_table_multiple(self):\n index = ['A', 'B']\n columns = 'C'\n table = pivot_table(self.data, index=index, columns=columns)\n expected = self.data.groupby(index + [columns]).agg(np.mean).unstack()\n tm.assert_frame_equal(table, expected)\n\n def test_pivot_dtypes(self):\n\n # can convert dtypes\n f = DataFrame({'a': ['cat', 'bat', 'cat', 'bat'], 'v': [\n 1, 2, 3, 4], 'i': ['a', 'b', 'a', 'b']})\n assert f.dtypes['v'] == 'int64'\n\n z = pivot_table(f, values='v', index=['a'], columns=[\n 'i'], fill_value=0, aggfunc=np.sum)\n result = z.get_dtype_counts()\n expected = Series(dict(int64=2))\n tm.assert_series_equal(result, expected)\n\n # cannot convert dtypes\n f = DataFrame({'a': ['cat', 'bat', 'cat', 'bat'], 'v': [\n 1.5, 2.5, 3.5, 4.5], 'i': ['a', 'b', 'a', 'b']})\n assert f.dtypes['v'] == 'float64'\n\n z = pivot_table(f, values='v', index=['a'], columns=[\n 'i'], fill_value=0, aggfunc=np.mean)\n result = z.get_dtype_counts()\n expected = Series(dict(float64=2))\n tm.assert_series_equal(result, expected)\n\n @pytest.mark.parametrize('columns,values',\n [('bool1', ['float1', 'float2']),\n ('bool1', ['float1', 'float2', 'bool1']),\n ('bool2', ['float1', 'float2', 'bool1'])])\n def test_pivot_preserve_dtypes(self, columns, values):\n # GH 7142 regression test\n 
v = np.arange(5, dtype=np.float64)\n df = DataFrame({'float1': v, 'float2': v + 2.0,\n 'bool1': v <= 2, 'bool2': v <= 3})\n\n df_res = df.reset_index().pivot_table(\n index='index', columns=columns, values=values)\n\n result = dict(df_res.dtypes)\n expected = {col: np.dtype('O') if col[0].startswith('b')\n else np.dtype('float64') for col in df_res}\n assert result == expected\n\n def test_pivot_no_values(self):\n # GH 14380\n idx = pd.DatetimeIndex(['2011-01-01', '2011-02-01', '2011-01-02',\n '2011-01-01', '2011-01-02'])\n df = pd.DataFrame({'A': [1, 2, 3, 4, 5]},\n index=idx)\n res = df.pivot_table(index=df.index.month, columns=df.index.day)\n\n exp_columns = pd.MultiIndex.from_tuples([('A', 1), ('A', 2)])\n exp = pd.DataFrame([[2.5, 4.0], [2.0, np.nan]],\n index=[1, 2], columns=exp_columns)\n tm.assert_frame_equal(res, exp)\n\n df = pd.DataFrame({'A': [1, 2, 3, 4, 5],\n 'dt': pd.date_range('2011-01-01', freq='D',\n periods=5)},\n index=idx)\n res = df.pivot_table(index=df.index.month,\n columns=pd.Grouper(key='dt', freq='M'))\n exp_columns = pd.MultiIndex.from_tuples([('A',\n pd.Timestamp('2011-01-31'))])\n exp_columns.names = [None, 'dt']\n exp = pd.DataFrame([3.25, 2.0],\n index=[1, 2], columns=exp_columns)\n tm.assert_frame_equal(res, exp)\n\n res = df.pivot_table(index=pd.Grouper(freq='A'),\n columns=pd.Grouper(key='dt', freq='M'))\n exp = pd.DataFrame([3],\n index=pd.DatetimeIndex(['2011-12-31']),\n columns=exp_columns)\n tm.assert_frame_equal(res, exp)\n\n def test_pivot_multi_values(self):\n result = pivot_table(self.data, values=['D', 'E'],\n index='A', columns=['B', 'C'], fill_value=0)\n expected = pivot_table(self.data.drop(['F'], axis=1),\n index='A', columns=['B', 'C'], fill_value=0)\n tm.assert_frame_equal(result, expected)\n\n def test_pivot_multi_functions(self):\n f = lambda func: pivot_table(self.data, values=['D', 'E'],\n index=['A', 'B'], columns='C',\n aggfunc=func)\n result = f([np.mean, np.std])\n means = f(np.mean)\n stds = f(np.std)\n expected = concat([means, stds], keys=['mean', 'std'], axis=1)\n tm.assert_frame_equal(result, expected)\n\n # margins not supported??\n f = lambda func: pivot_table(self.data, values=['D', 'E'],\n index=['A', 'B'], columns='C',\n aggfunc=func, margins=True)\n result = f([np.mean, np.std])\n means = f(np.mean)\n stds = f(np.std)\n expected = concat([means, stds], keys=['mean', 'std'], axis=1)\n tm.assert_frame_equal(result, expected)\n\n @pytest.mark.parametrize('method', [True, False])\n def test_pivot_index_with_nan(self, method):\n # GH 3588\n nan = np.nan\n df = DataFrame({'a': ['R1', 'R2', nan, 'R4'],\n 'b': ['C1', 'C2', 'C3', 'C4'],\n 'c': [10, 15, 17, 20]})\n if method:\n result = df.pivot('a', 'b', 'c')\n else:\n result = pd.pivot(df, 'a', 'b', 'c')\n expected = DataFrame([[nan, nan, 17, nan], [10, nan, nan, nan],\n [nan, 15, nan, nan], [nan, nan, nan, 20]],\n index=Index([nan, 'R1', 'R2', 'R4'], name='a'),\n columns=Index(['C1', 'C2', 'C3', 'C4'], name='b'))\n tm.assert_frame_equal(result, expected)\n tm.assert_frame_equal(df.pivot('b', 'a', 'c'), expected.T)\n\n # GH9491\n df = DataFrame({'a': pd.date_range('2014-02-01', periods=6, freq='D'),\n 'c': 100 + np.arange(6)})\n df['b'] = df['a'] - pd.Timestamp('2014-02-02')\n df.loc[1, 'a'] = df.loc[3, 'a'] = nan\n df.loc[1, 'b'] = df.loc[4, 'b'] = nan\n\n if method:\n pv = df.pivot('a', 'b', 'c')\n else:\n pv = pd.pivot(df, 'a', 'b', 'c')\n assert pv.notna().values.sum() == len(df)\n\n for _, row in df.iterrows():\n assert pv.loc[row['a'], row['b']] == row['c']\n\n if method:\n 
result = df.pivot('b', 'a', 'c')\n else:\n result = pd.pivot(df, 'b', 'a', 'c')\n tm.assert_frame_equal(result, pv.T)\n\n @pytest.mark.parametrize('method', [True, False])\n def test_pivot_with_tz(self, method):\n # GH 5878\n df = DataFrame({'dt1': [datetime(2013, 1, 1, 9, 0),\n datetime(2013, 1, 2, 9, 0),\n datetime(2013, 1, 1, 9, 0),\n datetime(2013, 1, 2, 9, 0)],\n 'dt2': [datetime(2014, 1, 1, 9, 0),\n datetime(2014, 1, 1, 9, 0),\n datetime(2014, 1, 2, 9, 0),\n datetime(2014, 1, 2, 9, 0)],\n 'data1': np.arange(4, dtype='int64'),\n 'data2': np.arange(4, dtype='int64')})\n\n df['dt1'] = df['dt1'].apply(lambda d: pd.Timestamp(d, tz='US/Pacific'))\n df['dt2'] = df['dt2'].apply(lambda d: pd.Timestamp(d, tz='Asia/Tokyo'))\n\n exp_col1 = Index(['data1', 'data1', 'data2', 'data2'])\n exp_col2 = pd.DatetimeIndex(['2014/01/01 09:00',\n '2014/01/02 09:00'] * 2,\n name='dt2', tz='Asia/Tokyo')\n exp_col = pd.MultiIndex.from_arrays([exp_col1, exp_col2])\n expected = DataFrame([[0, 2, 0, 2], [1, 3, 1, 3]],\n index=pd.DatetimeIndex(['2013/01/01 09:00',\n '2013/01/02 09:00'],\n name='dt1',\n tz='US/Pacific'),\n columns=exp_col)\n\n if method:\n pv = df.pivot(index='dt1', columns='dt2')\n else:\n pv = pd.pivot(df, index='dt1', columns='dt2')\n tm.assert_frame_equal(pv, expected)\n\n expected = DataFrame([[0, 2], [1, 3]],\n index=pd.DatetimeIndex(['2013/01/01 09:00',\n '2013/01/02 09:00'],\n name='dt1',\n tz='US/Pacific'),\n columns=pd.DatetimeIndex(['2014/01/01 09:00',\n '2014/01/02 09:00'],\n name='dt2',\n tz='Asia/Tokyo'))\n\n if method:\n pv = df.pivot(index='dt1', columns='dt2', values='data1')\n else:\n pv = pd.pivot(df, index='dt1', columns='dt2', values='data1')\n tm.assert_frame_equal(pv, expected)\n\n @pytest.mark.parametrize('method', [True, False])\n def test_pivot_periods(self, method):\n df = DataFrame({'p1': [pd.Period('2013-01-01', 'D'),\n pd.Period('2013-01-02', 'D'),\n pd.Period('2013-01-01', 'D'),\n pd.Period('2013-01-02', 'D')],\n 'p2': [pd.Period('2013-01', 'M'),\n pd.Period('2013-01', 'M'),\n pd.Period('2013-02', 'M'),\n pd.Period('2013-02', 'M')],\n 'data1': np.arange(4, dtype='int64'),\n 'data2': np.arange(4, dtype='int64')})\n\n exp_col1 = Index(['data1', 'data1', 'data2', 'data2'])\n exp_col2 = pd.PeriodIndex(['2013-01', '2013-02'] * 2,\n name='p2', freq='M')\n exp_col = pd.MultiIndex.from_arrays([exp_col1, exp_col2])\n expected = DataFrame([[0, 2, 0, 2], [1, 3, 1, 3]],\n index=pd.PeriodIndex(['2013-01-01', '2013-01-02'],\n name='p1', freq='D'),\n columns=exp_col)\n if method:\n pv = df.pivot(index='p1', columns='p2')\n else:\n pv = pd.pivot(df, index='p1', columns='p2')\n tm.assert_frame_equal(pv, expected)\n\n expected = DataFrame([[0, 2], [1, 3]],\n index=pd.PeriodIndex(['2013-01-01', '2013-01-02'],\n name='p1', freq='D'),\n columns=pd.PeriodIndex(['2013-01', '2013-02'],\n name='p2', freq='M'))\n if method:\n pv = df.pivot(index='p1', columns='p2', values='data1')\n else:\n pv = pd.pivot(df, index='p1', columns='p2', values='data1')\n tm.assert_frame_equal(pv, expected)\n\n @pytest.mark.parametrize('values', [\n ['baz', 'zoo'], np.array(['baz', 'zoo']),\n pd.Series(['baz', 'zoo']), pd.Index(['baz', 'zoo'])\n ])\n @pytest.mark.parametrize('method', [True, False])\n def test_pivot_with_list_like_values(self, values, method):\n # issue #17160\n df = pd.DataFrame({'foo': ['one', 'one', 'one', 'two', 'two', 'two'],\n 'bar': ['A', 'B', 'C', 'A', 'B', 'C'],\n 'baz': [1, 2, 3, 4, 5, 6],\n 'zoo': ['x', 'y', 'z', 'q', 'w', 't']})\n\n if method:\n result = df.pivot(index='foo', 
columns='bar', values=values)\n else:\n result = pd.pivot(df, index='foo', columns='bar', values=values)\n\n data = [[1, 2, 3, 'x', 'y', 'z'],\n [4, 5, 6, 'q', 'w', 't']]\n index = Index(data=['one', 'two'], name='foo')\n columns = MultiIndex(levels=[['baz', 'zoo'], ['A', 'B', 'C']],\n labels=[[0, 0, 0, 1, 1, 1], [0, 1, 2, 0, 1, 2]],\n names=[None, 'bar'])\n expected = DataFrame(data=data, index=index,\n columns=columns, dtype='object')\n tm.assert_frame_equal(result, expected)\n\n @pytest.mark.parametrize('values', [\n ['bar', 'baz'], np.array(['bar', 'baz']),\n pd.Series(['bar', 'baz']), pd.Index(['bar', 'baz'])\n ])\n @pytest.mark.parametrize('method', [True, False])\n def test_pivot_with_list_like_values_nans(self, values, method):\n # issue #17160\n df = pd.DataFrame({'foo': ['one', 'one', 'one', 'two', 'two', 'two'],\n 'bar': ['A', 'B', 'C', 'A', 'B', 'C'],\n 'baz': [1, 2, 3, 4, 5, 6],\n 'zoo': ['x', 'y', 'z', 'q', 'w', 't']})\n\n if method:\n result = df.pivot(index='zoo', columns='foo', values=values)\n else:\n result = pd.pivot(df, index='zoo', columns='foo', values=values)\n\n data = [[np.nan, 'A', np.nan, 4],\n [np.nan, 'C', np.nan, 6],\n [np.nan, 'B', np.nan, 5],\n ['A', np.nan, 1, np.nan],\n ['B', np.nan, 2, np.nan],\n ['C', np.nan, 3, np.nan]]\n index = Index(data=['q', 't', 'w', 'x', 'y', 'z'], name='zoo')\n columns = MultiIndex(levels=[['bar', 'baz'], ['one', 'two']],\n labels=[[0, 0, 1, 1], [0, 1, 0, 1]],\n names=[None, 'foo'])\n expected = DataFrame(data=data, index=index,\n columns=columns, dtype='object')\n tm.assert_frame_equal(result, expected)\n\n @pytest.mark.xfail(reason='MultiIndexed unstack with tuple names fails'\n 'with KeyError GH#19966',\n strict=True)\n @pytest.mark.parametrize('method', [True, False])\n def test_pivot_with_multiindex(self, method):\n # issue #17160\n index = Index(data=[0, 1, 2, 3, 4, 5])\n data = [['one', 'A', 1, 'x'],\n ['one', 'B', 2, 'y'],\n ['one', 'C', 3, 'z'],\n ['two', 'A', 4, 'q'],\n ['two', 'B', 5, 'w'],\n ['two', 'C', 6, 't']]\n columns = MultiIndex(levels=[['bar', 'baz'], ['first', 'second']],\n labels=[[0, 0, 1, 1], [0, 1, 0, 1]])\n df = DataFrame(data=data, index=index, columns=columns, dtype='object')\n if method:\n result = df.pivot(index=('bar', 'first'),\n columns=('bar', 'second'),\n values=('baz', 'first'))\n else:\n result = pd.pivot(df,\n index=('bar', 'first'),\n columns=('bar', 'second'),\n values=('baz', 'first'))\n\n data = {'A': Series([1, 4], index=['one', 'two']),\n 'B': Series([2, 5], index=['one', 'two']),\n 'C': Series([3, 6], index=['one', 'two'])}\n expected = DataFrame(data)\n tm.assert_frame_equal(result, expected)\n\n @pytest.mark.parametrize('method', [True, False])\n def test_pivot_with_tuple_of_values(self, method):\n # issue #17160\n df = pd.DataFrame({'foo': ['one', 'one', 'one', 'two', 'two', 'two'],\n 'bar': ['A', 'B', 'C', 'A', 'B', 'C'],\n 'baz': [1, 2, 3, 4, 5, 6],\n 'zoo': ['x', 'y', 'z', 'q', 'w', 't']})\n with pytest.raises(KeyError):\n # tuple is seen as a single column name\n if method:\n df.pivot(index='zoo', columns='foo', values=('bar', 'baz'))\n else:\n pd.pivot(df, index='zoo', columns='foo', values=('bar', 'baz'))\n\n def test_margins(self):\n def _check_output(result, values_col, index=['A', 'B'],\n columns=['C'],\n margins_col='All'):\n col_margins = result.loc[result.index[:-1], margins_col]\n expected_col_margins = self.data.groupby(index)[values_col].mean()\n tm.assert_series_equal(col_margins, expected_col_margins,\n check_names=False)\n assert col_margins.name == 
margins_col\n\n result = result.sort_index()\n index_margins = result.loc[(margins_col, '')].iloc[:-1]\n\n expected_ix_margins = self.data.groupby(columns)[values_col].mean()\n tm.assert_series_equal(index_margins, expected_ix_margins,\n check_names=False)\n assert index_margins.name == (margins_col, '')\n\n grand_total_margins = result.loc[(margins_col, ''), margins_col]\n expected_total_margins = self.data[values_col].mean()\n assert grand_total_margins == expected_total_margins\n\n # column specified\n result = self.data.pivot_table(values='D', index=['A', 'B'],\n columns='C',\n margins=True, aggfunc=np.mean)\n _check_output(result, 'D')\n\n # Set a different margins_name (not 'All')\n result = self.data.pivot_table(values='D', index=['A', 'B'],\n columns='C',\n margins=True, aggfunc=np.mean,\n margins_name='Totals')\n _check_output(result, 'D', margins_col='Totals')\n\n # no column specified\n table = self.data.pivot_table(index=['A', 'B'], columns='C',\n margins=True, aggfunc=np.mean)\n for value_col in table.columns.levels[0]:\n _check_output(table[value_col], value_col)\n\n # no col\n\n # to help with a buglet\n self.data.columns = [k * 2 for k in self.data.columns]\n table = self.data.pivot_table(index=['AA', 'BB'], margins=True,\n aggfunc=np.mean)\n for value_col in table.columns:\n totals = table.loc[('All', ''), value_col]\n assert totals == self.data[value_col].mean()\n\n # no rows\n rtable = self.data.pivot_table(columns=['AA', 'BB'], margins=True,\n aggfunc=np.mean)\n assert isinstance(rtable, Series)\n\n table = self.data.pivot_table(index=['AA', 'BB'], margins=True,\n aggfunc='mean')\n for item in ['DD', 'EE', 'FF']:\n totals = table.loc[('All', ''), item]\n assert totals == self.data[item].mean()\n\n def test_margins_dtype(self):\n # GH 17013\n\n df = self.data.copy()\n df[['D', 'E', 'F']] = np.arange(len(df) * 3).reshape(len(df), 3)\n\n mi_val = list(product(['bar', 'foo'], ['one', 'two'])) + [('All', '')]\n mi = MultiIndex.from_tuples(mi_val, names=('A', 'B'))\n expected = DataFrame({'dull': [12, 21, 3, 9, 45],\n 'shiny': [33, 0, 36, 51, 120]},\n index=mi).rename_axis('C', axis=1)\n expected['All'] = expected['dull'] + expected['shiny']\n\n result = df.pivot_table(values='D', index=['A', 'B'],\n columns='C', margins=True,\n aggfunc=np.sum, fill_value=0)\n\n tm.assert_frame_equal(expected, result)\n\n @pytest.mark.xfail(reason='GH#17035 (len of floats is casted back to '\n 'floats)',\n strict=True)\n def test_margins_dtype_len(self):\n mi_val = list(product(['bar', 'foo'], ['one', 'two'])) + [('All', '')]\n mi = MultiIndex.from_tuples(mi_val, names=('A', 'B'))\n expected = DataFrame({'dull': [1, 1, 2, 1, 5],\n 'shiny': [2, 0, 2, 2, 6]},\n index=mi).rename_axis('C', axis=1)\n expected['All'] = expected['dull'] + expected['shiny']\n\n result = self.data.pivot_table(values='D', index=['A', 'B'],\n columns='C', margins=True,\n aggfunc=len, fill_value=0)\n\n tm.assert_frame_equal(expected, result)\n\n def test_pivot_integer_columns(self):\n # caused by upstream bug in unstack\n\n d = date.min\n data = list(product(['foo', 'bar'], ['A', 'B', 'C'], ['x1', 'x2'],\n [d + timedelta(i)\n for i in range(20)], [1.0]))\n df = DataFrame(data)\n table = df.pivot_table(values=4, index=[0, 1, 3], columns=[2])\n\n df2 = df.rename(columns=str)\n table2 = df2.pivot_table(\n values='4', index=['0', '1', '3'], columns=['2'])\n\n tm.assert_frame_equal(table, table2, check_names=False)\n\n def test_pivot_no_level_overlap(self):\n # GH #1181\n\n data = DataFrame({'a': ['a', 'a', 'a', 'a', 'b', 
'b', 'b', 'b'] * 2,\n 'b': [0, 0, 0, 0, 1, 1, 1, 1] * 2,\n 'c': (['foo'] * 4 + ['bar'] * 4) * 2,\n 'value': np.random.randn(16)})\n\n table = data.pivot_table('value', index='a', columns=['b', 'c'])\n\n grouped = data.groupby(['a', 'b', 'c'])['value'].mean()\n expected = grouped.unstack('b').unstack('c').dropna(axis=1, how='all')\n tm.assert_frame_equal(table, expected)\n\n def test_pivot_columns_lexsorted(self):\n\n n = 10000\n\n dtype = np.dtype([\n (\"Index\", object),\n (\"Symbol\", object),\n (\"Year\", int),\n (\"Month\", int),\n (\"Day\", int),\n (\"Quantity\", int),\n (\"Price\", float),\n ])\n\n products = np.array([\n ('SP500', 'ADBE'),\n ('SP500', 'NVDA'),\n ('SP500', 'ORCL'),\n ('NDQ100', 'AAPL'),\n ('NDQ100', 'MSFT'),\n ('NDQ100', 'GOOG'),\n ('FTSE', 'DGE.L'),\n ('FTSE', 'TSCO.L'),\n ('FTSE', 'GSK.L'),\n ], dtype=[('Index', object), ('Symbol', object)])\n items = np.empty(n, dtype=dtype)\n iproduct = np.random.randint(0, len(products), n)\n items['Index'] = products['Index'][iproduct]\n items['Symbol'] = products['Symbol'][iproduct]\n dr = pd.date_range(date(2000, 1, 1),\n date(2010, 12, 31))\n dates = dr[np.random.randint(0, len(dr), n)]\n items['Year'] = dates.year\n items['Month'] = dates.month\n items['Day'] = dates.day\n items['Price'] = np.random.lognormal(4.0, 2.0, n)\n\n df = DataFrame(items)\n\n pivoted = df.pivot_table('Price', index=['Month', 'Day'],\n columns=['Index', 'Symbol', 'Year'],\n aggfunc='mean')\n\n assert pivoted.columns.is_monotonic\n\n def test_pivot_complex_aggfunc(self):\n f = OrderedDict([('D', ['std']), ('E', ['sum'])])\n expected = self.data.groupby(['A', 'B']).agg(f).unstack('B')\n result = self.data.pivot_table(index='A', columns='B', aggfunc=f)\n\n tm.assert_frame_equal(result, expected)\n\n def test_margins_no_values_no_cols(self):\n # Regression test on pivot table: no values or cols passed.\n result = self.data[['A', 'B']].pivot_table(\n index=['A', 'B'], aggfunc=len, margins=True)\n result_list = result.tolist()\n assert sum(result_list[:-1]) == result_list[-1]\n\n def test_margins_no_values_two_rows(self):\n # Regression test on pivot table: no values passed but rows are a\n # multi-index\n result = self.data[['A', 'B', 'C']].pivot_table(\n index=['A', 'B'], columns='C', aggfunc=len, margins=True)\n assert result.All.tolist() == [3.0, 1.0, 4.0, 3.0, 11.0]\n\n def test_margins_no_values_one_row_one_col(self):\n # Regression test on pivot table: no values passed but row and col\n # defined\n result = self.data[['A', 'B']].pivot_table(\n index='A', columns='B', aggfunc=len, margins=True)\n assert result.All.tolist() == [4.0, 7.0, 11.0]\n\n def test_margins_no_values_two_row_two_cols(self):\n # Regression test on pivot table: no values passed but rows and cols\n # are multi-indexed\n self.data['D'] = ['a', 'b', 'c', 'd',\n 'e', 'f', 'g', 'h', 'i', 'j', 'k']\n result = self.data[['A', 'B', 'C', 'D']].pivot_table(\n index=['A', 'B'], columns=['C', 'D'], aggfunc=len, margins=True)\n assert result.All.tolist() == [3.0, 1.0, 4.0, 3.0, 11.0]\n\n def test_pivot_table_with_margins_set_margin_name(self):\n # see gh-3335\n for margin_name in ['foo', 'one', 666, None, ['a', 'b']]:\n with pytest.raises(ValueError):\n # multi-index index\n pivot_table(self.data, values='D', index=['A', 'B'],\n columns=['C'], margins=True,\n margins_name=margin_name)\n with pytest.raises(ValueError):\n # multi-index column\n pivot_table(self.data, values='D', index=['C'],\n columns=['A', 'B'], margins=True,\n margins_name=margin_name)\n with pytest.raises(ValueError):\n # 
non-multi-index index/column\n pivot_table(self.data, values='D', index=['A'],\n columns=['B'], margins=True,\n margins_name=margin_name)\n\n def test_pivot_timegrouper(self):\n df = DataFrame({\n 'Branch': 'A A A A A A A B'.split(),\n 'Buyer': 'Carl Mark Carl Carl Joe Joe Joe Carl'.split(),\n 'Quantity': [1, 3, 5, 1, 8, 1, 9, 3],\n 'Date': [datetime(2013, 1, 1),\n datetime(2013, 1, 1),\n datetime(2013, 10, 1),\n datetime(2013, 10, 2),\n datetime(2013, 10, 1),\n datetime(2013, 10, 2),\n datetime(2013, 12, 2),\n datetime(2013, 12, 2), ]}).set_index('Date')\n\n expected = DataFrame(np.array([10, 18, 3], dtype='int64')\n .reshape(1, 3),\n index=[datetime(2013, 12, 31)],\n columns='Carl Joe Mark'.split())\n expected.index.name = 'Date'\n expected.columns.name = 'Buyer'\n\n result = pivot_table(df, index=Grouper(freq='A'), columns='Buyer',\n values='Quantity', aggfunc=np.sum)\n tm.assert_frame_equal(result, expected)\n\n result = pivot_table(df, index='Buyer', columns=Grouper(freq='A'),\n values='Quantity', aggfunc=np.sum)\n tm.assert_frame_equal(result, expected.T)\n\n expected = DataFrame(np.array([1, np.nan, 3, 9, 18, np.nan])\n .reshape(2, 3),\n index=[datetime(2013, 1, 1),\n datetime(2013, 7, 1)],\n columns='Carl Joe Mark'.split())\n expected.index.name = 'Date'\n expected.columns.name = 'Buyer'\n\n result = pivot_table(df, index=Grouper(freq='6MS'), columns='Buyer',\n values='Quantity', aggfunc=np.sum)\n tm.assert_frame_equal(result, expected)\n\n result = pivot_table(df, index='Buyer', columns=Grouper(freq='6MS'),\n values='Quantity', aggfunc=np.sum)\n tm.assert_frame_equal(result, expected.T)\n\n # passing the name\n df = df.reset_index()\n result = pivot_table(df, index=Grouper(freq='6MS', key='Date'),\n columns='Buyer',\n values='Quantity', aggfunc=np.sum)\n tm.assert_frame_equal(result, expected)\n\n result = pivot_table(df, index='Buyer',\n columns=Grouper(freq='6MS', key='Date'),\n values='Quantity', aggfunc=np.sum)\n tm.assert_frame_equal(result, expected.T)\n\n pytest.raises(KeyError, lambda: pivot_table(\n df, index=Grouper(freq='6MS', key='foo'),\n columns='Buyer', values='Quantity', aggfunc=np.sum))\n pytest.raises(KeyError, lambda: pivot_table(\n df, index='Buyer',\n columns=Grouper(freq='6MS', key='foo'),\n values='Quantity', aggfunc=np.sum))\n\n # passing the level\n df = df.set_index('Date')\n result = pivot_table(df, index=Grouper(freq='6MS', level='Date'),\n columns='Buyer', values='Quantity',\n aggfunc=np.sum)\n tm.assert_frame_equal(result, expected)\n\n result = pivot_table(df, index='Buyer',\n columns=Grouper(freq='6MS', level='Date'),\n values='Quantity', aggfunc=np.sum)\n tm.assert_frame_equal(result, expected.T)\n\n pytest.raises(ValueError, lambda: pivot_table(\n df, index=Grouper(freq='6MS', level='foo'),\n columns='Buyer', values='Quantity', aggfunc=np.sum))\n pytest.raises(ValueError, lambda: pivot_table(\n df, index='Buyer',\n columns=Grouper(freq='6MS', level='foo'),\n values='Quantity', aggfunc=np.sum))\n\n # double grouper\n df = DataFrame({\n 'Branch': 'A A A A A A A B'.split(),\n 'Buyer': 'Carl Mark Carl Carl Joe Joe Joe Carl'.split(),\n 'Quantity': [1, 3, 5, 1, 8, 1, 9, 3],\n 'Date': [datetime(2013, 11, 1, 13, 0), datetime(2013, 9, 1, 13, 5),\n datetime(2013, 10, 1, 20, 0),\n datetime(2013, 10, 2, 10, 0),\n datetime(2013, 11, 1, 20, 0),\n datetime(2013, 10, 2, 10, 0),\n datetime(2013, 10, 2, 12, 0),\n datetime(2013, 12, 5, 14, 0)],\n 'PayDay': [datetime(2013, 10, 4, 0, 0),\n datetime(2013, 10, 15, 13, 5),\n datetime(2013, 9, 5, 20, 0),\n datetime(2013, 
11, 2, 10, 0),\n datetime(2013, 10, 7, 20, 0),\n datetime(2013, 9, 5, 10, 0),\n datetime(2013, 12, 30, 12, 0),\n datetime(2013, 11, 20, 14, 0), ]})\n\n result = pivot_table(df, index=Grouper(freq='M', key='Date'),\n columns=Grouper(freq='M', key='PayDay'),\n values='Quantity', aggfunc=np.sum)\n expected = DataFrame(np.array([np.nan, 3, np.nan, np.nan,\n 6, np.nan, 1, 9,\n np.nan, 9, np.nan, np.nan, np.nan,\n np.nan, 3, np.nan]).reshape(4, 4),\n index=[datetime(2013, 9, 30),\n datetime(2013, 10, 31),\n datetime(2013, 11, 30),\n datetime(2013, 12, 31)],\n columns=[datetime(2013, 9, 30),\n datetime(2013, 10, 31),\n datetime(2013, 11, 30),\n datetime(2013, 12, 31)])\n expected.index.name = 'Date'\n expected.columns.name = 'PayDay'\n\n tm.assert_frame_equal(result, expected)\n\n result = pivot_table(df, index=Grouper(freq='M', key='PayDay'),\n columns=Grouper(freq='M', key='Date'),\n values='Quantity', aggfunc=np.sum)\n tm.assert_frame_equal(result, expected.T)\n\n tuples = [(datetime(2013, 9, 30), datetime(2013, 10, 31)),\n (datetime(2013, 10, 31),\n datetime(2013, 9, 30)),\n (datetime(2013, 10, 31),\n datetime(2013, 11, 30)),\n (datetime(2013, 10, 31),\n datetime(2013, 12, 31)),\n (datetime(2013, 11, 30),\n datetime(2013, 10, 31)),\n (datetime(2013, 12, 31), datetime(2013, 11, 30)), ]\n idx = MultiIndex.from_tuples(tuples, names=['Date', 'PayDay'])\n expected = DataFrame(np.array([3, np.nan, 6, np.nan, 1, np.nan,\n 9, np.nan, 9, np.nan,\n np.nan, 3]).reshape(6, 2),\n index=idx, columns=['A', 'B'])\n expected.columns.name = 'Branch'\n\n result = pivot_table(\n df, index=[Grouper(freq='M', key='Date'),\n Grouper(freq='M', key='PayDay')], columns=['Branch'],\n values='Quantity', aggfunc=np.sum)\n tm.assert_frame_equal(result, expected)\n\n result = pivot_table(df, index=['Branch'],\n columns=[Grouper(freq='M', key='Date'),\n Grouper(freq='M', key='PayDay')],\n values='Quantity', aggfunc=np.sum)\n tm.assert_frame_equal(result, expected.T)\n\n def test_pivot_datetime_tz(self):\n dates1 = ['2011-07-19 07:00:00', '2011-07-19 08:00:00',\n '2011-07-19 09:00:00',\n '2011-07-19 07:00:00', '2011-07-19 08:00:00',\n '2011-07-19 09:00:00']\n dates2 = ['2013-01-01 15:00:00', '2013-01-01 15:00:00',\n '2013-01-01 15:00:00',\n '2013-02-01 15:00:00', '2013-02-01 15:00:00',\n '2013-02-01 15:00:00']\n df = DataFrame({'label': ['a', 'a', 'a', 'b', 'b', 'b'],\n 'dt1': dates1, 'dt2': dates2,\n 'value1': np.arange(6, dtype='int64'),\n 'value2': [1, 2] * 3})\n df['dt1'] = df['dt1'].apply(lambda d: pd.Timestamp(d, tz='US/Pacific'))\n df['dt2'] = df['dt2'].apply(lambda d: pd.Timestamp(d, tz='Asia/Tokyo'))\n\n exp_idx = pd.DatetimeIndex(['2011-07-19 07:00:00',\n '2011-07-19 08:00:00',\n '2011-07-19 09:00:00'],\n tz='US/Pacific', name='dt1')\n exp_col1 = Index(['value1', 'value1'])\n exp_col2 = Index(['a', 'b'], name='label')\n exp_col = MultiIndex.from_arrays([exp_col1, exp_col2])\n expected = DataFrame([[0, 3], [1, 4], [2, 5]],\n index=exp_idx, columns=exp_col)\n result = pivot_table(df, index=['dt1'], columns=[\n 'label'], values=['value1'])\n tm.assert_frame_equal(result, expected)\n\n exp_col1 = Index(['sum', 'sum', 'sum', 'sum',\n 'mean', 'mean', 'mean', 'mean'])\n exp_col2 = Index(['value1', 'value1', 'value2', 'value2'] * 2)\n exp_col3 = pd.DatetimeIndex(['2013-01-01 15:00:00',\n '2013-02-01 15:00:00'] * 4,\n tz='Asia/Tokyo', name='dt2')\n exp_col = MultiIndex.from_arrays([exp_col1, exp_col2, exp_col3])\n expected = DataFrame(np.array([[0, 3, 1, 2, 0, 3, 1, 2],\n [1, 4, 2, 1, 1, 4, 2, 1],\n [2, 5, 1, 2, 2, 5, 1, 
2]],\n dtype='int64'),\n index=exp_idx,\n columns=exp_col)\n\n result = pivot_table(df, index=['dt1'], columns=['dt2'],\n values=['value1', 'value2'],\n aggfunc=[np.sum, np.mean])\n tm.assert_frame_equal(result, expected)\n\n def test_pivot_dtaccessor(self):\n # GH 8103\n dates1 = ['2011-07-19 07:00:00', '2011-07-19 08:00:00',\n '2011-07-19 09:00:00',\n '2011-07-19 07:00:00', '2011-07-19 08:00:00',\n '2011-07-19 09:00:00']\n dates2 = ['2013-01-01 15:00:00', '2013-01-01 15:00:00',\n '2013-01-01 15:00:00',\n '2013-02-01 15:00:00', '2013-02-01 15:00:00',\n '2013-02-01 15:00:00']\n df = DataFrame({'label': ['a', 'a', 'a', 'b', 'b', 'b'],\n 'dt1': dates1, 'dt2': dates2,\n 'value1': np.arange(6, dtype='int64'),\n 'value2': [1, 2] * 3})\n df['dt1'] = df['dt1'].apply(lambda d: pd.Timestamp(d))\n df['dt2'] = df['dt2'].apply(lambda d: pd.Timestamp(d))\n\n result = pivot_table(df, index='label', columns=df['dt1'].dt.hour,\n values='value1')\n\n exp_idx = Index(['a', 'b'], name='label')\n expected = DataFrame({7: [0, 3], 8: [1, 4], 9: [2, 5]},\n index=exp_idx,\n columns=Index([7, 8, 9], name='dt1'))\n tm.assert_frame_equal(result, expected)\n\n result = pivot_table(df, index=df['dt2'].dt.month,\n columns=df['dt1'].dt.hour,\n values='value1')\n\n expected = DataFrame({7: [0, 3], 8: [1, 4], 9: [2, 5]},\n index=Index([1, 2], name='dt2'),\n columns=Index([7, 8, 9], name='dt1'))\n tm.assert_frame_equal(result, expected)\n\n result = pivot_table(df, index=df['dt2'].dt.year.values,\n columns=[df['dt1'].dt.hour, df['dt2'].dt.month],\n values='value1')\n\n exp_col = MultiIndex.from_arrays(\n [[7, 7, 8, 8, 9, 9], [1, 2] * 3], names=['dt1', 'dt2'])\n expected = DataFrame(np.array([[0, 3, 1, 4, 2, 5]], dtype='int64'),\n index=[2013], columns=exp_col)\n tm.assert_frame_equal(result, expected)\n\n result = pivot_table(df, index=np.array(['X', 'X', 'X',\n 'X', 'Y', 'Y']),\n columns=[df['dt1'].dt.hour, df['dt2'].dt.month],\n values='value1')\n expected = DataFrame(np.array([[0, 3, 1, np.nan, 2, np.nan],\n [np.nan, np.nan, np.nan,\n 4, np.nan, 5]]),\n index=['X', 'Y'], columns=exp_col)\n tm.assert_frame_equal(result, expected)\n\n def test_daily(self):\n rng = date_range('1/1/2000', '12/31/2004', freq='D')\n ts = Series(np.random.randn(len(rng)), index=rng)\n\n annual = pivot_table(DataFrame(ts), index=ts.index.year,\n columns=ts.index.dayofyear)\n annual.columns = annual.columns.droplevel(0)\n\n doy = np.asarray(ts.index.dayofyear)\n\n for i in range(1, 367):\n subset = ts[doy == i]\n subset.index = subset.index.year\n\n result = annual[i].dropna()\n tm.assert_series_equal(result, subset, check_names=False)\n assert result.name == i\n\n def test_monthly(self):\n rng = date_range('1/1/2000', '12/31/2004', freq='M')\n ts = Series(np.random.randn(len(rng)), index=rng)\n\n annual = pivot_table(pd.DataFrame(ts), index=ts.index.year,\n columns=ts.index.month)\n annual.columns = annual.columns.droplevel(0)\n\n month = ts.index.month\n for i in range(1, 13):\n subset = ts[month == i]\n subset.index = subset.index.year\n result = annual[i].dropna()\n tm.assert_series_equal(result, subset, check_names=False)\n assert result.name == i\n\n def test_pivot_table_with_iterator_values(self):\n # GH 12017\n aggs = {'D': 'sum', 'E': 'mean'}\n\n pivot_values_list = pd.pivot_table(\n self.data, index=['A'], values=list(aggs.keys()), aggfunc=aggs,\n )\n\n pivot_values_keys = pd.pivot_table(\n self.data, index=['A'], values=aggs.keys(), aggfunc=aggs,\n )\n tm.assert_frame_equal(pivot_values_keys, pivot_values_list)\n\n agg_values_gen = 
(value for value in aggs.keys())\n pivot_values_gen = pd.pivot_table(\n self.data, index=['A'], values=agg_values_gen, aggfunc=aggs,\n )\n tm.assert_frame_equal(pivot_values_gen, pivot_values_list)\n\n def test_pivot_table_margins_name_with_aggfunc_list(self):\n # GH 13354\n margins_name = 'Weekly'\n costs = pd.DataFrame(\n {'item': ['bacon', 'cheese', 'bacon', 'cheese'],\n 'cost': [2.5, 4.5, 3.2, 3.3],\n 'day': ['M', 'M', 'T', 'T']}\n )\n table = costs.pivot_table(\n index=\"item\", columns=\"day\", margins=True,\n margins_name=margins_name, aggfunc=[np.mean, max]\n )\n ix = pd.Index(\n ['bacon', 'cheese', margins_name], dtype='object', name='item'\n )\n tups = [('mean', 'cost', 'M'), ('mean', 'cost', 'T'),\n ('mean', 'cost', margins_name), ('max', 'cost', 'M'),\n ('max', 'cost', 'T'), ('max', 'cost', margins_name)]\n cols = pd.MultiIndex.from_tuples(tups, names=[None, None, 'day'])\n expected = pd.DataFrame(table.values, index=ix, columns=cols)\n tm.assert_frame_equal(table, expected)\n\n @pytest.mark.xfail(reason='GH#17035 (np.mean of ints is casted back to '\n 'ints)',\n strict=True)\n def test_categorical_margins(self, observed):\n # GH 10989\n df = pd.DataFrame({'x': np.arange(8),\n 'y': np.arange(8) // 4,\n 'z': np.arange(8) % 2})\n\n expected = pd.DataFrame([[1.0, 2.0, 1.5], [5, 6, 5.5], [3, 4, 3.5]])\n expected.index = Index([0, 1, 'All'], name='y')\n expected.columns = Index([0, 1, 'All'], name='z')\n\n table = df.pivot_table('x', 'y', 'z', dropna=observed, margins=True)\n tm.assert_frame_equal(table, expected)\n\n @pytest.mark.xfail(reason='GH#17035 (np.mean of ints is casted back to '\n 'ints)',\n strict=True)\n def test_categorical_margins_category(self, observed):\n df = pd.DataFrame({'x': np.arange(8),\n 'y': np.arange(8) // 4,\n 'z': np.arange(8) % 2})\n\n expected = pd.DataFrame([[1.0, 2.0, 1.5], [5, 6, 5.5], [3, 4, 3.5]])\n expected.index = Index([0, 1, 'All'], name='y')\n expected.columns = Index([0, 1, 'All'], name='z')\n\n df.y = df.y.astype('category')\n df.z = df.z.astype('category')\n table = df.pivot_table('x', 'y', 'z', dropna=observed, margins=True)\n tm.assert_frame_equal(table, expected)\n\n def test_categorical_aggfunc(self, observed):\n # GH 9534\n df = pd.DataFrame({\"C1\": [\"A\", \"B\", \"C\", \"C\"],\n \"C2\": [\"a\", \"a\", \"b\", \"b\"],\n \"V\": [1, 2, 3, 4]})\n df[\"C1\"] = df[\"C1\"].astype(\"category\")\n result = df.pivot_table(\"V\", index=\"C1\", columns=\"C2\",\n dropna=observed, aggfunc=\"count\")\n\n expected_index = pd.CategoricalIndex(['A', 'B', 'C'],\n categories=['A', 'B', 'C'],\n ordered=False,\n name='C1')\n expected_columns = pd.Index(['a', 'b'], name='C2')\n expected_data = np.array([[1., np.nan],\n [1., np.nan],\n [np.nan, 2.]])\n expected = pd.DataFrame(expected_data,\n index=expected_index,\n columns=expected_columns)\n tm.assert_frame_equal(result, expected)\n\n def test_categorical_pivot_index_ordering(self, observed):\n # GH 8731\n df = pd.DataFrame({'Sales': [100, 120, 220],\n 'Month': ['January', 'January', 'January'],\n 'Year': [2013, 2014, 2013]})\n months = ['January', 'February', 'March', 'April', 'May', 'June',\n 'July', 'August', 'September', 'October', 'November',\n 'December']\n df['Month'] = df['Month'].astype('category').cat.set_categories(months)\n result = df.pivot_table(values='Sales',\n index='Month',\n columns='Year',\n dropna=observed,\n aggfunc='sum')\n expected_columns = pd.Int64Index([2013, 2014], name='Year')\n expected_index = pd.CategoricalIndex(['January'],\n categories=months,\n ordered=False,\n 
name='Month')\n expected = pd.DataFrame([[320, 120]],\n index=expected_index,\n columns=expected_columns)\n if not observed:\n result = result.dropna().astype(np.int64)\n\n tm.assert_frame_equal(result, expected)\n\n def test_pivot_table_not_series(self):\n # GH 4386\n # pivot_table always returns a DataFrame\n # when values is not list like and columns is None\n # and aggfunc is not instance of list\n df = DataFrame({'col1': [3, 4, 5],\n 'col2': ['C', 'D', 'E'],\n 'col3': [1, 3, 9]})\n\n result = df.pivot_table('col1', index=['col3', 'col2'], aggfunc=np.sum)\n m = MultiIndex.from_arrays([[1, 3, 9],\n ['C', 'D', 'E']],\n names=['col3', 'col2'])\n expected = DataFrame([3, 4, 5],\n index=m, columns=['col1'])\n\n tm.assert_frame_equal(result, expected)\n\n result = df.pivot_table(\n 'col1', index='col3', columns='col2', aggfunc=np.sum\n )\n expected = DataFrame([[3, np.NaN, np.NaN],\n [np.NaN, 4, np.NaN],\n [np.NaN, np.NaN, 5]],\n index=Index([1, 3, 9], name='col3'),\n columns=Index(['C', 'D', 'E'], name='col2'))\n\n tm.assert_frame_equal(result, expected)\n\n result = df.pivot_table('col1', index='col3', aggfunc=[np.sum])\n m = MultiIndex.from_arrays([['sum'],\n ['col1']])\n expected = DataFrame([3, 4, 5],\n index=Index([1, 3, 9], name='col3'),\n columns=m)\n\n tm.assert_frame_equal(result, expected)\n\n def test_pivot_margins_name_unicode(self):\n # issue #13292\n greek = u'\\u0394\\u03bf\\u03ba\\u03b9\\u03bc\\u03ae'\n frame = pd.DataFrame({'foo': [1, 2, 3]})\n table = pd.pivot_table(frame, index=['foo'], aggfunc=len, margins=True,\n margins_name=greek)\n index = pd.Index([1, 2, 3, greek], dtype='object', name='foo')\n expected = pd.DataFrame(index=index)\n tm.assert_frame_equal(table, expected)\n\n def test_pivot_string_as_func(self):\n # GH #18713\n # for correctness purposes\n data = DataFrame({'A': ['foo', 'foo', 'foo', 'foo', 'bar', 'bar',\n 'bar', 'bar', 'foo', 'foo', 'foo'],\n 'B': ['one', 'one', 'one', 'two', 'one', 'one',\n 'one', 'two', 'two', 'two', 'one'],\n 'C': range(11)})\n\n result = pivot_table(data, index='A', columns='B', aggfunc='sum')\n mi = MultiIndex(levels=[['C'], ['one', 'two']],\n labels=[[0, 0], [0, 1]], names=[None, 'B'])\n expected = DataFrame({('C', 'one'): {'bar': 15, 'foo': 13},\n ('C', 'two'): {'bar': 7, 'foo': 20}},\n columns=mi).rename_axis('A')\n tm.assert_frame_equal(result, expected)\n\n result = pivot_table(data, index='A', columns='B',\n aggfunc=['sum', 'mean'])\n mi = MultiIndex(levels=[['sum', 'mean'], ['C'], ['one', 'two']],\n labels=[[0, 0, 1, 1], [0, 0, 0, 0], [0, 1, 0, 1]],\n names=[None, None, 'B'])\n expected = DataFrame({('mean', 'C', 'one'): {'bar': 5.0, 'foo': 3.25},\n ('mean', 'C', 'two'): {'bar': 7.0,\n 'foo': 6.666666666666667},\n ('sum', 'C', 'one'): {'bar': 15, 'foo': 13},\n ('sum', 'C', 'two'): {'bar': 7, 'foo': 20}},\n columns=mi).rename_axis('A')\n tm.assert_frame_equal(result, expected)\n\n @pytest.mark.parametrize('f, f_numpy',\n [('sum', np.sum),\n ('mean', np.mean),\n ('std', np.std),\n (['sum', 'mean'], [np.sum, np.mean]),\n (['sum', 'std'], [np.sum, np.std]),\n (['std', 'mean'], [np.std, np.mean])])\n def test_pivot_string_func_vs_func(self, f, f_numpy):\n # GH #18713\n # for consistency purposes\n result = pivot_table(self.data, index='A', columns='B', aggfunc=f)\n expected = pivot_table(self.data, index='A', columns='B',\n aggfunc=f_numpy)\n tm.assert_frame_equal(result, expected)\n\n\nclass TestCrosstab(object):\n\n def setup_method(self, method):\n df = DataFrame({'A': ['foo', 'foo', 'foo', 'foo',\n 'bar', 'bar', 'bar', 
'bar',\n 'foo', 'foo', 'foo'],\n 'B': ['one', 'one', 'one', 'two',\n 'one', 'one', 'one', 'two',\n 'two', 'two', 'one'],\n 'C': ['dull', 'dull', 'shiny', 'dull',\n 'dull', 'shiny', 'shiny', 'dull',\n 'shiny', 'shiny', 'shiny'],\n 'D': np.random.randn(11),\n 'E': np.random.randn(11),\n 'F': np.random.randn(11)})\n\n self.df = df.append(df, ignore_index=True)\n\n def test_crosstab_single(self):\n df = self.df\n result = crosstab(df['A'], df['C'])\n expected = df.groupby(['A', 'C']).size().unstack()\n tm.assert_frame_equal(result, expected.fillna(0).astype(np.int64))\n\n def test_crosstab_multiple(self):\n df = self.df\n\n result = crosstab(df['A'], [df['B'], df['C']])\n expected = df.groupby(['A', 'B', 'C']).size()\n expected = expected.unstack(\n 'B').unstack('C').fillna(0).astype(np.int64)\n tm.assert_frame_equal(result, expected)\n\n result = crosstab([df['B'], df['C']], df['A'])\n expected = df.groupby(['B', 'C', 'A']).size()\n expected = expected.unstack('A').fillna(0).astype(np.int64)\n tm.assert_frame_equal(result, expected)\n\n def test_crosstab_ndarray(self):\n a = np.random.randint(0, 5, size=100)\n b = np.random.randint(0, 3, size=100)\n c = np.random.randint(0, 10, size=100)\n\n df = DataFrame({'a': a, 'b': b, 'c': c})\n\n result = crosstab(a, [b, c], rownames=['a'], colnames=('b', 'c'))\n expected = crosstab(df['a'], [df['b'], df['c']])\n tm.assert_frame_equal(result, expected)\n\n result = crosstab([b, c], a, colnames=['a'], rownames=('b', 'c'))\n expected = crosstab([df['b'], df['c']], df['a'])\n tm.assert_frame_equal(result, expected)\n\n # assign arbitrary names\n result = crosstab(self.df['A'].values, self.df['C'].values)\n assert result.index.name == 'row_0'\n assert result.columns.name == 'col_0'\n\n def test_crosstab_non_aligned(self):\n # GH 17005\n a = pd.Series([0, 1, 1], index=['a', 'b', 'c'])\n b = pd.Series([3, 4, 3, 4, 3], index=['a', 'b', 'c', 'd', 'f'])\n c = np.array([3, 4, 3])\n\n expected = pd.DataFrame([[1, 0], [1, 1]],\n index=Index([0, 1], name='row_0'),\n columns=Index([3, 4], name='col_0'))\n\n result = crosstab(a, b)\n tm.assert_frame_equal(result, expected)\n\n result = crosstab(a, c)\n tm.assert_frame_equal(result, expected)\n\n def test_crosstab_margins(self):\n a = np.random.randint(0, 7, size=100)\n b = np.random.randint(0, 3, size=100)\n c = np.random.randint(0, 5, size=100)\n\n df = DataFrame({'a': a, 'b': b, 'c': c})\n\n result = crosstab(a, [b, c], rownames=['a'], colnames=('b', 'c'),\n margins=True)\n\n assert result.index.names == ('a',)\n assert result.columns.names == ['b', 'c']\n\n all_cols = result['All', '']\n exp_cols = df.groupby(['a']).size().astype('i8')\n # to keep index.name\n exp_margin = Series([len(df)], index=Index(['All'], name='a'))\n exp_cols = exp_cols.append(exp_margin)\n exp_cols.name = ('All', '')\n\n tm.assert_series_equal(all_cols, exp_cols)\n\n all_rows = result.loc['All']\n exp_rows = df.groupby(['b', 'c']).size().astype('i8')\n exp_rows = exp_rows.append(Series([len(df)], index=[('All', '')]))\n exp_rows.name = 'All'\n\n exp_rows = exp_rows.reindex(all_rows.index)\n exp_rows = exp_rows.fillna(0).astype(np.int64)\n tm.assert_series_equal(all_rows, exp_rows)\n\n def test_crosstab_margins_set_margin_name(self):\n # GH 15972\n a = np.random.randint(0, 7, size=100)\n b = np.random.randint(0, 3, size=100)\n c = np.random.randint(0, 5, size=100)\n\n df = DataFrame({'a': a, 'b': b, 'c': c})\n\n result = crosstab(a, [b, c], rownames=['a'], colnames=('b', 'c'),\n margins=True, margins_name='TOTAL')\n\n assert 
result.index.names == ('a',)\n assert result.columns.names == ['b', 'c']\n\n all_cols = result['TOTAL', '']\n exp_cols = df.groupby(['a']).size().astype('i8')\n # to keep index.name\n exp_margin = Series([len(df)], index=Index(['TOTAL'], name='a'))\n exp_cols = exp_cols.append(exp_margin)\n exp_cols.name = ('TOTAL', '')\n\n tm.assert_series_equal(all_cols, exp_cols)\n\n all_rows = result.loc['TOTAL']\n exp_rows = df.groupby(['b', 'c']).size().astype('i8')\n exp_rows = exp_rows.append(Series([len(df)], index=[('TOTAL', '')]))\n exp_rows.name = 'TOTAL'\n\n exp_rows = exp_rows.reindex(all_rows.index)\n exp_rows = exp_rows.fillna(0).astype(np.int64)\n tm.assert_series_equal(all_rows, exp_rows)\n\n for margins_name in [666, None, ['a', 'b']]:\n with pytest.raises(ValueError):\n crosstab(a, [b, c], rownames=['a'], colnames=('b', 'c'),\n margins=True, margins_name=margins_name)\n\n def test_crosstab_pass_values(self):\n a = np.random.randint(0, 7, size=100)\n b = np.random.randint(0, 3, size=100)\n c = np.random.randint(0, 5, size=100)\n values = np.random.randn(100)\n\n table = crosstab([a, b], c, values, aggfunc=np.sum,\n rownames=['foo', 'bar'], colnames=['baz'])\n\n df = DataFrame({'foo': a, 'bar': b, 'baz': c, 'values': values})\n\n expected = df.pivot_table('values', index=['foo', 'bar'],\n columns='baz', aggfunc=np.sum)\n tm.assert_frame_equal(table, expected)\n\n def test_crosstab_dropna(self):\n # GH 3820\n a = np.array(['foo', 'foo', 'foo', 'bar',\n 'bar', 'foo', 'foo'], dtype=object)\n b = np.array(['one', 'one', 'two', 'one',\n 'two', 'two', 'two'], dtype=object)\n c = np.array(['dull', 'dull', 'dull', 'dull',\n 'dull', 'shiny', 'shiny'], dtype=object)\n res = pd.crosstab(a, [b, c], rownames=['a'],\n colnames=['b', 'c'], dropna=False)\n m = MultiIndex.from_tuples([('one', 'dull'), ('one', 'shiny'),\n ('two', 'dull'), ('two', 'shiny')],\n names=['b', 'c'])\n tm.assert_index_equal(res.columns, m)\n\n def test_crosstab_no_overlap(self):\n # GS 10291\n\n s1 = pd.Series([1, 2, 3], index=[1, 2, 3])\n s2 = pd.Series([4, 5, 6], index=[4, 5, 6])\n\n actual = crosstab(s1, s2)\n expected = pd.DataFrame()\n\n tm.assert_frame_equal(actual, expected)\n\n def test_margin_dropna(self):\n # GH 12577\n # pivot_table counts null into margin ('All')\n # when margins=true and dropna=true\n\n df = pd.DataFrame({'a': [1, 2, 2, 2, 2, np.nan],\n 'b': [3, 3, 4, 4, 4, 4]})\n actual = pd.crosstab(df.a, df.b, margins=True, dropna=True)\n expected = pd.DataFrame([[1, 0, 1], [1, 3, 4], [2, 3, 5]])\n expected.index = Index([1.0, 2.0, 'All'], name='a')\n expected.columns = Index([3, 4, 'All'], name='b')\n tm.assert_frame_equal(actual, expected)\n\n df = DataFrame({'a': [1, np.nan, np.nan, np.nan, 2, np.nan],\n 'b': [3, np.nan, 4, 4, 4, 4]})\n actual = pd.crosstab(df.a, df.b, margins=True, dropna=True)\n expected = pd.DataFrame([[1, 0, 1], [0, 1, 1], [1, 1, 2]])\n expected.index = Index([1.0, 2.0, 'All'], name='a')\n expected.columns = Index([3.0, 4.0, 'All'], name='b')\n tm.assert_frame_equal(actual, expected)\n\n df = DataFrame({'a': [1, np.nan, np.nan, np.nan, np.nan, 2],\n 'b': [3, 3, 4, 4, 4, 4]})\n actual = pd.crosstab(df.a, df.b, margins=True, dropna=True)\n expected = pd.DataFrame([[1, 0, 1], [0, 1, 1], [1, 1, 2]])\n expected.index = Index([1.0, 2.0, 'All'], name='a')\n expected.columns = Index([3, 4, 'All'], name='b')\n tm.assert_frame_equal(actual, expected)\n\n # GH 12642\n # _add_margins raises KeyError: Level None not found\n # when margins=True and dropna=False\n df = pd.DataFrame({'a': [1, 2, 2, 2, 2, 
np.nan],\n 'b': [3, 3, 4, 4, 4, 4]})\n actual = pd.crosstab(df.a, df.b, margins=True, dropna=False)\n expected = pd.DataFrame([[1, 0, 1], [1, 3, 4], [2, 4, 6]])\n expected.index = Index([1.0, 2.0, 'All'], name='a')\n expected.columns = Index([3, 4, 'All'], name='b')\n tm.assert_frame_equal(actual, expected)\n\n df = DataFrame({'a': [1, np.nan, np.nan, np.nan, 2, np.nan],\n 'b': [3, np.nan, 4, 4, 4, 4]})\n actual = pd.crosstab(df.a, df.b, margins=True, dropna=False)\n expected = pd.DataFrame([[1, 0, 1], [0, 1, 1], [1, 4, 6]])\n expected.index = Index([1.0, 2.0, 'All'], name='a')\n expected.columns = Index([3.0, 4.0, 'All'], name='b')\n tm.assert_frame_equal(actual, expected)\n\n a = np.array(['foo', 'foo', 'foo', 'bar',\n 'bar', 'foo', 'foo'], dtype=object)\n b = np.array(['one', 'one', 'two', 'one',\n 'two', np.nan, 'two'], dtype=object)\n c = np.array(['dull', 'dull', 'dull', 'dull',\n 'dull', 'shiny', 'shiny'], dtype=object)\n\n actual = pd.crosstab(a, [b, c], rownames=['a'],\n colnames=['b', 'c'], margins=True, dropna=False)\n m = MultiIndex.from_arrays([['one', 'one', 'two', 'two', 'All'],\n ['dull', 'shiny', 'dull', 'shiny', '']],\n names=['b', 'c'])\n expected = DataFrame([[1, 0, 1, 0, 2], [2, 0, 1, 1, 5],\n [3, 0, 2, 1, 7]], columns=m)\n expected.index = Index(['bar', 'foo', 'All'], name='a')\n tm.assert_frame_equal(actual, expected)\n\n actual = pd.crosstab([a, b], c, rownames=['a', 'b'],\n colnames=['c'], margins=True, dropna=False)\n m = MultiIndex.from_arrays([['bar', 'bar', 'foo', 'foo', 'All'],\n ['one', 'two', 'one', 'two', '']],\n names=['a', 'b'])\n expected = DataFrame([[1, 0, 1], [1, 0, 1], [2, 0, 2], [1, 1, 2],\n [5, 2, 7]], index=m)\n expected.columns = Index(['dull', 'shiny', 'All'], name='c')\n tm.assert_frame_equal(actual, expected)\n\n actual = pd.crosstab([a, b], c, rownames=['a', 'b'],\n colnames=['c'], margins=True, dropna=True)\n m = MultiIndex.from_arrays([['bar', 'bar', 'foo', 'foo', 'All'],\n ['one', 'two', 'one', 'two', '']],\n names=['a', 'b'])\n expected = DataFrame([[1, 0, 1], [1, 0, 1], [2, 0, 2], [1, 1, 2],\n [5, 1, 6]], index=m)\n expected.columns = Index(['dull', 'shiny', 'All'], name='c')\n tm.assert_frame_equal(actual, expected)\n\n def test_crosstab_normalize(self):\n # Issue 12578\n df = pd.DataFrame({'a': [1, 2, 2, 2, 2], 'b': [3, 3, 4, 4, 4],\n 'c': [1, 1, np.nan, 1, 1]})\n\n rindex = pd.Index([1, 2], name='a')\n cindex = pd.Index([3, 4], name='b')\n full_normal = pd.DataFrame([[0.2, 0], [0.2, 0.6]],\n index=rindex, columns=cindex)\n row_normal = pd.DataFrame([[1.0, 0], [0.25, 0.75]],\n index=rindex, columns=cindex)\n col_normal = pd.DataFrame([[0.5, 0], [0.5, 1.0]],\n index=rindex, columns=cindex)\n\n # Check all normalize args\n tm.assert_frame_equal(pd.crosstab(df.a, df.b, normalize='all'),\n full_normal)\n tm.assert_frame_equal(pd.crosstab(df.a, df.b, normalize=True),\n full_normal)\n tm.assert_frame_equal(pd.crosstab(df.a, df.b, normalize='index'),\n row_normal)\n tm.assert_frame_equal(\n pd.crosstab(df.a, df.b, normalize='columns').astype('f8'),\n col_normal)\n tm.assert_frame_equal(pd.crosstab(df.a, df.b, normalize=1),\n pd.crosstab(df.a, df.b, normalize='columns'))\n tm.assert_frame_equal(pd.crosstab(df.a, df.b, normalize=0),\n pd.crosstab(df.a, df.b, normalize='index'))\n\n row_normal_margins = pd.DataFrame([[1.0, 0],\n [0.25, 0.75],\n [0.4, 0.6]],\n index=pd.Index([1, 2, 'All'],\n name='a',\n dtype='object'),\n columns=pd.Index([3, 4], name='b',\n dtype='object'))\n col_normal_margins = pd.DataFrame([[0.5, 0, 0.2], [0.5, 1.0, 0.8]],\n 
index=pd.Index([1, 2], name='a',\n dtype='object'),\n columns=pd.Index([3, 4, 'All'],\n name='b',\n dtype='object'))\n\n all_normal_margins = pd.DataFrame([[0.2, 0, 0.2],\n [0.2, 0.6, 0.8],\n [0.4, 0.6, 1]],\n index=pd.Index([1, 2, 'All'],\n name='a',\n dtype='object'),\n columns=pd.Index([3, 4, 'All'],\n name='b',\n dtype='object'))\n tm.assert_frame_equal(pd.crosstab(df.a, df.b, normalize='index',\n margins=True), row_normal_margins)\n tm.assert_frame_equal(pd.crosstab(df.a, df.b, normalize='columns',\n margins=True).astype('f8'),\n col_normal_margins)\n tm.assert_frame_equal(pd.crosstab(df.a, df.b, normalize=True,\n margins=True), all_normal_margins)\n\n # Test arrays\n pd.crosstab([np.array([1, 1, 2, 2]), np.array([1, 2, 1, 2])],\n np.array([1, 2, 1, 2]))\n\n # Test with aggfunc\n norm_counts = pd.DataFrame([[0.25, 0, 0.25],\n [0.25, 0.5, 0.75],\n [0.5, 0.5, 1]],\n index=pd.Index([1, 2, 'All'],\n name='a',\n dtype='object'),\n columns=pd.Index([3, 4, 'All'],\n name='b'))\n test_case = pd.crosstab(df.a, df.b, df.c, aggfunc='count',\n normalize='all',\n margins=True)\n tm.assert_frame_equal(test_case, norm_counts)\n\n df = pd.DataFrame({'a': [1, 2, 2, 2, 2], 'b': [3, 3, 4, 4, 4],\n 'c': [0, 4, np.nan, 3, 3]})\n\n norm_sum = pd.DataFrame([[0, 0, 0.],\n [0.4, 0.6, 1],\n [0.4, 0.6, 1]],\n index=pd.Index([1, 2, 'All'],\n name='a',\n dtype='object'),\n columns=pd.Index([3, 4, 'All'],\n name='b',\n dtype='object'))\n test_case = pd.crosstab(df.a, df.b, df.c, aggfunc=np.sum,\n normalize='all',\n margins=True)\n tm.assert_frame_equal(test_case, norm_sum)\n\n def test_crosstab_with_empties(self):\n # Check handling of empties\n df = pd.DataFrame({'a': [1, 2, 2, 2, 2], 'b': [3, 3, 4, 4, 4],\n 'c': [np.nan, np.nan, np.nan, np.nan, np.nan]})\n\n empty = pd.DataFrame([[0.0, 0.0], [0.0, 0.0]],\n index=pd.Index([1, 2],\n name='a',\n dtype='int64'),\n columns=pd.Index([3, 4], name='b'))\n\n for i in [True, 'index', 'columns']:\n calculated = pd.crosstab(df.a, df.b, values=df.c, aggfunc='count',\n normalize=i)\n tm.assert_frame_equal(empty, calculated)\n\n nans = pd.DataFrame([[0.0, np.nan], [0.0, 0.0]],\n index=pd.Index([1, 2],\n name='a',\n dtype='int64'),\n columns=pd.Index([3, 4], name='b'))\n\n calculated = pd.crosstab(df.a, df.b, values=df.c, aggfunc='count',\n normalize=False)\n tm.assert_frame_equal(nans, calculated)\n\n def test_crosstab_errors(self):\n # Issue 12578\n\n df = pd.DataFrame({'a': [1, 2, 2, 2, 2], 'b': [3, 3, 4, 4, 4],\n 'c': [1, 1, np.nan, 1, 1]})\n\n error = 'values cannot be used without an aggfunc.'\n with tm.assert_raises_regex(ValueError, error):\n pd.crosstab(df.a, df.b, values=df.c)\n\n error = 'aggfunc cannot be used without values'\n with tm.assert_raises_regex(ValueError, error):\n pd.crosstab(df.a, df.b, aggfunc=np.mean)\n\n error = 'Not a valid normalize argument'\n with tm.assert_raises_regex(ValueError, error):\n pd.crosstab(df.a, df.b, normalize='42')\n\n with tm.assert_raises_regex(ValueError, error):\n pd.crosstab(df.a, df.b, normalize=42)\n\n error = 'Not a valid margins argument'\n with tm.assert_raises_regex(ValueError, error):\n pd.crosstab(df.a, df.b, normalize='all', margins=42)\n\n def test_crosstab_with_categorial_columns(self):\n # GH 8860\n df = pd.DataFrame({'MAKE': ['Honda', 'Acura', 'Tesla',\n 'Honda', 'Honda', 'Acura'],\n 'MODEL': ['Sedan', 'Sedan', 'Electric',\n 'Pickup', 'Sedan', 'Sedan']})\n categories = ['Sedan', 'Electric', 'Pickup']\n df['MODEL'] = (df['MODEL'].astype('category')\n .cat.set_categories(categories))\n result = 
pd.crosstab(df['MAKE'], df['MODEL'])\n\n expected_index = pd.Index(['Acura', 'Honda', 'Tesla'], name='MAKE')\n expected_columns = pd.CategoricalIndex(categories,\n categories=categories,\n ordered=False,\n name='MODEL')\n expected_data = [[2, 0, 0], [2, 0, 1], [0, 1, 0]]\n expected = pd.DataFrame(expected_data,\n index=expected_index,\n columns=expected_columns)\n tm.assert_frame_equal(result, expected)\n\n def test_crosstab_with_numpy_size(self):\n # GH 4003\n df = pd.DataFrame({'A': ['one', 'one', 'two', 'three'] * 6,\n 'B': ['A', 'B', 'C'] * 8,\n 'C': ['foo', 'foo', 'foo', 'bar', 'bar', 'bar'] * 4,\n 'D': np.random.randn(24),\n 'E': np.random.randn(24)})\n result = pd.crosstab(index=[df['A'], df['B']],\n columns=[df['C']],\n margins=True,\n aggfunc=np.size,\n values=df['D'])\n expected_index = pd.MultiIndex(levels=[['All', 'one', 'three', 'two'],\n ['', 'A', 'B', 'C']],\n labels=[[1, 1, 1, 2, 2, 2, 3, 3, 3, 0],\n [1, 2, 3, 1, 2, 3, 1, 2, 3, 0]],\n names=['A', 'B'])\n expected_column = pd.Index(['bar', 'foo', 'All'],\n dtype='object',\n name='C')\n expected_data = np.array([[2., 2., 4.],\n [2., 2., 4.],\n [2., 2., 4.],\n [2., np.nan, 2.],\n [np.nan, 2., 2.],\n [2., np.nan, 2.],\n [np.nan, 2., 2.],\n [2., np.nan, 2.],\n [np.nan, 2., 2.],\n [12., 12., 24.]])\n expected = pd.DataFrame(expected_data,\n index=expected_index,\n columns=expected_column)\n tm.assert_frame_equal(result, expected)\n\n def test_crosstab_dup_index_names(self):\n # GH 13279\n s = pd.Series(range(3), name='foo')\n\n result = pd.crosstab(s, s)\n expected_index = pd.Index(range(3), name='foo')\n expected = pd.DataFrame(np.eye(3, dtype=np.int64),\n index=expected_index,\n columns=expected_index)\n tm.assert_frame_equal(result, expected)\n\n @pytest.mark.parametrize(\"names\", [['a', ('b', 'c')],\n [('a', 'b'), 'c']])\n def test_crosstab_tuple_name(self, names):\n s1 = pd.Series(range(3), name=names[0])\n s2 = pd.Series(range(1, 4), name=names[1])\n\n mi = pd.MultiIndex.from_arrays([range(3), range(1, 4)], names=names)\n expected = pd.Series(1, index=mi).unstack(1, fill_value=0)\n\n result = pd.crosstab(s1, s2)\n tm.assert_frame_equal(result, expected)\n\n def test_crosstab_unsorted_order(self):\n df = pd.DataFrame({\"b\": [3, 1, 2], 'a': [5, 4, 6]},\n index=['C', 'A', 'B'])\n result = pd.crosstab(df.index, [df.b, df.a])\n e_idx = pd.Index(['A', 'B', 'C'], name='row_0')\n e_columns = pd.MultiIndex.from_tuples([(1, 4), (2, 6), (3, 5)],\n names=['b', 'a'])\n expected = pd.DataFrame([[1, 0, 0], [0, 1, 0], [0, 0, 1]],\n index=e_idx,\n columns=e_columns)\n tm.assert_frame_equal(result, expected)\n"
] | [
[
"pandas.tseries.frequencies.to_offset",
"pandas.core.common._all_not_none",
"numpy.linspace",
"pandas.core.dtypes.common.is_datetime64tz_dtype",
"pandas._libs.interval.IntervalTree",
"pandas.core.indexes.base.Index",
"numpy.concatenate",
"pandas._libs.interval.Interval",
"numpy.where",
"numpy.nextafter",
"pandas.compat.add_metaclass",
"pandas.core.config.get_option",
"pandas.core.dtypes.common.is_interval_dtype",
"numpy.arange",
"numpy.lexsort",
"pandas.core.common._not_none",
"pandas.core.dtypes.common.is_number",
"pandas.core.dtypes.common.is_float_dtype",
"pandas.core.dtypes.common.is_float",
"pandas.core.arrays.interval.IntervalArray",
"pandas.util._exceptions.rewrite_exception",
"pandas.core.indexes.base.default_pprint",
"pandas.core.dtypes.common.is_integer_dtype",
"pandas.core.dtypes.common.is_list_like",
"pandas.util._decorators.Appender",
"pandas.io.formats.format.IntervalArrayFormatter",
"pandas.core.common.maybe_box_datetimelike",
"pandas.core.indexes.base.ensure_index",
"numpy.timedelta64",
"pandas.core.common._any_none",
"pandas.core.dtypes.common.ensure_platform_int",
"numpy.array",
"pandas.core.arrays.interval.IntervalArray.from_tuples",
"pandas.core.arrays.interval.IntervalArray.from_arrays",
"pandas.core.dtypes.cast.maybe_downcast_to_dtype",
"pandas.core.common.count_not_none",
"pandas.core.common.is_bool_indexer",
"pandas.core.dtypes.common.is_scalar",
"pandas.core.dtypes.common.is_integer",
"pandas.core.dtypes.cast.find_common_type",
"pandas.core.indexes.multi.MultiIndex.from_arrays",
"pandas.core.dtypes.common.is_object_dtype",
"pandas.core.dtypes.common.is_datetime_or_timedelta_dtype",
"pandas.core.dtypes.missing.isna",
"pandas._libs.interval.IntervalMixin.__new__",
"pandas.core.arrays.interval.IntervalArray.from_breaks"
],
[
"pandas.compat.set_function_name",
"pandas.core.dtypes.common.is_list_like",
"pandas.errors.AbstractMethodError",
"pandas.core.algorithms._factorize_array",
"numpy.asarray",
"pandas.util._validators.validate_fillna_kwargs",
"pandas.compat.numpy.function.validate_argsort_with_ascending",
"pandas.api.types.is_array_like",
"pandas.core.ops._get_op_name",
"numpy.argsort",
"numpy.array"
],
[
"pandas.compat.signature",
"pandas.io.formats.printing.pprint_thing"
],
[
"numpy.asarray",
"pandas.api.types.is_list_like",
"pandas.api.extensions.take",
"numpy.concatenate"
],
[
"numpy.random.lognormal",
"pandas.core.reshape.pivot.pivot_table",
"pandas.Series",
"pandas.PeriodIndex",
"numpy.asarray",
"pandas.compat.product",
"pandas.MultiIndex.from_tuples",
"pandas.DataFrame",
"numpy.dtype",
"pandas.util.testing.assert_frame_equal",
"pandas.util.testing.assert_index_equal",
"numpy.random.randn",
"numpy.random.randint",
"pandas.crosstab",
"numpy.arange",
"numpy.eye",
"pandas.util.testing.assert_series_equal",
"pandas.DatetimeIndex",
"pandas.Index",
"pandas.Int64Index",
"pandas.core.reshape.pivot.crosstab",
"pandas.Categorical.from_codes",
"pandas.concat",
"pandas.MultiIndex",
"pandas.Categorical",
"pandas.date_range",
"pandas.pivot",
"numpy.array",
"pandas.pivot_table",
"pandas.CategoricalIndex",
"pandas.api.types.CategoricalDtype",
"pandas.util.testing.assert_raises_regex",
"pandas.Grouper",
"pandas.MultiIndex.from_arrays",
"pandas.Period",
"pandas.Timestamp",
"numpy.empty",
"pandas.compat.range"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"0.24",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.24",
"0.23",
"0.21",
"0.20"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.24"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
charliezjw/Neural-Signal-Decoder | [
"fb0df09ba0314724c7c90141bd47cc8fb0201b7a"
] | [
"try.py"
] | [
"import numpy as np\nimport tensorflow as tf\n\n# a = tf.placeholder(tf.int32, [None, 3])\n#\n# b = tf.convert_to_tensor(tf.argmax(tf.bincount(a[0])))\n# b = tf.stack([b, tf.argmax(tf.bincount(a[1]))], 0)\n# for i in range(2, 5):\n# max_indx = tf.argmax(tf.bincount(a[i]))\n# b = tf.concat([b, [max_indx]], 0)\n#\n# with tf.Session() as sess:\n# t1 = np.asarray([[1, 1, 0], [2, 4, 4], [6, 6, 6], [5, 5, 5], [2, 7, 7]])\n# t2, t3 = sess.run([b, max_indx], feed_dict={a: t1})\n# print(t2)\n# print(t3)\na = np.asarray(np.asarray([[1, 1, 0], [2, 4, 4], [6, 6, 6], [5, 5, 5], [2, 7, 7]]))\nb = np.zeros(a.shape[0])\nc = np.asarray([1, 4, 6, 7, 9])\n\nfor i in range(a.shape[0]):\n b[i] = np.argmax(np.bincount(a[i]))\n\nprint(np.mean(np.equal(b, c)))"
] | [
[
"numpy.asarray",
"numpy.zeros",
"numpy.bincount",
"numpy.equal"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
lcintron/WhoopClient | [
"46ccc6c3e3b98f4b6c82cf8938056d72a22bd6b6"
] | [
"WhoopClient.py"
] | [
"import requests\nimport pandas as pd\nimport numpy as np\nimport configparser\nfrom datetime import datetime\nfrom dateutil import relativedelta, parser, rrule\nfrom dateutil.rrule import WEEKLY\n\n\nclass WhoopClient:\n '''A class to allow a user to login and store their authorization code,\n then perform pulls using the code in order to access different types of data'''\n def __init__(self,\n auth_code=None,\n whoop_id=None,\n current_datetime=datetime.utcnow()):\n self.auth_code = auth_code\n self.whoop_id = whoop_id\n self.current_datetime = current_datetime\n self.start_datetime = None\n self.all_data = None\n self.all_activities = None\n self.sport_dict = None\n self.all_sleep = None\n self.all_sleep_events = None\n\n def reset(self):\n self.auth_code = None\n self.whoop_id = None\n self.current_datetime = datetime.utcnow()\n self.start_datetime = None\n self.all_data = None\n self.all_activities = None\n self.sport_dict = None\n self.all_sleep = None\n self.all_sleep_events = None\n\n def pull_api(self, url, df=False):\n auth_code = self.auth_code\n headers = {'authorization': auth_code}\n pull = requests.get(url, headers=headers)\n if pull.status_code == 200 and len(pull.content) > 1:\n if df:\n d = pd.json_normalize(pull.json())\n return d\n else:\n return pull.json()\n else:\n return \"no response\"\n\n def pull_sleep_main(self, sleep_id):\n athlete_id = self.whoop_id\n sleep = self.pull_api(\n 'https://api-7.whoop.com/users/{}/sleeps/{}'.format(\n athlete_id, sleep_id))\n main_df = pd.json_normalize(sleep)\n return main_df\n\n def pull_sleep_events(self, sleep_id):\n athlete_id = self.whoop_id\n sleep = self.pull_api(\n 'https://api-7.whoop.com/users/{}/sleeps/{}'.format(\n athlete_id, sleep_id))\n events_df = pd.json_normalize(sleep['events'])\n events_df['id'] = sleep_id\n return events_df\n\n def get_authorization(self, user_ini):\n '''\n Function to get the authorization token and user id.\n This must be completed before a user can query the api\n '''\n\n config = configparser.ConfigParser()\n config.read(user_ini)\n username = config['whoop']['username']\n password = config['whoop']['password']\n\n headers = {\n \"username\": username,\n \"password\": password,\n \"grant_type\": \"password\",\n \"issueRefresh\": False\n }\n auth = requests.post(\"https://api-7.whoop.com/oauth/token\",\n json=headers)\n\n if auth.status_code == 200:\n content = auth.json()\n user_id = content['user']['id']\n token = content['access_token']\n start_time = content['user']['profile']['createdAt']\n self.whoop_id = user_id\n self.auth_code = 'bearer ' + token\n self.start_datetime = start_time\n print(\"Whoop: Authentication successful\")\n\n else:\n print(\n \"Authentication failed - please double check your credentials\")\n\n def get_keydata_all(self):\n '''\n This function returns a dataframe of WHOOP metrics for each day of WHOOP membership.\n In the resulting dataframe, each day is a row and contains strain, recovery, and sleep information\n '''\n\n if self.start_datetime:\n if self.all_data is not None:\n ## All data already pulled\n return self.all_data\n else:\n start_date = parser.isoparse(\n self.start_datetime).replace(tzinfo=None)\n end_time = 'T23:59:59.999Z'\n start_time = 'T00:00:00.000Z'\n intervals = rrule.rrule(freq=WEEKLY,\n interval=1,\n until=self.current_datetime,\n dtstart=start_date)\n date_range = [[\n d.strftime('%Y-%m-%d') + start_time,\n (d +\n relativedelta.relativedelta(weeks=1)).strftime('%Y-%m-%d')\n + end_time\n ] for d in intervals]\n all_data = 
pd.DataFrame()\n for dates in date_range:\n cycle_url = 'https://api-7.whoop.com/users/{}/cycles?end={}&start={}'.format(\n self.whoop_id, dates[1], dates[0])\n data = self.pull_api(cycle_url, df=True)\n all_data = pd.concat([all_data, data])\n all_data.reset_index(drop=True, inplace=True)\n\n ## fixing the day column so it's not a list\n all_data['days'] = all_data['days'].map(lambda d: d[0])\n all_data.rename(columns={\"days\": 'day'}, inplace=True)\n\n ## Putting all time into minutes instead of milliseconds\n sleep_cols = [\n 'qualityDuration', 'needBreakdown.baseline',\n 'needBreakdown.debt', 'needBreakdown.naps',\n 'needBreakdown.strain', 'needBreakdown.total'\n ]\n for sleep_col in sleep_cols:\n all_data['sleep.' + sleep_col] = all_data[\n 'sleep.' + sleep_col].astype(float).apply(\n lambda x: np.nan if np.isnan(x) else x / 60000)\n\n ## Making nap variable\n all_data['nap_duration'] = all_data['sleep.naps'].apply(\n lambda x: x[0]['qualityDuration'] / 60000\n if len(x) == 1 else (sum([\n y['qualityDuration'] for y in x\n if y['qualityDuration'] is not None\n ]) / 60000 if len(x) > 1 else 0))\n all_data.drop(['sleep.naps'], axis=1, inplace=True)\n ## dropping duplicates subsetting because of list columns\n all_data.drop_duplicates(subset=['day', 'sleep.id'],\n inplace=True)\n\n self.all_data = all_data\n return all_data\n else:\n print(\"Please run the authorization function first\")\n\n def get_activities_all(self):\n '''\n Activity data is pulled through the get_keydata functions so if the data pull is present, this function\n just transforms the activity column into a dataframe of activities, where each activity is a row.\n If it has not been pulled, this function runs the key data function then returns the activity dataframe'''\n\n if self.sport_dict:\n sport_dict = self.sport_dict\n else:\n sports = self.pull_api('https://api-7.whoop.com/sports')\n sport_dict = {sport['id']: sport['name'] for sport in sports}\n self.sport_dict = self.sport_dict\n\n if self.start_datetime:\n ## process activity data\n\n if self.all_data is not None:\n ## use existing\n data = self.all_data\n else:\n ## pull all data to process activities\n data = self.get_keydata_all()\n ## now process activities data\n act_data = pd.json_normalize(\n data[data['strain.workouts'].apply(len) > 0]\n ['strain.workouts'].apply(lambda x: x[0]))\n act_data[['during.upper', 'during.lower'\n ]] = act_data[['during.upper',\n 'during.lower']].apply(pd.to_datetime)\n act_data['total_minutes'] = act_data.apply(\n lambda x:\n (x['during.upper'] - x['during.lower']).total_seconds() / 60.0,\n axis=1)\n for z in range(0, 6):\n act_data['zone{}_minutes'.format(\n z + 1)] = act_data['zones'].apply(lambda x: x[z] / 60000.)\n act_data['sport_name'] = act_data.sportId.apply(\n lambda x: sport_dict[x])\n\n act_data['day'] = act_data['during.lower'].dt.strftime('%Y-%m-%d')\n act_data.drop(['zones', 'during.bounds'], axis=1, inplace=True)\n act_data.drop_duplicates(inplace=True)\n self.all_activities = act_data\n return act_data\n else:\n print(\"Whoop: Please run the authorization function first\")\n\n def get_sleep_all(self):\n '''\n This function returns all sleep metrics in a data frame, for the duration of user's WHOOP membership.\n Each row in the data frame represents one night of sleep\n '''\n if self.auth_code:\n if self.all_data is not None:\n ## use existing\n data = self.all_data\n else:\n ## pull timeframe data\n data = self.get_keydata_all()\n\n ## getting all the sleep ids\n if self.all_sleep is not None:\n ## All sleep 
data already pulled\n return self.all_sleep\n else:\n sleep_ids = data['sleep.id'].values.tolist()\n sleep_list = [int(x) for x in sleep_ids if pd.isna(x) == False]\n all_sleep = pd.DataFrame()\n for s in sleep_list:\n m = self.pull_sleep_main(s)\n all_sleep = pd.concat([all_sleep, m])\n\n ## Cleaning sleep data\n sleep_update = [\n 'qualityDuration', 'latency', 'debtPre', 'debtPost',\n 'needFromStrain', 'sleepNeed', 'habitualSleepNeed',\n 'timeInBed', 'lightSleepDuration', 'slowWaveSleepDuration',\n 'remSleepDuration', 'wakeDuration', 'arousalTime',\n 'noDataDuration', 'creditFromNaps', 'projectedSleep'\n ]\n\n for col in sleep_update:\n all_sleep[col] = all_sleep[col].astype(float).apply(\n lambda x: np.nan if np.isnan(x) else x / 60000)\n\n all_sleep.drop(['during.bounds'], axis=1, inplace=True)\n self.all_sleep = all_sleep.copy(deep=True)\n all_sleep.drop(['events'], axis=1, inplace=True)\n return all_sleep\n else:\n print(\"Whoop: Please run the authorization function first\")\n\n def get_sleep_events_all(self):\n '''\n This function returns all sleep events in a data frame, for the duration of user's WHOOP membership.\n Each row in the data frame represents an individual sleep event within an individual night of sleep.\n Sleep events can be joined against the sleep or main datasets by sleep id.\n All sleep times are returned in minutes.\n '''\n if self.auth_code:\n if self.all_data is not None:\n ## use existing\n data = self.all_data\n else:\n ## pull timeframe data\n data = self.get_keydata_all()\n\n ## getting all the sleep ids\n if self.all_sleep_events is not None:\n ## All sleep data already pulled\n return self.all_sleep_events\n else:\n if self.all_sleep is not None:\n sleep_events = self.all_sleep[['activityId', 'events']]\n all_sleep_events = pd.concat([\n pd.concat([\n pd.json_normalize(events),\n pd.DataFrame({'id': len(events) * [sleep]})\n ],\n axis=1) for events, sleep in\n zip(sleep_events['events'], sleep_events['activityId'])\n ])\n else:\n sleep_ids = data['sleep.id'].values.tolist()\n sleep_list = [\n int(x) for x in sleep_ids if pd.isna(x) == False\n ]\n all_sleep_events = pd.DataFrame()\n for s in sleep_list:\n events = self.pull_sleep_events(s)\n all_sleep_events = pd.concat(\n [all_sleep_events, events])\n\n ## Cleaning sleep events data\n all_sleep_events['during.lower'] = pd.to_datetime(\n all_sleep_events['during.lower'])\n all_sleep_events['during.upper'] = pd.to_datetime(\n all_sleep_events['during.upper'])\n all_sleep_events.drop(['during.bounds'], axis=1, inplace=True)\n all_sleep_events['total_minutes'] = all_sleep_events.apply(\n lambda x: (x['during.upper'] - x['during.lower']\n ).total_seconds() / 60.0,\n axis=1)\n\n self.all_sleep_events = all_sleep_events\n return all_sleep_events\n else:\n print(\"Whoop: Please run the authorization function first\")\n\n #returnTYpe = df, json\n def get_hr_all(self, returnType=None):\n '''\n This function will pull every heart rate measurement recorded for the life of WHOOP membership.\n The default return for this function is a list of lists, where each \"row\" contains the date, time, and hr value.\n The measurements are spaced out every ~6 seconds on average.\n\n To return a dataframe, set df=True. This will take a bit longer, but will return a data frame.\n\n NOTE: This api pull takes about 6 seconds per week of data ... 
or 1 minutes for 10 weeks of data,\n so be careful when you pull, it may take a while.\n '''\n if self.start_datetime:\n athlete_id = self.whoop_id\n start_date = parser.isoparse(\n self.start_datetime).replace(tzinfo=None)\n end_time = 'T23:59:59.999Z'\n start_time = 'T00:00:00.000Z'\n intervals = rrule.rrule(freq=WEEKLY,\n interval=1,\n until=self.current_datetime,\n dtstart=start_date)\n date_range = [[\n d.strftime('%Y-%m-%d') + start_time,\n (d + relativedelta.relativedelta(weeks=1)).strftime('%Y-%m-%d')\n + end_time\n ] for d in intervals]\n\n hr_list = []\n for dates in date_range:\n start = dates[0]\n end = dates[1]\n ul = '''https://api-7.whoop.com/users/{}/metrics/heart_rate?end={}&order=t&start={}&step=6'''.format(\n athlete_id, end, start)\n hr_vals = self.pull_api(ul)['values']\n hr_values = [[\n datetime.utcfromtimestamp(h['time'] / 1e3).date(),\n datetime.utcfromtimestamp(h['time'] / 1e3).time(),\n h['data']\n ] for h in hr_vals]\n hr_list.extend(hr_values)\n if returnType == \"df\":\n hr_df = pd.DataFrame(hr_list)\n hr_df.columns = ['date', 'time', 'hr']\n return hr_df\n\n elif returnType == \"json\":\n hr_json = [{\n 'datetime': str(h[0]) + 'T' + str(h[1]),\n 'hr': h[2]\n } for h in hr_list]\n return hr_json\n else:\n return hr_list\n else:\n print(\"Please run the authorization function first\")\n\n def get_keydata_timeframe(self,\n start,\n end=datetime.strftime(datetime.utcnow(),\n \"%Y-%m-%d\")):\n '''\n This function returns a dataframe of WHOOP metrics for each day in a specified time period.\n To use this function, provide a start and end date in string format as follows \"YYYY-MM-DD\".\n\n If no end date is specified, it will default to today's date.\n\n In the resulting dataframe, each day is a row and contains strain, recovery, and sleep information\n '''\n\n st = datetime.strptime(start, '%Y-%m-%d')\n e = datetime.strptime(end, '%Y-%m-%d')\n if st > e:\n if e > datetime.today():\n print(\"Please enter an end date earlier than tomorrow\")\n else:\n print(\n \"Please enter a start date that is earlier than your end date\"\n )\n else:\n if self.auth_code:\n end_time = 'T23:59:59.999Z'\n start_time = 'T00:00:00.000Z'\n intervals = rrule.rrule(freq=WEEKLY,\n interval=1,\n until=e,\n dtstart=st)\n date_range = [[\n d.strftime('%Y-%m-%d') + start_time,\n (d +\n relativedelta.relativedelta(weeks=1)).strftime('%Y-%m-%d')\n + end_time\n ] for d in intervals if d <= e]\n time_data = pd.DataFrame()\n for dates in date_range:\n cycle_url = 'https://api-7.whoop.com/users/{}/cycles?end={}&start={}'.format(\n self.whoop_id, dates[1], dates[0])\n data = self.pull_api(cycle_url, df=True)\n time_data = pd.concat([time_data, data])\n time_data.reset_index(drop=True, inplace=True)\n\n ## fixing the day column so it's not a list\n time_data['days'] = time_data['days'].map(lambda d: d[0])\n time_data.rename(columns={\"days\": 'day'}, inplace=True)\n\n ## Putting all time into minutes instead of milliseconds\n sleep_cols = [\n 'qualityDuration', 'needBreakdown.baseline',\n 'needBreakdown.debt', 'needBreakdown.naps',\n 'needBreakdown.strain', 'needBreakdown.total'\n ]\n for sleep_col in sleep_cols:\n time_data['sleep.' + sleep_col] = time_data[\n 'sleep.' 
+ sleep_col].astype(float).apply(\n lambda x: np.nan if np.isnan(x) else x / 60000)\n\n ## Making nap variable\n time_data['nap_duration'] = time_data['sleep.naps'].apply(\n lambda x: x[0]['qualityDuration'] / 60000\n if len(x) == 1 else (sum([\n y['qualityDuration'] for y in x\n if y['qualityDuration'] is not None\n ]) / 60000 if len(x) > 1 else 0))\n time_data.drop(['sleep.naps'], axis=1, inplace=True)\n\n ## removing duplicates\n time_data.drop_duplicates(subset=['day', 'sleep.id'],\n inplace=True)\n\n return time_data\n else:\n print(\"Whoop: Please run the authorization function first\")\n\n def get_activities_timeframe(self,\n start,\n end=datetime.strftime(datetime.utcnow(),\n \"%Y-%m-%d\")):\n '''\n Activity data is pulled through the get_keydata functions so if the data pull is present, this function\n just transforms the activity column into a dataframe of activities, where each activity is a row.\n If it has not been pulled, this function runs the key data function then returns the activity dataframe\n\n If no end date is specified, it will default to today's date.\n '''\n\n st = datetime.strptime(start, '%Y-%m-%d')\n e = datetime.strptime(end, '%Y-%m-%d')\n if st > e:\n if e > datetime.today():\n print(\"Please enter an end date earlier than tomorrow\")\n else:\n print(\n \"Please enter a start date that is earlier than your end date\"\n )\n else:\n\n if self.auth_code:\n\n if self.sport_dict:\n sport_dict = self.sport_dict\n else:\n sports = self.pull_api('https://api-7.whoop.com/sports')\n sport_dict = {\n sport['id']: sport['name']\n for sport in sports\n }\n self.sport_dict = self.sport_dict\n\n ## process activity data\n if self.all_data is not None:\n ## use existing\n data = self.all_data\n data = data[(data.day >= start)\n & (data.day <= end)].copy(deep=True)\n else:\n ## pull timeframe data\n data = self.get_keydata_timeframe(start, end)\n ## now process activities data\n act_data = pd.json_normalize(\n data[data['strain.workouts'].apply(len) > 0]\n ['strain.workouts'].apply(lambda x: x[0]))\n act_data[['during.upper', 'during.lower'\n ]] = act_data[['during.upper',\n 'during.lower']].apply(pd.to_datetime)\n act_data['total_minutes'] = act_data.apply(\n lambda x: (x['during.upper'] - x['during.lower']\n ).total_seconds() / 60.0,\n axis=1)\n for z in range(0, 6):\n act_data['zone{}_minutes'.format(\n z +\n 1)] = act_data['zones'].apply(lambda x: x[z] / 60000.)\n act_data['sport_name'] = act_data.sportId.apply(\n lambda x: sport_dict[x])\n\n act_data['day'] = act_data['during.lower'].dt.strftime(\n '%Y-%m-%d')\n act_data.drop(['zones', 'during.bounds'], axis=1, inplace=True)\n act_data.drop_duplicates(inplace=True)\n self.all_activities = act_data\n return act_data\n else:\n print(\"Whoop: Please run the authorization function first\")\n\n def get_sleep_timeframe(self,\n start,\n end=datetime.strftime(datetime.utcnow(),\n \"%Y-%m-%d\")):\n '''\n This function returns sleep metrics in a data frame, for timeframe specified by the user.\n Each row in the data frame represents one night of sleep.\n\n If no end date is specified, it will default to today's date.\n\n All sleep times are returned in minutes.\n '''\n\n st = datetime.strptime(start, '%Y-%m-%d')\n e = datetime.strptime(end, '%Y-%m-%d')\n if st > e:\n if e > datetime.today():\n print(\"Whoop: Please enter an end date earlier than tomorrow\")\n else:\n print(\n \"Whoop: Please enter a start date that is earlier than your end date\"\n )\n else:\n if self.auth_code:\n if self.all_data is not None:\n ## use existing\n 
data = self.all_data\n data = data[(data.day >= start)\n & (data.day <= end)].copy(deep=True)\n else:\n ## pull timeframe data\n data = self.get_keydata_timeframe(start, end)\n\n ## getting all the sleep ids\n sleep_ids = data['sleep.id'].values.tolist()\n sleep_list = [int(x) for x in sleep_ids if pd.isna(x) == False]\n if self.all_sleep is not None:\n ## All sleep data already pulled so just filter\n all_sleep = self.all_sleep\n time_sleep = all_sleep[all_sleep.activityId.isin(\n sleep_list)]\n return time_sleep\n\n else:\n time_sleep = pd.DataFrame()\n for s in sleep_list:\n m = self.pull_sleep_main(s)\n time_sleep = pd.concat([time_sleep, m])\n\n ## Cleaning sleep data\n sleep_update = [\n 'qualityDuration', 'latency', 'debtPre', 'debtPost',\n 'needFromStrain', 'sleepNeed', 'habitualSleepNeed',\n 'timeInBed', 'lightSleepDuration',\n 'slowWaveSleepDuration', 'remSleepDuration',\n 'wakeDuration', 'arousalTime', 'noDataDuration',\n 'creditFromNaps', 'projectedSleep'\n ]\n\n for col in sleep_update:\n time_sleep[col] = time_sleep[col].astype(float).apply(\n lambda x: np.nan if np.isnan(x) else x / 60000)\n\n time_sleep.drop(['during.bounds', 'events'],\n axis=1,\n inplace=True)\n\n return time_sleep\n else:\n print(\"Whoop: Please run the authorization function first\")\n\n def get_sleep_events_timeframe(self,\n start,\n end=datetime.strftime(\n datetime.utcnow(), \"%Y-%m-%d\")):\n '''\n This function returns sleep events in a data frame, for the time frame specified by the user.\n Each row in the data frame represents an individual sleep event within an individual night of sleep.\n Sleep events can be joined against the sleep or main datasets by sleep id.\n\n If no end date is specified, it will default to today's date.\n '''\n\n st = datetime.strptime(start, '%Y-%m-%d')\n e = datetime.strptime(end, '%Y-%m-%d')\n if st > e:\n if e > datetime.today():\n print(\"Whoop: Please enter an end date earlier than tomorrow\")\n else:\n print(\n \"Whoop: Please enter a start date that is earlier than your end date\"\n )\n else:\n\n if self.auth_code:\n if self.all_data is not None:\n ## use existing\n data = self.all_data\n data = data[(data.day >= start)\n & (data.day <= end)].copy(deep=True)\n else:\n ## pull timeframe data\n data = self.get_keydata_timeframe(start, end)\n\n ## getting all the sleep ids\n sleep_ids = data['sleep.id'].values.tolist()\n sleep_list = [int(x) for x in sleep_ids if pd.isna(x) == False]\n if self.all_sleep_events is not None:\n ## All sleep data already pulled so just filter\n all_sleep_events = self.all_sleep_events\n time_sleep_events = all_sleep_events[\n all_sleep_events.id.isin(sleep_list)]\n return time_sleep_events\n\n else:\n if self.all_sleep is not None:\n sleep_events = self.all_sleep[['activityId', 'events']]\n time_sleep = sleep_events[sleep_events.id.isin(\n sleep_list)]\n time_sleep_events = pd.concat([\n pd.concat([\n pd.json_normalize(events),\n pd.DataFrame({'id': len(events) * [sleep]})\n ],\n axis=1) for events, sleep in\n zip(time_sleep['events'], time_sleep['activityId'])\n ])\n else:\n time_sleep_events = pd.DataFrame()\n for s in sleep_list:\n events = self.pull_sleep_events(s)\n time_sleep_events = pd.concat(\n [time_sleep_events, events])\n\n ## Cleaning sleep events data\n time_sleep_events['during.lower'] = pd.to_datetime(\n time_sleep_events['during.lower'])\n time_sleep_events['during.upper'] = pd.to_datetime(\n time_sleep_events['during.upper'])\n time_sleep_events.drop(['during.bounds'],\n axis=1,\n inplace=True)\n time_sleep_events[\n 
'total_minutes'] = time_sleep_events.apply(\n lambda x: (x['during.upper'] - x['during.lower']\n ).total_seconds() / 60.0,\n axis=1)\n\n return time_sleep_events\n else:\n print(\"Whoop: Please run the authorization function first\")\n\n def get_hr_timeframe(self,\n start,\n end=datetime.strftime(datetime.utcnow(), \"%Y-%m-%d\"),\n returnType=None):\n '''\n This function will pull every heart rate measurement recorded, for the time frame specified by the user.\n The default return for this function is a list of lists, where each \"row\" contains the date, time, and hr value.\n The measurements are spaced out every ~6 seconds on average.\n\n To return a dataframe, set df=True. This will take a bit longer, but will return a data frame.\n\n If no end date is specified, it will default to today's date.\n\n NOTE: This api pull takes about 6 seconds per week of data ... or 1 minutes for 10 weeks of data,\n so be careful when you pull, it may take a while.\n '''\n\n st = datetime.strptime(start, '%Y-%m-%d')\n e = datetime.strptime(end, '%Y-%m-%d')\n if st > e:\n if e > datetime.today():\n print(\"Whoop: Please enter an end date earlier than tomorrow\")\n else:\n print(\n \"Whoop: Please enter a start date that is earlier than your end date\"\n )\n else:\n\n if self.start_datetime:\n athlete_id = self.whoop_id\n start_date = parser.isoparse(\n self.start_datetime).replace(tzinfo=None)\n end_time = 'T23:59:59.999Z'\n start_time = 'T00:00:00.000Z'\n ## using the st and e since it needs the datetime formatted date\n intervals = rrule.rrule(freq=WEEKLY,\n interval=1,\n until=e,\n dtstart=st)\n date_range = [[\n d.strftime('%Y-%m-%d') + start_time,\n (d +\n relativedelta.relativedelta(weeks=1)).strftime('%Y-%m-%d')\n + end_time\n ] for d in intervals]\n\n hr_list = []\n for dates in date_range:\n start = dates[0]\n end = dates[1]\n ul = '''https://api-7.whoop.com/users/{}/metrics/heart_rate?end={}&order=t&start={}&step=6'''.format(\n athlete_id, end, start)\n hr_vals = self.pull_api(ul)['values']\n hr_values = [[\n str(datetime.utcfromtimestamp(h['time'] / 1e3).date()),\n str(datetime.utcfromtimestamp(h['time'] / 1e3).time()),\n h['data']\n ] for h in hr_vals]\n hr_list.extend(hr_values)\n if returnType == \"df\":\n hr_df = pd.DataFrame(hr_list)\n hr_df.columns = ['date', 'time', 'hr']\n return hr_df\n elif returnType == \"json\":\n hr_json = [{\n 'datetime': str(h[0]) + 'T' + str(h[1]),\n 'hr': h[2]\n } for h in hr_list]\n return hr_json\n else:\n return hr_list\n else:\n print(\"Whoop: Please run the authorization function first\")\n"
] | [
[
"pandas.concat",
"pandas.to_datetime",
"numpy.isnan",
"pandas.json_normalize",
"pandas.DataFrame",
"pandas.isna"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0"
],
"scipy": [],
"tensorflow": []
}
] |
swidi/poemo-generation | [
"3a349ac3a6fc3e82b24410013bced60a24c2d8bf",
"3a349ac3a6fc3e82b24410013bced60a24c2d8bf",
"3a349ac3a6fc3e82b24410013bced60a24c2d8bf"
] | [
"third_party/texar-0.2.0/examples/bert/utils/data_utils.py",
"train_emosup.py",
"split.py"
] | [
"\"\"\"\nThis is the Data Loading Pipeline for Sentence Classifier Task from\nhttps://github.com/google-research/bert/blob/master/run_classifier.py\n\"\"\"\n# coding=utf-8\n# Copyright 2018 The Google AI Language Team Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\nimport csv\nimport collections\nimport sys\nsys.path.append(os.path.dirname(__file__))\nimport tokenization\nimport tensorflow as tf\n\nclass InputExample():\n \"\"\"A single training/test example for simple sequence classification.\"\"\"\n\n def __init__(self, guid, text_a, text_b=None, label=None):\n \"\"\"Constructs a InputExample.\n Args:\n guid: Unique id for the example.\n text_a: string. The untokenized text of the first sequence.\n For single sequence tasks, only this sequence must be specified.\n text_b: (Optional) string. The untokenized text of the second\n sequence. Only must be specified for sequence pair tasks.\n label: (Optional) string. The label of the example. This should be\n specified for train and dev examples, but not for test examples.\n \"\"\"\n self.guid = guid\n self.text_a = text_a\n self.text_b = text_b\n self.label = label\n\n\nclass InputFeatures():\n \"\"\"A single set of features of data.\"\"\"\n\n def __init__(self, input_ids, input_mask, segment_ids, label_id):\n self.input_ids = input_ids\n self.input_mask = input_mask\n self.segment_ids = segment_ids\n self.label_id = label_id\n\n\nclass DataProcessor(object):\n \"\"\"Base class for data converters for sequence classification data sets.\"\"\"\n\n def get_train_examples(self, data_dir):\n \"\"\"Gets a collection of `InputExample`s for the train set.\"\"\"\n raise NotImplementedError()\n\n def get_dev_examples(self, data_dir):\n \"\"\"Gets a collection of `InputExample`s for the dev set.\"\"\"\n raise NotImplementedError()\n\n def get_test_examples(self, data_dir):\n \"\"\"Gets a collection of `InputExample`s for prediction.\"\"\"\n raise NotImplementedError()\n\n def get_labels(self):\n \"\"\"Gets the list of labels for this data set.\"\"\"\n raise NotImplementedError()\n\n @classmethod\n def _read_tsv(cls, input_file, quotechar=None):\n \"\"\"Reads a tab separated value file.\"\"\"\n with tf.gfile.Open(input_file, \"r\") as f:\n reader = csv.reader(f, delimiter=\"\\t\", quotechar=quotechar)\n lines = []\n for line in reader:\n lines.append(line)\n return lines\n\nclass SSTProcessor(DataProcessor):\n \"\"\"Processor for the MRPC data set (GLUE version).\"\"\"\n\n def get_train_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\n return self._create_examples(\n self._read_tsv(os.path.join(data_dir, \"train.tsv\")), \"train\")\n\n def get_dev_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\n return self._create_examples(\n self._read_tsv(os.path.join(data_dir, \"dev.tsv\")), \"dev\")\n\n def get_test_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\n return self._create_examples(\n self._read_tsv(os.path.join(data_dir, \"test.tsv\")), \"test\")\n\n def get_labels(self):\n \"\"\"See base class.\"\"\"\n 
return [\"0\", \"1\"]\n\n def _create_examples(self, lines, set_type):\n \"\"\"Creates examples for the training and dev sets.\"\"\"\n examples = []\n if set_type == 'train' or set_type == 'dev':\n for (i, line) in enumerate(lines):\n if i == 0:\n continue\n guid = \"%s-%s\" % (set_type, i)\n text_a = tokenization.convert_to_unicode(line[0])\n # Single sentence classification, text_b doesn't exist\n text_b = None\n label = tokenization.convert_to_unicode(line[1])\n examples.append(InputExample(guid=guid, text_a=text_a,\n text_b=text_b, label=label))\n if set_type == 'test':\n for (i, line) in enumerate(lines):\n if i == 0:\n continue\n guid = \"%s-%s\" % (set_type, i)\n text_a = tokenization.convert_to_unicode(line[1])\n # Single sentence classification, text_b doesn't exist\n text_b = None\n label = '0' # arbitrary set as 0\n examples.append(InputExample(guid=guid, text_a=text_a,\n text_b=text_b, label=label))\n return examples\n\nclass XnliProcessor(DataProcessor):\n \"\"\"Processor for the XNLI data set.\"\"\"\n\n def __init__(self):\n self.language = \"zh\"\n\n def get_train_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\n lines = self._read_tsv(\n os.path.join(data_dir, \"multinli\",\n \"multinli.train.%s.tsv\" % self.language))\n examples = []\n for (i, line) in enumerate(lines):\n if i == 0:\n continue\n guid = \"train-%d\" % (i)\n text_a = tokenization.convert_to_unicode(line[0])\n text_b = tokenization.convert_to_unicode(line[1])\n label = tokenization.convert_to_unicode(line[2])\n if label == tokenization.convert_to_unicode(\"contradictory\"):\n label = tokenization.convert_to_unicode(\"contradiction\")\n examples.append(InputExample(guid=guid, text_a=text_a,\n text_b=text_b, label=label))\n return examples\n\n def get_dev_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\n lines = self._read_tsv(os.path.join(data_dir, \"xnli.dev.tsv\"))\n examples = []\n for (i, line) in enumerate(lines):\n if i == 0:\n continue\n guid = \"dev-%d\" % (i)\n language = tokenization.convert_to_unicode(line[0])\n if language != tokenization.convert_to_unicode(self.language):\n continue\n text_a = tokenization.convert_to_unicode(line[6])\n text_b = tokenization.convert_to_unicode(line[7])\n label = tokenization.convert_to_unicode(line[1])\n examples.append(InputExample(guid=guid, text_a=text_a,\n text_b=text_b, label=label))\n return examples\n\n def get_labels(self):\n \"\"\"See base class.\"\"\"\n return [\"contradiction\", \"entailment\", \"neutral\"]\n\nclass MnliProcessor(DataProcessor):\n \"\"\"Processor for the MultiNLI data set (GLUE version).\"\"\"\n\n def get_train_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\n return self._create_examples(\n self._read_tsv(os.path.join(data_dir, \"train.tsv\")), \"train\")\n\n def get_dev_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\n return self._create_examples(\n self._read_tsv(os.path.join(data_dir, \"dev_matched.tsv\")),\n \"dev_matched\")\n\n def get_test_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\n return self._create_examples(\n self._read_tsv(os.path.join(data_dir, \"test_matched.tsv\")),\n \"test\")\n\n def get_labels(self):\n \"\"\"See base class.\"\"\"\n return [\"contradiction\", \"entailment\", \"neutral\"]\n\n def _create_examples(self, lines, set_type):\n \"\"\"Creates examples for the training and dev sets.\"\"\"\n examples = []\n for (i, line) in enumerate(lines):\n if i == 0:\n continue\n guid = \"%s-%s\" % (set_type,\n tokenization.convert_to_unicode(line[0]))\n text_a = 
tokenization.convert_to_unicode(line[8])\n text_b = tokenization.convert_to_unicode(line[9])\n if set_type == \"test\":\n label = \"contradiction\"\n else:\n label = tokenization.convert_to_unicode(line[-1])\n examples.append(InputExample(guid=guid, text_a=text_a,\n text_b=text_b, label=label))\n return examples\n\nclass MrpcProcessor(DataProcessor):\n \"\"\"Processor for the MRPC data set (GLUE version).\"\"\"\n\n def get_train_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\n return self._create_examples(\n self._read_tsv(os.path.join(data_dir, \"train.tsv\")),\n \"train\")\n\n def get_dev_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\n return self._create_examples(\n self._read_tsv(os.path.join(data_dir, \"dev.tsv\")),\n \"dev\")\n\n def get_test_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\n return self._create_examples(\n self._read_tsv(os.path.join(data_dir, \"test.tsv\")),\n \"test\")\n\n def get_labels(self):\n \"\"\"See base class.\"\"\"\n return [\"0\", \"1\"]\n\n def _create_examples(self, lines, set_type):\n \"\"\"Creates examples for the training and dev sets.\"\"\"\n examples = []\n for (i, line) in enumerate(lines):\n if i == 0:\n continue\n guid = \"%s-%s\" % (set_type, i)\n text_a = tokenization.convert_to_unicode(line[3])\n text_b = tokenization.convert_to_unicode(line[4])\n if set_type == \"test\":\n label = \"0\"\n else:\n label = tokenization.convert_to_unicode(line[0])\n examples.append(InputExample(guid=guid, text_a=text_a,\n text_b=text_b, label=label))\n return examples\n\nclass ColaProcessor(DataProcessor):\n \"\"\"Processor for the CoLA data set (GLUE version).\"\"\"\n\n def get_train_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\n return self._create_examples(\n self._read_tsv(os.path.join(data_dir, \"train.tsv\")),\n \"train\")\n\n def get_dev_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\n return self._create_examples(\n self._read_tsv(os.path.join(data_dir, \"dev.tsv\")),\n \"dev\")\n\n def get_test_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\n return self._create_examples(\n self._read_tsv(os.path.join(data_dir, \"test.tsv\")),\n \"test\")\n\n def get_labels(self):\n \"\"\"See base class.\"\"\"\n return [\"0\", \"1\"]\n\n def _create_examples(self, lines, set_type):\n \"\"\"Creates examples for the training and dev sets.\"\"\"\n examples = []\n for (i, line) in enumerate(lines):\n # Only the test set has a header\n if set_type == \"test\" and i == 0:\n continue\n guid = \"%s-%s\" % (set_type, i)\n if set_type == \"test\":\n text_a = tokenization.convert_to_unicode(line[1])\n label = \"0\"\n else:\n text_a = tokenization.convert_to_unicode(line[3])\n label = tokenization.convert_to_unicode(line[1])\n examples.append(InputExample(guid=guid, text_a=text_a,\n text_b=None, label=label))\n return examples\n\n\ndef convert_single_example(ex_index, example, label_list, max_seq_length,\n tokenizer):\n \"\"\"Converts a single `InputExample` into a single `InputFeatures`.\"\"\"\n label_map = {}\n for (i, label) in enumerate(label_list):\n label_map[label] = i\n\n tokens_a = tokenizer.tokenize(example.text_a)\n tokens_b = None\n if example.text_b:\n tokens_b = tokenizer.tokenize(example.text_b)\n\n if tokens_b:\n # Modifies `tokens_a` and `tokens_b` in place so that the total\n # length is less than the specified length.\n # Account for [CLS], [SEP], [SEP] with \"- 3\"\n _truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3)\n else:\n # Account for [CLS] and [SEP] with \"- 2\"\n if len(tokens_a) > 
max_seq_length - 2:\n tokens_a = tokens_a[0:(max_seq_length - 2)]\n\n # The convention rule is:\n # (a) For sequence pairs:\n # tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]\n # segment_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1\n # (b) For single sequences:\n # tokens: [CLS] the dog is hairy . [SEP]\n # sigment_ids: 0 0 0 0 0 0 0\n #\n # Where \"segment_ids\" are used to indicate whether this is the first\n # sequence or the second sequence. The embedding vectors for `type=0` and\n # `type=1` were learned during pre-training and are added to the wordpiece\n # embedding vector (and position vector). This is not *strictly* necessary\n # since the [SEP] token unambiguously separates the sequences, but it makes\n # it easier for the model to learn the concept of sequences.\n #\n # For classification tasks, the first vector (corresponding to [CLS]) is\n # used as as the \"sentence vector\". Note that this only makes sense because\n # the entire model is fine-tuned.\n tokens = []\n segment_ids = []\n tokens.append(\"[CLS]\")\n segment_ids.append(0)\n for token in tokens_a:\n tokens.append(token)\n segment_ids.append(0)\n tokens.append(\"[SEP]\")\n segment_ids.append(0)\n\n if tokens_b:\n for token in tokens_b:\n tokens.append(token)\n segment_ids.append(1)\n tokens.append(\"[SEP]\")\n segment_ids.append(1)\n\n input_ids = tokenizer.convert_tokens_to_ids(tokens)\n\n # The mask has 1 for real tokens and 0 for padding tokens. Only real\n # tokens are attended to.\n input_mask = [1] * len(input_ids)\n\n # Zero-pad up to the sequence length.\n while len(input_ids) < max_seq_length:\n input_ids.append(0)\n input_mask.append(0)\n segment_ids.append(0)\n\n assert len(input_ids) == max_seq_length\n assert len(input_mask) == max_seq_length\n assert len(segment_ids) == max_seq_length\n\n label_id = label_map[example.label]\n\n # here we disable the verbose printing of the data\n if ex_index < 0:\n tf.logging.info(\"*** Example ***\")\n tf.logging.info(\"guid: %s\" % (example.guid))\n tf.logging.info(\"tokens: %s\" % \" \".join(\n [tokenization.printable_text(x) for x in tokens]))\n tf.logging.info(\"input_ids: %s\" % \" \".join([str(x) for x in input_ids]))\n tf.logging.info(\"input_ids length: %d\" % len(input_ids))\n tf.logging.info(\"input_mask: %s\" %\\\n \" \".join([str(x) for x in input_mask]))\n tf.logging.info(\"segment_ids: %s\" %\\\n \" \".join([str(x) for x in segment_ids]))\n tf.logging.info(\"label: %s (id = %d)\" % (example.label, label_id))\n\n feature = InputFeatures(input_ids=input_ids,\n input_mask=input_mask,\n segment_ids=segment_ids,\n label_id=label_id)\n return feature\n\n\ndef file_based_convert_examples_to_features(\n examples, label_list, max_seq_length, tokenizer, output_file):\n \"\"\"Convert a set of `InputExample`s to a TFRecord file.\"\"\"\n\n writer = tf.python_io.TFRecordWriter(output_file)\n\n for (ex_index, example) in enumerate(examples):\n\n feature = convert_single_example(ex_index, example, label_list,\n max_seq_length, tokenizer)\n\n def create_int_feature(values):\n return tf.train.Feature(\n int64_list=tf.train.Int64List(value=list(values)))\n\n features = collections.OrderedDict()\n features[\"input_ids\"] = create_int_feature(feature.input_ids)\n features[\"input_mask\"] = create_int_feature(feature.input_mask)\n features[\"segment_ids\"] = create_int_feature(feature.segment_ids)\n features[\"label_ids\"] = create_int_feature([feature.label_id])\n\n tf_example = tf.train.Example(\n features=tf.train.Features(feature=features))\n 
writer.write(tf_example.SerializeToString())\n\ndef _truncate_seq_pair(tokens_a, tokens_b, max_length):\n \"\"\"Truncates a sequence pair in place to the maximum length.\"\"\"\n\n # This is a simple heuristic which will always truncate the longer sequence\n # one token at a time. This makes more sense than truncating an equal\n # percent of tokens from each, since if one sequence is very short then\n # each token that's truncated likely contains more information than a\n # longer sequence.\n while True:\n total_length = len(tokens_a) + len(tokens_b)\n if total_length <= max_length:\n break\n if len(tokens_a) > len(tokens_b):\n tokens_a.pop()\n else:\n tokens_b.pop()\n\n\ndef prepare_TFRecord_data(processor, tokenizer,\n data_dir, max_seq_length, output_dir):\n \"\"\"\n Args:\n processor: Data Preprocessor, which must have get_lables,\n get_train/dev/test/examples methods defined.\n tokenizer: The Sentence Tokenizer. Generally should be\n SentencePiece Model.\n data_dir: The input data directory.\n max_seq_length: Max sequence length.\n batch_size: mini-batch size.\n model: `train`, `eval` or `test`.\n output_dir: The directory to save the TFRecord in.\n \"\"\"\n label_list = processor.get_labels()\n\n train_examples = processor.get_train_examples(data_dir)\n train_file = os.path.join(output_dir, \"train.tf_record\")\n file_based_convert_examples_to_features(\n train_examples, label_list, max_seq_length,\n tokenizer, train_file)\n\n eval_examples = processor.get_dev_examples(data_dir)\n eval_file = os.path.join(output_dir, \"eval.tf_record\")\n file_based_convert_examples_to_features(\n eval_examples, label_list,\n max_seq_length, tokenizer, eval_file)\n\n test_examples = processor.get_test_examples(data_dir)\n test_file = os.path.join(output_dir, \"predict.tf_record\")\n file_based_convert_examples_to_features(\n test_examples, label_list,\n max_seq_length, tokenizer, test_file)\n",
"# Copyright 2019 The Texar Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Example of fine-tuning OpenAI GPT-2 language model.\n Use this for base model and emosup.\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport importlib\nimport numpy as np\nimport tensorflow as tf\nimport texar as tx\n\nfrom data_utils import model_utils, processor, utils\n\n# pylint: disable=invalid-name, too-many-locals, too-many-statements, no-member\n# pylint: disable=invalid-name, too-many-locals, too-many-statements, no-member\n# pylint: disable=too-many-branches\n\n\n\nflags = tf.flags\n\nFLAGS = flags.FLAGS\n\nflags.DEFINE_string(\"checkpoint\", None,\n \"Model checkpoint to resume training or for test.\")\nflags.DEFINE_string(\"pretrain_checkpoint\",\n \"gpt2_pretrained_models/model_117M/model.ckpt\",\n \"OpenAI pretrained model checkpoint. Ignored if \"\n \"'--checkpoint' is specified.\")\nflags.DEFINE_string(\"pretrained_model_dir\", \"gpt2_pretrained_models/model_117M\",\n \"The directory of pretrained model, for loading vocabuary, \"\n \"etc.\")\nflags.DEFINE_float(\"temperature\", 0.7,\n \"Softmax temperature for top-k sample decoding. Must be \"\n \"strictly greater than 0. Defaults to 0.7.\")\nflags.DEFINE_integer(\"top_k\", 40,\n \"The number of top most likely candidates from a vocab \"\n \"distribution.\")\nflags.DEFINE_string(\"config_train\", \"configs.config_train\",\n \"Configurations of GPT-2 training, including data and \"\n \"optimization hyperparameters.\")\nflags.DEFINE_string(\"config_type\", \"texar\",\n \"The configuration file type. Set to 'json' if the GPT-2 \"\n \"config file is in the same type of the official GPT-2 \"\n \"config file. Set to 'texar' if GPT-2 config file is in \"\n \"Texar type.\")\nflags.DEFINE_string(\"config_model\", \"configs.config_model\",\n \"The model configuration file to configure the model. 
\"\n \"The config file type is define by the 'config_type',\"\n \"it be of texar type or json type.\"\n \"For '--config_type=json', set the json config file path\"\n \"like: '--config_model gpt2_pretrained_models/model_117M/\"\n \"hparams.json';\"\n \"For '--config_type=texar', set the texar config file \"\n \"like: '--config_model configs.config_model'.\")\nflags.DEFINE_string(\"output_dir\", \"output/remove_space/\",\n \"The output directory where the model checkpoints will be \"\n \"written.\")\nflags.DEFINE_bool(\"do_train\", False, \"Whether to run training.\")\nflags.DEFINE_bool(\"do_eval\", False, \"Whether to run eval on the dev set.\")\nflags.DEFINE_bool(\"do_test\", False, \"Whether to run test on the test set.\")\nflags.DEFINE_bool(\"distributed\", False, \"Whether to run in distributed mode.\")\nflags.DEFINE_bool(\"finetune\", False, \"Whether to test on finetune mode.\")\nflags.DEFINE_bool(\"beam\", False, \"Whether to do a beam serach for inference?\")\nflags.DEFINE_bool(\"bpe_loss\", False, \"Whether to report loss bpe base or word base?\")\n\n\nconfig_train = importlib.import_module(FLAGS.config_train)\n\n\ndef _log(msg, log_fn=None):\n tf.logging.info(msg)\n if log_fn is None:\n log_fn = os.path.join(FLAGS.output_dir, config_train.name, 'log.txt')\n with open(log_fn, 'a') as flog:\n flog.write(msg + '\\n')\n\ndef _ids_to_text(ids, proc):\n eos_token_id = proc.encoder['<|endoftext|>']\n\n if ids[0] == eos_token_id:\n ids = ids[1:]\n text = proc.decode(ids)\n return text\n\ndef main(_):\n \"\"\"\n Builds the model and runs\n \"\"\"\n if FLAGS.distributed:\n import horovod.tensorflow as hvd\n hvd.init()\n\n tf.logging.set_verbosity(tf.logging.INFO)\n\n if len(config_train.name) > 0:\n output_dir = os.path.join(FLAGS.output_dir, config_train.name)\n else:\n output_dir = FLAGS.output_dir\n tx.utils.maybe_create_dir(output_dir)\n\n\n ## Loads GPT-2 model configuration\n\n if FLAGS.config_type == \"json\":\n gpt2_config = model_utils.transform_gpt2_to_texar_config(\n FLAGS.config_model)\n elif FLAGS.config_type == 'texar':\n gpt2_config = importlib.import_module(\n FLAGS.config_model)\n else:\n raise ValueError('Unknown config_type.')\n\n # Creates a data pre-processor for, e.g., BPE encoding\n proc = processor.get_encoder(FLAGS.pretrained_model_dir)\n\n max_decoding_length = config_train.max_decoding_length\n assert max_decoding_length <= gpt2_config.position_size, (\n \"max_decoding_length should not be greater than position_size. 
\"\n \"{}>{}\".format(max_decoding_length, gpt2_config.position_size))\n\n ## Loads data\n\n # Configures training data shard in distribued mode\n if FLAGS.distributed:\n config_train.train_hparam[\"dataset\"][\"num_shards\"] = hvd.size()\n config_train.train_hparam[\"dataset\"][\"shard_id\"] = hvd.rank()\n config_train.train_hparam[\"batch_size\"] //= hvd.size()\n\n datasets = {}\n #if FLAGS.do_train:\n train_dataset = tx.data.TFRecordData(hparams=config_train.train_hparam)\n datasets['train'] = train_dataset\n #if FLAGS.do_eval:\n dev_dataset = tx.data.TFRecordData(hparams=config_train.dev_hparam)\n datasets['dev'] = dev_dataset\n #if FLAGS.do_test:\n test_dataset = tx.data.TFRecordData(hparams=config_train.test_hparam)\n datasets['test'] = test_dataset\n iterator = tx.data.FeedableDataIterator(datasets)\n batch = iterator.get_next()\n batch_size = tf.shape(batch['x1x4_ids'])[0]\n\n ## Builds the GPT-2 model\n vocab_size = gpt2_config.vocab_size\n\n word_embedder = tx.modules.WordEmbedder(\n vocab_size=vocab_size,\n hparams=gpt2_config.embed)\n\n pos_embedder = tx.modules.PositionEmbedder(\n position_size=gpt2_config.position_size,\n hparams=gpt2_config.pos_embed)\n\n # Ties output layer with input word embedding\n output_layer = tf.transpose(word_embedder.embedding, (1, 0))\n\n decoder = tx.modules.TransformerDecoder(\n vocab_size=vocab_size,\n output_layer=output_layer,\n hparams=gpt2_config.decoder)\n\n # For training\n def _get_recon_loss(ids, full_len, prefix_len, mask_prefix=True, do_print=False):\n ids = ids[:,:tf.reduce_max(full_len)]\n batch_size__ = tf.shape(ids)[0]\n seq_len = tf.fill([batch_size__], tf.shape(ids)[1])\n pos_embeds = pos_embedder(sequence_length=seq_len)\n input_embeds = word_embedder(ids) + pos_embeds\n\n outputs = decoder(inputs=input_embeds, decoding_strategy='train_greedy')\n\n max_full_len = tf.reduce_max(full_len)\n ids = ids[:, :max_full_len]\n logits = outputs.logits[:, :max_full_len]\n\n if mask_prefix:\n loss_recon = tx.losses.sequence_sparse_softmax_cross_entropy(\n labels=ids[:, 1:],\n logits=logits[:, :-1, :],\n sequence_length=full_len-1,\n average_across_timesteps=False,\n sum_over_timesteps=False,\n average_across_batch=False,\n sum_over_batch=False)\n mask_recon = tf.sequence_mask(\n full_len-1,\n dtype=tf.float32)\n mask_recon_prefix = 1 - tf.sequence_mask(\n prefix_len-1,\n maxlen=max_full_len-1,#max_decoding_length-1,\n dtype=tf.float32)\n mask_recon = mask_recon * mask_recon_prefix\n\n if do_print:\n print_op_1 = tf.print(mask_recon)\n loss_recon_flat = tx.utils.reduce_with_weights(\n tensor=loss_recon,\n weights=mask_recon,\n average_across_remaining=False,\n sum_over_remaining=False,\n average_across_batch=False)\n print_op_2 = tf.print(loss_recon_flat)\n with tf.control_dependencies([print_op_1, print_op_2]):\n loss_recon = tx.utils.reduce_with_weights(\n tensor=loss_recon,\n weights=mask_recon,\n average_across_remaining=True,\n sum_over_remaining=False)\n return loss_recon, mask_recon, loss_recon_flat\n else:\n loss_recon = tx.utils.reduce_with_weights(\n tensor=loss_recon,\n weights=mask_recon,\n average_across_remaining=True,\n sum_over_remaining=False)\n else:\n loss_recon = tx.losses.sequence_sparse_softmax_cross_entropy(\n labels=ids[:, 1:],\n logits=logits[:, :-1, :],\n sequence_length=full_len-1,\n average_across_timesteps=True,\n sum_over_timesteps=False,\n average_across_batch=True,\n sum_over_batch=False)\n\n return loss_recon\n\n\n ## ROC Loss-1: ML loss\n x1_len = tf.placeholder(tf.int32, shape=[None], name='x1_len')\n 
x1x4_ids = tf.placeholder(tf.int32, shape=[None, None], name='x1x4_ids')\n x1x4_len = tf.placeholder(tf.int32, shape=[None], name='x1x4_len')\n\n loss_fine = _get_recon_loss(x1x4_ids, x1x4_len, x1_len)\n\n\n tau = tf.placeholder(tf.float32, shape=[], name='tau')\n\n end_token = proc.encoder['<|endoftext|>']\n\n loss = config_train.w_fine * loss_fine\n\n loss_dict = {\n 'loss': loss,\n 'loss_fine': config_train.w_fine * loss_fine,\n }\n\n ## Inference\n def _embedding_fn(ids, times):\n return word_embedder(ids) + pos_embedder(times)\n\n def _infer(context_name, target_name):\n helper = tx.modules.TopKSampleEmbeddingHelper(\n embedding=_embedding_fn,\n start_tokens=batch['%s_ids' % context_name][:, 0],\n end_token=end_token,\n top_k=FLAGS.top_k,\n softmax_temperature=FLAGS.temperature)\n outputs_infer, len_infer = decoder(\n context=batch['%s_ids' % context_name],\n context_sequence_length=batch['%s_len' % context_name],\n max_decoding_length=max_decoding_length,\n helper=helper)\n yy_ids = tx.utils.varlength_roll(\n outputs_infer.sample_id, -batch['%s_len' % context_name])\n yy_len = len_infer - batch['%s_len' % context_name]\n yy_ids = yy_ids[:, :tf.reduce_max(yy_len)]\n yy_logits = outputs_infer.logits\n yy_loss = _evaluate_loss_test(yy_logits, target_name, context_name)\n\n return yy_ids, yy_len, yy_loss\n\n def _evaluate_loss_test(logits, target_name, context_name, bpe_loss=FLAGS.bpe_loss):\n ids = batch['%s_ids' % target_name]\n full_len = batch['%s_len' % target_name]\n ids = ids[:, :tf.reduce_max(full_len)]\n\n # new code\n max_full_len = tf.reduce_max(full_len)\n logits = logits[:, :max_full_len]\n\n test_loss = tx.losses.sequence_sparse_softmax_cross_entropy(\n labels=ids[:, 1:],\n logits=logits[:, :-1, :],\n sequence_length=full_len - 1,\n average_across_timesteps=False,\n sum_over_timesteps=not bpe_loss, # True,\n average_across_batch=False,\n sum_over_batch=False)\n mask_recon = tf.sequence_mask(\n full_len - 1,\n dtype=tf.float32)\n mask_recon_prefix = 1 - tf.sequence_mask(\n batch['%s_len' % context_name] - 1,\n maxlen=max_full_len - 1, # max_decoding_length-1,\n dtype=tf.float32)\n mask_recon = mask_recon * mask_recon_prefix\n\n test_loss = tx.utils.reduce_with_weights(\n tensor=test_loss,\n weights=mask_recon,\n average_across_batch=bpe_loss,\n average_across_remaining=bpe_loss,\n sum_over_remaining=not bpe_loss)\n\n return test_loss # [bs,] ?\n\n\n\n x4_ids_fine, x4_len_fine, x4_loss_fine = _infer('x1', 'x1x4')\n\n\n ## Optimization\n\n def _get_beam_ids(context_name):\n # beam-search\n predictions = decoder(\n beam_width=5,\n length_penalty=config_train.length_penalty,\n embedding=_embedding_fn,\n context=batch['%s_ids' % context_name],\n context_sequence_length=batch['%s_len' % context_name],\n max_decoding_length=max_decoding_length,\n end_token=end_token,\n mode=tf.estimator.ModeKeys.PREDICT)\n\n beam_output_ids = tx.utils.varlength_roll(predictions[\"sample_id\"][:, :, 0], -batch['%s_len' % context_name])\n\n return beam_output_ids\n beam_search_ids = _get_beam_ids('x1')\n\n\n ## Optimization\n trainable_variables = tx.utils.collect_trainable_variables(\n [word_embedder, pos_embedder, decoder])\n\n global_step = tf.Variable(0, trainable=False)\n opt = tx.core.get_optimizer(\n global_step=global_step,\n hparams=config_train.opt)\n\n if FLAGS.distributed:\n opt = hvd.DistributedOptimizer(opt)\n\n train_op = tf.contrib.layers.optimize_loss(\n loss=loss,\n global_step=global_step,\n learning_rate=None,\n optimizer=opt,\n variables=trainable_variables)\n\n\n ## 
Train/eval/test routine\n saver = tf.train.Saver()\n saver_best = tf.train.Saver(max_to_keep=1)\n dev_best = {\n 'loss': 1e8, 'loss_fine': 1e8}\n\n\n def _log_losses(losses, step=None):\n loss_str = 'loss: %.4f, loss_fine: %.4f' % \\\n (losses['loss'], losses['loss_fine'])\n\n if step is not None:\n loss_str = 'step: %d, %s' % (step, loss_str)\n\n _log(loss_str)\n\n def _is_head():\n if not FLAGS.distributed:\n return True\n else:\n return hvd.rank() == 0\n\n def _train_epoch(sess, initial=False):\n \"\"\"Trains on the training set, and evaluates on the dev set\n periodically.\n \"\"\"\n iterator.restart_dataset(sess, 'train')\n\n while True:\n try:\n # (1) Get data and yy sample\n fetches_data = {\n 'batch': batch,\n 'batch_size': batch_size,\n }\n feed_dict_data = {\n iterator.handle: iterator.get_handle(sess, 'train'),\n tx.global_mode(): tf.estimator.ModeKeys.PREDICT,\n }\n rets_data = sess.run(fetches_data, feed_dict_data)\n\n\n # (2) Optimize loss\n feed_dict = {\n #x1_ids: rets_data['batch']['x1_ids'],\n x1_len: rets_data['batch']['x1_len'],\n x1x4_ids: rets_data['batch']['x1x4_ids'],\n x1x4_len: rets_data['batch']['x1x4_len'],\n tau: config_train.tau,\n tx.global_mode(): tf.estimator.ModeKeys.TRAIN,\n }\n\n fetches = {\n 'train_op': train_op,\n 'step': global_step,\n }\n fetches.update(loss_dict)\n\n rets = sess.run(fetches, feed_dict)\n step = rets['step']\n\n dis_steps = config_train.display_steps\n\n if _is_head() and dis_steps > 0 and step % dis_steps == 0:\n _log_losses(rets, step)\n\n eval_steps = config_train.eval_steps\n if _is_head() and eval_steps > 0 and step % eval_steps == 0:\n _dev_epoch(sess)\n sample_steps = config_train.sample_steps\n if _is_head() and sample_steps > 0 and step % sample_steps == 0:\n print('-----------testing-----------------')\n _test_epoch(sess, step=step)\n\n ckpt_steps = config_train.checkpoint_steps\n if _is_head() and ckpt_steps > 0 and step % ckpt_steps == 0:\n ckpt_fn = os.path.join(output_dir, 'model.ckpt')\n ckpt_fn = saver.save(sess, ckpt_fn, global_step=step)\n _log('Checkpoint to {}'.format(ckpt_fn))\n\n except tf.errors.OutOfRangeError:\n break\n\n def _dev_epoch(sess):\n \"\"\"Evaluates on the dev set.\n \"\"\"\n iterator.restart_dataset(sess, 'dev')\n\n results = tx.utils.AverageRecorder()\n nsamples = 0\n fetches = {}\n fetches.update(loss_dict)\n # i = 0\n\n while True:\n try:\n\n # (1) Get data and yy sample\n fetches_data = {\n 'batch': batch,\n 'batch_size': batch_size,\n }\n feed_dict_data = {\n iterator.handle: iterator.get_handle(sess, 'dev'),\n tx.global_mode(): tf.estimator.ModeKeys.PREDICT,\n }\n rets_data = sess.run(fetches_data, feed_dict_data)\n\n\n # (2) eval loss\n feed_dict = {\n #x1_ids: rets_data['batch']['x1_ids'],\n x1_len: rets_data['batch']['x1_len'],\n x1x4_ids: rets_data['batch']['x1x4_ids'],\n x1x4_len: rets_data['batch']['x1x4_len'],\n tau: config_train.tau,\n tx.global_mode(): tf.estimator.ModeKeys.PREDICT,\n }\n\n rets = sess.run(fetches, feed_dict)\n\n results.add(rets, weight=rets_data['batch_size'])\n nsamples += rets_data['batch_size']\n except tf.errors.OutOfRangeError:\n break\n\n _log_losses(results.avg())\n _log('nsamples: %d' % nsamples)\n\n avg_loss = results.avg('loss')\n if FLAGS.do_train and avg_loss < dev_best['loss']:\n dev_best.update(results.avg())\n ckpt_fn = os.path.join(output_dir, 'model_best.ckpt')\n ckpt_fn = saver_best.save(sess, ckpt_fn)\n _log('Checkpoint best to {}'.format(ckpt_fn))\n\n def _test_epoch(sess, step=None):\n \"\"\"Generates samples on the test set.\n \"\"\"\n 
iterator.restart_dataset(sess, 'test')\n\n _all_inputs = []\n _all_samples = []\n _all_loss = []\n\n if FLAGS.finetune:\n _log('Generation input: x1')\n fetches = {\n 'inputs': batch['x1_ids'],\n 'length': batch['x1_len'],\n 'samples_length': x4_len_fine,\n 'samples': x4_ids_fine,\n 'sample_loss': x4_loss_fine,\n 'outputs': batch['x1x4_ids'],\n 'out_length': batch['x1x4_len']\n }\n res_fn_appendix = \"x1\"\n\n\n while True:\n try:\n feed_dict = {\n iterator.handle: iterator.get_handle(sess, 'test'),\n tx.context.global_mode(): tf.estimator.ModeKeys.PREDICT,\n }\n rets = sess.run(fetches, feed_dict=feed_dict)\n\n _inputs = []\n for i, l in zip(rets['inputs'], rets['length']):\n # Delete padding\n _inputs.append(i[:l].tolist())\n _all_inputs.extend(_inputs)\n\n _samples = []\n _loss = []\n if not FLAGS.beam:\n for s, l in zip(rets['samples'], rets['samples_length']):\n _samples.append(s[:l].tolist())\n\n else:\n _samples.extend(h.tolist() for h in rets['samples'])\n _samples = utils.list_strip_eos(_samples, eos_token=proc.encoder['<|endoftext|>'])\n _all_samples.extend(_samples)\n\n\n except tf.errors.OutOfRangeError:\n break\n\n # Parse samples and write to file\n\n eos_token_id = proc.encoder['<|endoftext|>']\n\n _all_input_text = []\n for i in _all_inputs:\n if i[0] == eos_token_id:\n i = i[1:]\n i_text = proc.decode(i)\n _all_input_text.append(i_text)\n _all_input_text = tx.utils.strip_eos(_all_input_text,\n eos_token='<|endoftext|>')\n\n _all_samples_text = []\n for j, (i, s) in enumerate(zip(_all_inputs, _all_samples)):\n s_text = proc.decode(s)\n s_text = s_text.replace('\\n', ' ')\n # print(s_text)\n _all_samples_text.append(s_text)\n if j % 1000 == 0:\n print(\"{} stories is process of total {}\".format(j, len(_all_inputs)))\n\n _all_samples_text = tx.utils.strip_eos(_all_samples_text,\n eos_token='<|endoftext|>')\n\n if step is None:\n fn = \"test_samples_%s_sample40.tsv\" % res_fn_appendix\n else:\n fn = \"test_samples_%s_%d.tsv\" % (res_fn_appendix, step)\n output_file = os.path.join(output_dir, fn)\n _log('Write samples to {}'.format(output_file))\n if not FLAGS.beam:\n tx.utils.write_paired_text(\n _all_input_text, _all_samples_text, output_file)\n else:\n with open(output_file, 'w') as f:\n for item in _all_samples_text:\n f.write(\"%s\\n\" % item)\n\n\n # Broadcasts global variables from rank-0 process\n if FLAGS.distributed:\n bcast = hvd.broadcast_global_variables(0)\n\n session_config = tf.ConfigProto()\n if FLAGS.distributed:\n session_config.gpu_options.visible_device_list = str(hvd.local_rank())\n\n with tf.Session(config=session_config) as sess:\n sess.run(tf.global_variables_initializer())\n sess.run(tf.local_variables_initializer())\n sess.run(tf.tables_initializer())\n\n# smry_writer = tf.summary.FileWriter(FLAGS.output_dir, graph=sess.graph)\n\n if FLAGS.distributed:\n bcast.run()\n\n #Restores trained model if specified\n if FLAGS.checkpoint:\n _log('Restore from {}'.format(FLAGS.checkpoint))\n saver.restore(sess, FLAGS.checkpoint)\n elif FLAGS.pretrain_checkpoint:\n _log('Restore from {}'.format(FLAGS.pretrain_checkpoint))\n model_utils.init_gpt2_checkpoint(sess, FLAGS.pretrain_checkpoint)\n print(\"\\nFinished loading\\n\")\n saver.save(sess, output_dir + '/gpt2_model.ckpt')\n\n\n\n\n iterator.initialize_dataset(sess)\n\n if FLAGS.do_train:\n for epoch in range(config_train.max_train_epoch):\n print(\"Training epoch {}\".format(epoch))\n _train_epoch(sess, epoch==0)\n saver.save(sess, output_dir + '/model.ckpt')\n\n if FLAGS.do_eval:\n _dev_epoch(sess)\n\n if 
FLAGS.do_test:\n _test_epoch(sess)\n\n\nif __name__ == \"__main__\":\n tf.app.run()\n\n\n",
"from sklearn.model_selection import train_test_split\ndata_folder=\"data_new/\"\ntrain_ratio=0.8\ntest_ratio=0.1\nvalidation_ratio=0.1\n\ntitles = [] \ntitles_file = open(data_folder+\"titles_all.txt\", \"r\")\ntitles = titles_file.read().splitlines()\n\nstanzas = [] \nstanzas_file = open(data_folder+\"stanzas_all.txt\", \"r\")\nstanzas = stanzas_file.read().splitlines()\n\nemotion_arcs = []\nemotion_arcs_file = open(data_folder+\"emo_all.txt\", \"r\")\nemotion_arcs = emotion_arcs_file.read().splitlines()\n\ntitle_train = open(data_folder + \"train_x1.txt\", \"w\")\ntitle_dev = open(data_folder + \"dev_x1.txt\", \"w\")\ntitle_test = open(data_folder + \"test_x1.txt\", \"w\")\n\npoem_train = open(data_folder + \"train_x4.txt\", \"w\")\npoem_dev = open(data_folder + \"dev_x4.txt\", \"w\")\npoem_test = open(data_folder + \"test_x4.txt\", \"w\")\n\nemotion_train = open(data_folder + \"train_mapped.txt\", \"w\")\nemotion_dev = open(data_folder + \"dev_mapped.txt\", \"w\")\nemotion_test = open(data_folder + \"test_mapped.txt\", \"w\")\n\ntitle_train_list, title_test_list, poem_train_list, poem_test_list, emotion_train_list, emotion_test_list = train_test_split(titles, stanzas, emotion_arcs, test_size=1 - train_ratio)\ntitle_dev_list, title_test_list, poem_dev_list, poem_test_list, emotion_dev_list, emotion_test_list, = train_test_split(title_test_list, poem_test_list, emotion_test_list, test_size=test_ratio / (test_ratio + validation_ratio))\n\nprint(\"Split:\")\nprint(len(title_train_list), len(title_test_list), len(title_dev_list))\ntitle_train.write(\"\\n\".join(title_train_list))\ntitle_train.write(\"\\n\")\ntitle_train.close()\ntitle_dev.write(\"\\n\".join(title_dev_list))\ntitle_dev.write(\"\\n\")\ntitle_dev.close()\ntitle_test.write(\"\\n\".join(title_test_list))\ntitle_test.write(\"\\n\")\ntitle_test.close()\n\npoem_train.write(\" <|endoftext|>\\n\".join(poem_train_list))\npoem_train.write(\" <|endoftext|>\\n\")\npoem_train.close()\npoem_dev.write(\" <|endoftext|>\\n\".join(poem_dev_list))\npoem_dev.write(\" <|endoftext|>\\n\")\npoem_dev.close()\npoem_test.write(\" <|endoftext|>\\n\".join(poem_test_list))\npoem_test.write(\" <|endoftext|>\\n\")\npoem_test.close()\n\nemotion_train.write(\"\\n\".join(emotion_train_list))\nemotion_train.write(\"\\n\")\nemotion_train.close()\nemotion_dev.write(\"\\n\".join(emotion_dev_list))\nemotion_dev.write(\"\\n\")\nemotion_dev.close()\nemotion_test.write(\"\\n\".join(emotion_test_list))\nemotion_test.write(\"\\n\")\nemotion_test.close()\nstanzas_file.close()\ntitles_file.close()\nemotion_arcs_file.close()\n\n"
] | [
[
"tensorflow.logging.info",
"tensorflow.train.Features",
"tensorflow.gfile.Open",
"tensorflow.python_io.TFRecordWriter"
],
[
"tensorflow.reduce_max",
"tensorflow.transpose",
"tensorflow.local_variables_initializer",
"tensorflow.Variable",
"tensorflow.shape",
"tensorflow.control_dependencies",
"tensorflow.tables_initializer",
"tensorflow.placeholder",
"tensorflow.ConfigProto",
"tensorflow.global_variables_initializer",
"tensorflow.logging.info",
"tensorflow.logging.set_verbosity",
"tensorflow.Session",
"tensorflow.train.Saver",
"tensorflow.print",
"tensorflow.sequence_mask",
"tensorflow.contrib.layers.optimize_loss",
"tensorflow.app.run"
],
[
"sklearn.model_selection.train_test_split"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
simonlevine/x-transformer-icd | [
"17d0a84f8b8e1f69623a82c0afab26830c7a1eb8"
] | [
"app/lib/models.py"
] | [
"\"\"\"deserialize auto-icd models and provide a consistent interface\"\"\"\n\nimport typing as t\nimport json\nimport pickle\nfrom pathlib import Path\nimport numpy as np\nimport onnxruntime as rt\n\nAPP_ROOT = Path(\"./app\")\nASSETS_DIR = APP_ROOT/\"assets\"\n\n\nclass AutoICDModel:\n\n def __init__(self, onnx_model_fp):\n assert onnx_model_fp.exists()\n self.sess = rt.InferenceSession(str(onnx_model_fp.resolve()))\n\n def ___call__(self, free_text: str) -> t.Set[str]:\n raise NotImplementedError(\"Subclasses just provide model interaction logic!\")\n\n\n# class KissModel(AutoICDModel):\n\n# def __init__(self, onnx_model_fp, icd9_codes: t.List[str]):\n# \"\"\"because we are only loading a few codes,\n# we need to know which ones in otder to decode\n# decode the model output, which is a 1x|icd9_codes| matrix\"\"\"\n# super().__init__(onnx_model_fp)\n# self.icd9_codes = icd9_codes\n\n# def ___call__(self, free_text: str) -> t.Set[str]:\n# X = np.array([[free_text]])\n# predictions, predictions_proba \\\n# = sess.run(None, {\"free_text_input\": X})[0]\n# codes_predicted = [\n# code for prediction, code in zip(predictions, self.icd9_codes)\n# if prediction == 1 # i.e., if the code is predicted to be present\n# ]\n# codes2predicted_proba = {\n# code: proba for code, proba in zip(self.icd9_codes, predictions_proba)\n# }\n# return codes_predicted, codes2predicted_proba\n\n\n# def get_kiss_model():\n# onnx_model_fp = ASSETS_DIR/\"kiss_model.onnx\"\n# with open(ASSETS_DIR/\"kiss_model.onnx.metadata.json\") as f:\n# icd9_codes = json.load(f)[\"icd9_codes_relevant\"]\n# model = KissModel(onnx_model_fp, icd9_codes)\n# return model\n\n\nclass KissModel:\n \"\"\"Kiss Model using pickle for persistence\"\"\"\n\n def __init__(self):\n with open(ASSETS_DIR/\"kiss_model.pkl.metadata.json\") as f_meta:\n self.icd9_codes = json.load(f_meta)[\"icd9_codes_relevant\"]\n with open(ASSETS_DIR/\"kiss_model.pkl\", \"rb\") as f:\n self.model = pickle.loads(f.read())\n \n def __call__(self, free_text: str):\n X = np.array([free_text])\n predicted_codes_proba = self.model.predict_proba(X)\n return np.array([proba.tolist() for proba in predicted_codes_proba])[:,0,1]"
] | [
[
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
kayaei/pands-problem-set | [
"a7c48059e3024955794c67d9e6f969a42f4e3a6d"
] | [
"plotfunction.py"
] | [
"# Etem Kaya 16-Mar-2019\n\n# Solution to Problem-10.\n# File name: \"plotfunction.py\".\n\n# Problem-10: Write a program that displays a plot of the functions x, x2 & 2x\n# in the range [0, 4].\n\n#Import matplotlib and numpy packages \nimport matplotlib.pyplot as plt\nimport numpy as np\n\n# setup the lenght and scale of the x axis\n# plt.axis([0, 4, 0, 15])\nx = np.arange(0.0, 4.0, 0.5)\n\n# define the functions y1, y2 and y3 \ny1 = x # f(x) function\ny2 = x**2 # f(x**2) function \ny3 = 2**x # f(2**x) function\n\n## plot the y1, y2 and y3 functions\nplt.plot(x, y1)\nplt.plot(x, y2)\nplt.plot(x, y3)\n\n# pionts where the y1, y2 and y3 functions intersect and_\n# mark the point where they intersect with orange and blue colours\nplt.plot(1, 1, 'or')\nplt.plot(2, 4, 'bo')\n\n## Config the graph\nplt.title('Plotting Graph for functions f(x), f(x^2) and f(2^x)')\nplt.xlabel('X - Axis')\nplt.ylabel('Y - Axis')\n\n# turnon grid lines visibility\nplt.grid(True)\n\n# setup plot legends for each line and their locations for display\nplt.legend(['y1 = x', 'y2 = x^2', 'y3 = 2^x'], loc='upper left')\n\n## plot the y1, y2 and y3 functions on the graph\nplt.show()\n"
] | [
[
"matplotlib.pyplot.legend",
"matplotlib.pyplot.title",
"numpy.arange",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.grid",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylabel"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
srio/shadow3-scripts | [
"10712641333c29ca9854e9cc60d86cb321f3762b"
] | [
"ID09/run_wofry_polychromatic_partial_coherence.py"
] | [
"\n\n\n#\n# Import section\n#\nimport numpy\n\nfrom syned.beamline.beamline_element import BeamlineElement\nfrom syned.beamline.element_coordinates import ElementCoordinates\nfrom wofry.propagator.propagator import PropagationManager, PropagationElements, PropagationParameters\n\nfrom wofry.propagator.wavefront1D.generic_wavefront import GenericWavefront1D\n\nfrom wofryimpl.propagator.propagators1D.fresnel import Fresnel1D\nfrom wofryimpl.propagator.propagators1D.fresnel_convolution import FresnelConvolution1D\nfrom wofryimpl.propagator.propagators1D.fraunhofer import Fraunhofer1D\nfrom wofryimpl.propagator.propagators1D.integral import Integral1D\nfrom wofryimpl.propagator.propagators1D.fresnel_zoom import FresnelZoom1D\nfrom wofryimpl.propagator.propagators1D.fresnel_zoom_scaling_theorem import FresnelZoomScaling1D\n\n\n#\n# SOURCE========================\n#\n\n\n# def run_source(my_mode_index=0):\ndef run_source(my_mode_index=0,energy=20016.1):\n\n global coherent_mode_decomposition\n try:\n if my_mode_index == 0: raise Exception()\n tmp = coherent_mode_decomposition\n except:\n\n ########## SOURCE ##########\n\n #\n # create output_wavefront\n #\n #\n from wofryimpl.propagator.util.undulator_coherent_mode_decomposition_1d import \\\n UndulatorCoherentModeDecomposition1D\n coherent_mode_decomposition = UndulatorCoherentModeDecomposition1D(\n electron_energy=6,\n electron_current=0.2,\n undulator_period=0.017,\n undulator_nperiods=117.647,\n K=0.09683,\n photon_energy= energy,\n abscissas_interval=0.0001,\n number_of_points=2500,\n distance_to_screen=100,\n scan_direction='V',\n sigmaxx=3.63641e-06,\n sigmaxpxp=1.37498e-06,\n useGSMapproximation=False, )\n # make calculation\n coherent_mode_decomposition_results = coherent_mode_decomposition.calculate()\n\n mode_index = 0\n output_wavefront = coherent_mode_decomposition.get_eigenvector_wavefront(mode_index)\n output_wavefront = coherent_mode_decomposition.get_eigenvector_wavefront(my_mode_index)\n return output_wavefront\n\n\n#\n# BEAMLINE========================\n#\n\n\ndef run_beamline(output_wavefront):\n ########## OPTICAL SYSTEM ##########\n\n ########## OPTICAL ELEMENT NUMBER 1 ##########\n\n input_wavefront = output_wavefront.duplicate()\n from wofryimpl.beamline.optical_elements.ideal_elements.screen import WOScreen1D\n\n optical_element = WOScreen1D()\n\n # drift_before 27.066 m\n #\n # propagating\n #\n #\n propagation_elements = PropagationElements()\n beamline_element = BeamlineElement(optical_element=optical_element,\n coordinates=ElementCoordinates(p=27.066000, q=0.000000,\n angle_radial=numpy.radians(0.000000),\n angle_azimuthal=numpy.radians(0.000000)))\n propagation_elements.add_beamline_element(beamline_element)\n propagation_parameters = PropagationParameters(wavefront=input_wavefront, propagation_elements=propagation_elements)\n # self.set_additional_parameters(propagation_parameters)\n #\n propagation_parameters.set_additional_parameters('magnification_x', 20.0)\n propagation_parameters.set_additional_parameters('magnification_N', 1.0)\n #\n propagator = PropagationManager.Instance()\n try:\n propagator.add_propagator(Integral1D())\n except:\n pass\n output_wavefront = propagator.do_propagation(propagation_parameters=propagation_parameters,\n handler_name='INTEGRAL_1D')\n\n ########## OPTICAL ELEMENT NUMBER 2 ##########\n\n input_wavefront = output_wavefront.duplicate()\n from syned.beamline.shape import Rectangle\n boundary_shape = Rectangle(-0.0005, 0.0005, -0.0005, 0.0005)\n from 
wofryimpl.beamline.optical_elements.absorbers.slit import WOSlit1D\n optical_element = WOSlit1D(boundary_shape=boundary_shape)\n\n # no drift in this element\n output_wavefront = optical_element.applyOpticalElement(input_wavefront)\n\n ########## OPTICAL ELEMENT NUMBER 3 ##########\n\n input_wavefront = output_wavefront.duplicate()\n\n from orangecontrib.esrf.wofry.util.mirror import WOMirror1D\n\n optical_element = WOMirror1D.create_from_keywords(\n name='',\n shape=0,\n p_focus=44.54,\n q_focus=45.4695,\n grazing_angle_in=0.0025,\n p_distance=17.474,\n q_distance=11.3,\n zoom_factor=2,\n error_flag=1,\n error_file='/home/srio/Oasys/dabam_profile_140461924578000.dat',\n error_file_oversampling_factor=30,\n mirror_length=0,\n mirror_points=0,\n write_profile=0)\n\n # no drift in this element\n output_wavefront = optical_element.applyOpticalElement(input_wavefront)\n return output_wavefront\n\n\n#\n# MAIN FUNCTION========================\n#\n\n\n# def main():\ndef main(energy=20016.064):\n from srxraylib.plot.gol import plot, plot_image\n from orangecontrib.esrf.wofry.util.tally import TallyCoherentModes\n\n tally = TallyCoherentModes()\n for my_mode_index in range(10):\n output_wavefront = run_source(my_mode_index=my_mode_index,energy=energy)\n output_wavefront = run_beamline(output_wavefront)\n tally.append(output_wavefront)\n\n # tally.plot_cross_spectral_density(show=1, filename=\"\")\n # tally.plot_spectral_density(show=1, filename=\"\")\n # tally.plot_occupation(show=1, filename=\"\")\n\n tally.save_spectral_density(filename=\"id09_3mrad_spectral_density.dat\")\n tally.save_occupation(filename=\"id09_3mrad_occupation.dat\")\n\n\n#\n# MAIN========================\n#\n\n\nmain()\n\n#\n# MAIN========================\n#\n\nimport os\n# Energy = numpy.linspace(18000,22000,50)\nEnergy = numpy.linspace(18500,20500,100)\nfor energy in Energy:\n main(energy)\n command = \"mv id09_3mrad_spectral_density.dat results/id09_3mrad_spectral_density_%4d.dat\" % energy\n print(command)\n os.system(command)\n command = \"mv id09_3mrad_occupation.dat results/occupation_%4d.dat\" % energy\n print(command)\n os.system(command)"
] | [
[
"numpy.radians",
"numpy.linspace"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
brianzhang01/tskit | [
"e4d80810e19034cffa77bb14bc0b8d77537103ad"
] | [
"python/tests/test_metadata.py"
] | [
"# -*- coding: utf-8 -*-\n# MIT License\n#\n# Copyright (c) 2018-2019 Tskit Developers\n# Copyright (c) 2017 University of Oxford\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\"\"\"\nTests for metadata handling.\n\"\"\"\nimport io\nimport json\nimport os\nimport tempfile\nimport unittest\nimport pickle\n\nimport numpy as np\nimport python_jsonschema_objects as pjs\nimport msprime\n\nimport tskit\n\n\nclass TestMetadataHdf5RoundTrip(unittest.TestCase):\n \"\"\"\n Tests that we can encode metadata under various formats and this will\n successfully round-trip through the HDF5 format.\n \"\"\"\n def setUp(self):\n fd, self.temp_file = tempfile.mkstemp(prefix=\"msp_hdf5meta_test_\")\n os.close(fd)\n\n def tearDown(self):\n os.unlink(self.temp_file)\n\n def test_json(self):\n ts = msprime.simulate(10, random_seed=1)\n tables = ts.dump_tables()\n nodes = tables.nodes\n # For each node, we create some Python metadata that can be JSON encoded.\n metadata = [\n {\"one\": j, \"two\": 2 * j, \"three\": list(range(j))} for j in range(len(nodes))]\n encoded, offset = tskit.pack_strings(map(json.dumps, metadata))\n nodes.set_columns(\n flags=nodes.flags, time=nodes.time, population=nodes.population,\n metadata_offset=offset, metadata=encoded)\n self.assertTrue(np.array_equal(nodes.metadata_offset, offset))\n self.assertTrue(np.array_equal(nodes.metadata, encoded))\n ts1 = tables.tree_sequence()\n for j, node in enumerate(ts1.nodes()):\n decoded_metadata = json.loads(node.metadata.decode())\n self.assertEqual(decoded_metadata, metadata[j])\n ts1.dump(self.temp_file)\n ts2 = tskit.load(self.temp_file)\n self.assertEqual(ts1.tables.nodes, ts2.tables.nodes)\n\n def test_pickle(self):\n ts = msprime.simulate(10, random_seed=1)\n tables = ts.dump_tables()\n # For each node, we create some Python metadata that can be pickled\n metadata = [\n {\"one\": j, \"two\": 2 * j, \"three\": list(range(j))}\n for j in range(ts.num_nodes)]\n encoded, offset = tskit.pack_bytes(list(map(pickle.dumps, metadata)))\n tables.nodes.set_columns(\n flags=tables.nodes.flags, time=tables.nodes.time,\n population=tables.nodes.population,\n metadata_offset=offset, metadata=encoded)\n self.assertTrue(np.array_equal(tables.nodes.metadata_offset, offset))\n self.assertTrue(np.array_equal(tables.nodes.metadata, encoded))\n ts1 = tables.tree_sequence()\n for j, node in enumerate(ts1.nodes()):\n decoded_metadata = pickle.loads(node.metadata)\n self.assertEqual(decoded_metadata, metadata[j])\n ts1.dump(self.temp_file)\n ts2 = 
tskit.load(self.temp_file)\n self.assertEqual(ts1.tables.nodes, ts2.tables.nodes)\n\n\nclass ExampleMetadata(object):\n \"\"\"\n Simple class that we can pickle/unpickle in metadata.\n \"\"\"\n def __init__(self, one=None, two=None):\n self.one = one\n self.two = two\n\n\nclass TestMetadataPickleDecoding(unittest.TestCase):\n \"\"\"\n Tests in which use pickle.pickle to decode metadata in nodes, sites and mutations.\n \"\"\"\n\n def test_nodes(self):\n tables = tskit.TableCollection(sequence_length=1)\n metadata = ExampleMetadata(one=\"node1\", two=\"node2\")\n pickled = pickle.dumps(metadata)\n tables.nodes.add_row(time=0.125, metadata=pickled)\n ts = tables.tree_sequence()\n node = ts.node(0)\n self.assertEqual(node.time, 0.125)\n self.assertEqual(node.metadata, pickled)\n unpickled = pickle.loads(node.metadata)\n self.assertEqual(unpickled.one, metadata.one)\n self.assertEqual(unpickled.two, metadata.two)\n\n def test_sites(self):\n tables = tskit.TableCollection(sequence_length=1)\n metadata = ExampleMetadata(one=\"node1\", two=\"node2\")\n pickled = pickle.dumps(metadata)\n tables.sites.add_row(position=0.1, ancestral_state=\"A\", metadata=pickled)\n ts = tables.tree_sequence()\n site = ts.site(0)\n self.assertEqual(site.position, 0.1)\n self.assertEqual(site.ancestral_state, \"A\")\n self.assertEqual(site.metadata, pickled)\n unpickled = pickle.loads(site.metadata)\n self.assertEqual(unpickled.one, metadata.one)\n self.assertEqual(unpickled.two, metadata.two)\n\n def test_mutations(self):\n tables = tskit.TableCollection(sequence_length=1)\n metadata = ExampleMetadata(one=\"node1\", two=\"node2\")\n pickled = pickle.dumps(metadata)\n tables.nodes.add_row(time=0)\n tables.sites.add_row(position=0.1, ancestral_state=\"A\")\n tables.mutations.add_row(site=0, node=0, derived_state=\"T\", metadata=pickled)\n ts = tables.tree_sequence()\n mutation = ts.site(0).mutations[0]\n self.assertEqual(mutation.site, 0)\n self.assertEqual(mutation.node, 0)\n self.assertEqual(mutation.derived_state, \"T\")\n self.assertEqual(mutation.metadata, pickled)\n unpickled = pickle.loads(mutation.metadata)\n self.assertEqual(unpickled.one, metadata.one)\n self.assertEqual(unpickled.two, metadata.two)\n\n\nclass TestJsonSchemaDecoding(unittest.TestCase):\n \"\"\"\n Tests in which use json-schema to decode the metadata.\n \"\"\"\n schema = \"\"\"{\n \"title\": \"Example Metadata\",\n \"type\": \"object\",\n \"properties\": {\n \"one\": {\"type\": \"string\"},\n \"two\": {\"type\": \"string\"}\n },\n \"required\": [\"one\", \"two\"]\n }\"\"\"\n\n def test_nodes(self):\n tables = tskit.TableCollection(sequence_length=1)\n builder = pjs.ObjectBuilder(json.loads(self.schema))\n ns = builder.build_classes()\n metadata = ns.ExampleMetadata(one=\"node1\", two=\"node2\")\n encoded = json.dumps(metadata.as_dict()).encode()\n tables.nodes.add_row(time=0.125, metadata=encoded)\n ts = tables.tree_sequence()\n node = ts.node(0)\n self.assertEqual(node.time, 0.125)\n self.assertEqual(node.metadata, encoded)\n decoded = ns.ExampleMetadata.from_json(node.metadata.decode())\n self.assertEqual(decoded.one, metadata.one)\n self.assertEqual(decoded.two, metadata.two)\n\n\nclass TestLoadTextMetadata(unittest.TestCase):\n \"\"\"\n Tests that use the load_text interface.\n \"\"\"\n\n def test_individuals(self):\n individuals = io.StringIO(\"\"\"\\\n id flags location metadata\n 0 1 0.0,1.0,0.0 abc\n 1 1 1.0,2.0 XYZ+\n 2 0 2.0,3.0,0.0 !@#$%^&*()\n \"\"\")\n i = tskit.parse_individuals(\n individuals, strict=False, encoding='utf8', 
base64_metadata=False)\n expected = [(1, [0.0, 1.0, 0.0], 'abc'),\n (1, [1.0, 2.0], 'XYZ+'),\n (0, [2.0, 3.0, 0.0], '!@#$%^&*()')]\n for a, b in zip(expected, i):\n self.assertEqual(a[0], b.flags)\n self.assertEqual(len(a[1]), len(b.location))\n for x, y in zip(a[1], b.location):\n self.assertEqual(x, y)\n self.assertEqual(a[2].encode('utf8'),\n b.metadata)\n\n def test_nodes(self):\n nodes = io.StringIO(\"\"\"\\\n id is_sample time metadata\n 0 1 0 abc\n 1 1 0 XYZ+\n 2 0 1 !@#$%^&*()\n \"\"\")\n n = tskit.parse_nodes(\n nodes, strict=False, encoding='utf8', base64_metadata=False)\n expected = ['abc', 'XYZ+', '!@#$%^&*()']\n for a, b in zip(expected, n):\n self.assertEqual(a.encode('utf8'),\n b.metadata)\n\n def test_sites(self):\n sites = io.StringIO(\"\"\"\\\n position ancestral_state metadata\n 0.1 A abc\n 0.5 C XYZ+\n 0.8 G !@#$%^&*()\n \"\"\")\n s = tskit.parse_sites(\n sites, strict=False, encoding='utf8', base64_metadata=False)\n expected = ['abc', 'XYZ+', '!@#$%^&*()']\n for a, b in zip(expected, s):\n self.assertEqual(a.encode('utf8'),\n b.metadata)\n\n def test_mutations(self):\n mutations = io.StringIO(\"\"\"\\\n site node derived_state metadata\n 0 2 C mno\n 0 3 G )(*&^%$#@!\n \"\"\")\n m = tskit.parse_mutations(\n mutations, strict=False, encoding='utf8', base64_metadata=False)\n expected = ['mno', ')(*&^%$#@!']\n for a, b in zip(expected, m):\n self.assertEqual(a.encode('utf8'),\n b.metadata)\n\n def test_populations(self):\n populations = io.StringIO(\"\"\"\\\n id metadata\n 0 mno\n 1 )(*&^%$#@!\n \"\"\")\n p = tskit.parse_populations(\n populations, strict=False, encoding='utf8', base64_metadata=False)\n expected = ['mno', ')(*&^%$#@!']\n for a, b in zip(expected, p):\n self.assertEqual(a.encode('utf8'),\n b.metadata)\n"
] | [
[
"numpy.array_equal"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
mahnooranjum/Python_Programming | [
"ba251e0e855842112efeb968d06458c60eaf1bd3",
"ba251e0e855842112efeb968d06458c60eaf1bd3",
"ba251e0e855842112efeb968d06458c60eaf1bd3",
"ba251e0e855842112efeb968d06458c60eaf1bd3",
"ba251e0e855842112efeb968d06458c60eaf1bd3"
] | [
"Misc/d3_heatmap.py",
"StatisticalTests_Snippets/U10_PvalWrappers8.py",
"Research_Autocolorization/m15_llandmarks2ab.py",
"Research_Autocolorization/m6_lhog2ab_n5.py",
"Research_Autocolorization/m2_lsift2ab_n7.py"
] | [
"'''\n Mahnoor Anjum\n Python:\n Trivariate Analysis\n'''\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport numpy as np\nimport math\nimport random \nfrom mpl_toolkits.mplot3d import Axes3D\n# sns.set()\n\n\npath = 'data/private/savepath/'\nfilename = 'v3_1'\ngenpath = 'data/private/gen/'\ngenname = 'g3_1'\n\ndata = pd.read_csv(path + filename+'.csv')\ngen = pd.read_csv(genpath + genname + '.csv')\n\nk = 50\ndata = data.sample(k)\n\nx = data['x1']\ny = data['x2']\nz = data['x3']\n\nfig = plt.figure(figsize=(20,20))\n\ndata = pd.DataFrame({'X': x, 'Y': y, 'Z': z})\ndata_pivoted = data.pivot(\"X\", \"Y\", \"Z\")\nax = sns.heatmap(data_pivoted)\nax.set_xlabel('x1')\nax.set_ylabel('x2')\nax.set_xticks([])\nax.set_yticks([])\nax.set_title(str(k)+\"_samples\")\n\n",
"'''\n Implementation: Mahnoor Anjum \n Description:\n Intersection Test\n \n By:\n www.geeksforgeeks.org\n\n \n'''\n\nimport geopandas as gpd\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport seaborn as sns\nfrom scipy import stats\n\ndata = pd.read_csv('data/combined_kmeans25_100.csv')\n\nmethod = 'mannwhitneyu'\ndef custom(a, b):\n v,p = stats.mannwhitneyu(a, b)\n return p\n\ncorr_mat = data.corr(method = custom)\n\nfig, ax = plt.subplots(1,1, figsize = (10,4))\nax = sns.heatmap(corr_mat, cmap = 'YlGnBu', linewidths=.5, annot=True)\nax.set_title(str(method))\nplt.savefig(str(method) + '.png')\n",
"'''\n Author: Mahnoor Anjum\n Description:\n Autocolorization\n Model:\n neighboring pixels\n L + HAARS ----> A, B \n Data preprocessed by:\n https://github.com/Abdullah230\n'''\n\n\n\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport tensorflow as tf\nprint(tf.__version__)\n\ndata = pd.read_csv('data/M15/m.csv')\ncols = list(data.columns)\ncols.remove('a')\ncols.remove('b')\nX_train = data.loc[:, cols]\ny_train = data.loc[:, ['a', 'b']]\n\ndata_test = pd.read_csv('data/M15/test.csv')\nX_test = data_test.loc[:, cols]\ny_test = data_test.loc[:, ['a', 'b']]\n\nprint(X_train.shape)\nprint(y_train.shape)\nprint(X_test.shape)\nprint(y_test.shape)\n\nminiL = X_train.min()\nmaxiL = X_train.max()\nminiAB = y_train.min()\nmaxiAB = y_train.max()\n\nfrom sklearn.preprocessing import StandardScaler\nobj = StandardScaler() \nX_train = obj.fit_transform(X_train) \nX_test = obj.transform(X_test) \n\nobjy = StandardScaler() \ny_train = objy.fit_transform(y_train) \ny_test = objy.transform(y_test) \nY = y_train.shape[1]\n\nN, D = X_train.shape\n\n\nimport tensorflow as tf\nfrom tensorflow.keras.layers import Input, Dense, Dropout\nfrom tensorflow.keras.models import Model\ni_layer = Input(shape = (D,))\n# h_layer = Dropout(0.4)(h_layer)\nh_layer = Dense(16, activation='relu')(i_layer)\nh_layer = Dropout(0.6)(h_layer)\nh_layer = Dense(8, activation='relu')(h_layer)\n#h_layer = Dropout(0.6)(h_layer)\n# h_layer = Dense(256, activation='relu')(h_layer)\no_layer = Dense(Y)(h_layer)\n\nmodel = Model(i_layer, o_layer)\n\nmodel.summary()\n\noptimizer = tf.keras.optimizers.RMSprop(0.001)\n\nmodel.compile(loss='mse',\n optimizer='adam',\n metrics=['mae', 'mse'])\n\nfrom tensorflow.keras.callbacks import EarlyStopping\ncallback = EarlyStopping(patience=3)\n\n#report = model.fit(X_train, y_train, epochs = 10)\nreport = model.fit(X_train, y_train, validation_data=(X_test, y_test), \\\n epochs = 100, batch_size = 64)\n\n\nplt.plot(report.history['loss'], label=\"loss\")\nplt.plot(report.history['val_loss'], label=\"validation_loss\")\nplt.legend()\n\nmodel.save('models/m15_landmarks') \n\n\n# print(\"Train eval: \", model.evaluate(X_train, y_train))\n# print(\"Test eval: \", model.evaluate(X_test, y_test))\n\ny_pred = model.predict(X_test)\ny_pred = objy.inverse_transform(y_pred)\ny_test = objy.inverse_transform(y_test)\nX_test = obj.inverse_transform(X_test)\nprint(y_test.shape)\nprint(y_pred.shape)\n\n\nshape = (64,64,1)\nimageL = X_test[:,264].reshape(shape)\nimagea = y_pred[:,0].reshape(shape)\nimageb = y_pred[:,1].reshape(shape)\n\nimage = np.concatenate((imageL, imagea, imageb), axis=2)\n\nimport cv2\nimageT = cv2.cvtColor(image.astype('float32'), cv2.COLOR_Lab2RGB)\ncv2.imshow('colored',imageT)\ncv2.waitKey(0)\ncv2.destroyAllWindows()\n\n\nimageL = X_test[:,264].reshape(shape)\nimagea = y_test[:,0].reshape(shape)\nimageb = y_test[:,1].reshape(shape)\n\nimage = np.concatenate((imageL, imagea, imageb), axis=2)\nimageT = cv2.cvtColor(image.astype('float32'), cv2.COLOR_Lab2RGB)\ncv2.imshow('original',imageT)\ncv2.waitKey(0)\ncv2.destroyAllWindows()\n\n",
"'''\n Author: Mahnoor Anjum\n Description:\n Autocolorization\n Model:\n neighboring pixels\n L + HoGs ----> A, B \n Data preprocessed by:\n https://github.com/Abdullah230\n'''\n\n\n\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport tensorflow as tf\nprint(tf.__version__)\n\ndata = pd.read_csv('data/M6/m_n5.csv')\ncols = list(data.columns)\ncols.remove('a')\ncols.remove('b')\nX_train = data.loc[:, cols]\ny_train = data.loc[:, ['a', 'b']]\n\ndata_test = pd.read_csv('data/M6/test_n5.csv')\nX_test = data_test.loc[:, cols]\ny_test = data_test.loc[:, ['a', 'b']]\n\nprint(X_train.shape)\nprint(y_train.shape)\nprint(X_test.shape)\nprint(y_test.shape)\n\nminiL = X_train.min()\nmaxiL = X_train.max()\nminiAB = y_train.min()\nmaxiAB = y_train.max()\n\nfrom sklearn.preprocessing import StandardScaler\nobj = StandardScaler() \nX_train = obj.fit_transform(X_train) \nX_test = obj.transform(X_test) \n\nobjy = StandardScaler() \ny_train = objy.fit_transform(y_train) \ny_test = objy.transform(y_test) \nY = y_train.shape[1]\n\nN, D = X_train.shape\n\n\nimport tensorflow as tf\nfrom tensorflow.keras.layers import Input, Dense, Dropout\nfrom tensorflow.keras.models import Model\ni_layer = Input(shape = (D,))\n# h_layer = Dropout(0.4)(h_layer)\nh_layer = Dense(64, activation='relu')(i_layer)\n#h_layer = Dropout(0.6)(h_layer)\nh_layer = Dense(64, activation='relu')(h_layer)\n#h_layer = Dropout(0.6)(h_layer)\n# h_layer = Dense(256, activation='relu')(h_layer)\no_layer = Dense(Y)(h_layer)\n\nmodel = Model(i_layer, o_layer)\n\nmodel.summary()\n\noptimizer = tf.keras.optimizers.RMSprop(0.001)\n\nmodel.compile(loss='mse',\n optimizer='adam',\n metrics=['mae', 'mse'])\n\n\n#report = model.fit(X_train, y_train, epochs = 10)\nreport = model.fit(X_train, y_train, validation_data=(X_test, y_test), \\\n epochs = 20)\n\n\nplt.plot(report.history['loss'], label=\"loss\")\nplt.plot(report.history['val_loss'], label=\"validation_loss\")\nplt.legend()\n\nmodel.save('models/m6_hog_n5') \n\n\nprint(\"Train eval: \", model.evaluate(X_train, y_train))\nprint(\"Test eval: \", model.evaluate(X_test, y_test))\n\ny_pred = model.predict(X_test)\ny_pred = objy.inverse_transform(y_pred)\ny_test = objy.inverse_transform(y_test)\nX_test = obj.inverse_transform(X_test)\nprint(y_test.shape)\nprint(y_pred.shape)\n\n\nshape = (174,142,1)\nimageL = X_test[:,0].reshape(shape)\nimagea = y_pred[:,0].reshape(shape)\nimageb = y_pred[:,1].reshape(shape)\n\nimage = np.concatenate((imageL, imagea, imageb), axis=2)\n\nimport cv2\nimageT = cv2.cvtColor(image.astype('float32'), cv2.COLOR_Lab2RGB)\ncv2.imshow('colored',imageT)\ncv2.waitKey(0)\ncv2.destroyAllWindows()\n\n\nimageL = X_test[:,0].reshape(shape)\nimagea = y_test[:,0].reshape(shape)\nimageb = y_test[:,1].reshape(shape)\n\nimage = np.concatenate((imageL, imagea, imageb), axis=2)\nimageT = cv2.cvtColor(image.astype('float32'), cv2.COLOR_Lab2RGB)\ncv2.imshow('original',imageT)\ncv2.waitKey(0)\ncv2.destroyAllWindows()\n\n",
"'''\n Author: Mahnoor Anjum\n Description:\n Autocolorization\n Model:\n neighboring pixels\n L + SIFT ----> A, B \n Data preprocessed by:\n https://github.com/Abdullah230\n'''\n\n\n\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport tensorflow as tf\nprint(tf.__version__)\n\ndata = pd.read_csv('data/M2/m_n7.csv')\ncols = list(data.columns)\ncols.remove('a')\ncols.remove('b')\nX_train = data.loc[:, cols]\ny_train = data.loc[:, ['a', 'b']]\n\ndata_test = pd.read_csv('data/M2/test_n7_1.csv')\nX_test = data_test.loc[:, cols]\ny_test = data_test.loc[:, ['a', 'b']]\n\nprint(X_train.shape)\nprint(y_train.shape)\nprint(X_test.shape)\nprint(y_test.shape)\n\nminiL = X_train.min()\nmaxiL = X_train.max()\nminiAB = y_train.min()\nmaxiAB = y_train.max()\n\nfrom sklearn.preprocessing import StandardScaler\nobj = StandardScaler() \nX_train = obj.fit_transform(X_train) \nX_test = obj.transform(X_test) \n\nobjy = StandardScaler() \ny_train = objy.fit_transform(y_train) \ny_test = objy.transform(y_test) \nY = y_train.shape[1]\n\nN, D = X_train.shape\n\n\nimport tensorflow as tf\nfrom tensorflow.keras.layers import Input, Dense\nfrom tensorflow.keras.models import Model\ni_layer = Input(shape = (D,))\nh_layer = Dense(32, activation='relu')(i_layer)\nh_layer = Dense(64, activation='relu')(h_layer)\nh_layer = Dense(128, activation='relu')(h_layer)\nh_layer = Dense(128, activation='relu')(h_layer)\no_layer = Dense(Y)(h_layer)\n\nmodel = Model(i_layer, o_layer)\n\nmodel.summary()\n\noptimizer = tf.keras.optimizers.RMSprop(0.01)\n\nmodel.compile(loss='mse',\n optimizer='adam',\n metrics=['mae', 'mse'])\n\n\n#report = model.fit(X_train, y_train, epochs = 10)\nreport = model.fit(X_train, y_train, validation_data=(X_test, y_test), \\\n epochs = 50)\n\n\nplt.plot(report.history['loss'], label=\"loss\")\nplt.plot(report.history['val_loss'], label=\"validation_loss\")\nplt.legend()\n\nmodel.save('models/m2_sift_n7') \n\nprint(\"Train eval: \", model.evaluate(X_train, y_train))\nprint(\"Test eval: \", model.evaluate(X_test, y_test))\n\ny_pred = model.predict(X_test)\ny_pred = objy.inverse_transform(y_pred)\ny_test = objy.inverse_transform(y_test)\nX_test = obj.inverse_transform(X_test)\nprint(y_test.shape)\nprint(y_pred.shape)\n\n\nshape = (174,142,1)\nimageL = X_test[:,0].reshape(shape)\nimagea = y_pred[:,0].reshape(shape)\nimageb = y_pred[:,1].reshape(shape)\n\nimage = np.concatenate((imageL, imagea, imageb), axis=2)\n\nimport cv2\nimageT = cv2.cvtColor(image.astype('float32'), cv2.COLOR_Lab2RGB)\ncv2.imshow('colored',imageT)\ncv2.waitKey(0)\ncv2.destroyAllWindows()\n\n\nimageL = X_test[:,0].reshape(shape)\nimagea = y_test[:,0].reshape(shape)\nimageb = y_test[:,1].reshape(shape)\n\nimage = np.concatenate((imageL, imagea, imageb), axis=2)\nimageT = cv2.cvtColor(image.astype('float32'), cv2.COLOR_Lab2RGB)\ncv2.imshow('original',imageT)\ncv2.waitKey(0)\ncv2.destroyAllWindows()\n\n"
] | [
[
"pandas.read_csv",
"pandas.DataFrame",
"matplotlib.pyplot.figure"
],
[
"scipy.stats.mannwhitneyu",
"pandas.read_csv",
"matplotlib.pyplot.subplots"
],
[
"matplotlib.pyplot.legend",
"pandas.read_csv",
"tensorflow.keras.models.Model",
"tensorflow.keras.layers.Dense",
"tensorflow.keras.optimizers.RMSprop",
"matplotlib.pyplot.plot",
"numpy.concatenate",
"tensorflow.keras.layers.Dropout",
"sklearn.preprocessing.StandardScaler",
"tensorflow.keras.callbacks.EarlyStopping",
"tensorflow.keras.layers.Input"
],
[
"matplotlib.pyplot.legend",
"pandas.read_csv",
"tensorflow.keras.models.Model",
"tensorflow.keras.layers.Dense",
"tensorflow.keras.optimizers.RMSprop",
"matplotlib.pyplot.plot",
"numpy.concatenate",
"sklearn.preprocessing.StandardScaler",
"tensorflow.keras.layers.Input"
],
[
"matplotlib.pyplot.legend",
"pandas.read_csv",
"tensorflow.keras.models.Model",
"tensorflow.keras.layers.Dense",
"tensorflow.keras.optimizers.RMSprop",
"matplotlib.pyplot.plot",
"numpy.concatenate",
"sklearn.preprocessing.StandardScaler",
"tensorflow.keras.layers.Input"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.3",
"1.1",
"1.5",
"1.2"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [
"0.13",
"1.6",
"0.14",
"1.10",
"0.15",
"1.4",
"0.16",
"1.9",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"0.12",
"1.0",
"0.17",
"1.3",
"1.8"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": [
"2.7",
"2.6",
"2.4",
"2.3",
"2.5",
"2.2"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": [
"2.7",
"2.6",
"2.4",
"2.3",
"2.5",
"2.2"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": [
"2.7",
"2.6",
"2.4",
"2.3",
"2.5",
"2.2"
]
}
] |
mtzgroup/aimsprop | [
"464d88ad7a817da73027fd2ab7b12476bf59f83d",
"464d88ad7a817da73027fd2ab7b12476bf59f83d"
] | [
"aimsprop/pes.py",
"aimsprop/iam/diffraction.py"
] | [
"import numpy as np\n\nfrom .bundle import Bundle\n\n\ndef compute_pes(\n bundle: Bundle,\n carrier_frequency: float,\n alpha: float,\n eKT: np.ndarray,\n) -> Bundle:\n\n \"\"\"Compute the simple photoelectron spectroscopy, with Guassian blurring\n\n User is responsible for calculating and assigning properties to the bundle frames:\n Dyson Orbitals\n Ionization Potential (IP)\n\n Params:\n bundle: the Bundle object to compute the property for (modified in\n place)\n carrier_frequency: experimental probe pulse carrier frequency (hbar*omega)\n alpha: the Guassian blurring exponent\n eKT: electron energies\n\n Return:\n bundle: reference to the input Bundle object. The property\n key \"pes\" is set to computed PES property.\n \"\"\"\n\n for frame in bundle.frames:\n IPs = frame.properties[\"IP\"]\n dyson_norms = frame.properties[\"dyson_norms\"]\n pes = np.zeros_like(eKT)\n for ind, (state, IP) in enumerate(IPs):\n dyson_norm = dyson_norms[np.where(dyson_norms[:, 0] == state), 1][0]\n pes += (\n dyson_norm\n * np.sqrt(alpha / np.pi)\n * np.exp(-alpha * (carrier_frequency - IP - eKT) ** 2)\n )\n frame.properties[\"pes\"] = pes\n\n return bundle\n",
"import numpy as np\n\nfrom . import formfactor, rotation\n\n\ndef compute_diffraction(\n bundle,\n key,\n s,\n eta,\n L,\n nlebedev=74,\n nomega=12,\n mode=\"xray\",\n form=\"raw\",\n anisotropy=\"cos2\",\n print_level=False,\n):\n\n \"\"\"Compute the I(s, eta) elastic scattering signal for a Bundle.\n See aimsprop/notes/ued for details on this property.\n\n Notes:\n * All frames for each initial condition (IC) in bundle should be aligned so\n that the transition dipole moment from S0 -> Sex at t=0 is on z. This\n is required for proper computation of anisotropy.\n * All frames should be weighted by geometric considerations at the IC\n (e.g., conformational wells, Wigner weights, etc), by the cross\n section for the optical transition at the IC (e.g., oscillator\n strength and excitation energy window), and by the frame weight due\n to non-adiabatic dynamics.\n\n Params:\n bundle (Bundle) - the Bundle object to compute the property for (modified in\n place)\n key (str) - the name of the property.\n s (np.ndarray) - list of scattering vector norms in Angstrom^-1. The\n relationship between s and theta (scattering angle) is given as,\n s = 4 pi / L * sin(theta / 2).\n eta (np.ndarray) - list of azimuthal scattering angles in radians.\n L (float) - effective wavelength of scattering particle (x-ray\n wavelength or UED deBroglie wavelength) in Angstrom. Used to\n convert through scattering angle theta.\n nlebedev (int) - Lebedev number to use for solid angle orientation\n quadrature.\n nomega (int) - number of uniform quadrature points to use for plane\n orientation quadrature.\n mode (str) - 'xray' or 'ued' for selection of form factors\n form (str) - 'raw' or 'mod' for modified/raw diffraction intensities\n I(s) or M(s).\n anisotropy (str) - 'none' or 'cos2' for isotropic of cos^2 (z)\n anisotropty.\n print_level (bool) - print progress if true (useful to track long\n property computations)\n Result/Return:\n bundle - reference to the input Bundle object. 
The properties\n \"\"\"\n\n # Validity checks\n if mode not in [\"xray\", \"ued\"]:\n raise ValueError(\"Unknown mode: %s\" % mode)\n if form not in [\"raw\", \"mod\"]:\n raise ValueError(\"Unknown form: %s\" % form)\n if anisotropy not in [\"none\", \"cos2\"]:\n raise ValueError(\"Unknown anisotropy: %s\" % anisotropy)\n\n # Compute scattering angles via Bragg equation\n theta = 2.0 * np.arcsin(s * L / (4.0 * np.pi))\n tt, ee = np.meshgrid(theta, eta, indexing=\"ij\")\n ss, ee = np.meshgrid(s, eta, indexing=\"ij\")\n # Compute scattering vectors\n sx = ss * np.cos(tt / 2.0) * np.sin(ee)\n sy = ss * np.sin(tt / 2.0)\n sz = ss * np.cos(tt / 2.0) * np.cos(ee)\n\n # Get a rotation quadrature for the orientations of the frames\n if nlebedev == 1 and nomega == 1:\n # Fixed orientation\n Rs = [np.eye(3)]\n ws = [1.0]\n else:\n # Rotation quadrature\n Rs, ws = rotation.rotation_quadrature(nlebedev=nlebedev, nomega=nomega)\n\n # Get atomic form factors for appropriate x-ray/ued mode\n factors = formfactor.AtomicFormFactor.build_factors(bundle.frames[0], mode=mode)\n\n # Compute atomic scattering Iat\n D = np.zeros_like(sx)\n for A, factor in enumerate(factors):\n F = factor.evaluate_N(qx=sx, qy=sy, qz=sz, x=0.0, y=0.0, z=0.0)\n D += (np.abs(F) ** 2).real\n\n # Compute IAM scattering, integrating over all orientation angles\n for find, frame in enumerate(bundle.frames):\n if print_level:\n print((\"Frame %5d of %5d\" % (find, len(bundle.frames))))\n I = np.zeros_like(sx)\n for R, w in zip(Rs, ws):\n # cos(z)^2 pump anisotropy\n cos2 = R[2, 2] ** 2 if anisotropy == \"cos2\" else 1.0\n # Rotated molecule\n xyz = np.dot(frame.xyz, R)\n # Compute diffraction\n N = np.zeros_like(I, dtype=complex)\n for A, factor in enumerate(factors):\n x = xyz[A, 0]\n y = xyz[A, 1]\n z = xyz[A, 2]\n N += factor.evaluate_N(qx=sx, qy=sy, qz=sz, x=x, y=y, z=z)\n F = (np.abs(N) ** 2).real\n if form == \"mod\":\n F = (F - D) / D\n I += w * cos2 * F\n frame.properties[key] = I\n\n return bundle\n\n\ndef compute_diffraction_fast(\n bundle,\n key,\n s,\n eta,\n L,\n nlebedev=74,\n nomega=12,\n mode=\"xray\",\n form=\"raw\",\n anisotropy=\"cos2\",\n print_level=False,\n):\n\n \"\"\"Compute the I(s, eta) elastic scattering signal for a Bundle.\n See aimsprop/notes/ued for details on this property.\n\n Notes:\n * All frames for each initial condition (IC) in bundle should be aligned so\n that the transition dipole moment from S0 -> Sex at t=0 is on z. This\n is required for proper computation of anisotropy.\n * All frames should be weighted by geometric considerations at the IC\n (e.g., conformational wells, Wigner weights, etc), by the cross\n section for the optical transition at the IC (e.g., oscillator\n strength and excitation energy window), and by the frame weight due\n to non-adiabatic dynamics.\n\n Params:\n bundle (Bundle) - the Bundle object to compute the property for (modified in\n place)\n key (str) - the name of the property.\n s (np.ndarray) - list of scattering vector norms in Angstrom^-1. The\n relationship between s and theta (scattering angle) is given as,\n s = 4 pi / L * sin(theta / 2).\n eta (np.ndarray) - list of azimuthal scattering angles in radians.\n L (float) - effective wavelength of scattering particle (x-ray\n wavelength or UED deBroglie wavelength) in Angstrom. 
Used to\n convert through scattering angle theta.\n nlebedev (int) - Lebedev number to use for solid angle orientation\n quadrature.\n nomega (int) - number of uniform quadrature points to use for plane\n orientation quadrature.\n mode (str) - 'xray' or 'ued' for selection of form factors\n form (str) - 'raw' or 'mod' for modified/raw diffraction intensities\n I(s) or M(s).\n anisotropy (str) - 'none' or 'cos2' for isotropic of cos^2 (z)\n anisotropty.\n print_level (bool) - print progress if true (useful to track long\n property computations)\n Result/Return:\n bundle - reference to the input Bundle object. The properties\n \"\"\"\n\n # Validity checks\n if mode not in [\"xray\", \"ued\"]:\n raise ValueError(\"Unknown mode: %s\" % mode)\n if form not in [\"raw\", \"mod\"]:\n raise ValueError(\"Unknown form: %s\" % form)\n if anisotropy not in [\"none\", \"cos2\"]:\n raise ValueError(\"Unknown anisotropy: %s\" % anisotropy)\n\n # Get a rotation quadrature for the orientations of the frames\n if nlebedev == 1 and nomega == 1:\n # Fixed orientation\n Rs = [np.eye(3)]\n ws = [1.0]\n else:\n # Rotation quadrature\n Rs, ws = rotation.rotation_quadrature(nlebedev=nlebedev, nomega=nomega)\n\n # Get atomic form factors for appropriate x-ray/ued mode\n factors = formfactor.AtomicFormFactor.build_factors(bundle.frames[0], mode=mode)\n\n import lightspeed as ls\n\n from . import ext\n\n s2s = ls.Tensor.array(s)\n eta2s = ls.Tensor.array(eta)\n\n R2s = ls.Tensor.zeros((len(Rs), 3, 3))\n for (\n Rind,\n R,\n ) in enumerate(Rs):\n R2s[Rind, :, :] = R\n w2s = ls.Tensor.array(ws)\n\n fA = ls.Tensor.zeros((len(factors), s.size))\n for A, factor in enumerate(factors):\n fA[A, :] = factor.evaluate(qx=0.0, qy=0.0, qz=s)\n\n # Compute IAM scattering, integrating over all orientation angles\n for find, frame in enumerate(bundle.frames):\n if print_level:\n print((\"Frame %5d of %5d\" % (find, len(bundle.frames))))\n xyz = ls.Tensor.array(frame.xyz)\n I = ext.compute_diffraction(\n L,\n s2s,\n eta2s,\n xyz,\n fA,\n R2s,\n w2s,\n True if anisotropy == \"cos2\" else False,\n True if form == \"mod\" else False,\n )\n frame.properties[key] = np.array(I)\n\n return bundle\n\n\ndef compute_diffraction_moments_fast(\n bundle,\n key,\n s,\n L,\n nlebedev=74,\n nomega=12,\n mode=\"xray\",\n form=\"raw\",\n anisotropy=\"cos2\",\n print_level=False,\n):\n\n \"\"\"Compute the I(s, eta) elastic scattering moments for a Bundle.\n See aimsprop/notes/ued for details on this property.\n\n Notes:\n * All frames for each initial condition (IC) in bundle should be aligned so\n that the transition dipole moment from S0 -> Sex at t=0 is on z. This\n is required for proper computation of anisotropy.\n * All frames should be weighted by geometric considerations at the IC\n (e.g., conformational wells, Wigner weights, etc), by the cross\n section for the optical transition at the IC (e.g., oscillator\n strength and excitation energy window), and by the frame weight due\n to non-adiabatic dynamics.\n\n Params:\n bundle (Bundle) - the Bundle object to compute the property for (modified in\n place)\n key (str) - the name of the property.\n s (np.ndarray) - list of scattering vector norms in Angstrom^-1. The\n relationship between s and theta (scattering angle) is given as,\n s = 4 pi / L * sin(theta / 2).\n L (float) - effective wavelength of scattering particle (x-ray\n wavelength or UED deBroglie wavelength) in Angstrom. 
Used to\n convert through scattering angle theta.\n nlebedev (int) - Lebedev number to use for solid angle orientation\n quadrature.\n nomega (int) - number of uniform quadrature points to use for plane\n orientation quadrature.\n mode (str) - 'xray' or 'ued' for selection of form factors\n form (str) - 'raw' or 'mod' for modified/raw diffraction intensities\n I(s) or M(s).\n anisotropy (str) - 'none' or 'cos2' for isotropic of cos^2 (z)\n anisotropty.\n print_level (bool) - print progress if true (useful to track long\n property computations)\n Result/Return:\n bundle - reference to the input Bundle object. The properties \"key-0\"\n and \"key-2\" are added to each frame of the Bundle.\n \"\"\"\n\n # Validity checks\n if mode not in [\"xray\", \"ued\"]:\n raise ValueError(\"Unknown mode: %s\" % mode)\n if form not in [\"raw\", \"mod\"]:\n raise ValueError(\"Unknown form: %s\" % form)\n if anisotropy not in [\"none\", \"cos2\"]:\n raise ValueError(\"Unknown anisotropy: %s\" % anisotropy)\n\n # Special angles to collocate\n eta = np.array([0.0, np.pi / 2.0])\n\n # Get a rotation quadrature for the orientations of the frames\n if nlebedev == 1 and nomega == 1:\n # Fixed orientation\n Rs = [np.eye(3)]\n ws = [1.0]\n else:\n # Rotation quadrature\n Rs, ws = rotation.rotation_quadrature(nlebedev=nlebedev, nomega=nomega)\n\n # Get atomic form factors for appropriate x-ray/ued mode\n factors = formfactor.AtomicFormFactor.build_factors(bundle.frames[0], mode=mode)\n\n import lightspeed as ls\n\n from . import ext\n\n s2s = ls.Tensor.array(s)\n eta2s = ls.Tensor.array(eta)\n\n R2s = ls.Tensor.zeros((len(Rs), 3, 3))\n for (\n Rind,\n R,\n ) in enumerate(Rs):\n R2s[Rind, :, :] = R\n w2s = ls.Tensor.array(ws)\n\n fA = ls.Tensor.zeros((len(factors), s.size))\n for A, factor in enumerate(factors):\n fA[A, :] = factor.evaluate(qx=0.0, qy=0.0, qz=s)\n\n # Compute IAM scattering, integrating over all orientation angles\n for find, frame in enumerate(bundle.frames):\n if print_level:\n print((\"Frame %5d of %5d\" % (find, len(bundle.frames))))\n xyz = ls.Tensor.array(frame.xyz)\n I = ext.compute_diffraction(\n L,\n s2s,\n eta2s,\n xyz,\n fA,\n R2s,\n w2s,\n True if anisotropy == \"cos2\" else False,\n True if form == \"mod\" else False,\n )\n # Moment computation\n I0 = 0.5 * (I[:, 0] + I[:, 1])\n I1 = 0.5 * (I[:, 0] - I[:, 1])\n frame.properties[\"%s-0\" % key] = I0\n frame.properties[\"%s-2\" % key] = I1\n\n return bundle\n\n\ndef compute_diffraction_from_moments(\n bundle,\n key,\n eta,\n):\n\n for find, frame in enumerate(bundle.frames):\n I = np.outer(frame.properties[\"%s-0\" % key], np.cos(0 * eta)) + np.outer(\n frame.properties[\"%s-2\" % key], np.cos(2 * eta)\n )\n frame.properties[key] = I\n\n return bundle\n\n\ndef compute_diffraction_moments_analytical(\n bundle,\n key,\n s,\n L,\n mode=\"xray\",\n form=\"raw\",\n anisotropy=\"perpendicular\",\n print_level=False,\n):\n\n \"\"\"Compute the I(s, eta) elastic scattering moments for a Bundle.\n See aimsprop/notes/ued for details on this property.\n\n Notes:\n * All frames for each initial condition (IC) in bundle should be aligned so\n that the transition dipole moment from S0 -> Sex at t=0 is on z. 
This\n is required for proper computation of anisotropy.\n * All frames should be weighted by geometric considerations at the IC\n (e.g., conformational wells, Wigner weights, etc), by the cross\n section for the optical transition at the IC (e.g., oscillator\n strength and excitation energy window), and by the frame weight due\n to non-adiabatic dynamics.\n\n Params:\n bundle (Bundle) - the Bundle object to compute the property for (modified in\n place)\n key (str) - the name of the property.\n s (np.ndarray) - list of scattering vector norms in Angstrom^-1. The\n relationship between s and theta (scattering angle) is given as,\n s = 4 pi / L * sin(theta / 2).\n L (float) - effective wavelength of scattering particle (x-ray\n wavelength or UED deBroglie wavelength) in Angstrom. Used to\n convert through scattering angle theta.\n nlebedev (int) - Lebedev number to use for solid angle orientation\n quadrature.\n nomega (int) - number of uniform quadrature points to use for plane\n orientation quadrature.\n mode (str) - 'xray' or 'ued' for selection of form factors\n form (str) - 'raw' or 'mod' for modified/raw diffraction intensities\n I(s) or M(s).\n anisotropy (str) - 'none' or 'perpendicular' or 'parallel'\n print_level (bool) - print progress if true (useful to track long\n property computations)\n Result/Return:\n bundle - reference to the input Bundle object. The properties \"key-0\"\n and \"key-2\" are added to each frame of the Bundle.\n \"\"\"\n\n # Validity checks\n if mode not in [\"xray\", \"ued\"]:\n raise ValueError(\"Unknown mode: %s\" % mode)\n if form not in [\"raw\", \"mod\"]:\n raise ValueError(\"Unknown form: %s\" % form)\n if anisotropy not in [\"none\", \"perpendicular\", \"parallel\"]:\n raise ValueError(\"Unknown anisotropy: %s\" % anisotropy)\n\n # Compute scattering angles via Bragg equation\n theta = 2.0 * np.arcsin(s * L / (4.0 * np.pi))\n\n # Get atomic form factors for appropriate x-ray/ued mode\n factors = formfactor.AtomicFormFactor.build_factors(bundle.frames[0], mode=mode)\n\n # Collocate the atomic form factors\n f = np.zeros((len(factors), s.size))\n for A, factor in enumerate(factors):\n f[A, :] = factor.evaluate(qx=0.0, qy=0.0, qz=s)\n\n # Compute atomic scattering Iat\n D = np.zeros_like(s)\n for A, factor in enumerate(factors):\n D += f[A, :] ** 2\n\n # Selection fraction\n F = 1.0 if anisotropy == \"isotropic\" else 1.0 / 3.0\n\n # Pairs to include\n ABpairs = []\n for A in range(bundle.frames[0].xyz.shape[0]):\n for B in range(bundle.frames[0].xyz.shape[0]):\n if A >= B:\n continue\n ABpairs.append((A, B))\n\n # Diffraction moment computation\n for find, frame in enumerate(bundle.frames):\n if print_level:\n print((\"Frame %5d of %5d\" % (find, len(bundle.frames))))\n # Geometry\n xyz = frame.xyz\n # Target\n I0 = np.zeros_like(s)\n I2 = np.zeros_like(s)\n for A, B in ABpairs:\n # Geometry\n d = xyz[A, :] - xyz[B, :]\n r2 = np.sum(d ** 2)\n r = np.sqrt(r2)\n sg2 = np.sum(d[:2] ** 2) / r2\n sr = s * r\n # Bessel functions\n J0 = np.sin(sr) / sr\n J0[sr == 0.0] = 1.0\n J1sr = np.sin(sr) / sr ** 3 - np.cos(sr) / sr ** 2\n J1sr[sr == 0.0] = 1.0 / 3.0\n J2 = (3.0 / sr ** 2 - 1.0) * np.sin(sr) / sr - 3.0 * np.cos(sr) / sr ** 2\n J2[sr == 0.0] = 0.0\n # Kernels\n if anisotropy == \"isotropic\":\n I0 += 2.0 * f[A, :] * f[B, :] * J0\n elif anisotropy == \"perpendicular\":\n Iz = (\n 2.0\n * f[A, :]\n * f[B, :]\n * (\n J1sr\n - (sg2 + (2.0 - 3.0 * sg2) * np.cos(0.5 * theta) ** 2)\n * J2\n / 2.0\n )\n )\n Ix = 2.0 * f[A, :] * f[B, :] * (J1sr - (sg2) * J2 / 
2.0)\n I0 += 0.5 * (Iz + Ix)\n I2 += 0.5 * (Iz - Ix)\n elif anisotropy == \"parallel\":\n I0 += (\n 2.0\n * f[A, :]\n * f[B, :]\n * (\n J1sr\n - (sg2 + (2.0 - 3.0 * sg2) * np.sin(0.5 * theta) ** 2)\n * J2\n / 2.0\n )\n )\n # Modified detector pattern\n if form == \"raw\":\n I0 += F * D\n if form == \"mod\":\n I0 /= D\n I2 /= D\n # Placement\n frame.properties[\"%s-0\" % (key)] = I0\n frame.properties[\"%s-2\" % (key)] = I2\n\n return bundle\n\n\n# TODO: These are deprecated, as they are not fully correct for elastic scattering\n# def compute_diffraction_moments(\n# bundle,\n# key,\n# q,\n# factors,\n# nlebedev,\n# nlebedev2,\n# nomega2,\n# nlegendre=2,\n# print_level=False,\n# ):\n#\n# \"\"\" Compute the IAM X-Ray Diffraction or UED moments property. See\n# aimsprop/notes/xray for details on these moments.\n#\n# Notes:\n# * All frames for each initial condition (IC) in bundle should be aligned so\n# that the transition dipole moment from S0 -> Sex at t=0 is on z. This\n# is required for proper computation of I2 (I0 is invariant to this).\n# * All frames should be weighted by geometric considerations at the IC\n# (e.g., conformational wells, Wigner weights, etc), by the cross\n# section for the optical transition at the IC (e.g., oscillator\n# strength and excitation energy window), and by the frame weight due\n# to non-adiabatic dynamics.\n#\n# Params:\n# bundle (Bundle) - the Bundle object to compute the property for (modified in\n# place)\n# key (str) - the name of the property.\n# q (np.ndarray) - the 1d array of |q| values to collocate the\n# diffraction signal to.\n# factors (list of AtomicFormFactor) - the list of AtomicFormFactor\n# objects. The choice of xray/ued is made by the \"mode\" field of each\n# AtomicFormFactor.\n# nlebedev (int) - a Lebedev number for the number of grid points to use\n# for angular integration of the Legendre coefficients.\n# nlebedev2 (int) - a Lebedev number for the number of grid points to use\n# for angular sampling in the Omega angle (surface angle).\n# nomega2 (int) - the number of grid points to use for angular sampling\n# in the omega angle (surface orientation).\n# nlegendre (even int) - the maximum Legendre polynomial order to\n# evaluate (usually 0 and 2 are the only Legendre polynomial\n# coefficients with any signal).\n# print_level (bool) - print progress if true (useful to track long\n# property computations)\n# Result/Return:\n# bundle - reference to the input Bundle object. 
The properties\n# key-l are set where l is [0, 2, ..., nlegendre].\n# \"\"\"\n# if nlegendre % 2: raise ValueError('Can only ask for even Legendre functions')\n#\n# # Lebedev grid for S(\\vec q)\n# leb = lebedev.Lebedev.build(nlebedev)\n# # Direct product grid for \\vec q\n# qx = np.outer(q, leb.x)\n# qy = np.outer(q, leb.y)\n# qz = np.outer(q, leb.z)\n#\n# # Rotation quadrature\n# Rs, ws = rotation.rotation_quadrature(nomega=nomega2, nlebedev=nlebedev2)\n#\n# for find, frame in enumerate(bundle.frames):\n# if print_level:\n# print 'Frame %5d of %5d' % (find, len(bundle.frames))\n# # Compute N(\\vec q) = \\sum_{A} f_A (\\vec q) * \\exp(-1.j * \\vec q * \\vec r)\n# N = np.zeros((len(q), len(leb.x)), dtype=complex)\n# for A, factor in enumerate(factors):\n# x = frame.xyz[A,0]\n# y = frame.xyz[A,1]\n# z = frame.xyz[A,2]\n# N += factor.evaluate_N(qx=qx,qy=qy,qz=qz,x=x,y=y,z=z)\n# # Compute I(\\vec q) = N(\\vec q)**2 for this frame\n# I = (np.abs(N)**2).real\n#\n# # Do angle integration\n# Ils = { l: np.zeros((len(q),)) for l in range(0, nlegendre+1, 2) }\n# qxyz = leb.xyz\n# for R2, w2 in zip(Rs, ws):\n# # Account for cos(z)^2 weight\n# cos2 = np.sum(R2[:,2])**2\n# # Rotate the lebedev grid\n# qxyz2 = np.dot(qxyz, R2)\n# qx2 = qxyz2[:,0]\n# qy2 = qxyz2[:,1]\n# qz2 = qxyz2[:,2]\n# # Weight by zonal harmonics (evaluated in rotated grid)\n# Y = legendre.zonal2(qz2, nlegendre)\n# for l in range(0, nlegendre+1, 2):\n# Ils[l] += w2 * cos2 * np.einsum('qw,w->q', I, leb.w * Y[l/2, :])\n#\n# # Assign the properties to frame\n# for l in range(0, nlegendre+1, 2):\n# frame.properties['%s-%d' % (key, l)] = Ils[l]\n#\n# return bundle\n#\n# def compute_diffraction_moment0(\n# bundle,\n# key,\n# q,\n# factors,\n# nlebedev,\n# print_level=False,\n# ):\n#\n# \"\"\" Compute the IAM X-Ray Diffraction or UED moment property for only l=0\n# (faster due to lack of rotation quadratures). See aimsprop/notes/xray for\n# details on this moment\n#\n# Notes:\n# * This moment is invariant to the orientation of the frames, so alignment to the IC\n# * All frames should be weighted by geometric considerations at the IC\n# (e.g., conformational wells, Wigner weights, etc), by the cross\n# section for the optical transition at the IC (e.g., oscillator\n# strength and excitation energy window), and by the frame weight due\n# to non-adiabatic dynamics.\n#\n# Params:\n# bundle (Bundle) - the Bundle object to compute the property for (modified in\n# place)\n# key (str) - the name of the property.\n# q (np.ndarray) - the 1d array of |q| values to collocate the\n# diffraction signal to.\n# factors (list of AtomicFormFactor) - the list of AtomicFormFactor\n# objects. The choice of xray/ued is made by the \"mode\" field of each\n# AtomicFormFactor.\n# nlebedev (int) - a Lebedev number for the number of grid points to use\n# for angular integration of the Legendre coefficients.\n# print_level (bool) - print progress if true (useful to track long\n# property computations)\n# Result/Return:\n# bundle - reference to the input Bundle object. 
The property\n# key-0 is set\n# \"\"\"\n#\n# # Lebedev grid for S(\\vec q)\n# leb = lebedev.Lebedev.build(nlebedev)\n# # Direct product grid for \\vec q\n# qx = np.outer(q, leb.x)\n# qy = np.outer(q, leb.y)\n# qz = np.outer(q, leb.z)\n#\n# for find, frame in enumerate(bundle.frames):\n# if print_level:\n# print 'Frame %5d of %5d' % (find, len(bundle.frames))\n# # Compute N(\\vec q) = \\sum_{A} f_A (\\vec q) * \\exp(-1.j * \\vec q * \\vec r)\n# N = np.zeros((len(q), len(leb.x)), dtype=complex)\n# for A, factor in enumerate(factors):\n# x = frame.xyz[A,0]\n# y = frame.xyz[A,1]\n# z = frame.xyz[A,2]\n# N += factor.evaluate_N(qx=qx,qy=qy,qz=qz,x=x,y=y,z=z)\n# # Compute I(\\vec q) = N(\\vec q)**2 for this frame\n# I = (np.abs(N)**2).real\n# # Compute Y00 (for common normalization)\n# Y00 = legendre.zonal2(leb.z, 0)[0]\n# # Integrate over S(2)\n# I0 = np.einsum('qw,w->q', I, leb.w * Y00)\n# # Assign the property to frame\n# frame.properties['%s-%d' % (key, 0)] = I0\n#\n# return bundle\n"
] | [
[
"numpy.where",
"numpy.exp",
"numpy.zeros_like",
"numpy.sqrt"
],
[
"numpy.dot",
"numpy.sqrt",
"numpy.meshgrid",
"numpy.arcsin",
"numpy.abs",
"numpy.eye",
"numpy.cos",
"numpy.sin",
"numpy.zeros_like",
"numpy.array",
"numpy.sum"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
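
The record above documents routines that convert between the scattering-vector norm `s` and the scattering angle `theta` via `s = 4*pi/L * sin(theta/2)`, collocate the anisotropic signal at `eta = 0` and `eta = pi/2`, and rebuild `I(s, eta)` from the `key-0`/`key-2` moments. The following is a minimal NumPy sketch of just those relations, not part of the dataset record; the function names, the sample `s` grid, and the wavelength value are illustrative assumptions.

```python
# Minimal sketch (illustrative, not the aimsprop API): the s <-> theta conversion
# and the eta-moment (de)composition described in the record above.
import numpy as np

def s_to_theta(s, L):
    """Scattering angle theta from |s| (Angstrom^-1) using s = 4*pi/L * sin(theta/2)."""
    return 2.0 * np.arcsin(s * L / (4.0 * np.pi))

def moments_from_collocation(I_eta0, I_eta90):
    """I0/I2 moments from I(s, eta) collocated at eta = 0 and eta = pi/2."""
    I0 = 0.5 * (I_eta0 + I_eta90)
    I2 = 0.5 * (I_eta0 - I_eta90)
    return I0, I2

def intensity_from_moments(I0, I2, eta):
    """Reassemble I(s, eta) = I0 * cos(0*eta) + I2 * cos(2*eta) on a detector grid."""
    return np.outer(I0, np.cos(0 * eta)) + np.outer(I2, np.cos(2 * eta))

if __name__ == "__main__":
    s = np.linspace(0.5, 10.0, 5)        # scattering vector norms in Angstrom^-1 (example values)
    L = 0.063                            # hypothetical deBroglie wavelength in Angstrom
    print(np.degrees(s_to_theta(s, L)))  # corresponding scattering angles in degrees
```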
frssp/pymatgen | [
"5cc42912a12a265a603df7e34c856561f76edc1f",
"bdd977f065b66191557c7398b31a1571bc541fdb"
] | [
"dev_scripts/chemenv/equivalent_indices.py",
"pymatgen/analysis/diffraction/xrd.py"
] | [
"# coding: utf-8\n# Copyright (c) Pymatgen Development Team.\n# Distributed under the terms of the MIT License.\n\nfrom __future__ import division, unicode_literals\n\n\"\"\"\nDevelopment script of the ChemEnv utility to get the equivalent indices of the model coordination environments\n\"\"\"\n\n__author__ = \"David Waroquiers\"\n__copyright__ = \"Copyright 2012, The Materials Project\"\n__version__ = \"2.0\"\n__maintainer__ = \"David Waroquiers\"\n__email__ = \"[email protected]\"\n__date__ = \"Feb 20, 2016\"\n\nimport numpy as np\n\n\nif __name__ == '__main__':\n\n cg_symbol = 'O:6'\n equiv_list = []\n\n # O:6\n if cg_symbol == 'O:6':\n opposite_points = {0: 1,\n 1: 0,\n 2: 3,\n 3: 2,\n 4: 5,\n 5: 4}\n perp_plane = {0: [2, 3, 4, 5],\n 1: [2, 3, 4, 5],\n 2: [0, 1, 4, 5],\n 3: [0, 1, 4, 5],\n 4: [0, 1, 2, 3],\n 5: [0, 1, 2, 3]}\n # 0. any point\n for i0 in range(6):\n # 1. point opposite to point 0.\n i1 = opposite_points[i0]\n # 2. one of the 4 points in the perpendicular plane\n for i2 in perp_plane[i0]:\n # 3. point opposite to point 2.\n i3 = opposite_points[i2]\n remaining = range(6)\n remaining.remove(i0)\n remaining.remove(i1)\n remaining.remove(i2)\n remaining.remove(i3)\n # 4. one of the 2 remaining points\n for i4 in remaining:\n # 5. point opposite to point 4.\n i5 = opposite_points[i4]\n equiv_list.append([i0, i1, i2, i3, i4, i5])\n\n # PB:7\n if cg_symbol == 'PB:7':\n for i0 in range(5):\n for turn in [1, -1]:\n i1 = np.mod(i0+turn, 5)\n i2 = np.mod(i1+turn, 5)\n i3 = np.mod(i2+turn, 5)\n i4 = np.mod(i3+turn, 5)\n for i5 in [5, 6]:\n i6 = 5 if i5 == 6 else 6\n equiv_list.append([i0, i1, i2, i3, i4, i5, i6])\n\n # HB:8\n if cg_symbol == 'HB:8':\n for i0 in range(6):\n for turn in [1, -1]:\n i1 = np.mod(i0 + turn, 6)\n i2 = np.mod(i1 + turn, 6)\n i3 = np.mod(i2 + turn, 6)\n i4 = np.mod(i3 + turn, 6)\n i5 = np.mod(i4 + turn, 6)\n for i6 in [6, 7]:\n i7 = 6 if i6 == 7 else 7\n equiv_list.append([i0, i1, i2, i3, i4, i5, i6, i7])\n\n # SBT:8\n if cg_symbol == 'SBT:8':\n #0. any point on the square face without cap\n for i0 in [0, 1, 3, 4]:\n #1. point in this square face but also in the triangular plane of point 0\n #2. last point in the triangular plane of point 0\n if i0 < 3:\n i1 = 0 if i0 == 1 else 1\n i2 = 2\n else:\n i1 = 3 if i0 == 4 else 4\n i2 = 5\n #3.4.5. corresponding points in the opposite triangular plane to the one of points 0.1.2.\n i3 = np.mod(i0 + 3, 6)\n i4 = np.mod(i1 + 3, 6)\n i5 = np.mod(i2 + 3, 6)\n #6. cap point opposite to the first point\n i6 = 7 if i0 in [1, 4] else 6\n #7. last cap point\n i7 = 6 if i0 in [1, 4] else 7\n equiv_list.append([i0, i1, i2, i3, i4, i5, i6, i7])\n\n # SA:8\n if cg_symbol == 'SA:8':\n sf1 = [0, 2, 1, 3]\n sf2 = [4, 5, 7, 6]\n # 0. any point\n for i0 in range(8):\n # 1. point opposite to point 0. in the square face\n if i0 in [0, 2]:\n i1 = i0 + 1\n elif i0 in [1, 3]:\n i1 = i0 - 1\n elif i0 == 4:\n i1 = 7\n elif i0 == 5:\n i1 = 6\n elif i0 == 6:\n i1 = 5\n elif i0 == 7:\n i1 = 4\n # 2. one of the two last points in the square face\n sfleft = list(sf1) if i0 in sf1 else list(sf2)\n sfleft.remove(i0)\n sfleft.remove(i1)\n for i2 in sfleft:\n sfleft2 = list(sfleft)\n sfleft2.remove(i2)\n # 3. last point in the square face\n i3 = sfleft2[0]\n # 4. point opposite to point 3. and closest to point 0.\n i4 = 0\n\n # 3.4.5. corresponding points in the opposite triangular plane to the one of points 0.1.2.\n i3 = np.mod(i0 + 3, 6)\n i4 = np.mod(i1 + 3, 6)\n i5 = np.mod(i2 + 3, 6)\n # 6. 
cap point opposite to the first point\n i6 = 7 if i0 in [1, 4] else 6\n # 7. last cap point\n i7 = 6 if i0 in [1, 4] else 7\n equiv_list.append([i0, i1, i2, i3, i4, i5, i6, i7])\n\n print('Equivalent indices ({:d}) for {} : '.format(len(equiv_list), cg_symbol))\n print(equiv_list)",
"# coding: utf-8\n# Copyright (c) Pymatgen Development Team.\n# Distributed under the terms of the MIT License.\n\nfrom __future__ import division, unicode_literals\n\nfrom math import sin, cos, asin, pi, degrees, radians\nimport os\nimport collections\n\nimport numpy as np\nimport json\n\nfrom pymatgen.core.spectrum import Spectrum\nfrom pymatgen.symmetry.analyzer import SpacegroupAnalyzer\nfrom pymatgen.util.plotting import add_fig_kwargs\n\n\"\"\"\nThis module implements an XRD pattern calculator.\n\"\"\"\n\n__author__ = \"Shyue Ping Ong\"\n__copyright__ = \"Copyright 2012, The Materials Project\"\n__version__ = \"0.1\"\n__maintainer__ = \"Shyue Ping Ong\"\n__email__ = \"[email protected]\"\n__date__ = \"5/22/14\"\n\n\n# XRD wavelengths in angstroms\nWAVELENGTHS = {\n \"CuKa\": 1.54184,\n \"CuKa2\": 1.54439,\n \"CuKa1\": 1.54056,\n \"CuKb1\": 1.39222,\n \"MoKa\": 0.71073,\n \"MoKa2\": 0.71359,\n \"MoKa1\": 0.70930,\n \"MoKb1\": 0.63229,\n \"CrKa\": 2.29100,\n \"CrKa2\": 2.29361,\n \"CrKa1\": 2.28970,\n \"CrKb1\": 2.08487,\n \"FeKa\": 1.93735,\n \"FeKa2\": 1.93998,\n \"FeKa1\": 1.93604,\n \"FeKb1\": 1.75661,\n \"CoKa\": 1.79026,\n \"CoKa2\": 1.79285,\n \"CoKa1\": 1.78896,\n \"CoKb1\": 1.63079,\n \"AgKa\": 0.560885,\n \"AgKa2\": 0.563813,\n \"AgKa1\": 0.559421,\n \"AgKb1\": 0.497082,\n}\n\nwith open(os.path.join(os.path.dirname(__file__),\n \"atomic_scattering_params.json\")) as f:\n ATOMIC_SCATTERING_PARAMS = json.load(f)\n\n\nclass XRDPattern(Spectrum):\n \"\"\"\n A representation of an XRDPattern\n \"\"\"\n\n XLABEL = \"$2\\\\Theta$\"\n YLABEL = \"Intensity\"\n\n def __init__(self, x, y, hkls, d_hkls):\n \"\"\"\n Args:\n x: Two theta angles.\n y: Intensities\n hkls: [{(h, k, l): mult}] {(h, k, l): mult} is a dict of Miller\n indices for all diffracted lattice facets contributing to each\n intensity.\n d_hkls: List of interplanar spacings.\n \"\"\"\n super(XRDPattern, self).__init__(x, y, hkls,d_hkls)\n self.hkls = hkls\n self.d_hkls = d_hkls\n\n\nclass XRDCalculator(object):\n \"\"\"\n Computes the XRD pattern of a crystal structure.\n\n This code is implemented by Shyue Ping Ong as part of UCSD's NANO106 -\n Crystallography of Materials. The formalism for this code is based on\n that given in Chapters 11 and 12 of Structure of Materials by Marc De\n Graef and Michael E. McHenry. This takes into account the atomic\n scattering factors and the Lorentz polarization factor, but not\n the Debye-Waller (temperature) factor (for which data is typically not\n available). Note that the multiplicity correction is not needed since\n this code simply goes through all reciprocal points within the limiting\n sphere, which includes all symmetrically equivalent facets. The algorithm\n is as follows\n\n 1. Calculate reciprocal lattice of structure. Find all reciprocal points\n within the limiting sphere given by :math:`\\\\frac{2}{\\\\lambda}`.\n\n 2. For each reciprocal point :math:`\\\\mathbf{g_{hkl}}` corresponding to\n lattice plane :math:`(hkl)`, compute the Bragg condition\n :math:`\\\\sin(\\\\theta) = \\\\frac{\\\\lambda}{2d_{hkl}}`\n\n 3. Compute the structure factor as the sum of the atomic scattering\n factors. The atomic scattering factors are given by\n\n .. math::\n\n f(s) = Z - 41.78214 \\\\times s^2 \\\\times \\\\sum\\\\limits_{i=1}^n a_i \\\n \\\\exp(-b_is^2)\n\n where :math:`s = \\\\frac{\\\\sin(\\\\theta)}{\\\\lambda}` and :math:`a_i`\n and :math:`b_i` are the fitted parameters for each element. The\n structure factor is then given by\n\n .. 
math::\n\n F_{hkl} = \\\\sum\\\\limits_{j=1}^N f_j \\\\exp(2\\\\pi i \\\\mathbf{g_{hkl}}\n \\\\cdot \\\\mathbf{r})\n\n 4. The intensity is then given by the modulus square of the structure\n factor.\n\n .. math::\n\n I_{hkl} = F_{hkl}F_{hkl}^*\n\n 5. Finally, the Lorentz polarization correction factor is applied. This\n factor is given by:\n\n .. math::\n\n P(\\\\theta) = \\\\frac{1 + \\\\cos^2(2\\\\theta)}\n {\\\\sin^2(\\\\theta)\\\\cos(\\\\theta)}\n \"\"\"\n\n # Tuple of available radiation keywords.\n AVAILABLE_RADIATION = tuple(WAVELENGTHS.keys())\n\n # Tolerance in which to treat two peaks as having the same two theta.\n TWO_THETA_TOL = 1e-5\n\n # Tolerance in which to treat a peak as effectively 0 if the scaled\n # intensity is less than this number. Since the max intensity is 100,\n # this means the peak must be less than 1e-5 of the peak intensity to be\n # considered as zero. This deals with numerical issues where systematic\n # absences do not cancel exactly to zero.\n SCALED_INTENSITY_TOL = 1e-3\n\n def __init__(self, wavelength=\"CuKa\", symprec=0, debye_waller_factors=None):\n \"\"\"\n Initializes the XRD calculator with a given radiation.\n\n Args:\n wavelength (str/float): The wavelength can be specified as either a\n float or a string. If it is a string, it must be one of the\n supported definitions in the AVAILABLE_RADIATION class\n variable, which provides useful commonly used wavelengths.\n If it is a float, it is interpreted as a wavelength in\n angstroms. Defaults to \"CuKa\", i.e, Cu K_alpha radiation.\n symprec (float): Symmetry precision for structure refinement. If\n set to 0, no refinement is done. Otherwise, refinement is\n performed using spglib with provided precision.\n debye_waller_factors ({element symbol: float}): Allows the\n specification of Debye-Waller factors. Note that these\n factors are temperature dependent.\n \"\"\"\n if isinstance(wavelength, float):\n self.wavelength = wavelength\n else:\n self.radiation = wavelength\n self.wavelength = WAVELENGTHS[wavelength]\n self.symprec = symprec\n self.debye_waller_factors = debye_waller_factors or {}\n\n def get_xrd_pattern(self, structure, scaled=True, two_theta_range=(0, 90)):\n \"\"\"\n Calculates the XRD pattern for a structure.\n\n Args:\n structure (Structure): Input structure\n scaled (bool): Whether to return scaled intensities. The maximum\n peak is set to a value of 100. Defaults to True. Use False if\n you need the absolute values to combine XRD plots.\n two_theta_range ([float of length 2]): Tuple for range of\n two_thetas to calculate in degrees. Defaults to (0, 90). Set to\n None if you want all diffracted beams within the limiting\n sphere of radius 2 / wavelength.\n\n Returns:\n (XRDPattern)\n \"\"\"\n if self.symprec:\n finder = SpacegroupAnalyzer(structure, symprec=self.symprec)\n structure = finder.get_refined_structure()\n\n wavelength = self.wavelength\n latt = structure.lattice\n is_hex = latt.is_hexagonal()\n\n # Obtained from Bragg condition. Note that reciprocal lattice\n # vector length is 1 / d_hkl.\n min_r, max_r = (0, 2 / wavelength) if two_theta_range is None else \\\n [2 * sin(radians(t / 2)) / wavelength for t in two_theta_range]\n\n # Obtain crystallographic reciprocal lattice points within range\n recip_latt = latt.reciprocal_lattice_crystallographic\n recip_pts = recip_latt.get_points_in_sphere(\n [[0, 0, 0]], [0, 0, 0], max_r)\n if min_r:\n recip_pts = [pt for pt in recip_pts if pt[1] >= min_r]\n\n # Create a flattened array of zs, coeffs, fcoords and occus. 
This is\n # used to perform vectorized computation of atomic scattering factors\n # later. Note that these are not necessarily the same size as the\n # structure as each partially occupied specie occupies its own\n # position in the flattened array.\n zs = []\n coeffs = []\n fcoords = []\n occus = []\n dwfactors = []\n\n for site in structure:\n for sp, occu in site.species_and_occu.items():\n zs.append(sp.Z)\n try:\n c = ATOMIC_SCATTERING_PARAMS[sp.symbol]\n except KeyError:\n raise ValueError(\"Unable to calculate XRD pattern as \"\n \"there is no scattering coefficients for\"\n \" %s.\" % sp.symbol)\n coeffs.append(c)\n dwfactors.append(self.debye_waller_factors.get(sp.symbol, 0))\n fcoords.append(site.frac_coords)\n occus.append(occu)\n\n zs = np.array(zs)\n coeffs = np.array(coeffs)\n fcoords = np.array(fcoords)\n occus = np.array(occus)\n dwfactors = np.array(dwfactors)\n peaks = {}\n two_thetas = []\n\n for hkl, g_hkl, ind in sorted(\n recip_pts, key=lambda i: (i[1], -i[0][0], -i[0][1], -i[0][2])):\n # Force miller indices to be integers.\n hkl = [int(round(i)) for i in hkl]\n if g_hkl != 0:\n\n d_hkl = 1 / g_hkl\n\n # Bragg condition\n theta = asin(wavelength * g_hkl / 2)\n\n # s = sin(theta) / wavelength = 1 / 2d = |ghkl| / 2 (d =\n # 1/|ghkl|)\n s = g_hkl / 2\n\n # Store s^2 since we are using it a few times.\n s2 = s ** 2\n\n # Vectorized computation of g.r for all fractional coords and\n # hkl.\n g_dot_r = np.dot(fcoords, np.transpose([hkl])).T[0]\n\n # Highly vectorized computation of atomic scattering factors.\n # Equivalent non-vectorized code is::\n #\n # for site in structure:\n # el = site.specie\n # coeff = ATOMIC_SCATTERING_PARAMS[el.symbol]\n # fs = el.Z - 41.78214 * s2 * sum(\n # [d[0] * exp(-d[1] * s2) for d in coeff])\n fs = zs - 41.78214 * s2 * np.sum(\n coeffs[:, :, 0] * np.exp(-coeffs[:, :, 1] * s2), axis=1)\n\n dw_correction = np.exp(-dwfactors * s2)\n\n # Structure factor = sum of atomic scattering factors (with\n # position factor exp(2j * pi * g.r and occupancies).\n # Vectorized computation.\n f_hkl = np.sum(fs * occus * np.exp(2j * pi * g_dot_r)\n * dw_correction)\n\n # Lorentz polarization correction for hkl\n lorentz_factor = (1 + cos(2 * theta) ** 2) / \\\n (sin(theta) ** 2 * cos(theta))\n\n # Intensity for hkl is modulus square of structure factor.\n i_hkl = (f_hkl * f_hkl.conjugate()).real\n\n two_theta = degrees(2 * theta)\n\n if is_hex:\n # Use Miller-Bravais indices for hexagonal lattices.\n hkl = (hkl[0], hkl[1], - hkl[0] - hkl[1], hkl[2])\n # Deal with floating point precision issues.\n ind = np.where(np.abs(np.subtract(two_thetas, two_theta)) <\n XRDCalculator.TWO_THETA_TOL)\n if len(ind[0]) > 0:\n peaks[two_thetas[ind[0][0]]][0] += i_hkl * lorentz_factor\n peaks[two_thetas[ind[0][0]]][1].append(tuple(hkl))\n else:\n peaks[two_theta] = [i_hkl * lorentz_factor, [tuple(hkl)],\n d_hkl]\n two_thetas.append(two_theta)\n\n # Scale intensities so that the max intensity is 100.\n max_intensity = max([v[0] for v in peaks.values()])\n x = []\n y = []\n hkls = []\n d_hkls = []\n for k in sorted(peaks.keys()):\n v = peaks[k]\n fam = get_unique_families(v[1])\n if v[0] / max_intensity * 100 > XRDCalculator.SCALED_INTENSITY_TOL:\n x.append(k)\n y.append(v[0])\n hkls.append(fam)\n d_hkls.append(v[2])\n xrd = XRDPattern(x, y, hkls, d_hkls)\n if scaled:\n xrd.normalize(mode=\"max\", value=100)\n return xrd\n\n def get_xrd_plot(self, structure, two_theta_range=(0, 90),\n annotate_peaks=True, ax=None, with_labels=True,\n fontsize=16):\n \"\"\"\n Returns the XRD 
plot as a matplotlib.pyplot.\n\n Args:\n structure: Input structure\n two_theta_range ([float of length 2]): Tuple for range of\n two_thetas to calculate in degrees. Defaults to (0, 90). Set to\n None if you want all diffracted beams within the limiting\n sphere of radius 2 / wavelength.\n annotate_peaks: Whether to annotate the peaks with plane\n information.\n ax: matplotlib :class:`Axes` or None if a new figure should be created.\n with_labels: True to add xlabels and ylabels to the plot.\n fontsize: (int) fontsize for peak labels.\n\n Returns:\n (matplotlib.pyplot)\n \"\"\"\n if ax is None:\n from pymatgen.util.plotting import pretty_plot\n plt = pretty_plot(16, 10)\n ax = plt.gca()\n else:\n # This to maintain the type of the return value.\n import matplotlib.pyplot as plt\n\n xrd = self.get_xrd_pattern(structure, two_theta_range=two_theta_range)\n\n for two_theta, i, hkls, d_hkl in zip(xrd.x, xrd.y, xrd.hkls, xrd.d_hkls):\n if two_theta_range[0] <= two_theta <= two_theta_range[1]:\n label = \", \".join([str(hkl) for hkl in hkls.keys()])\n ax.plot([two_theta, two_theta], [0, i], color='k',\n linewidth=3, label=label)\n if annotate_peaks:\n ax.annotate(label, xy=[two_theta, i],\n xytext=[two_theta, i], fontsize=fontsize)\n\n if with_labels:\n ax.set_xlabel(r\"$2\\theta$ ($^\\circ$)\")\n ax.set_ylabel(\"Intensities (scaled)\")\n\n if hasattr(ax, \"tight_layout\"):\n ax.tight_layout()\n\n return plt\n\n def show_xrd_plot(self, structure, two_theta_range=(0, 90),\n annotate_peaks=True):\n \"\"\"\n Shows the XRD plot.\n\n Args:\n structure (Structure): Input structure\n two_theta_range ([float of length 2]): Tuple for range of\n two_thetas to calculate in degrees. Defaults to (0, 90). Set to\n None if you want all diffracted beams within the limiting\n sphere of radius 2 / wavelength.\n annotate_peaks (bool): Whether to annotate the peaks with plane\n information.\n \"\"\"\n self.get_xrd_plot(structure, two_theta_range=two_theta_range,\n annotate_peaks=annotate_peaks).show()\n\n @add_fig_kwargs\n def plot_structures(self, structures, two_theta_range=(0, 90),\n annotate_peaks=True, fontsize=6, **kwargs):\n \"\"\"\n Plot XRD for multiple structures on the same figure.\n\n Args:\n structures (Structure): List of structures\n two_theta_range ([float of length 2]): Tuple for range of\n two_thetas to calculate in degrees. Defaults to (0, 90). Set to\n None if you want all diffracted beams within the limiting\n sphere of radius 2 / wavelength.\n annotate_peaks (bool): Whether to annotate the peaks with plane\n information.\n fontsize: (int) fontsize for peak labels.\n \"\"\"\n import matplotlib.pyplot as plt\n nrows = len(structures)\n fig, axes = plt.subplots(nrows=nrows, ncols=1, sharex=True, squeeze=False)\n\n for i, (ax, structure) in enumerate(zip(axes.ravel(), structures)):\n self.get_xrd_plot(structure, two_theta_range=two_theta_range,\n annotate_peaks=annotate_peaks,\n fontsize=fontsize, ax=ax, with_labels=i == nrows - 1)\n spg_symbol, spg_number = structure.get_space_group_info()\n ax.set_title(\"{} {} ({}) \".format(structure.formula, spg_symbol, spg_number))\n\n return fig\n\n\ndef get_unique_families(hkls):\n \"\"\"\n Returns unique families of Miller indices. 
Families must be permutations\n of each other.\n\n Args:\n hkls ([h, k, l]): List of Miller indices.\n\n Returns:\n {hkl: multiplicity}: A dict with unique hkl and multiplicity.\n \"\"\"\n # TODO: Definitely can be sped up.\n def is_perm(hkl1, hkl2):\n h1 = np.abs(hkl1)\n h2 = np.abs(hkl2)\n return all([i == j for i, j in zip(sorted(h1), sorted(h2))])\n\n unique = collections.defaultdict(list)\n for hkl1 in hkls:\n found = False\n for hkl2 in unique.keys():\n if is_perm(hkl1, hkl2):\n found = True\n unique[hkl2].append(hkl1)\n break\n if not found:\n unique[hkl1].append(hkl1)\n\n pretty_unique = {}\n for k, v in unique.items():\n pretty_unique[sorted(v)[-1]] = len(v)\n\n return pretty_unique\n"
] | [
[
"numpy.mod"
],
[
"numpy.abs",
"numpy.subtract",
"numpy.transpose",
"numpy.array",
"numpy.exp"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
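
The `XRDCalculator` docstring in the record above spells out the formalism: atomic scattering factors `f(s) = Z - 41.78214 * s^2 * sum_i a_i * exp(-b_i * s^2)`, the structure factor as a phase-weighted sum over sites, and the Lorentz polarization correction `(1 + cos^2(2*theta)) / (sin^2(theta) * cos(theta))`. Below is a small self-contained sketch of one reflection following that recipe; it is not pymatgen code, and the scattering coefficients, fractional coordinates, and `d_hkl` are made-up placeholders (the real calculator reads fitted parameters from `atomic_scattering_params.json`).

```python
# Minimal sketch (illustrative, not the pymatgen API): intensity of a single (hkl)
# reflection following the formalism quoted in the XRDCalculator docstring above.
import numpy as np
from math import asin, sin, cos, degrees, pi

wavelength = 1.54184                         # CuKa wavelength in Angstrom
d_hkl = 2.0                                  # hypothetical interplanar spacing, Angstrom
theta = asin(wavelength / (2 * d_hkl))       # Bragg condition: sin(theta) = lambda / (2 d)
s = 1.0 / (2.0 * d_hkl)                      # s = sin(theta) / lambda = 1 / (2 d)
s2 = s ** 2

# Hypothetical two-atom basis: Z values, made-up (a_i, b_i) pairs, fractional coords, occupancies.
zs      = np.array([14.0, 8.0])
coeffs  = np.array([[[6.2915, 2.4386], [3.0353, 32.3337]],
                    [[3.0485, 13.2771], [2.2868, 5.7011]]])
fcoords = np.array([[0.0, 0.0, 0.0], [0.25, 0.25, 0.25]])
occus   = np.array([1.0, 1.0])
hkl     = np.array([1, 1, 1])

# Atomic scattering factors: f = Z - 41.78214 * s^2 * sum_i a_i * exp(-b_i * s^2)
fs = zs - 41.78214 * s2 * np.sum(coeffs[:, :, 0] * np.exp(-coeffs[:, :, 1] * s2), axis=1)

# Structure factor, then Lorentz-polarization-corrected intensity |F_hkl|^2 * P(theta)
g_dot_r = fcoords @ hkl
f_hkl = np.sum(fs * occus * np.exp(2j * pi * g_dot_r))
lorentz = (1 + cos(2 * theta) ** 2) / (sin(theta) ** 2 * cos(theta))
i_hkl = (f_hkl * f_hkl.conjugate()).real * lorentz
print(degrees(2 * theta), i_hkl)             # two-theta position and relative intensity
```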
MostaSchoolOfAI/crab | [
"1c1fc21e902e4ee422ab367d691df16978972f8c"
] | [
"scikits/crab/recommenders/knn/classes.py"
] | [
"\"\"\"\nGeneralized Recommender models.\n\nThis module contains basic memory recommender interfaces used throughout\nthe whole scikit-crab package.\n\nThe interfaces are realized as abstract base classes (ie., some optional\nfunctionality is provided in the interface itself, so that the interfaces\ncan be subclassed).\n\n\"\"\"\n\n# Author: Marcel Caraciolo <[email protected]>\n#\n# License: BSD Style.\nfrom sklearn.base import BaseEstimator\nfrom .base import ItemRecommender, UserRecommender\nfrom .item_strategies import ItemsNeighborhoodStrategy\nfrom .neighborhood_strategies import NearestNeighborsStrategy\nimport numpy as np\n\n\nclass ItemBasedRecommender(ItemRecommender):\n \"\"\"\n Item Based Collaborative Filtering Recommender.\n\n\n Parameters\n -----------\n data_model: The data model instance that will be data source\n for the recommender.\n\n similarity: The Item Similarity instance that will be used to\n score the items that will be recommended.\n\n items_selection_strategy: The item candidates strategy that you\n can choose for selecting the possible items to recommend.\n default = ItemsNeighborhoodStrategy\n\n capper: bool (default=True)\n Cap the preferences with maximum and minimum preferences\n in the model.\n with_preference: bool (default=False)\n Return the recommendations with the estimated preferences if True.\n\n Attributes\n -----------\n `model`: The data model instance that will be data source\n for the recommender.\n\n `similarity`: The Item Similarity instance that will be used to\n score the items that will be recommended.\n\n `items_selection_strategy`: The item candidates strategy that you\n can choose for selecting the possible items to recommend.\n default = ItemsNeighborhoodStrategy\n\n `capper`: bool (default=True)\n Cap the preferences with maximum and minimum preferences\n in the model.\n `with_preference`: bool (default=False)\n Return the recommendations with the estimated preferences if True.\n\n Examples\n -----------\n >>> from scikits.crab.models.classes import MatrixPreferenceDataModel\n >>> from scikits.crab.recommenders.knn.classes import ItemBasedRecommender\n >>> from scikits.crab.similarities.basic_similarities import ItemSimilarity\n >>> from scikits.crab.recommenders.knn.item_strategies import ItemsNeighborhoodStrategy\n >>> from scikits.crab.metrics.pairwise import euclidean_distances\n >>> movies = {'Marcel Caraciolo': {'Lady in the Water': 2.5, \\\n 'Snakes on a Plane': 3.5, \\\n 'Just My Luck': 3.0, 'Superman Returns': 3.5, 'You, Me and Dupree': 2.5, \\\n 'The Night Listener': 3.0}, \\\n 'Paola Pow': {'Lady in the Water': 3.0, 'Snakes on a Plane': 3.5, \\\n 'Just My Luck': 1.5, 'Superman Returns': 5.0, 'The Night Listener': 3.0, \\\n 'You, Me and Dupree': 3.5}, \\\n 'Leopoldo Pires': {'Lady in the Water': 2.5, 'Snakes on a Plane': 3.0, \\\n 'Superman Returns': 3.5, 'The Night Listener': 4.0}, \\\n 'Lorena Abreu': {'Snakes on a Plane': 3.5, 'Just My Luck': 3.0, \\\n 'The Night Listener': 4.5, 'Superman Returns': 4.0, \\\n 'You, Me and Dupree': 2.5}, \\\n 'Steve Gates': {'Lady in the Water': 3.0, 'Snakes on a Plane': 4.0, \\\n 'Just My Luck': 2.0, 'Superman Returns': 3.0, 'The Night Listener': 3.0, \\\n 'You, Me and Dupree': 2.0}, \\\n 'Sheldom': {'Lady in the Water': 3.0, 'Snakes on a Plane': 4.0, \\\n 'The Night Listener': 3.0, 'Superman Returns': 5.0, \\\n 'You, Me and Dupree': 3.5}, \\\n 'Penny Frewman': {'Snakes on a Plane':4.5,'You, Me and Dupree':1.0, \\\n 'Superman Returns':4.0}, \\\n 'Maria Gabriela': {}}\n >>> model = 
MatrixPreferenceDataModel(movies)\n >>> items_strategy = ItemsNeighborhoodStrategy()\n >>> similarity = ItemSimilarity(model, euclidean_distances)\n >>> recsys = ItemBasedRecommender(model, similarity, items_strategy)\n >>> #Return the recommendations for the given user.\n >>> recsys.recommend('Leopoldo Pires')\n ['Just My Luck', 'You, Me and Dupree']\n >>> #Return the 2 explanations for the given recommendation.\n >>> recsys.recommended_because('Leopoldo Pires', 'Just My Luck',2)\n ['The Night Listener', 'Superman Returns']\n\n Notes\n -----------\n This ItemBasedRecommender does not yet provide\n suppot for rescorer functions.\n\n References\n -----------\n Item-based collaborative filtering recommendation algorithms by Sarwar\n http://portal.acm.org/citation.cfm?id=372071\n\n \"\"\"\n\n def __init__(self, model, similarity, items_selection_strategy=None,\n capper=True, with_preference=False):\n ItemRecommender.__init__(self, model, with_preference)\n self.similarity = similarity\n self.capper = capper\n if items_selection_strategy is None:\n self.items_selection_strategy = ItemsNeighborhoodStrategy()\n else:\n self.items_selection_strategy = items_selection_strategy\n\n def recommend(self, user_id, how_many=None, **params):\n '''\n Return a list of recommended items, ordered from most strongly\n recommend to least.\n\n Parameters\n ----------\n user_id: int or string\n User for which recommendations are to be computed.\n how_many: int\n Desired number of recommendations (default=None ALL)\n\n '''\n self._set_params(**params)\n\n candidate_items = self.all_other_items(user_id)\n\n recommendable_items = self._top_matches(user_id, \\\n candidate_items, how_many)\n\n return recommendable_items\n\n def estimate_preference(self, user_id, item_id, **params):\n '''\n Parameters\n ----------\n user_id: int or string\n User for which recommendations are to be computed.\n\n item_id: int or string\n ID of item for which wants to find the estimated preference.\n\n Returns\n -------\n Return an estimated preference if the user has not expressed a\n preference for the item, or else the user's actual preference for the\n item. If a preference cannot be estimated, returns None.\n '''\n preference = self.model.preference_value(user_id, item_id)\n\n if not np.isnan(preference):\n return preference\n\n #TODO: It needs optimization\n prefs = self.model.preferences_from_user(user_id)\n\n if not self.model.has_preference_values():\n prefs = [(pref, 1.0) for pref in prefs]\n\n similarities = \\\n np.array([self.similarity.get_similarity(item_id, to_item_id) \\\n for to_item_id, pref in prefs if to_item_id != item_id]).flatten()\n\n prefs = np.array([pref for it, pref in prefs])\n prefs_sim = np.sum(prefs[~np.isnan(similarities)] *\n similarities[~np.isnan(similarities)])\n total_similarity = np.sum(similarities)\n\n #Throw out the estimate if it was based on no data points,\n #of course, but also if based on\n #just one. 
This is a bit of a band-aid on the 'stock'\n #item-based algorithm for the moment.\n #The reason is that in this case the estimate is, simply,\n #the user's rating for one item\n #that happened to have a defined similarity.\n #The similarity score doesn't matter, and that\n #seems like a bad situation.\n if total_similarity == 0.0 or \\\n not similarities[~np.isnan(similarities)].size:\n return np.nan\n\n estimated = prefs_sim / total_similarity\n\n if self.capper:\n max_p = self.model.maximum_preference_value()\n min_p = self.model.minimum_preference_value()\n estimated = max_p if estimated > max_p else min_p \\\n if estimated < min_p else estimated\n return estimated\n\n def all_other_items(self, user_id, **params):\n '''\n Parameters\n ----------\n user_id: int or string\n User for which recommendations are to be computed.\n\n Returns\n ---------\n Return items in the `model` for which the user has not expressed\n the preference and could possibly be recommended to the user.\n\n '''\n return self.items_selection_strategy.candidate_items(user_id, \\\n self.model)\n\n def _top_matches(self, source_id, target_ids, how_many=None, **params):\n '''\n Parameters\n ----------\n target_ids: array of shape [n_target_ids]\n\n source_id: int or string\n item id to compare against.\n\n how_many: int\n Desired number of most top items to recommend (default=None ALL)\n\n Returns\n --------\n Return the top N matches\n It can be user_ids or item_ids.\n '''\n #Empty target_ids\n if target_ids.size == 0:\n return np.array([])\n\n estimate_preferences = np.vectorize(self.estimate_preference)\n\n preferences = estimate_preferences(source_id, target_ids)\n\n preference_values = preferences[~np.isnan(preferences)]\n target_ids = target_ids[~np.isnan(preferences)]\n\n sorted_preferences = np.lexsort((preference_values,))[::-1]\n\n sorted_preferences = sorted_preferences[0:how_many] \\\n if how_many and sorted_preferences.size > how_many \\\n else sorted_preferences\n\n if self.with_preference:\n top_n_recs = [(target_ids[ind], \\\n preferences[ind]) for ind in sorted_preferences]\n else:\n top_n_recs = [target_ids[ind]\n for ind in sorted_preferences]\n\n return top_n_recs\n\n def most_similar_items(self, item_id, how_many=None):\n '''\n Return the most similar items to the given item, ordered\n from most similar to least.\n\n Parameters\n -----------\n item_id: int or string\n ID of item for which to find most similar other items\n\n how_many: int\n Desired number of most similar items to find (default=None ALL)\n '''\n old_how_many = self.similarity.num_best\n #+1 since it returns the identity.\n self.similarity.num_best = how_many + 1 \\\n if how_many is not None else None\n similarities = self.similarity[item_id]\n self.similarity.num_best = old_how_many\n\n return np.array([item for item, pref in similarities \\\n if item != item_id and not np.isnan(pref)])\n\n def recommended_because(self, user_id, item_id, how_many=None, **params):\n '''\n Returns the items that were most influential in recommending a\n given item to a given user. 
In most implementations, this\n method will return items that the user prefers and that\n are similar to the given item.\n\n Parameters\n -----------\n user_id : int or string\n ID of the user who was recommended the item\n\n item_id: int or string\n ID of item that was recommended\n\n how_many: int\n Maximum number of items to return (default=None ALL)\n\n Returns\n ----------\n The list of items ordered from most influential in\n recommended the given item to least\n '''\n preferences = self.model.preferences_from_user(user_id)\n\n if self.model.has_preference_values():\n similarities = \\\n np.array([self.similarity.get_similarity(item_id, to_item_id) \\\n for to_item_id, pref in preferences\n if to_item_id != item_id]).flatten()\n prefs = np.array([pref for it, pref in preferences])\n item_ids = np.array([it for it, pref in preferences])\n else:\n similarities = \\\n np.array([self.similarity.get_similarity(item_id, to_item_id) \\\n for to_item_id in preferences\n if to_item_id != item_id]).flatten()\n prefs = np.array([1.0 for it in preferences])\n item_ids = np.array(preferences)\n\n scores = prefs[~np.isnan(similarities)] * \\\n (1.0 + similarities[~np.isnan(similarities)])\n\n sorted_preferences = np.lexsort((scores,))[::-1]\n\n sorted_preferences = sorted_preferences[0:how_many] \\\n if how_many and sorted_preferences.size > how_many \\\n else sorted_preferences\n\n if self.with_preference:\n top_n_recs = [(item_ids[ind], \\\n prefs[ind]) for ind in sorted_preferences]\n else:\n top_n_recs = [item_ids[ind]\n for ind in sorted_preferences]\n\n return top_n_recs\n\n\n#=====================\n#User Based Recommender\n\nclass UserBasedRecommender(UserRecommender):\n \"\"\"\n User Based Collaborative Filtering Recommender.\n\n\n Parameters\n -----------\n data_model: The data model instance that will be data source\n for the recommender.\n\n similarity: The User Similarity instance that will be used to\n score the users that are the most similar to the user.\n\n neighborhood_strategy: The user neighborhood strategy that you\n can choose for selecting the most similar users to find\n the items to recommend.\n default = NearestNeighborsStrategy\n\n capper: bool (default=True)\n Cap the preferences with maximum and minimum preferences\n in the model.\n with_preference: bool (default=False)\n Return the recommendations with the estimated preferences if True.\n\n Attributes\n -----------\n `model`: The data model instance that will be data source\n for the recommender.\n\n `similarity`: The User Similarity instance that will be used to\n score the users that are the most similar to the user.\n\n `neighborhood_strategy`: The user neighborhood strategy that you\n can choose for selecting the most similar users to find\n the items to recommend.\n default = NearestNeighborsStrategy\n\n `capper`: bool (default=True)\n Cap the preferences with maximum and minimum preferences\n in the model.\n `with_preference`: bool (default=False)\n Return the recommendations with the estimated preferences if True.\n\n Examples\n -----------\n >>> from scikits.crab.models.classes import MatrixPreferenceDataModel\n >>> from scikits.crab.recommenders.knn.classes import UserBasedRecommender\n >>> from scikits.crab.similarities.basic_similarities import UserSimilarity\n >>> from scikits.crab.recommenders.knn.neighborhood_strategies import NearestNeighborsStrategy\n >>> from scikits.crab.metrics.pairwise import euclidean_distances\n >>> movies = {'Marcel Caraciolo': {'Lady in the Water': 2.5, \\\n 'Snakes on a Plane': 
3.5, \\\n 'Just My Luck': 3.0, 'Superman Returns': 3.5, 'You, Me and Dupree': 2.5, \\\n 'The Night Listener': 3.0}, \\\n 'Paola Pow': {'Lady in the Water': 3.0, 'Snakes on a Plane': 3.5, \\\n 'Just My Luck': 1.5, 'Superman Returns': 5.0, 'The Night Listener': 3.0, \\\n 'You, Me and Dupree': 3.5}, \\\n 'Leopoldo Pires': {'Lady in the Water': 2.5, 'Snakes on a Plane': 3.0, \\\n 'Superman Returns': 3.5, 'The Night Listener': 4.0}, \\\n 'Lorena Abreu': {'Snakes on a Plane': 3.5, 'Just My Luck': 3.0, \\\n 'The Night Listener': 4.5, 'Superman Returns': 4.0, \\\n 'You, Me and Dupree': 2.5}, \\\n 'Steve Gates': {'Lady in the Water': 3.0, 'Snakes on a Plane': 4.0, \\\n 'Just My Luck': 2.0, 'Superman Returns': 3.0, 'The Night Listener': 3.0, \\\n 'You, Me and Dupree': 2.0}, \\\n 'Sheldom': {'Lady in the Water': 3.0, 'Snakes on a Plane': 4.0, \\\n 'The Night Listener': 3.0, 'Superman Returns': 5.0, \\\n 'You, Me and Dupree': 3.5}, \\\n 'Penny Frewman': {'Snakes on a Plane':4.5,'You, Me and Dupree':1.0, \\\n 'Superman Returns':4.0}, \\\n 'Maria Gabriela': {}}\n >>> model = MatrixPreferenceDataModel(movies)\n >>> nhood_strategy = NearestNeighborsStrategy()\n >>> similarity = UserSimilarity(model, euclidean_distances)\n >>> recsys = UserBasedRecommender(model, similarity, nhood_strategy)\n >>> #Return the recommendations for the given user.\n >>> recsys.recommend('Leopoldo Pires')\n ['Just My Luck', 'You, Me and Dupree']\n >>> #Return the 2 explanations for the given recommendation.\n >>> recsys.recommended_because('Leopoldo Pires', 'Just My Luck',2)\n ['Lorena Abreu', 'Marcel Caraciolo']\n\n Notes\n -----------\n This UserBasedRecommender does not yet provide\n suppot for rescorer functions.\n\n References\n -----------\n User-based collaborative filtering recommendation algorithms by\n\n \"\"\"\n\n def __init__(self, model, similarity, neighborhood_strategy=None,\n capper=True, with_preference=False):\n UserRecommender.__init__(self, model, with_preference)\n self.similarity = similarity\n self.capper = capper\n if neighborhood_strategy is None:\n self.neighborhood_strategy = NearestNeighborsStrategy()\n else:\n self.neighborhood_strategy = neighborhood_strategy\n\n def all_other_items(self, user_id, **params):\n '''\n Parameters\n ----------\n user_id: int or string\n User for which recommendations are to be computed. 
(default= 'user_similarity')\n\n Optional Parameters\n --------------------\n n_similarity: string\n The similarity used in the neighborhood strategy\n\n distance: the metrics.pairwise function to set.\n The pairwise function to compute the similarity (default = euclidean_distances)\n\n nhood_size: int\n The neighborhood size (default=None ALL)\n\n minimal_similarity: float\n minimal similarity required for neighbors (default = 0.0)\n\n sampling_rate: int\n percentage of users to consider when building neighborhood\n (default = 1)\n\n Returns\n ---------\n Return items in the `model` for which the user has not expressed\n the preference and could possibly be recommended to the user.\n\n '''\n n_similarity = params.pop('n_similarity', 'user_similarity')\n distance = params.pop('distance', self.similarity.distance)\n nhood_size = params.pop('nhood_size', None)\n\n nearest_neighbors = self.neighborhood_strategy.user_neighborhood(user_id,\n self.model, n_similarity, distance, nhood_size, **params)\n\n items_from_user_id = self.model.items_from_user(user_id)\n possible_items = []\n for to_user_id in nearest_neighbors:\n possible_items.extend(self.model.items_from_user(to_user_id))\n\n possible_items = np.unique(np.array(possible_items).flatten())\n\n return np.setdiff1d(possible_items, items_from_user_id)\n\n def estimate_preference(self, user_id, item_id, **params):\n '''\n Parameters\n ----------\n user_id: int or string\n User for which recommendations are to be computed.\n\n item_id: int or string\n ID of item for which wants to find the estimated preference.\n\n Returns\n -------\n Return an estimated preference if the user has not expressed a\n preference for the item, or else the user's actual preference for the\n item. If a preference cannot be estimated, returns None.\n '''\n\n preference = self.model.preference_value(user_id, item_id)\n if not np.isnan(preference):\n return preference\n\n n_similarity = params.pop('n_similarity', 'user_similarity')\n distance = params.pop('distance', self.similarity.distance)\n nhood_size = params.pop('nhood_size', None)\n\n nearest_neighbors = self.neighborhood_strategy.user_neighborhood(user_id,\n self.model, n_similarity, distance, nhood_size, **params)\n\n preference = 0.0\n total_similarity = 0.0\n\n similarities = np.array([self.similarity.get_similarity(user_id, to_user_id)\n for to_user_id in nearest_neighbors]).flatten()\n\n prefs = np.array([self.model.preference_value(to_user_id, item_id)\n for to_user_id in nearest_neighbors])\n\n \n # prefs = prefs[~np.isnan(prefs)]\n # similarities = similarities[~np.isnan(prefs)]\n\n prefs_sim = np.sum(prefs[~np.isnan(similarities)] *\n similarities[~np.isnan(similarities)])\n total_similarity = np.sum(similarities)\n\n #Throw out the estimate if it was based on no data points,\n #of course, but also if based on just one. This is a bit\n #of a band-aid on the 'stock' item-based algorithm for\n #the moment. The reason is that in this case the estimate\n #is, simply, the user's rating for one item that happened\n #to have a defined similarity. 
The similarity score doesn't\n #matter, and that seems like a bad situation.\n if total_similarity == 0.0 or \\\n not similarities[~np.isnan(similarities)].size:\n return np.nan\n\n estimated = prefs_sim / total_similarity\n\n if self.capper:\n max_p = self.model.maximum_preference_value()\n min_p = self.model.minimum_preference_value()\n estimated = max_p if estimated > max_p else min_p \\\n if estimated < min_p else estimated\n\n return estimated\n\n def most_similar_users(self, user_id, how_many=None):\n '''\n Return the most similar users to the given user, ordered\n from most similar to least.\n\n Parameters\n -----------\n user_id: int or string\n ID of user for which to find most similar other users\n\n how_many: int\n Desired number of most similar users to find (default=None ALL)\n '''\n old_how_many = self.similarity.num_best\n #+1 since it returns the identity.\n self.similarity.num_best = how_many + 1 \\\n if how_many is not None else None\n similarities = self.similarity[user_id]\n self.similarity.num_best = old_how_many\n return np.array([to_user_id for to_user_id, pref in similarities \\\n if user_id != to_user_id and not np.isnan(pref)])\n\n def recommend(self, user_id, how_many=None, **params):\n '''\n Return a list of recommended items, ordered from most strongly\n recommend to least.\n\n Parameters\n ----------\n user_id: int or string\n User for which recommendations are to be computed.\n how_many: int\n Desired number of recommendations (default=None ALL)\n\n '''\n\n self.set_params(**params)\n\n candidate_items = self.all_other_items(user_id, **params)\n\n recommendable_items = self._top_matches(user_id, \\\n candidate_items, how_many)\n\n return recommendable_items\n\n def _top_matches(self, source_id, target_ids, how_many=None, **params):\n '''\n Parameters\n ----------\n target_ids: array of shape [n_target_ids]\n\n source_id: int or string\n item id to compare against.\n\n how_many: int\n Desired number of most top items to recommend (default=None ALL)\n\n Returns\n --------\n Return the top N matches\n It can be user_ids or item_ids.\n '''\n #Empty target_ids\n if target_ids.size == 0:\n return np.array([])\n\n estimate_preferences = np.vectorize(self.estimate_preference)\n\n preferences = estimate_preferences(source_id, target_ids)\n\n preference_values = preferences[~np.isnan(preferences)]\n target_ids = target_ids[~np.isnan(preferences)]\n\n sorted_preferences = np.lexsort((preference_values,))[::-1]\n\n sorted_preferences = sorted_preferences[0:how_many] \\\n if how_many and sorted_preferences.size > how_many \\\n else sorted_preferences\n\n if self.with_preference:\n top_n_recs = [(target_ids[ind], \\\n preferences[ind]) for ind in sorted_preferences]\n else:\n top_n_recs = [target_ids[ind]\n for ind in sorted_preferences]\n\n return top_n_recs\n\n def recommended_because(self, user_id, item_id, how_many=None, **params):\n '''\n Returns the users that were most influential in recommending a\n given item to a given user. 
In most implementations, this\n method will return users that prefers the recommended item and that\n are similar to the given user.\n\n Parameters\n -----------\n user_id : int or string\n ID of the user who was recommended the item\n\n item_id: int or string\n ID of item that was recommended\n\n how_many: int\n Maximum number of items to return (default=None ALL)\n\n Returns\n ----------\n The list of items ordered from most influential in\n recommended the given item to least\n '''\n preferences = self.model.preferences_for_item(item_id)\n\n if self.model.has_preference_values():\n similarities = \\\n np.array([self.similarity.get_similarity(user_id, to_user_id) \\\n for to_user_id, pref in preferences\n if to_user_id != user_id]).flatten()\n prefs = np.array([pref for it, pref in preferences])\n user_ids = np.array([usr for usr, pref in preferences])\n else:\n similarities = \\\n np.array([self.similarity.get_similarity(user_id, to_user_id) \\\n for to_user_id in preferences\n if to_user_id != user_id]).flatten()\n prefs = np.array([1.0 for it in preferences])\n user_ids = np.array(preferences)\n\n scores = prefs[~np.isnan(similarities)] * \\\n (1.0 + similarities[~np.isnan(similarities)])\n\n sorted_preferences = np.lexsort((scores,))[::-1]\n\n sorted_preferences = sorted_preferences[0:how_many] \\\n if how_many and sorted_preferences.size > how_many \\\n else sorted_preferences\n\n if self.with_preference:\n top_n_recs = [(user_ids[ind], \\\n prefs[ind]) for ind in sorted_preferences]\n else:\n top_n_recs = [user_ids[ind]\n for ind in sorted_preferences]\n\n return top_n_recs\n"
] | [
[
"numpy.isnan",
"numpy.lexsort",
"numpy.setdiff1d",
"numpy.vectorize",
"numpy.array",
"numpy.sum"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
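
Both recommenders in the record above estimate an unknown preference as a similarity-weighted average of known preferences, discarding NaN similarities, refusing to estimate when no usable neighbours remain, and optionally capping the result to the model's preference range. The sketch below isolates that arithmetic in plain NumPy; it is not the scikits.crab API, and the function name, cap bounds, and sample numbers are illustrative.

```python
# Minimal sketch (illustrative, not scikits.crab): the similarity-weighted average
# behind estimate_preference in the item- and user-based recommenders above.
import numpy as np

def estimate_preference(prefs, similarities, min_p=1.0, max_p=5.0):
    """Weighted average of known preferences, weighted by neighbour similarities."""
    mask = ~np.isnan(similarities)
    if not mask.any():
        return np.nan                                   # no usable neighbours -> no estimate
    total_similarity = np.sum(similarities[mask])
    if total_similarity == 0.0:
        return np.nan
    estimated = np.sum(prefs[mask] * similarities[mask]) / total_similarity
    return float(np.clip(estimated, min_p, max_p))      # the "capper" behaviour

prefs = np.array([3.0, 4.5, 2.0])                       # preferences the user has expressed
sims  = np.array([0.8, np.nan, 0.4])                    # similarities to the target item/user
print(estimate_preference(prefs, sims))                 # (3.0*0.8 + 2.0*0.4) / 1.2 = 2.666...
```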
Dipeshtamboli/domain-shift | [
"3f29577df6ab7269ad69a5fc651b63ed78708f0b",
"3f29577df6ab7269ad69a5fc651b63ed78708f0b"
] | [
"data_statistics.py",
"plot_tsne.py"
] | [
"import pdb\r\nimport numpy as np\r\nimport os\r\nimport glob\r\nimport torch\r\nimport torch.nn as nn\r\nimport torchvision.models as models\r\nimport torchvision.transforms as transforms\r\nfrom torch.autograd import Variable\r\nfrom PIL import Image\r\nfrom tqdm import tqdm\r\n\r\nrelative_path = 'datasets/resnet_features_subset_office31/'\r\n# relative_path = 'datasets/office-31_10_class_subset/'\r\n\r\nall_npys = glob.glob(os.path.dirname(os.path.realpath(__file__))+'/'+relative_path+\"**/*.npy\" , recursive=True)\r\n\r\nnum_plot_classes = 31\r\nall_features = np.zeros((num_plot_classes*3*5,1000))\r\nall_feat = {\r\n \"amazon\": np.zeros((num_plot_classes*5,1000)),\r\n \"dslr\": np.zeros((num_plot_classes*5,1000)),\r\n \"webcam\": np.zeros((num_plot_classes*5,1000)),\r\n}\r\ndomain_names =[]\r\nclass_names = []\r\ncounter = 0\r\nfor i, npy_loc in enumerate(all_npys):\r\n unique_labels, unique_counts = np.unique(class_names, return_counts=True)\r\n domain = npy_loc.split('/')[-3]\r\n class_name = npy_loc.split('/')[-2]\r\n\r\n if len(np.unique(class_names)) < num_plot_classes or class_name in class_names:\r\n all_features[counter] = np.load(npy_loc)\r\n counter += 1\r\n domain_names.append(domain)\r\n class_names.append(class_name)",
"import os\r\nimport pdb\r\nimport numpy as np\r\nfrom scipy import io\r\nfrom sklearn.manifold import TSNE\r\nfrom matplotlib import pyplot as plt\r\nimport matplotlib\r\nimport seaborn as sns\r\nimport time\r\nimport glob\r\n\r\nstart_time = time.time()\r\n# load all the npy feature vectors\r\n\r\nrelative_path = 'datasets/resnet_features_complete_office31/'\r\n# relative_path = 'datasets/resnet_features_subset_office31/'\r\n# relative_path = 'datasets/office-31_10_class_subset/'\r\n\r\nall_npys = glob.glob(os.path.dirname(os.path.realpath(__file__))+'/'+relative_path+\"**/*.npy\" , recursive=True)\r\n\r\nnum_plot_classes = 31\r\nall_features = np.zeros((num_plot_classes*3*5,1000))\r\ndomain_names =[]\r\nclass_names = []\r\ncounter = 0\r\nfor i, npy_loc in enumerate(all_npys):\r\n unique_labels, unique_counts = np.unique(class_names, return_counts=True)\r\n domain = npy_loc.split('/')[-3]\r\n if not domain == \"dslr\":\r\n continue\r\n class_name = npy_loc.split('/')[-2]\r\n\r\n if len(np.unique(class_names)) < num_plot_classes or class_name in class_names:\r\n if counter>= len(all_features):\r\n # np.insert(all_features, counter, np.load(npy_loc))\r\n all_features = np.concatenate((all_features, np.load(npy_loc)), axis=0)\r\n else:\r\n all_features[counter] = np.load(npy_loc)\r\n counter += 1\r\n domain_names.append(domain)\r\n class_names.append(class_name)\r\n\r\ntsne = TSNE(n_jobs=16)\r\nembeddings = tsne.fit_transform(all_features)\r\nvis_x = embeddings[:, 0]\r\nvis_y = embeddings[:, 1]\r\n\r\nsns.set(rc={'figure.figsize':(11.7,8.27)})\r\n\r\npalette = sns.color_palette(\"bright\", num_plot_classes)\r\n# palette = sns.color_palette(\"RdPu\", 31)\r\n\r\n# pdb.set_trace()\r\n# plot = sns.scatterplot(vis_x, vis_y, hue=class_names, style = domain_names, markers=['P', 'o', 'X'], palette=palette)\r\nplot = sns.scatterplot(vis_x, vis_y, hue=class_names, style = domain_names, markers=['o'], palette=palette)\r\nplot.get_legend().set_title(\"Classes\")\r\n\r\n# handles, labels = plot.get_legend_handles_labels()\r\n# labels[-1] = \"gen\"\r\n# labels[-2] = \"conv\"\r\n# plot.legend(handles, labels) \r\nplot.legend(loc='center left', bbox_to_anchor=(1.0, 0.5), ncol=1)\r\nplt.tight_layout()\r\nplt.savefig(f\"TSNE_plots/office-31-dslr-{num_plot_classes}_classes_complete_dataset.png\")\r\nprint(\"--- {} mins {} secs---\".format((time.time() - start_time)//60,(time.time() - start_time)%60))\r\n# pdb.set_trace()"
] | [
[
"numpy.load",
"numpy.zeros",
"numpy.unique"
],
[
"matplotlib.pyplot.tight_layout",
"numpy.unique",
"matplotlib.pyplot.savefig",
"sklearn.manifold.TSNE",
"numpy.load",
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
hephaex/probability | [
"740d0db0bf2b1e1a04cfd0b55481c44380b3cb05",
"740d0db0bf2b1e1a04cfd0b55481c44380b3cb05",
"740d0db0bf2b1e1a04cfd0b55481c44380b3cb05",
"740d0db0bf2b1e1a04cfd0b55481c44380b3cb05",
"740d0db0bf2b1e1a04cfd0b55481c44380b3cb05",
"740d0db0bf2b1e1a04cfd0b55481c44380b3cb05"
] | [
"tensorflow_probability/python/distributions/poisson_lognormal.py",
"tensorflow_probability/python/distributions/multivariate_student_t.py",
"tensorflow_probability/python/bijectors/transpose.py",
"tensorflow_probability/python/monte_carlo/expectation_test.py",
"tensorflow_probability/python/bijectors/reciprocal_test.py",
"tensorflow_probability/examples/deep_exponential_family.py"
] | [
"# Copyright 2018 The TensorFlow Probability Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"The PoissonLogNormalQuadratureCompound distribution class.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\n# Dependency imports\nimport numpy as np\nimport tensorflow as tf\n\nfrom tensorflow_probability.python.bijectors import exp as exp_bijector\nfrom tensorflow_probability.python.distributions import categorical\nfrom tensorflow_probability.python.distributions import distribution\nfrom tensorflow_probability.python.distributions import normal\nfrom tensorflow_probability.python.distributions import poisson\nfrom tensorflow_probability.python.distributions import seed_stream\nfrom tensorflow_probability.python.distributions import transformed_distribution\nfrom tensorflow_probability.python.internal import distribution_util\nfrom tensorflow_probability.python.internal import dtype_util\nfrom tensorflow_probability.python.internal import reparameterization\n\n\n__all__ = [\n \"PoissonLogNormalQuadratureCompound\",\n \"quadrature_scheme_lognormal_gauss_hermite\",\n \"quadrature_scheme_lognormal_quantiles\",\n]\n\n\ndef quadrature_scheme_lognormal_gauss_hermite(\n loc, scale, quadrature_size,\n validate_args=False, name=None): # pylint: disable=unused-argument\n \"\"\"Use Gauss-Hermite quadrature to form quadrature on positive-reals.\n\n Note: for a given `quadrature_size`, this method is generally less accurate\n than `quadrature_scheme_lognormal_quantiles`.\n\n Args:\n loc: `float`-like (batch of) scalar `Tensor`; the location parameter of\n the LogNormal prior.\n scale: `float`-like (batch of) scalar `Tensor`; the scale parameter of\n the LogNormal prior.\n quadrature_size: Python `int` scalar representing the number of quadrature\n points.\n validate_args: Python `bool`, default `False`. When `True` distribution\n parameters are checked for validity despite possibly degrading runtime\n performance. 
When `False` invalid inputs may silently render incorrect\n outputs.\n name: Python `str` name prefixed to Ops created by this class.\n\n Returns:\n grid: (Batch of) length-`quadrature_size` vectors representing the\n `log_rate` parameters of a `Poisson`.\n probs: (Batch of) length-`quadrature_size` vectors representing the\n weight associate with each `grid` value.\n \"\"\"\n with tf.name_scope(name, \"vector_diffeomixture_quadrature_gauss_hermite\",\n [loc, scale]):\n grid, probs = np.polynomial.hermite.hermgauss(deg=quadrature_size)\n grid = grid.astype(loc.dtype.as_numpy_dtype)\n probs = probs.astype(loc.dtype.as_numpy_dtype)\n probs /= np.linalg.norm(probs, ord=1, keepdims=True)\n probs = tf.convert_to_tensor(value=probs, name=\"probs\", dtype=loc.dtype)\n # The following maps the broadcast of `loc` and `scale` to each grid\n # point, i.e., we are creating several log-rates that correspond to the\n # different Gauss-Hermite quadrature points and (possible) batches of\n # `loc` and `scale`.\n grid = (loc[..., tf.newaxis] + np.sqrt(2.) * scale[..., tf.newaxis] * grid)\n return grid, probs\n\n\ndef quadrature_scheme_lognormal_quantiles(\n loc, scale, quadrature_size,\n validate_args=False, name=None):\n \"\"\"Use LogNormal quantiles to form quadrature on positive-reals.\n\n Args:\n loc: `float`-like (batch of) scalar `Tensor`; the location parameter of\n the LogNormal prior.\n scale: `float`-like (batch of) scalar `Tensor`; the scale parameter of\n the LogNormal prior.\n quadrature_size: Python `int` scalar representing the number of quadrature\n points.\n validate_args: Python `bool`, default `False`. When `True` distribution\n parameters are checked for validity despite possibly degrading runtime\n performance. When `False` invalid inputs may silently render incorrect\n outputs.\n name: Python `str` name prefixed to Ops created by this class.\n\n Returns:\n grid: (Batch of) length-`quadrature_size` vectors representing the\n `log_rate` parameters of a `Poisson`.\n probs: (Batch of) length-`quadrature_size` vectors representing the\n weight associate with each `grid` value.\n \"\"\"\n with tf.name_scope(name, \"quadrature_scheme_lognormal_quantiles\",\n [loc, scale]):\n # Create a LogNormal distribution.\n dist = transformed_distribution.TransformedDistribution(\n distribution=normal.Normal(loc=loc, scale=scale),\n bijector=exp_bijector.Exp(),\n validate_args=validate_args)\n batch_ndims = dist.batch_shape.ndims\n if batch_ndims is None:\n batch_ndims = tf.shape(input=dist.batch_shape_tensor())[0]\n\n def _compute_quantiles():\n \"\"\"Helper to build quantiles.\"\"\"\n # Omit {0, 1} since they might lead to Inf/NaN.\n zero = tf.zeros([], dtype=dist.dtype)\n edges = tf.linspace(zero, 1., quadrature_size + 3)[1:-1]\n # Expand edges so its broadcast across batch dims.\n edges = tf.reshape(\n edges,\n shape=tf.concat(\n [[-1], tf.ones([batch_ndims], dtype=tf.int32)], axis=0))\n quantiles = dist.quantile(edges)\n # Cyclically permute left by one.\n perm = tf.concat([tf.range(1, 1 + batch_ndims), [0]], axis=0)\n quantiles = tf.transpose(a=quantiles, perm=perm)\n return quantiles\n quantiles = _compute_quantiles()\n\n # Compute grid as quantile midpoints.\n grid = (quantiles[..., :-1] + quantiles[..., 1:]) / 2.\n # Set shape hints.\n grid.set_shape(dist.batch_shape.concatenate([quadrature_size]))\n\n # By construction probs is constant, i.e., `1 / quadrature_size`. 
This is\n # important, because non-constant probs leads to non-reparameterizable\n # samples.\n probs = tf.fill(\n dims=[quadrature_size], value=1. / tf.cast(quadrature_size, dist.dtype))\n\n return grid, probs\n\n\nclass PoissonLogNormalQuadratureCompound(distribution.Distribution):\n \"\"\"`PoissonLogNormalQuadratureCompound` distribution.\n\n The `PoissonLogNormalQuadratureCompound` is an approximation to a\n Poisson-LogNormal [compound distribution](\n https://en.wikipedia.org/wiki/Compound_probability_distribution), i.e.,\n\n ```none\n p(k|loc, scale)\n = int_{R_+} dl LogNormal(l | loc, scale) Poisson(k | l)\n approx= sum{ prob[d] Poisson(k | lambda(grid[d])) : d=0, ..., deg-1 }\n ```\n\n By default, the `grid` is chosen as quantiles of the `LogNormal` distribution\n parameterized by `loc`, `scale` and the `prob` vector is\n `[1. / quadrature_size]*quadrature_size`.\n\n In the non-approximation case, a draw from the LogNormal prior represents the\n Poisson rate parameter. Unfortunately, the non-approximate distribution lacks\n an analytical probability density function (pdf). Therefore the\n `PoissonLogNormalQuadratureCompound` class implements an approximation based\n on [quadrature](https://en.wikipedia.org/wiki/Numerical_integration).\n\n Note: although the `PoissonLogNormalQuadratureCompound` is approximately the\n Poisson-LogNormal compound distribution, it is itself a valid distribution.\n Viz., it possesses a `sample`, `log_prob`, `mean`, `variance`, etc. which are\n all mutually consistent.\n\n #### Mathematical Details\n\n The `PoissonLogNormalQuadratureCompound` approximates a Poisson-LogNormal\n [compound distribution](\n https://en.wikipedia.org/wiki/Compound_probability_distribution). Using\n variable-substitution and [numerical quadrature](\n https://en.wikipedia.org/wiki/Numerical_integration) (default:\n based on `LogNormal` quantiles) we can redefine the distribution to be a\n parameter-less convex combination of `deg` different Poisson samples.\n\n That is, defined over positive integers, this distribution is parameterized\n by a (batch of) `loc` and `scale` scalars.\n\n The probability density function (pdf) is,\n\n ```none\n pdf(k | loc, scale, deg)\n = sum{ prob[d] Poisson(k | lambda=exp(grid[d]))\n : d=0, ..., deg-1 }\n ```\n\n #### Examples\n\n ```python\n tfd = tfp.distributions\n\n # Create two batches of PoissonLogNormalQuadratureCompounds, one with\n # prior `loc = 0.` and another with `loc = 1.` In both cases `scale = 1.`\n pln = tfd.PoissonLogNormalQuadratureCompound(\n loc=[0., -0.5],\n scale=1.,\n quadrature_size=10,\n validate_args=True)\n \"\"\"\n\n def __init__(self,\n loc,\n scale,\n quadrature_size=8,\n quadrature_fn=quadrature_scheme_lognormal_quantiles,\n validate_args=False,\n allow_nan_stats=True,\n name=\"PoissonLogNormalQuadratureCompound\"):\n \"\"\"Constructs the PoissonLogNormalQuadratureCompound`.\n\n Note: `probs` returned by (optional) `quadrature_fn` are presumed to be\n either a length-`quadrature_size` vector or a batch of vectors in 1-to-1\n correspondence with the returned `grid`. 
(I.e., broadcasting is only\n partially supported.)\n\n Args:\n loc: `float`-like (batch of) scalar `Tensor`; the location parameter of\n the LogNormal prior.\n scale: `float`-like (batch of) scalar `Tensor`; the scale parameter of\n the LogNormal prior.\n quadrature_size: Python `int` scalar representing the number of quadrature\n points.\n quadrature_fn: Python callable taking `loc`, `scale`,\n `quadrature_size`, `validate_args` and returning `tuple(grid, probs)`\n representing the LogNormal grid and corresponding normalized weight.\n normalized) weight.\n Default value: `quadrature_scheme_lognormal_quantiles`.\n validate_args: Python `bool`, default `False`. When `True` distribution\n parameters are checked for validity despite possibly degrading runtime\n performance. When `False` invalid inputs may silently render incorrect\n outputs.\n allow_nan_stats: Python `bool`, default `True`. When `True`,\n statistics (e.g., mean, mode, variance) use the value \"`NaN`\" to\n indicate the result is undefined. When `False`, an exception is raised\n if one or more of the statistic's batch members are undefined.\n name: Python `str` name prefixed to Ops created by this class.\n\n Raises:\n TypeError: if `quadrature_grid` and `quadrature_probs` have different base\n `dtype`.\n \"\"\"\n parameters = dict(locals())\n with tf.name_scope(name, values=[loc, scale]) as name:\n dtype = dtype_util.common_dtype([loc, scale], tf.float32)\n if loc is not None:\n loc = tf.convert_to_tensor(value=loc, name=\"loc\", dtype=dtype)\n if scale is not None:\n scale = tf.convert_to_tensor(value=scale, dtype=dtype, name=\"scale\")\n self._quadrature_grid, self._quadrature_probs = tuple(quadrature_fn(\n loc, scale, quadrature_size, validate_args))\n\n dt = self._quadrature_grid.dtype\n if dt.base_dtype != self._quadrature_probs.dtype.base_dtype:\n raise TypeError(\"Quadrature grid dtype ({}) does not match quadrature \"\n \"probs dtype ({}).\".format(\n dt.name, self._quadrature_probs.dtype.name))\n\n self._distribution = poisson.Poisson(\n log_rate=self._quadrature_grid,\n validate_args=validate_args,\n allow_nan_stats=allow_nan_stats)\n\n self._mixture_distribution = categorical.Categorical(\n logits=tf.math.log(self._quadrature_probs),\n validate_args=validate_args,\n allow_nan_stats=allow_nan_stats)\n\n self._loc = loc\n self._scale = scale\n self._quadrature_size = quadrature_size\n\n super(PoissonLogNormalQuadratureCompound, self).__init__(\n dtype=dt,\n reparameterization_type=reparameterization.NOT_REPARAMETERIZED,\n validate_args=validate_args,\n allow_nan_stats=allow_nan_stats,\n parameters=parameters,\n graph_parents=[loc, scale],\n name=name)\n\n @property\n def mixture_distribution(self):\n \"\"\"Distribution which randomly selects a Poisson with quadrature param.\"\"\"\n return self._mixture_distribution\n\n @property\n def distribution(self):\n \"\"\"Base Poisson parameterized by a quadrature grid.\"\"\"\n return self._distribution\n\n @property\n def loc(self):\n \"\"\"Location parameter of the LogNormal prior.\"\"\"\n return self._loc\n\n @property\n def scale(self):\n \"\"\"Scale parameter of the LogNormal prior.\"\"\"\n return self._scale\n\n @property\n def quadrature_size(self):\n return self._quadrature_size\n\n def _batch_shape_tensor(self):\n return tf.broadcast_dynamic_shape(\n self.distribution.batch_shape_tensor(),\n tf.shape(input=self.mixture_distribution.logits))[:-1]\n\n def _batch_shape(self):\n return tf.broadcast_static_shape(\n self.distribution.batch_shape,\n 
self.mixture_distribution.logits.shape)[:-1]\n\n def _event_shape(self):\n return tf.TensorShape([])\n\n def _sample_n(self, n, seed=None):\n # Get ids as a [n, batch_size]-shaped matrix, unless batch_shape=[] then get\n # ids as a [n]-shaped vector.\n batch_size = self.batch_shape.num_elements()\n if batch_size is None:\n batch_size = tf.reduce_prod(input_tensor=self.batch_shape_tensor())\n # We need to \"sample extra\" from the mixture distribution if it doesn't\n # already specify a probs vector for each batch coordinate.\n # We only support this kind of reduced broadcasting, i.e., there is exactly\n # one probs vector for all batch dims or one for each.\n stream = seed_stream.SeedStream(\n seed, salt=\"PoissonLogNormalQuadratureCompound\")\n ids = self._mixture_distribution.sample(\n sample_shape=concat_vectors(\n [n],\n distribution_util.pick_vector(\n self.mixture_distribution.is_scalar_batch(),\n [batch_size],\n np.int32([]))),\n seed=stream())\n # We need to flatten batch dims in case mixture_distribution has its own\n # batch dims.\n ids = tf.reshape(\n ids,\n shape=concat_vectors([n],\n distribution_util.pick_vector(\n self.is_scalar_batch(), np.int32([]),\n np.int32([-1]))))\n\n # Stride `quadrature_size` for `batch_size` number of times.\n offset = tf.range(\n start=0,\n limit=batch_size * self._quadrature_size,\n delta=self._quadrature_size,\n dtype=ids.dtype)\n ids += offset\n rate = tf.gather(tf.reshape(self.distribution.rate, shape=[-1]), ids)\n rate = tf.reshape(\n rate, shape=concat_vectors([n], self.batch_shape_tensor()))\n return tf.random.poisson(lam=rate, shape=[], dtype=self.dtype, seed=seed)\n\n def _log_prob(self, x):\n return tf.reduce_logsumexp(\n input_tensor=(self.mixture_distribution.logits +\n self.distribution.log_prob(x[..., tf.newaxis])),\n axis=-1)\n\n def _mean(self):\n return tf.exp(\n tf.reduce_logsumexp(\n input_tensor=self.mixture_distribution.logits +\n self.distribution.log_rate,\n axis=-1))\n\n def _variance(self):\n return tf.exp(self._log_variance())\n\n def _stddev(self):\n return tf.exp(0.5 * self._log_variance())\n\n def _log_variance(self):\n # Following calculation is based on law of total variance:\n #\n # Var[Z] = E[Var[Z | V]] + Var[E[Z | V]]\n #\n # where,\n #\n # Z|v ~ interpolate_affine[v](distribution)\n # V ~ mixture_distribution\n #\n # thus,\n #\n # E[Var[Z | V]] = sum{ prob[d] Var[d] : d=0, ..., deg-1 }\n # Var[E[Z | V]] = sum{ prob[d] (Mean[d] - Mean)**2 : d=0, ..., deg-1 }\n v = tf.stack(\n [\n # log(self.distribution.variance()) = log(Var[d]) = log(rate[d])\n self.distribution.log_rate,\n # log((Mean[d] - Mean)**2)\n 2. * tf.math.log(\n tf.abs(self.distribution.mean() -\n self._mean()[..., tf.newaxis])),\n ],\n axis=-1)\n return tf.reduce_logsumexp(\n input_tensor=self.mixture_distribution.logits[..., tf.newaxis] + v,\n axis=[-2, -1])\n\n\ndef concat_vectors(*args):\n \"\"\"Concatenates input vectors, statically if possible.\"\"\"\n args_ = [tf.get_static_value(x) for x in args]\n if any(vec is None for vec in args_):\n return tf.concat(args, axis=0)\n return [val for vec in args_ for val in vec]\n",
"# Copyright 2018 The TensorFlow Probability Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"Multivariate Student's t-distribution.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport functools\n\nimport numpy as np\nimport tensorflow as tf\n\nfrom tensorflow_probability.python import math\nfrom tensorflow_probability.python.distributions import chi2 as chi2_lib\nfrom tensorflow_probability.python.distributions import distribution\nfrom tensorflow_probability.python.distributions import mvn_linear_operator\nfrom tensorflow_probability.python.distributions import seed_stream\nfrom tensorflow_probability.python.internal import distribution_util\nfrom tensorflow_probability.python.internal import dtype_util\nfrom tensorflow_probability.python.internal import reparameterization\n\n__all__ = [\n \"MultivariateStudentTLinearOperator\",\n]\n\n\ndef _broadcast_to_shape(x, shape):\n return x + tf.zeros(shape=shape, dtype=x.dtype)\n\n\nclass MultivariateStudentTLinearOperator(distribution.Distribution):\n \"\"\"The [Multivariate Student's t-distribution](\n\n https://en.wikipedia.org/wiki/Multivariate_t-distribution) on `R^k`.\n\n #### Mathematical Details\n\n The probability density function (pdf) is,\n\n ```none\n pdf(x; df, loc, Sigma) = (1 + ||y||**2 / df)**(-0.5 (df + k)) / Z\n where,\n y = inv(Sigma) (x - loc)\n Z = abs(det(Sigma)) sqrt(df pi)**k Gamma(0.5 df) / Gamma(0.5 (df + k))\n ```\n\n where:\n\n * `df` is a positive scalar.\n * `loc` is a vector in `R^k`,\n * `Sigma` is a positive definite `shape' matrix in `R^{k x k}`, parameterized\n as `scale @ scale.T` in this class,\n * `Z` denotes the normalization constant, and,\n * `||y||**2` denotes the squared Euclidean norm of `y`.\n\n The Multivariate Student's t-distribution distribution is a member of the\n [location-scale\n family](https://en.wikipedia.org/wiki/Location-scale_family), i.e., it can be\n constructed as,\n\n ```none\n X ~ MultivariateT(loc=0, scale=1) # Identity scale, zero shift.\n Y = scale @ X + loc\n ```\n\n #### Examples\n\n ```python\n tfd = tfp.distributions\n\n # Initialize a single 3-variate Student's t.\n df = 3.\n loc = [1., 2, 3]\n scale = [[ 0.6, 0. , 0. ],\n [ 0.2, 0.5, 0. 
],\n [ 0.1, -0.3, 0.4]]\n sigma = tf.matmul(scale, scale, adjoint_b=True)\n # ==> [[ 0.36, 0.12, 0.06],\n # [ 0.12, 0.29, -0.13],\n # [ 0.06, -0.13, 0.26]]\n\n mvt = tfd.MultivariateStudentTLinearOperator(\n df=df,\n loc=loc,\n scale=tf.linalg.LinearOperatorLowerTriangular(scale))\n\n # Covariance is closely related to the sigma matrix (for df=3, it is 3x of the\n # sigma matrix).\n\n mvt.covariance().eval()\n # ==> [[ 1.08, 0.36, 0.18],\n # [ 0.36, 0.87, -0.39],\n # [ 0.18, -0.39, 0.78]]\n\n # Compute the pdf of an`R^3` observation; return a scalar.\n mvt.prob([-1., 0, 1]).eval() # shape: []\n\n \"\"\"\n\n def __init__(self,\n df,\n loc,\n scale,\n validate_args=False,\n allow_nan_stats=True,\n name=\"MultivariateStudentTLinearOperator\"):\n \"\"\"Construct Multivariate Student's t-distribution on `R^k`.\n\n The `batch_shape` is the broadcast shape between `df`, `loc` and `scale`\n arguments.\n\n The `event_shape` is given by last dimension of the matrix implied by\n `scale`. The last dimension of `loc` must broadcast with this.\n\n Additional leading dimensions (if any) will index batches.\n\n Args:\n df: A positive floating-point `Tensor`. Has shape `[B1, ..., Bb]` where `b\n >= 0`.\n loc: Floating-point `Tensor`. Has shape `[B1, ..., Bb, k]` where `k` is\n the event size.\n scale: Instance of `LinearOperator` with a floating `dtype` and shape\n `[B1, ..., Bb, k, k]`.\n validate_args: Python `bool`, default `False`. Whether to validate input\n with asserts. If `validate_args` is `False`, and the inputs are invalid,\n correct behavior is not guaranteed.\n allow_nan_stats: Python `bool`, default `True`. If `False`, raise an\n exception if a statistic (e.g. mean/variance/etc...) is undefined for\n any batch member If `True`, batch members with valid parameters leading\n to undefined statistics will return NaN for this statistic.\n name: The name to give Ops created by the initializer.\n\n Raises:\n TypeError: if not `scale.dtype.is_floating`.\n ValueError: if not `scale.is_positive_definite`.\n \"\"\"\n parameters = dict(locals())\n if not scale.dtype.is_floating:\n raise TypeError(\"`scale` must have floating-point dtype.\")\n if validate_args and not scale.is_positive_definite:\n raise ValueError(\"`scale` must be positive definite.\")\n\n with tf.name_scope(name, values=[df, loc] + scale.graph_parents) as name:\n dtype = dtype_util.common_dtype([df, loc, scale],\n preferred_dtype=tf.float32)\n\n with tf.control_dependencies(\n [tf.compat.v1.assert_positive(df, message=\"`df` must be positive.\"\n )] if validate_args else []):\n self._df = tf.identity(\n tf.convert_to_tensor(value=df, dtype=dtype), name=\"df\")\n self._loc = tf.convert_to_tensor(value=loc, name=\"loc\", dtype=dtype)\n self._scale = scale\n\n super(MultivariateStudentTLinearOperator, self).__init__(\n dtype=dtype,\n reparameterization_type=reparameterization.FULLY_REPARAMETERIZED,\n parameters=parameters,\n graph_parents=[self._df, self._loc] + self._scale.graph_parents,\n name=name,\n validate_args=validate_args,\n allow_nan_stats=allow_nan_stats)\n self._parameters = parameters\n\n @property\n def loc(self):\n \"\"\"The location parameter of the distribution.\n\n `loc` applies an elementwise shift to the distribution.\n\n ```none\n X ~ MultivariateT(loc=0, scale=1) # Identity scale, zero shift.\n Y = scale @ X + loc\n ```\n\n Returns:\n The `loc` `Tensor`.\n \"\"\"\n return self._loc\n\n @property\n def scale(self):\n \"\"\"The scale parameter of the distribution.\n\n `scale` applies an affine scale to the 
distribution.\n\n ```none\n X ~ MultivariateT(loc=0, scale=1) # Identity scale, zero shift.\n Y = scale @ X + loc\n ```\n\n Returns:\n The `scale` `LinearOperator`.\n \"\"\"\n return self._scale\n\n @property\n def df(self):\n \"\"\"The degrees of freedom of the distribution.\n\n This controls the degrees of freedom of the distribution. The tails of the\n distribution get more heavier the smaller `df` is. As `df` goes to\n infinitiy, the distribution approaches the Multivariate Normal with the same\n `loc` and `scale`.\n\n Returns:\n The `df` `Tensor`.\n \"\"\"\n return self._df\n\n def _batch_shape_tensor(self):\n shape_list = [\n self.scale.batch_shape_tensor(),\n tf.shape(input=self.df),\n tf.shape(input=self.loc)[:-1]\n ]\n return functools.reduce(tf.broadcast_dynamic_shape, shape_list)\n\n def _batch_shape(self):\n shape_list = [self.scale.batch_shape, self.df.shape, self.loc.shape[:-1]]\n return functools.reduce(tf.broadcast_static_shape, shape_list)\n\n def _event_shape_tensor(self):\n return self.scale.range_dimension_tensor()[tf.newaxis]\n\n def _event_shape(self):\n return self.scale.range_dimension\n\n def _sample_shape(self):\n return tf.concat([self.batch_shape_tensor(), self.event_shape_tensor()], -1)\n\n def _sample_n(self, n, seed=None):\n # Like with the univariate Student's t, sampling can be implemented as a\n # ratio of samples from a multivariate gaussian with the appropriate\n # covariance matrix and a sample from the chi-squared distribution.\n seed = seed_stream.SeedStream(seed, salt=\"multivariate t\")\n\n loc = _broadcast_to_shape(self.loc, self._sample_shape())\n mvn = mvn_linear_operator.MultivariateNormalLinearOperator(\n loc=tf.zeros_like(loc), scale=self.scale)\n normal_samp = mvn.sample(n, seed=seed())\n\n df = _broadcast_to_shape(self.df, self.batch_shape_tensor())\n chi2 = chi2_lib.Chi2(df=df)\n chi2_samp = chi2.sample(n, seed=seed())\n\n return (self._loc +\n normal_samp * tf.math.rsqrt(chi2_samp / self._df)[..., tf.newaxis])\n\n def _log_normalization(self):\n num_dims = tf.cast(self.event_shape_tensor()[0], self.dtype)\n return (tf.math.lgamma(self.df / 2.) + num_dims / 2. *\n (tf.math.log(self.df) + np.log(np.pi)) +\n self.scale.log_abs_determinant() - tf.math.lgamma(\n (num_dims + self.df) / 2.))\n\n def _log_unnormalized_prob(self, value):\n value -= self._loc\n value = self.scale.solve(value[..., tf.newaxis])\n\n num_dims = tf.cast(self.event_shape_tensor()[0], self.dtype)\n mahalanobis = tf.norm(tensor=value, axis=[-1, -2])\n return -(num_dims + self.df) / 2. * math.log1psquare(\n mahalanobis / tf.sqrt(self.df))\n\n def _log_prob(self, value):\n return self._log_unnormalized_prob(value) - self._log_normalization()\n\n @distribution_util.AppendDocstring(\n \"\"\"The mean of Student's T equals `loc` if `df > 1`, otherwise it is\n `NaN`. 
If `self.allow_nan_stats=False`, then an exception will be raised\n rather than returning `NaN`.\"\"\")\n def _mean(self):\n mean = _broadcast_to_shape(self.loc, self._sample_shape())\n df = _broadcast_to_shape(self.df[..., tf.newaxis], tf.shape(input=mean))\n\n if self.allow_nan_stats:\n nan = np.array(np.nan, dtype=self.dtype.as_numpy_dtype())\n return tf.where(df > 1., mean,\n tf.fill(tf.shape(input=mean), nan, name=\"nan\"))\n else:\n with tf.control_dependencies([\n tf.compat.v1.assert_less(\n tf.cast(1., self.dtype),\n df,\n message=\"mean not defined for components of df <= 1\"),\n ]):\n return tf.identity(mean)\n\n def _mode(self):\n return _broadcast_to_shape(self.loc, self._sample_shape())\n\n def _std_var_helper(self, statistic, statistic_name, statistic_ndims,\n df_factor_fn):\n \"\"\"Helper to compute stddev, covariance and variance.\"\"\"\n df = tf.reshape(\n self.df,\n tf.concat([\n tf.shape(input=self.df),\n tf.ones([statistic_ndims], dtype=tf.int32)\n ], -1))\n df = _broadcast_to_shape(df, tf.shape(input=statistic))\n # We need to put the tf.where inside the outer tf.where to ensure we never\n # hit a NaN in the gradient.\n denom = tf.where(df > 2., df - 2., tf.ones_like(df))\n statistic = statistic * df_factor_fn(df / denom)\n # When 1 < df <= 2, stddev/variance are infinite.\n inf = np.array(np.inf, dtype=self.dtype.as_numpy_dtype())\n result_where_defined = tf.where(\n df > 2., statistic, tf.fill(tf.shape(input=statistic), inf, name=\"inf\"))\n\n if self.allow_nan_stats:\n nan = np.array(np.nan, dtype=self.dtype.as_numpy_dtype())\n return tf.where(df > 1., result_where_defined,\n tf.fill(tf.shape(input=statistic), nan, name=\"nan\"))\n else:\n with tf.control_dependencies([\n tf.compat.v1.assert_less(\n tf.cast(1., self.dtype),\n df,\n message=statistic_name +\n \" not defined for components of df <= 1\"),\n ]):\n return tf.identity(result_where_defined)\n\n @distribution_util.AppendDocstring(\"\"\"\n The covariance for Multivariate Student's t equals\n\n ```\n scale @ scale.T * df / (df - 2), when df > 2\n infinity, when 1 < df <= 2\n NaN, when df <= 1\n ```\n\n If `self.allow_nan_stats=False`, then an exception will be raised\n rather than returning `NaN`.\"\"\")\n def _covariance(self):\n if distribution_util.is_diagonal_scale(self.scale):\n mvn_cov = tf.linalg.diag(tf.square(self.scale.diag_part()))\n else:\n mvn_cov = self.scale.matmul(self.scale.to_dense(), adjoint_arg=True)\n\n cov_shape = tf.concat(\n [self._sample_shape(), self._event_shape_tensor()], -1)\n mvn_cov = _broadcast_to_shape(mvn_cov, cov_shape)\n return self._std_var_helper(mvn_cov, \"covariance\", 2, lambda x: x)\n\n @distribution_util.AppendDocstring(\"\"\"\n The variance for Student's T equals\n\n ```none\n diag(scale @ scale.T) * df / (df - 2), when df > 2\n infinity, when 1 < df <= 2\n NaN, when df <= 1\n ```\n\n If `self.allow_nan_stats=False`, then an exception will be raised\n rather than returning `NaN`.\"\"\")\n def _variance(self):\n if distribution_util.is_diagonal_scale(self.scale):\n mvn_var = tf.square(self.scale.diag_part())\n elif (isinstance(self.scale, tf.linalg.LinearOperatorLowRankUpdate) and\n self.scale.is_self_adjoint):\n mvn_var = tf.linalg.diag_part(self.scale.matmul(self.scale.to_dense()))\n else:\n mvn_var = tf.linalg.diag_part(\n self.scale.matmul(self.scale.to_dense(), adjoint_arg=True))\n\n mvn_var = _broadcast_to_shape(mvn_var, self._sample_shape())\n return self._std_var_helper(mvn_var, \"variance\", 1, lambda x: x)\n\n @distribution_util.AppendDocstring(\"\"\"\n The 
standard deviation for Student's T equals\n\n ```none\n sqrt(diag(scale @ scale.T)) * df / (df - 2), when df > 2\n infinity, when 1 < df <= 2\n NaN, when df <= 1\n ```\n \"\"\")\n def _stddev(self):\n if distribution_util.is_diagonal_scale(self.scale):\n mvn_std = tf.abs(self.scale.diag_part())\n elif (isinstance(self.scale, tf.linalg.LinearOperatorLowRankUpdate) and\n self.scale.is_self_adjoint):\n mvn_std = tf.sqrt(\n tf.linalg.diag_part(self.scale.matmul(self.scale.to_dense())))\n else:\n mvn_std = tf.sqrt(\n tf.linalg.diag_part(\n self.scale.matmul(self.scale.to_dense(), adjoint_arg=True)))\n\n mvn_std = _broadcast_to_shape(mvn_std, self._sample_shape())\n return self._std_var_helper(mvn_std, \"standard deviation\", 1, tf.sqrt)\n\n def _entropy(self):\n df = _broadcast_to_shape(self.df, self.batch_shape_tensor())\n num_dims = tf.cast(self.event_shape_tensor()[0], self.dtype)\n\n def _lbeta(concentration0, concentration1):\n return (tf.math.lgamma(concentration1) + tf.math.lgamma(concentration0) -\n tf.math.lgamma(concentration0 + concentration1))\n\n shape_factor = self._scale.log_abs_determinant()\n beta_factor = num_dims / 2. * (\n tf.math.log(df) + np.log(np.pi)) - tf.math.lgamma(\n num_dims / 2.) + _lbeta(num_dims / 2., df / 2.)\n digamma_factor = (num_dims + df) / 2. * (\n tf.math.digamma((num_dims + df) / 2.) - tf.math.digamma(df / 2.))\n return shape_factor + beta_factor + digamma_factor\n",
"# Copyright 2018 The TensorFlow Probability Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"Transpose bijector.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\n# Dependency imports\nimport numpy as np\n\nimport tensorflow as tf\n\nfrom tensorflow_probability.python.bijectors import bijector\n\n\n__all__ = [\n 'Transpose',\n]\n\n\nclass Transpose(bijector.Bijector):\n \"\"\"Compute `Y = g(X) = transpose_rightmost_dims(X, rightmost_perm)`.\n\n This bijector is semantically similar to `tf.transpose` except that it\n transposes only the rightmost \"event\" dimensions. That is, unlike\n `tf.transpose` the `perm` argument is itself a permutation of\n `tf.range(rightmost_transposed_ndims)` rather than `tf.range(tf.rank(x))`,\n i.e., users specify the (rightmost) dimensions to permute, not all dimensions.\n\n The actual (forward) transformation is:\n\n ```python\n def forward(x, perm):\n sample_batch_ndims = tf.rank(x) - tf.size(perm)\n perm = tf.concat([\n tf.range(sample_batch_ndims),\n sample_batch_ndims + perm,\n ], axis=0)\n return tf.transpose(x, perm)\n ```\n\n #### Examples\n\n ```python\n tfp.bijectors.Transpose(perm=[1, 0]).forward(\n [\n [[1, 2],\n [3, 4]],\n [[5, 6],\n [7, 8]],\n ])\n # ==>\n # [\n # [[1, 3],\n # [2, 4]],\n # [[5, 7],\n # [6, 8]],\n # ]\n\n # Using `rightmost_transposed_ndims=2` means this bijector has the same\n # semantics as `tf.matrix_transpose`.\n tfp.bijectors.Transpose(rightmost_transposed_ndims=2).inverse(\n [\n [[1, 3],\n [2, 4]],\n [[5, 7],\n [6, 8]],\n ])\n # ==>\n # [\n # [[1, 2],\n # [3, 4]],\n # [[5, 6],\n # [7, 8]],\n # ]\n ```\n\n \"\"\"\n\n def __init__(self, perm=None, rightmost_transposed_ndims=None,\n validate_args=False, name='transpose'):\n \"\"\"Instantiates the `Transpose` bijector.\n\n Args:\n perm: Positive `int32` vector-shaped `Tensor` representing permutation of\n rightmost dims (for forward transformation). 
Note that the `0`th index\n represents the first of the rightmost dims and the largest value must be\n `rightmost_transposed_ndims - 1` and corresponds to `tf.rank(x) - 1`.\n Only one of `perm` and `rightmost_transposed_ndims` can (and must) be\n specified.\n Default value:\n `tf.range(start=rightmost_transposed_ndims, limit=-1, delta=-1)`.\n rightmost_transposed_ndims: Positive `int32` scalar-shaped `Tensor`\n representing the number of rightmost dimensions to permute.\n Only one of `perm` and `rightmost_transposed_ndims` can (and must) be\n specified.\n Default value: `tf.size(perm)`.\n validate_args: Python `bool` indicating whether arguments should be\n checked for correctness.\n name: Python `str` name given to ops managed by this object.\n\n Raises:\n ValueError: if both or neither `perm` and `rightmost_transposed_ndims` are\n specified.\n NotImplementedError: if `rightmost_transposed_ndims` is not known prior to\n graph execution.\n \"\"\"\n with tf.name_scope(name, values=[perm, rightmost_transposed_ndims]):\n if (rightmost_transposed_ndims is None) == (perm is None):\n raise ValueError('Must specify exactly one of '\n '`rightmost_transposed_ndims` and `perm`.')\n if rightmost_transposed_ndims is not None:\n rightmost_transposed_ndims = tf.convert_to_tensor(\n value=rightmost_transposed_ndims,\n dtype=np.int32,\n name='rightmost_transposed_ndims')\n rightmost_transposed_ndims_ = tf.get_static_value(\n rightmost_transposed_ndims)\n with tf.control_dependencies(_maybe_validate_rightmost_transposed_ndims(\n rightmost_transposed_ndims, validate_args)):\n rightmost_transposed_ndims = tf.identity(rightmost_transposed_ndims)\n perm = tf.range(\n start=rightmost_transposed_ndims - 1,\n limit=-1,\n delta=-1,\n name='perm')\n else: # perm is not None:\n perm = tf.convert_to_tensor(value=perm, dtype=np.int32, name='perm')\n rightmost_transposed_ndims = tf.size(\n input=perm, name='rightmost_transposed_ndims')\n rightmost_transposed_ndims_ = tf.get_static_value(\n rightmost_transposed_ndims)\n with tf.control_dependencies(_maybe_validate_perm(perm, validate_args)):\n perm = tf.identity(perm)\n\n # TODO(b/110828604): If bijector base class ever supports dynamic\n # `min_event_ndims`, then this class already works dynamically and the\n # following five lines can be removed.\n if rightmost_transposed_ndims_ is None:\n raise NotImplementedError('`rightmost_transposed_ndims` must be '\n 'known prior to graph execution.')\n else:\n rightmost_transposed_ndims_ = int(rightmost_transposed_ndims_)\n\n self._perm = perm\n self._rightmost_transposed_ndims = rightmost_transposed_ndims\n super(Transpose, self).__init__(\n forward_min_event_ndims=rightmost_transposed_ndims_,\n graph_parents=[perm, rightmost_transposed_ndims],\n is_constant_jacobian=True,\n validate_args=validate_args,\n name=name)\n\n @property\n def perm(self):\n return self._perm\n\n @property\n def rightmost_transposed_ndims(self):\n return self._rightmost_transposed_ndims\n\n def _forward(self, x):\n return self._transpose(x, self.perm)\n\n def _inverse(self, y):\n return self._transpose(y, tf.argsort(self.perm))\n\n def _inverse_log_det_jacobian(self, y):\n return tf.constant(0, dtype=y.dtype)\n\n def _forward_log_det_jacobian(self, x):\n return tf.constant(0, dtype=x.dtype)\n\n def _transpose(self, x, perm):\n sample_batch_ndims = tf.rank(x) - self.rightmost_transposed_ndims\n perm = tf.concat([\n tf.range(sample_batch_ndims),\n sample_batch_ndims + perm,\n ], axis=0)\n return tf.transpose(a=x, perm=perm)\n\n\ndef 
_maybe_validate_rightmost_transposed_ndims(\n rightmost_transposed_ndims, validate_args, name=None):\n \"\"\"Checks that `rightmost_transposed_ndims` is valid.\"\"\"\n with tf.name_scope(name, 'maybe_validate_rightmost_transposed_ndims',\n [rightmost_transposed_ndims]):\n assertions = []\n if not rightmost_transposed_ndims.dtype.is_integer:\n raise TypeError('`rightmost_transposed_ndims` must be integer type.')\n\n if rightmost_transposed_ndims.shape.ndims is not None:\n if rightmost_transposed_ndims.shape.ndims != 0:\n raise ValueError('`rightmost_transposed_ndims` must be a scalar, '\n 'saw rank: {}.'.format(\n rightmost_transposed_ndims.shape.ndims))\n elif validate_args:\n assertions += [tf.compat.v1.assert_rank(rightmost_transposed_ndims, 0)]\n\n rightmost_transposed_ndims_ = tf.get_static_value(\n rightmost_transposed_ndims)\n msg = '`rightmost_transposed_ndims` must be non-negative.'\n if rightmost_transposed_ndims_ is not None:\n if rightmost_transposed_ndims_ < 0:\n raise ValueError(msg[:-1] + ', saw: {}.'.format(\n rightmost_transposed_ndims_))\n elif validate_args:\n assertions += [\n tf.compat.v1.assert_non_negative(\n rightmost_transposed_ndims, message=msg)\n ]\n\n return assertions\n\n\ndef _maybe_validate_perm(perm, validate_args, name=None):\n \"\"\"Checks that `perm` is valid.\"\"\"\n with tf.name_scope(name, 'maybe_validate_perm', [perm]):\n assertions = []\n if not perm.dtype.is_integer:\n raise TypeError('`perm` must be integer type')\n\n msg = '`perm` must be a vector.'\n if perm.shape.ndims is not None:\n if perm.shape.ndims != 1:\n raise ValueError(\n msg[:-1] + ', saw rank: {}.'.format(perm.shape.ndims))\n elif validate_args:\n assertions += [tf.compat.v1.assert_rank(perm, 1, message=msg)]\n\n perm_ = tf.get_static_value(perm)\n msg = '`perm` must be a valid permutation vector.'\n if perm_ is not None:\n if not np.all(np.arange(np.size(perm_)) == np.sort(perm_)):\n raise ValueError(msg[:-1] + ', saw: {}.'.format(perm_))\n elif validate_args:\n assertions += [\n tf.compat.v1.assert_equal(\n tf.sort(perm), tf.range(tf.size(input=perm)), message=msg)\n ]\n\n return assertions\n",
"# Copyright 2018 The TensorFlow Probability Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"Tests for Monte Carlo Ops.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\n# Dependency imports\nimport numpy as np\n\nimport tensorflow as tf\nimport tensorflow_probability as tfp\n\nfrom tensorflow_probability.python.monte_carlo.expectation import _get_samples\n\ntfd = tfp.distributions\n\n\nclass GetSamplesTest(tf.test.TestCase):\n \"\"\"Test the private method 'get_samples'.\"\"\"\n\n def test_raises_if_both_z_and_n_are_none(self):\n dist = tfd.Normal(loc=0., scale=1.)\n z = None\n n = None\n seed = None\n with self.assertRaisesRegexp(ValueError, 'exactly one'):\n _get_samples(dist, z, n, seed)\n\n def test_raises_if_both_z_and_n_are_not_none(self):\n dist = tfd.Normal(loc=0., scale=1.)\n z = dist.sample(seed=42)\n n = 1\n seed = None\n with self.assertRaisesRegexp(ValueError, 'exactly one'):\n _get_samples(dist, z, n, seed)\n\n def test_returns_n_samples_if_n_provided(self):\n dist = tfd.Normal(loc=0., scale=1.)\n z = None\n n = 10\n seed = None\n z = _get_samples(dist, z, n, seed)\n self.assertEqual((10,), z.shape)\n\n def test_returns_z_if_z_provided(self):\n dist = tfd.Normal(loc=0., scale=1.)\n z = dist.sample(10, seed=42)\n n = None\n seed = None\n z = _get_samples(dist, z, n, seed)\n self.assertEqual((10,), z.shape)\n\n\nclass ExpectationTest(tf.test.TestCase):\n\n def test_works_correctly(self):\n x = tf.constant([-1e6, -100, -10, -1, 1, 10, 100, 1e6])\n\n # We use the prefex \"efx\" to mean \"E_p[f(X)]\".\n f = lambda u: u\n with tf.GradientTape(persistent=True) as tape:\n tape.watch(x)\n p = tfd.Normal(loc=x, scale=1.)\n efx_true = x\n samples = p.sample(int(1e5), seed=1)\n efx_reparam = tfp.monte_carlo.expectation(f, samples, p.log_prob)\n efx_score = tfp.monte_carlo.expectation(f, samples, p.log_prob,\n use_reparametrization=False)\n\n [\n efx_true_,\n efx_reparam_,\n efx_score_,\n efx_true_grad_,\n efx_reparam_grad_,\n efx_score_grad_,\n ] = self.evaluate([\n efx_true,\n efx_reparam,\n efx_score,\n tape.gradient(efx_true, x),\n tape.gradient(efx_reparam, x),\n tape.gradient(efx_score, x),\n ])\n\n self.assertAllEqual(np.ones_like(efx_true_grad_), efx_true_grad_)\n\n self.assertAllClose(efx_true_, efx_reparam_, rtol=0.005, atol=0.)\n self.assertAllClose(efx_true_, efx_score_, rtol=0.005, atol=0.)\n\n self.assertAllEqual(np.ones_like(efx_true_grad_, dtype=np.bool),\n np.isfinite(efx_reparam_grad_))\n self.assertAllEqual(np.ones_like(efx_true_grad_, dtype=np.bool),\n np.isfinite(efx_score_grad_))\n\n self.assertAllClose(efx_true_grad_, efx_reparam_grad_,\n rtol=0.03, atol=0.)\n # Variance is too high to be meaningful, so we'll only check those which\n # converge.\n self.assertAllClose(efx_true_grad_[2:-2],\n efx_score_grad_[2:-2],\n rtol=0.05, atol=0.)\n\n def test_docstring_example_normal(self):\n num_draws = 
int(1e5)\n mu_p = tf.constant(0.)\n mu_q = tf.constant(1.)\n with tf.GradientTape(persistent=True) as tape:\n tape.watch(mu_p)\n tape.watch(mu_q)\n p = tfd.Normal(loc=mu_p, scale=1.)\n q = tfd.Normal(loc=mu_q, scale=2.)\n exact_kl_normal_normal = tfd.kl_divergence(p, q)\n approx_kl_normal_normal = tfp.monte_carlo.expectation(\n f=lambda x: p.log_prob(x) - q.log_prob(x),\n samples=p.sample(num_draws, seed=42),\n log_prob=p.log_prob,\n use_reparametrization=(p.reparameterization_type ==\n tfd.FULLY_REPARAMETERIZED))\n [exact_kl_normal_normal_, approx_kl_normal_normal_] = self.evaluate([\n exact_kl_normal_normal, approx_kl_normal_normal])\n self.assertEqual(\n True,\n p.reparameterization_type == tfd.FULLY_REPARAMETERIZED)\n self.assertAllClose(exact_kl_normal_normal_, approx_kl_normal_normal_,\n rtol=0.01, atol=0.)\n\n # Compare gradients. (Not present in `docstring`.)\n gradp = lambda fp: tape.gradient(fp, mu_p)\n gradq = lambda fq: tape.gradient(fq, mu_q)\n [\n gradp_exact_kl_normal_normal_,\n gradq_exact_kl_normal_normal_,\n gradp_approx_kl_normal_normal_,\n gradq_approx_kl_normal_normal_,\n ] = self.evaluate([\n gradp(exact_kl_normal_normal),\n gradq(exact_kl_normal_normal),\n gradp(approx_kl_normal_normal),\n gradq(approx_kl_normal_normal),\n ])\n self.assertAllClose(gradp_exact_kl_normal_normal_,\n gradp_approx_kl_normal_normal_,\n rtol=0.01, atol=0.)\n self.assertAllClose(gradq_exact_kl_normal_normal_,\n gradq_approx_kl_normal_normal_,\n rtol=0.01, atol=0.)\n\n def test_docstring_example_bernoulli(self):\n num_draws = int(1e5)\n probs_p = tf.constant(0.4)\n probs_q = tf.constant(0.7)\n with tf.GradientTape(persistent=True) as tape:\n tape.watch(probs_p)\n tape.watch(probs_q)\n p = tfd.Bernoulli(probs=probs_p)\n q = tfd.Bernoulli(probs=probs_q)\n exact_kl_bernoulli_bernoulli = tfp.monte_carlo.expectation(\n f=lambda x: p.log_prob(x) - q.log_prob(x),\n samples=p.sample(num_draws, seed=42),\n log_prob=p.log_prob,\n use_reparametrization=(\n p.reparameterization_type == tfd.FULLY_REPARAMETERIZED))\n approx_kl_bernoulli_bernoulli = tfd.kl_divergence(p, q)\n [\n exact_kl_bernoulli_bernoulli_,\n approx_kl_bernoulli_bernoulli_,\n ] = self.evaluate([\n exact_kl_bernoulli_bernoulli,\n approx_kl_bernoulli_bernoulli,\n ])\n self.assertEqual(False,\n p.reparameterization_type == tfd.FULLY_REPARAMETERIZED)\n self.assertAllClose(\n exact_kl_bernoulli_bernoulli_,\n approx_kl_bernoulli_bernoulli_,\n rtol=0.01,\n atol=0.)\n print(exact_kl_bernoulli_bernoulli_, approx_kl_bernoulli_bernoulli_)\n\n # Compare gradients. (Not present in `docstring`.)\n gradp = lambda fp: tape.gradient(fp, probs_p)\n gradq = lambda fq: tape.gradient(fq, probs_q)\n [\n gradp_exact_kl_bernoulli_bernoulli_,\n gradq_exact_kl_bernoulli_bernoulli_,\n gradp_approx_kl_bernoulli_bernoulli_,\n gradq_approx_kl_bernoulli_bernoulli_,\n ] = self.evaluate([\n gradp(exact_kl_bernoulli_bernoulli),\n gradq(exact_kl_bernoulli_bernoulli),\n gradp(approx_kl_bernoulli_bernoulli),\n gradq(approx_kl_bernoulli_bernoulli),\n ])\n # Notice that variance (i.e., `rtol`) is higher when using score-trick.\n self.assertAllClose(\n gradp_exact_kl_bernoulli_bernoulli_,\n gradp_approx_kl_bernoulli_bernoulli_,\n rtol=0.05,\n atol=0.)\n self.assertAllClose(\n gradq_exact_kl_bernoulli_bernoulli_,\n gradq_approx_kl_bernoulli_bernoulli_,\n rtol=0.03,\n atol=0.)\n\n\nif __name__ == '__main__':\n tf.test.main()\n",
"# Copyright 2018 The TensorFlow Probability Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"Tests for tensorflow_probability.python.bijectors.reciprocal.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\n# Dependency imports\nfrom absl.testing import parameterized\nimport numpy as np\nimport tensorflow as tf\n\nfrom tensorflow_probability.python import bijectors as tfb\nfrom tensorflow_probability.python.bijectors import bijector_test_util\n\nfrom tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import,g-import-not-at-top\n\n\n@test_util.run_all_in_graph_and_eager_modes\nclass ReciprocalTest(tf.test.TestCase, parameterized.TestCase):\n \"\"\"Tests correctness of the `b(x) = 1 / x` bijector.\"\"\"\n\n @parameterized.named_parameters(\n dict(\n testcase_name='positive',\n lower=1e-3,\n upper=10.\n ),\n dict(\n testcase_name='negative',\n lower=-10.,\n upper=-1e-3\n )\n )\n def testBijector(self, lower, upper):\n bijector = tfb.Reciprocal()\n self.assertEqual('reciprocal', bijector.name)\n x = tf.linspace(lower, upper, 100)\n y = 1. / x\n self.assertAllClose(self.evaluate(y), self.evaluate(bijector.forward(x)))\n self.assertAllClose(self.evaluate(x), self.evaluate(bijector.inverse(y)))\n\n @parameterized.named_parameters(\n dict(\n testcase_name='positive',\n lower_x=.1,\n upper_x=10.\n ),\n dict(\n testcase_name='negative',\n lower_x=-10.,\n upper_x=-.1\n )\n )\n def testScalarCongruency(self, lower_x, upper_x):\n bijector = tfb.Reciprocal()\n bijector_test_util.assert_scalar_congruency(\n bijector, lower_x=lower_x, upper_x=upper_x, eval_func=self.evaluate,\n rtol=0.05)\n\n @parameterized.named_parameters(\n dict(\n testcase_name='positive',\n lower=.1,\n upper=10.\n ),\n dict(\n testcase_name='negative',\n lower=-10.,\n upper=-.1\n )\n )\n def testBijectiveAndFinite(self, lower, upper):\n bijector = tfb.Reciprocal()\n x = np.linspace(lower, upper, num=100).astype(np.float32)\n y = np.linspace(lower, upper, num=100).astype(np.float32)\n bijector_test_util.assert_bijective_and_finite(\n bijector, x, y, eval_func=self.evaluate, event_ndims=0)\n\n\nif __name__ == '__main__':\n tf.test.main()\n",
"# Copyright 2018 The TensorFlow Probability Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"Trains a sparse Gamma deep exponential family on NIPS 2011 conference papers.\n\nWe apply a sparse Gamma deep exponential family [3] as a topic model on the\ncollection of NIPS 2011 conference papers [2]. Note that [3] applies score\nfunction gradients with advanced variance reduction techniques; instead we apply\nimplicit reparameterization gradients [1]. Preliminary experiments for this\nmodel and task suggest that implicit reparameterization exhibits lower gradient\nvariance and trains faster.\n\nWith default flags, fitting the model takes ~60s for 10,000 steps on a GTX\n1080 Ti. The following results are after 120,000 steps.\n\nTopic 0: let distribution set strategy distributions given learning\n information use property\nTopic 1: functions problem risk function submodular cut level\n clustering sets performance\nTopic 2: action value learning regret reward actions algorithm optimal\n state return\nTopic 3: posterior stochastic approach information based using prior\n mean divergence since\nTopic 4: player inference game propagation experts static query expert\n base variables\nTopic 5: algorithm set loss weak algorithms optimal submodular online\n cost setting\nTopic 6: sparse sparsity norm solution learning penalty greedy\n structure wise regularization\nTopic 7: learning training linear kernel using coding accuracy\n performance dataset based\nTopic 8: object categories image features examples classes images\n class objects visual\nTopic 9: data manifold matrix points dimensional point low linear\n gradient optimization\n\n#### References\n\n[1]: Michael Figurnov, Shakir Mohamed, Andriy Mnih. Implicit Reparameterization\n Gradients, 2018.\n https://arxiv.org/abs/1805.08498.\n[2]: Valerio Perrone and Paul A Jenkins and Dario Spano and Yee Whye Teh.\n Poisson Random Fields for Dynamic Feature Models, 2016.\n https://arxiv.org/abs/1611.07460\n[3]: Rajesh Ranganath, Linpeng Tang, Laurent Charlin, David M. Blei. Deep\n Exponential Families. 
In _Artificial Intelligence and Statistics_, 2015.\n https://arxiv.org/abs/1411.2581\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport csv\nimport os\nimport time\n\n# Dependency imports\nfrom absl import flags\nimport numpy as np\nfrom six.moves import urllib\nimport tensorflow as tf\n\nfrom tensorflow_probability import edward2 as ed\n\nflags.DEFINE_float(\"learning_rate\",\n default=1e-4,\n help=\"Initial learning rate.\")\nflags.DEFINE_integer(\"max_steps\",\n default=200000,\n help=\"Number of training steps to run.\")\nflags.DEFINE_list(\"layer_sizes\",\n default=[\"100\", \"30\", \"15\"],\n help=\"Comma-separated list denoting number of latent \"\n \"variables (stochastic units) per layer.\")\nflags.DEFINE_float(\"shape\",\n default=0.1,\n help=\"Shape hyperparameter for Gamma priors on latents.\")\nflags.DEFINE_string(\"data_dir\",\n default=os.path.join(os.getenv(\"TEST_TMPDIR\", \"/tmp\"),\n \"deep_exponential_family/data\"),\n help=\"Directory where data is stored (if using real data).\")\nflags.DEFINE_string(\"model_dir\",\n default=os.path.join(os.getenv(\"TEST_TMPDIR\", \"/tmp\"),\n \"deep_exponential_family/\"),\n help=\"Directory to put the model's fit.\")\nflags.DEFINE_bool(\"fake_data\",\n default=None,\n help=\"If true, uses fake data. Defaults to real data.\")\n\nFLAGS = flags.FLAGS\n\n\ndef deep_exponential_family(data_size, feature_size, units, shape):\n \"\"\"A multi-layered topic model over a documents-by-terms matrix.\"\"\"\n w2 = ed.Gamma(0.1, 0.3, sample_shape=[units[2], units[1]], name=\"w2\")\n w1 = ed.Gamma(0.1, 0.3, sample_shape=[units[1], units[0]], name=\"w1\")\n w0 = ed.Gamma(0.1, 0.3, sample_shape=[units[0], feature_size], name=\"w0\")\n\n z2 = ed.Gamma(0.1, 0.1, sample_shape=[data_size, units[2]], name=\"z2\")\n z1 = ed.Gamma(shape, shape / tf.matmul(z2, w2), name=\"z1\")\n z0 = ed.Gamma(shape, shape / tf.matmul(z1, w1), name=\"z0\")\n x = ed.Poisson(tf.matmul(z0, w0), name=\"x\")\n return x\n\n\ndef trainable_positive_deterministic(shape, min_loc=1e-3, name=None):\n \"\"\"Learnable Deterministic distribution over positive reals.\"\"\"\n with tf.compat.v1.variable_scope(\n None, default_name=\"trainable_positive_deterministic\"):\n unconstrained_loc = tf.compat.v1.get_variable(\"unconstrained_loc\", shape)\n loc = tf.maximum(tf.nn.softplus(unconstrained_loc), min_loc)\n rv = ed.Deterministic(loc=loc, name=name)\n return rv\n\n\ndef trainable_gamma(shape, min_concentration=1e-3, min_scale=1e-5, name=None):\n \"\"\"Learnable Gamma via concentration and scale parameterization.\"\"\"\n with tf.compat.v1.variable_scope(None, default_name=\"trainable_gamma\"):\n unconstrained_concentration = tf.compat.v1.get_variable(\n \"unconstrained_concentration\",\n shape,\n initializer=tf.compat.v1.initializers.random_normal(\n mean=0.5, stddev=0.1))\n unconstrained_scale = tf.compat.v1.get_variable(\n \"unconstrained_scale\",\n shape,\n initializer=tf.compat.v1.initializers.random_normal(stddev=0.1))\n concentration = tf.maximum(tf.nn.softplus(unconstrained_concentration),\n min_concentration)\n rate = tf.maximum(1. / tf.nn.softplus(unconstrained_scale), 1. / min_scale)\n rv = ed.Gamma(concentration=concentration, rate=rate, name=name)\n return rv\n\n\ndef deep_exponential_family_variational(data_size, feature_size, units):\n \"\"\"Posterior approx. 
for deep exponential family p(w{0,1,2}, z{1,2,3} | x).\"\"\"\n qw2 = trainable_positive_deterministic([units[2], units[1]], name=\"qw2\")\n qw1 = trainable_positive_deterministic([units[1], units[0]], name=\"qw1\")\n qw0 = trainable_positive_deterministic([units[0], feature_size], name=\"qw0\")\n qz2 = trainable_gamma([data_size, units[2]], name=\"qz2\")\n qz1 = trainable_gamma([data_size, units[1]], name=\"qz1\")\n qz0 = trainable_gamma([data_size, units[0]], name=\"qz0\")\n return qw2, qw1, qw0, qz2, qz1, qz0\n\n\ndef make_value_setter(**model_kwargs):\n \"\"\"Creates a value-setting interceptor.\n\n Args:\n **model_kwargs: dict of str to Tensor. Keys are the names of random variable\n in the model to which this interceptor is being applied. Values are\n Tensors to set their value to.\n\n Returns:\n set_values: Function which sets the value of intercepted ops.\n \"\"\"\n def set_values(f, *args, **kwargs):\n \"\"\"Sets random variable values to its aligned value.\"\"\"\n name = kwargs.get(\"name\")\n if name in model_kwargs:\n kwargs[\"value\"] = model_kwargs[name]\n return ed.interceptable(f)(*args, **kwargs)\n return set_values\n\n\ndef load_nips2011_papers(path):\n \"\"\"Loads NIPS 2011 conference papers.\n\n The NIPS 1987-2015 data set is in the form of a 11,463 x 5,812 matrix of\n per-paper word counts, containing 11,463 words and 5,811 NIPS conference\n papers (Perrone et al., 2016). We subset to papers in 2011 and words appearing\n in at least two documents and having a total word count of at least 10.\n\n Built from the Observations Python package.\n\n Args:\n path: str.\n Path to directory which either stores file or otherwise file will\n be downloaded and extracted there. Filename is `NIPS_1987-2015.csv`.\n\n Returns:\n bag_of_words: np.ndarray of shape [num_documents, num_words]. 
Each element\n denotes the number of occurrences of a specific word in a specific\n document.\n words: List of strings, denoting the words for `bag_of_words`'s columns.\n \"\"\"\n path = os.path.expanduser(path)\n filename = \"NIPS_1987-2015.csv\"\n filepath = os.path.join(path, filename)\n if not os.path.exists(filepath):\n url = (\"https://archive.ics.uci.edu/ml/machine-learning-databases/\"\n \"00371/NIPS_1987-2015.csv\")\n if not tf.io.gfile.exists(path):\n tf.io.gfile.makedirs(path)\n print(\"Downloading %s to %s\" % (url, filepath))\n urllib.request.urlretrieve(url, filepath)\n\n with open(filepath) as f:\n iterator = csv.reader(f)\n documents = next(iterator)[1:]\n words = []\n x_train = []\n for row in iterator:\n words.append(row[0])\n x_train.append(row[1:])\n\n x_train = np.array(x_train, dtype=np.int)\n\n # Subset to documents in 2011 and words appearing in at least two documents\n # and have a total word count of at least 10.\n doc_idx = [i for i, document in enumerate(documents)\n if document.startswith(\"2011\")]\n documents = [documents[doc] for doc in doc_idx]\n x_train = x_train[:, doc_idx]\n word_idx = np.logical_and(np.sum(x_train != 0, 1) >= 2,\n np.sum(x_train, 1) >= 10)\n words = [word for word, idx in zip(words, word_idx) if idx]\n bag_of_words = x_train[word_idx, :].T\n return bag_of_words, words\n\n\ndef main(argv):\n del argv # unused\n FLAGS.layer_sizes = [int(layer_size) for layer_size in FLAGS.layer_sizes]\n if len(FLAGS.layer_sizes) != 3:\n raise NotImplementedError(\"Specifying fewer or more than 3 layers is not \"\n \"currently available.\")\n if tf.io.gfile.exists(FLAGS.model_dir):\n tf.compat.v1.logging.warning(\n \"Warning: deleting old log directory at {}\".format(FLAGS.model_dir))\n tf.io.gfile.rmtree(FLAGS.model_dir)\n tf.io.gfile.makedirs(FLAGS.model_dir)\n\n if FLAGS.fake_data:\n bag_of_words = np.random.poisson(1., size=[10, 25])\n words = [str(i) for i in range(25)]\n else:\n bag_of_words, words = load_nips2011_papers(FLAGS.data_dir)\n\n total_count = np.sum(bag_of_words)\n bag_of_words = tf.cast(bag_of_words, dtype=tf.float32)\n data_size, feature_size = bag_of_words.shape\n\n # Compute expected log-likelihood. 
First, sample from the variational\n # distribution; second, compute the log-likelihood given the sample.\n qw2, qw1, qw0, qz2, qz1, qz0 = deep_exponential_family_variational(\n data_size,\n feature_size,\n FLAGS.layer_sizes)\n\n with ed.tape() as model_tape:\n with ed.interception(make_value_setter(w2=qw2, w1=qw1, w0=qw0,\n z2=qz2, z1=qz1, z0=qz0)):\n posterior_predictive = deep_exponential_family(data_size,\n feature_size,\n FLAGS.layer_sizes,\n FLAGS.shape)\n\n log_likelihood = posterior_predictive.distribution.log_prob(bag_of_words)\n log_likelihood = tf.reduce_sum(input_tensor=log_likelihood)\n tf.compat.v1.summary.scalar(\"log_likelihood\", log_likelihood)\n\n # Compute analytic KL-divergence between variational and prior distributions.\n kl = 0.\n for rv_name, variational_rv in [(\"z0\", qz0), (\"z1\", qz1), (\"z2\", qz2),\n (\"w0\", qw0), (\"w1\", qw1), (\"w2\", qw2)]:\n kl += tf.reduce_sum(\n input_tensor=variational_rv.distribution.kl_divergence(\n model_tape[rv_name].distribution))\n\n tf.compat.v1.summary.scalar(\"kl\", kl)\n\n elbo = log_likelihood - kl\n tf.compat.v1.summary.scalar(\"elbo\", elbo)\n optimizer = tf.compat.v1.train.AdamOptimizer(FLAGS.learning_rate)\n train_op = optimizer.minimize(-elbo)\n\n sess = tf.compat.v1.Session()\n summary = tf.compat.v1.summary.merge_all()\n summary_writer = tf.compat.v1.summary.FileWriter(FLAGS.model_dir, sess.graph)\n start_time = time.time()\n\n sess.run(tf.compat.v1.global_variables_initializer())\n for step in range(FLAGS.max_steps):\n start_time = time.time()\n _, elbo_value = sess.run([train_op, elbo])\n if step % 500 == 0:\n duration = time.time() - start_time\n print(\"Step: {:>3d} Loss: {:.3f} ({:.3f} sec)\".format(\n step, elbo_value, duration))\n summary_str = sess.run(summary)\n summary_writer.add_summary(summary_str, step)\n summary_writer.flush()\n\n # Compute perplexity of the full data set. The model's negative\n # log-likelihood of data is upper bounded by the variational objective.\n negative_log_likelihood = -elbo_value\n perplexity = np.exp(negative_log_likelihood / total_count)\n print(\"Negative log-likelihood <= {:0.3f}\".format(\n negative_log_likelihood))\n print(\"Perplexity <= {:0.3f}\".format(perplexity))\n\n # Print top 10 words for first 10 topics.\n qw0_values = sess.run(qw0)\n for k in range(min(10, FLAGS.layer_sizes[-1])):\n top_words_idx = qw0_values[k, :].argsort()[-10:][::-1]\n top_words = \" \".join([words[i] for i in top_words_idx])\n print(\"Topic {}: {}\".format(k, top_words))\n\nif __name__ == \"__main__\":\n tf.compat.v1.app.run()\n"
] | [
[
"tensorflow.convert_to_tensor",
"tensorflow.concat",
"numpy.sqrt",
"tensorflow.zeros",
"tensorflow.cast",
"tensorflow.random.poisson",
"tensorflow.linspace",
"tensorflow.reduce_logsumexp",
"tensorflow.broadcast_static_shape",
"tensorflow.name_scope",
"numpy.polynomial.hermite.hermgauss",
"tensorflow.TensorShape",
"tensorflow.shape",
"tensorflow.get_static_value",
"tensorflow.transpose",
"tensorflow.range",
"tensorflow.reshape",
"numpy.int32",
"numpy.linalg.norm",
"tensorflow.ones",
"tensorflow.math.log"
],
[
"tensorflow.convert_to_tensor",
"tensorflow.norm",
"numpy.log",
"tensorflow.compat.v1.assert_positive",
"tensorflow.shape",
"tensorflow.zeros",
"tensorflow.cast",
"tensorflow.ones_like",
"tensorflow.identity",
"tensorflow.math.rsqrt",
"tensorflow.ones",
"tensorflow.math.log",
"tensorflow.zeros_like",
"tensorflow.math.digamma",
"tensorflow.name_scope",
"tensorflow.sqrt",
"tensorflow.math.lgamma"
],
[
"tensorflow.convert_to_tensor",
"tensorflow.transpose",
"tensorflow.constant",
"tensorflow.range",
"tensorflow.sort",
"tensorflow.identity",
"numpy.sort",
"numpy.size",
"tensorflow.argsort",
"tensorflow.name_scope",
"tensorflow.compat.v1.assert_rank",
"tensorflow.rank",
"tensorflow.get_static_value",
"tensorflow.compat.v1.assert_non_negative",
"tensorflow.size"
],
[
"numpy.ones_like",
"tensorflow.constant",
"numpy.isfinite",
"tensorflow.test.main",
"tensorflow.GradientTape"
],
[
"tensorflow.linspace",
"tensorflow.test.main",
"numpy.linspace"
],
[
"tensorflow.reduce_sum",
"tensorflow.cast",
"numpy.exp",
"tensorflow.compat.v1.app.run",
"tensorflow.compat.v1.summary.merge_all",
"tensorflow.compat.v1.train.AdamOptimizer",
"numpy.random.poisson",
"tensorflow.compat.v1.variable_scope",
"tensorflow.matmul",
"tensorflow.io.gfile.exists",
"tensorflow.compat.v1.get_variable",
"tensorflow.io.gfile.makedirs",
"tensorflow.compat.v1.summary.scalar",
"tensorflow.io.gfile.rmtree",
"numpy.array",
"numpy.sum",
"tensorflow.compat.v1.summary.FileWriter",
"tensorflow.compat.v1.global_variables_initializer",
"tensorflow.compat.v1.Session",
"tensorflow.compat.v1.initializers.random_normal",
"tensorflow.nn.softplus"
]
] | [
{
"matplotlib": [],
"numpy": [
"1.10",
"1.12",
"1.11",
"1.19",
"1.24",
"1.13",
"1.16",
"1.9",
"1.18",
"1.23",
"1.21",
"1.22",
"1.20",
"1.7",
"1.15",
"1.14",
"1.17",
"1.8"
],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"1.12",
"1.4",
"1.13",
"1.5",
"1.7",
"0.12",
"1.0",
"1.2"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"2.7",
"1.12",
"2.6",
"2.2",
"1.13",
"2.3",
"2.4",
"2.9",
"2.5",
"2.8",
"2.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
}
] |
endymecy/NDIToolbox | [
"f7a0a642b4a778d9d0c131871f4bfb9822ecb3da",
"f7a0a642b4a778d9d0c131871f4bfb9822ecb3da"
] | [
"models/tests/test_dataio.py",
"models/tests/test_preview_window_model.py"
] | [
"\"\"\"test_dataio.py - tests the dataio module\n\nChris R. Coughlin (TRI/Austin, Inc.)\n\"\"\"\n\n__author__ = 'Chris R. Coughlin'\n\nimport unittest\nfrom models import dataio\nfrom controllers import pathfinder\nfrom utils.skiptest import skipIfModuleNotInstalled\nimport h5py\nimport numpy as np\nimport numpy.testing\nimport scipy.misc\nimport os\nimport random\n\n\nclass TestDataIO(unittest.TestCase):\n \"\"\"Tests Data IO functions\"\"\"\n\n def setUp(self):\n self.sample_data = np.array(self.random_data())\n self.sample_data_basename = \"sample.dat\"\n self.sample_data_file = os.path.join(os.path.dirname(__file__),\n self.sample_data_basename)\n with h5py.File(self.sample_data_file, 'w') as fidout:\n fidout.create_dataset(self.sample_data_basename, data=self.sample_data)\n\n def random_data(self):\n \"\"\"Returns a list of random data\"\"\"\n return [random.uniform(-100, 100) for i in range(25)]\n\n def test_save_data(self):\n \"\"\"Verify save_data function saves NumPy array to disk\"\"\"\n sample_filename = \"test_savedata.dat\"\n sample_path = os.path.join(os.path.dirname(__file__), sample_filename)\n dataio.save_data(sample_path, self.sample_data)\n self.assertTrue(os.path.exists(sample_path + \".hdf5\"))\n with h5py.File(sample_path + \".hdf5\", \"r\") as fidin:\n froot, ext = os.path.splitext(os.path.basename(sample_filename))\n for key in fidin.keys():\n if key.startswith(froot):\n read_data = fidin[key][...]\n self.assertTrue(np.array_equal(self.sample_data, read_data))\n if os.path.exists(sample_path + \".hdf5\"):\n os.remove(sample_path + \".hdf5\")\n\n def test_get_data(self):\n \"\"\"Verify get_data function returns a NumPy array\"\"\"\n read_data = dataio.get_data(self.sample_data_file)\n self.assertTrue(np.array_equal(self.sample_data, read_data))\n\n def test_get_data_slice(self):\n \"\"\"Verify get_data function returns a slice if specified\"\"\"\n slice_idx = np.s_[5:15]\n read_hyperslab = dataio.get_data(self.sample_data_file, slice_idx)\n self.assertTrue(np.array_equal(self.sample_data[slice_idx], read_hyperslab))\n\n def test_get_txt_data(self):\n \"\"\"Verify retrieval of ASCII delimited data\"\"\"\n sample_data_file = os.path.join(os.path.dirname(__file__), 'support_files',\n '1.25 from hole Single Column.asc')\n assert(os.path.exists(sample_data_file))\n import_params = {'delimiter': None}\n expected_data = np.loadtxt(sample_data_file, delimiter=import_params['delimiter'])\n retrieved_data = dataio.get_txt_data(sample_data_file, **import_params)\n self.assertTrue(np.array_equal(expected_data, retrieved_data))\n\n def test_import_txt(self):\n \"\"\"Verify import of ASCII delimited data files\"\"\"\n sample_data_file = os.path.join(os.path.dirname(__file__), 'support_files',\n '1.25 from hole Single Column.asc')\n assert(os.path.exists(sample_data_file))\n import_params = {'delimiter': None}\n expected_data = np.loadtxt(sample_data_file, delimiter=import_params['delimiter'])\n dataio.import_txt(sample_data_file, **import_params)\n dest_file = os.path.join(pathfinder.data_path(),\n os.path.basename(sample_data_file) + \".hdf5\")\n self.assertTrue(os.path.exists(dest_file))\n with h5py.File(dest_file, \"r\") as fidin:\n root, ext = os.path.splitext(os.path.basename(dest_file))\n for key in fidin.keys():\n if key.startswith(root):\n read_data = fidin[key][...]\n self.assertTrue(np.array_equal(expected_data, read_data))\n try:\n if os.path.exists(dest_file):\n os.remove(dest_file)\n except WindowsError: # file in use\n pass\n\n def test_export_txt(self):\n 
\"\"\"Verify export of data to delimited ASCII\"\"\"\n # Use integer data to avoid the floating point conversion to/from files\n sample_data = self.sample_data.astype(np.int64)\n sample_data_file = os.path.join(os.path.dirname(__file__), 'support_files',\n 'sample.hdf5')\n dest_file = os.path.join(os.path.dirname(__file__), 'support_files',\n 'sample.txt')\n with h5py.File(sample_data_file, \"w\") as fidout:\n fidout.create_dataset(os.path.basename(sample_data_file), data=sample_data)\n export_params = {'delimiter': ','}\n dataio.export_txt(dest_file, sample_data_file, **export_params)\n retrieved_data = np.genfromtxt(dest_file, delimiter=export_params['delimiter'])\n self.assertTrue(np.array_equal(sample_data, retrieved_data))\n try:\n if os.path.exists(sample_data_file):\n os.remove(sample_data_file)\n if os.path.exists(dest_file):\n os.remove(dest_file)\n except WindowsError: # file in use\n pass\n\n def test_export3D_txt(self):\n \"\"\"Verify export of 3D data to delimited ASCII\"\"\"\n x_size = 5\n y_size = 4\n z_size = 6\n sample_data = np.empty((y_size, x_size, z_size))\n for xidx in range(x_size):\n for yidx in range(y_size):\n for zidx in range(z_size):\n sample_data[yidx, xidx, zidx] = int(random.uniform(-100, 100))\n sample_data_file = os.path.join(os.path.dirname(__file__), 'support_files', 'sample3d.hdf5')\n dest_file = os.path.join(os.path.dirname(__file__), 'support_files', 'sample3d.txt')\n with h5py.File(sample_data_file, \"w\") as fidout:\n fidout.create_dataset(os.path.basename(sample_data_file), data=sample_data)\n export_params = {'delimiter': ','}\n dataio.export_txt(dest_file, sample_data_file, **export_params)\n retrieved_data = np.empty(sample_data.shape)\n with open(dest_file, \"rb\") as fidin:\n zidx = 0\n for line in fidin:\n if not line.startswith('#'):\n x, y, z = line.split(export_params['delimiter'])\n x = int(x)\n y = int(y)\n z = float(z.strip())\n retrieved_data[y, x, zidx] = z\n zidx += 1\n if zidx > sample_data.shape[2]-1:\n zidx = 0\n self.assertTrue(np.array_equal(sample_data, retrieved_data))\n try:\n if os.path.exists(sample_data_file):\n os.remove(sample_data_file)\n if os.path.exists(dest_file):\n os.remove(dest_file)\n except WindowsError: # file in use\n pass\n\n @skipIfModuleNotInstalled(\"dicom\")\n def test_get_dicom_data(self):\n \"\"\"Verify retrieval of DICOM / DICONDE data\"\"\"\n import dicom\n diconde_folder = os.path.join(os.path.dirname(__file__), 'support_files')\n for root, dirs, files in os.walk(diconde_folder):\n for fname in files:\n dicom_data_file = os.path.join(root, fname)\n basename, ext = os.path.splitext(dicom_data_file)\n # Simple check to ensure we're looking at DICOM files\n if ext.lower() == '.dcm':\n dicom_data = dicom.read_file(dicom_data_file)\n dicom_arr = dicom_data.pixel_array\n retrieved_data = dataio.get_dicom_data(dicom_data_file)\n self.assertTrue(np.array_equal(dicom_arr, retrieved_data))\n\n @skipIfModuleNotInstalled(\"dicom\")\n def test_import_dicom(self):\n \"\"\"Verify import of DICOM / DICONDE data\"\"\"\n # Load the ASTM DICONDE example files,\n # save, then ensure the resulting arrays\n # are identical\n import dicom\n\n diconde_folder = os.path.join(os.path.dirname(__file__), 'support_files')\n for root, dirs, files in os.walk(diconde_folder):\n for fname in files:\n dicom_data_file = os.path.join(root, fname)\n basename, ext = os.path.splitext(dicom_data_file)\n # Simple check to ensure we're looking at DICOM files\n if ext.lower() == '.dcm':\n dicom_data = dicom.read_file(dicom_data_file)\n 
dicom_arr = dicom_data.pixel_array\n dataio.import_dicom(dicom_data_file)\n dest_file = os.path.join(pathfinder.data_path(),\n os.path.basename(dicom_data_file) + \".hdf5\")\n self.assertTrue(os.path.exists(dest_file))\n with h5py.File(dest_file, \"r\") as fidin:\n froot, ext = os.path.splitext(os.path.basename(dest_file))\n for key in fidin.keys():\n if key.startswith(froot):\n read_data = fidin[key][...]\n self.assertTrue(np.array_equal(dicom_arr, read_data))\n try:\n if os.path.exists(dest_file):\n os.remove(dest_file)\n except WindowsError: # File in use\n pass\n\n def test_get_img_data(self):\n \"\"\"Verify retrieval of bitmap data\"\"\"\n sample_data_file = os.path.join(os.path.dirname(__file__), 'support_files',\n 'austin_sky320x240.jpg')\n assert(os.path.exists(sample_data_file))\n expected_data = scipy.misc.imread(sample_data_file, flatten=True)\n retrieved_data = dataio.get_img_data(sample_data_file, flatten=True)\n self.assertTrue(np.array_equal(expected_data, retrieved_data))\n\n def test_import_img(self):\n \"\"\"Verify import of images\"\"\"\n sample_data_file = os.path.join(os.path.dirname(__file__), 'support_files',\n 'austin_sky320x240.jpg')\n assert(os.path.exists(sample_data_file))\n expected_data = scipy.misc.imread(sample_data_file, flatten=True)\n dataio.import_img(sample_data_file, flatten=True)\n dest_file = os.path.join(pathfinder.data_path(),\n os.path.basename(sample_data_file) + \".hdf5\")\n self.assertTrue(os.path.exists(dest_file))\n with h5py.File(dest_file, \"r\") as fidin:\n root, ext = os.path.splitext(os.path.basename(dest_file))\n for key in fidin.keys():\n if key.startswith(root):\n read_data = fidin[key][...]\n self.assertTrue(np.array_equal(expected_data, read_data))\n try:\n if os.path.exists(dest_file):\n os.remove(dest_file)\n except WindowsError: # file in use\n pass\n\n def test_get_utwin_tof_data(self):\n \"\"\"Verify retrieval of UTWin Time Of Flight data through convenience function\"\"\"\n sample_data_file = os.path.join(os.path.dirname(__file__), 'support_files', 'CScanData.csc')\n tof_data_file = os.path.join(os.path.dirname(__file__), 'support_files', 'CScanData_tofdata.npy')\n tof_resolution = 0.01\n assert(os.path.exists(tof_data_file))\n expected_tof_data = np.load(tof_data_file) * tof_resolution\n returned_tof_data = dataio.get_utwin_tof_data(sample_data_file)[0]\n numpy.testing.assert_array_almost_equal(expected_tof_data, returned_tof_data, decimal=3)\n\n def test_import_utwin_tof(self):\n \"\"\"Verify import of UTWin Time Of Flight data through convenience function\"\"\"\n tof_data_file = os.path.join(os.path.dirname(__file__), 'support_files', 'CScanData_tofdata.npy')\n sample_data_file = os.path.join(os.path.dirname(__file__), 'support_files', 'CScanData.csc')\n tof_resolution = 0.01\n expected_tof_data = np.load(tof_data_file) * tof_resolution\n root, ext = os.path.splitext(os.path.basename(sample_data_file))\n dest_file = os.path.join(pathfinder.data_path(),\n os.path.basename(root) + \"_tofdata0.csc.hdf5\")\n dataio.import_utwin_tof(sample_data_file)\n self.assertTrue(os.path.exists(dest_file))\n with h5py.File(dest_file, \"r\") as fidin:\n root, ext = os.path.splitext(os.path.basename(dest_file))\n for key in fidin.keys():\n if key.startswith(root):\n read_data = fidin[key][...]\n numpy.testing.assert_array_almost_equal(expected_tof_data, read_data, decimal=3)\n try:\n if os.path.exists(dest_file):\n os.remove(dest_file)\n except WindowsError: # file in use\n pass\n\n def test_get_utwin_amp_data(self):\n \"\"\"Verify retrieval 
of UTWin amplitude data through convenience function\"\"\"\n sample_data_file = os.path.join(os.path.dirname(__file__), 'support_files', 'CScanData.csc')\n amp_data_file = os.path.join(os.path.dirname(__file__), 'support_files', 'CScanData_ampdata.npy')\n assert(os.path.exists(amp_data_file))\n expected_tof_data = np.load(amp_data_file)\n self.assertTrue(np.array_equal(expected_tof_data, dataio.get_utwin_amp_data(sample_data_file)[0]))\n\n def test_import_utwin_amp(self):\n \"\"\"Verify import of UTWin amplitude data through convenience function\"\"\"\n amp_data_file = os.path.join(os.path.dirname(__file__), 'support_files', 'CScanData_ampdata.npy')\n sample_data_file = os.path.join(os.path.dirname(__file__), 'support_files', 'CScanData.csc')\n expected_amp_data = np.load(amp_data_file)\n root, ext = os.path.splitext(os.path.basename(sample_data_file))\n dest_file = os.path.join(pathfinder.data_path(),\n os.path.basename(root) + \"_ampdata0.csc.hdf5\")\n dataio.import_utwin_amp(sample_data_file)\n self.assertTrue(os.path.exists(dest_file))\n with h5py.File(dest_file, \"r\") as fidin:\n root, ext = os.path.splitext(os.path.basename(dest_file))\n for key in fidin.keys():\n if key.startswith(root):\n read_data = fidin[key][...]\n self.assertTrue(np.array_equal(expected_amp_data, read_data))\n try:\n if os.path.exists(dest_file):\n os.remove(dest_file)\n except WindowsError: # file in use\n pass\n\n def test_get_utwin_data(self):\n \"\"\"Verify returning UTWin data\"\"\"\n sample_data_file = os.path.join(os.path.dirname(__file__), 'support_files', 'CScanData.csc')\n sample_reader = dataio.UTWinCScanDataFile(sample_data_file)\n sample_reader.read_data()\n expected_data = sample_reader.data\n returned_data = dataio.get_utwin_data(sample_data_file)\n for datatype in expected_data:\n self.assertTrue(np.array_equal(expected_data[datatype], returned_data[datatype]))\n\n def test_get_winspect_data(self):\n \"\"\"Verify retrieval of Winspect data through convenience function\"\"\"\n sample_data_file = os.path.join(os.path.dirname(__file__), 'support_files', 'sample_data.sdt')\n assert(os.path.exists(sample_data_file))\n scan_reader = dataio.WinspectReader(sample_data_file)\n expected_data_list = scan_reader.get_winspect_data()\n retrieved_data_list = dataio.get_winspect_data(sample_data_file)\n self.assertEqual(len(expected_data_list), len(retrieved_data_list))\n for data_array_idx in range(len(expected_data_list)):\n self.assertTrue(np.array_equal(expected_data_list[data_array_idx].data, retrieved_data_list[data_array_idx].data))\n\n def test_import_winspect(self):\n \"\"\"Verify import of Winspect data through convenience function\"\"\"\n sample_data_file = os.path.join(os.path.dirname(__file__), 'support_files', 'sample_data.sdt')\n assert(os.path.exists(sample_data_file))\n output_basename, ext = os.path.splitext(sample_data_file)\n amp_dest_file = os.path.join(pathfinder.data_path(),\n os.path.basename(output_basename) + \"_ampdata0\" + ext + \".hdf5\")\n waveform_dest_file = os.path.join(pathfinder.data_path(),\n os.path.basename(output_basename) + \"_waveformdata0\" + ext + \".hdf5\")\n dataio.import_winspect(sample_data_file)\n expected_data_list = dataio.get_winspect_data(sample_data_file)\n for dataset in expected_data_list:\n if \"amplitude\" in dataset.data_type:\n dest_file = amp_dest_file\n elif \"waveform\" in dataset.data_type:\n dest_file = waveform_dest_file\n with h5py.File(dest_file, \"r\") as fidin:\n root, ext = os.path.splitext(os.path.basename(dest_file))\n for key in 
fidin.keys():\n if key.startswith(root):\n read_data = fidin[key][...]\n self.assertTrue(np.array_equal(dataset.data, read_data))\n try:\n if os.path.exists(dest_file):\n os.remove(dest_file)\n except WindowsError: # file in use\n pass\n\n def tearDown(self):\n if os.path.exists(self.sample_data_file + \".hdf5\"):\n os.remove(self.sample_data_file + \".hdf5\")\n if os.path.exists(self.sample_data_file):\n os.remove(self.sample_data_file)\n\n\nclass TestUTWinCScanReader(unittest.TestCase):\n \"\"\"Tests the UTWinCScanReader class\"\"\"\n\n def setUp(self):\n self.sample_data_file = os.path.join(os.path.dirname(__file__), 'support_files', 'CScanData.csc')\n assert(os.path.exists(self.sample_data_file))\n self.cscan_reader = dataio.UTWinCscanReader()\n\n def test_basicfile_parameters(self):\n \"\"\"Verify the basic parameters of the CSC file format are correct\"\"\"\n self.assertEqual(self.cscan_reader.header_string_length, 15)\n expected_message_ids = {'CSCAN_DATA': 2300,\n 'WAVEFORM_pre240': 2016,\n 'WAVEFORM_post240': 2303,\n 'UTSAVE_UTCD0': 2010,\n 'UTSAVE_UTCD1': 2011,\n 'UTSAVE_UTCD2': 2012,\n 'UTSAVE_UTCD4': 2014,\n 'UTSAVE_UTPro0': 253,\n 'PROJECT': 301,\n 'UTSAVE_UTHead': 100,\n 'UTSAVE_UTCScan0': 750,\n 'UTSAVE_UTCD10': 2020,\n 'UTSAVE_UTCScan3': 753}\n self.assertDictEqual(expected_message_ids, self.cscan_reader.message_ids)\n\n def test_is_cscanfile(self):\n \"\"\"Verify reader correctly identifies CSC files\"\"\"\n self.assertTrue(self.cscan_reader.is_cscanfile(self.sample_data_file))\n\n def test_msg_info(self):\n \"\"\"Verify reader correctly returns message ID and length\"\"\"\n with open(self.sample_data_file, \"rb\") as fidin:\n fidin.seek(self.cscan_reader.header_string_length)\n first_message = (100, 14)\n self.assertTupleEqual(first_message, self.cscan_reader.msg_info(fidin))\n\n def test_find_message(self):\n \"\"\"Verify find_message returns the expected file positions\"\"\"\n expected_file_positions = ((2014, 38037),\n (2011, 38059),\n (2010, 38003),\n (2012, 422075),\n (2010, 38003),\n (2010, 38003))\n for message_id, expected_pos in expected_file_positions:\n self.assertEqual(self.cscan_reader.find_message(self.sample_data_file, message_id), expected_pos)\n\n def test_find_blocks(self):\n \"\"\"Verify find_blocks returns the file positions for the specified message ID\"\"\"\n # Search for UTSave_UTAD0 (Message ID 950) - contains A/D settings for each channel\n expected_filed_positions = [173, 920, 1667, 2414, 3161, 3908, 4655, 5402]\n self.assertListEqual(expected_filed_positions, self.cscan_reader.find_blocks(self.sample_data_file, 950))\n\n def test_read_field(self):\n \"\"\"Verify read_field correctly parses the specified message block\"\"\"\n start_pos = self.cscan_reader.find_message(self.sample_data_file, 950)\n self.assertTrue(start_pos != -1)\n with open(self.sample_data_file, \"rb\") as fidin:\n fidin.seek(start_pos)\n # Read a sample of A/D settings for the first channel\n expected_ad_delay = np.fromfile(fidin, self.cscan_reader.field_sizes['float'], 1)[0]\n expected_ad_width = np.fromfile(fidin, self.cscan_reader.field_sizes['float'], 1)[0]\n expected_ad_blanking_width = np.fromfile(fidin, self.cscan_reader.field_sizes['float'], 1)[0]\n expected_ad_gain = np.fromfile(fidin, self.cscan_reader.field_sizes['float'], 1)[0]\n expected_ad_offset = np.fromfile(fidin, self.cscan_reader.field_sizes['float'], 1)[0]\n expected_ad_trigger_level = np.fromfile(fidin, self.cscan_reader.field_sizes['float'], 1)[0]\n expected_ad_trigger_rate = np.fromfile(fidin, 
self.cscan_reader.field_sizes['float'], 1)[0]\n with open(self.sample_data_file, \"rb\") as fidin:\n fidin.seek(start_pos)\n ad_delay = self.cscan_reader.read_field(fidin, self.cscan_reader.field_sizes['float'])\n ad_width = self.cscan_reader.read_field(fidin, self.cscan_reader.field_sizes['float'])\n ad_blanking_width = self.cscan_reader.read_field(fidin, self.cscan_reader.field_sizes['float'])\n ad_gain = self.cscan_reader.read_field(fidin, self.cscan_reader.field_sizes['float'])\n ad_offset = self.cscan_reader.read_field(fidin, self.cscan_reader.field_sizes['float'])\n ad_trigger_level = self.cscan_reader.read_field(fidin, self.cscan_reader.field_sizes['float'])\n ad_trigger_rate = self.cscan_reader.read_field(fidin, self.cscan_reader.field_sizes['float'])\n self.assertAlmostEqual(expected_ad_delay, ad_delay)\n self.assertAlmostEqual(expected_ad_width, ad_width)\n self.assertAlmostEqual(expected_ad_blanking_width, ad_blanking_width)\n self.assertAlmostEqual(expected_ad_gain, ad_gain)\n self.assertAlmostEqual(expected_ad_offset, ad_offset)\n self.assertAlmostEqual(expected_ad_trigger_level, ad_trigger_level)\n self.assertAlmostEqual(expected_ad_trigger_rate, ad_trigger_rate)\n\n\nclass TestUTWinCScanDataFile(unittest.TestCase):\n \"\"\"Tests the UTWinCScanDataFile class.\n\n Note: the sample UTWin data files available to TRI as of May 2013 are export-controlled and can't be\n distributed, which in turn limits the tests that can be performed. The UTWinCScanDataFile class has been\n tested against real inspection data, however without additional sample files you should consider the code\n experimental. For more details, contact TRI.\n \"\"\"\n\n def setUp(self):\n self.sample_data_file = os.path.join(os.path.dirname(__file__), 'support_files', 'CScanData.csc')\n self.cscan_datafile = dataio.UTWinCScanDataFile(self.sample_data_file)\n\n def test_get_scan_version(self):\n \"\"\"Verify get_scan_version returns the correct scan version\"\"\"\n self.assertEqual(self.cscan_datafile.get_scan_version(), 117)\n\n def test_read_scan_properties(self):\n \"\"\"Verify read_scan_properties correctly compiles required scan settings\"\"\"\n # Read a sample of the most important properties, verify read\n important_scan_properties = {'n_height':320,\n 'n_width':600,\n 'rf_length':2994,\n 'channel_active':[1, 0, 0, 0, 0, 0, 0, 0]}\n for idx in important_scan_properties.keys():\n prop = important_scan_properties[idx]\n if not isinstance(prop, list):\n self.assertEqual(prop, self.cscan_datafile.scan_properties[idx])\n else:\n self.assertListEqual(prop, self.cscan_datafile.scan_properties[idx])\n\n def test_read_tof_data(self):\n \"\"\"Verify read_tof_data correctly reads Time Of Flight data\"\"\"\n # Verify one TOF dataset\n tof_data_file = os.path.join(os.path.dirname(__file__), 'support_files', 'CScanData_tofdata.npy')\n tof_resolution = 0.01\n assert(os.path.exists(tof_data_file))\n expected_tof_data = np.load(tof_data_file) * tof_resolution\n self.cscan_datafile.read_tof_data()\n numpy.testing.assert_array_almost_equal(expected_tof_data, self.cscan_datafile.data['tof'][0], decimal=3)\n\n def test_read_amplitude_data(self):\n \"\"\"Verify read_amplitude_data correctly reads amplitude data\"\"\"\n amp_data_file = os.path.join(os.path.dirname(__file__), 'support_files', 'CScanData_ampdata.npy')\n assert(os.path.exists(amp_data_file))\n expected_amp_data = np.load(amp_data_file)\n self.cscan_datafile.read_amplitude_data()\n self.assertTrue(np.array_equal(expected_amp_data, 
self.cscan_datafile.data['amplitude'][0]))\n\n def test_import_tof(self):\n \"\"\"Verify import of Time Of Flight data\"\"\"\n tof_data_file = os.path.join(os.path.dirname(__file__), 'support_files', 'CScanData_tofdata.npy')\n tof_resolution = 0.01\n csc_data_file = os.path.join(os.path.dirname(__file__), 'support_files', 'CScanData')\n assert(os.path.exists(tof_data_file))\n expected_tof_data = np.load(tof_data_file) * tof_resolution\n dest_file = os.path.join(pathfinder.data_path(),\n os.path.basename(csc_data_file) + \"_tofdata0.csc.hdf5\")\n self.cscan_datafile.import_tof_data()\n self.assertTrue(os.path.exists(dest_file))\n with h5py.File(dest_file, \"r\") as fidin:\n root, ext = os.path.splitext(os.path.basename(dest_file))\n for key in fidin.keys():\n if key.startswith(root):\n read_data = fidin[key][...]\n numpy.testing.assert_array_almost_equal(expected_tof_data, read_data, decimal=3)\n try:\n if os.path.exists(dest_file):\n os.remove(dest_file)\n except WindowsError: # file in use\n pass\n\n def test_import_amp(self):\n \"\"\"Verify import of amplitude data\"\"\"\n amp_data_file = os.path.join(os.path.dirname(__file__), 'support_files', 'CScanData_ampdata.npy')\n csc_data_file = os.path.join(os.path.dirname(__file__), 'support_files', 'CScanData')\n assert(os.path.exists(amp_data_file))\n expected_amp_data = np.load(amp_data_file)\n dest_file = os.path.join(pathfinder.data_path(),\n os.path.basename(csc_data_file) + \"_ampdata0.csc.hdf5\")\n self.cscan_datafile.import_amplitude_data()\n self.assertTrue(os.path.exists(dest_file))\n with h5py.File(dest_file, \"r\") as fidin:\n root, ext = os.path.splitext(os.path.basename(dest_file))\n for key in fidin.keys():\n if key.startswith(root):\n read_data = fidin[key][...]\n self.assertTrue(np.array_equal(expected_amp_data, read_data))\n try:\n if os.path.exists(dest_file):\n os.remove(dest_file)\n except WindowsError: # file in use\n pass\n\n\nclass TestWinspectReader(unittest.TestCase):\n \"\"\"Tests the WinspectReader class.\"\"\"\n\n def setUp(self):\n self.sample_data_file = os.path.join(os.path.dirname(__file__), 'support_files',\n 'sample_data.sdt')\n assert(os.path.exists(self.sample_data_file))\n self.scan_reader = dataio.WinspectReader(self.sample_data_file)\n\n def test_find_numbers(self):\n \"\"\"Verify find_numbers static method correctly pulls numbers from strings\"\"\"\n float_strings = {\"0.000000 mm\":0.0, \"0.775995 Usec\":0.775995}\n int_strings = {\"35 18 0 22 3 112 \":[35, 18, 0, 22, 3, 112],\n \"Number of Sample Points : 3500\":3500}\n bad_strings = {\"Ramshackle\":[], \"\":[]}\n for string in float_strings:\n self.assertAlmostEqual(float_strings[string], self.scan_reader.find_numbers(string))\n\n def test_get_winspect_data(self):\n \"\"\"Verify returning the list of arrays read from the data file\"\"\"\n data_reader = dataio.WinspectDataFile(self.sample_data_file)\n data_reader.read_data()\n expected_data_list = data_reader.datasets\n retrieved_data_list = self.scan_reader.get_winspect_data()\n self.assertEqual(len(expected_data_list), len(retrieved_data_list))\n for data_array_idx in range(len(expected_data_list)):\n self.assertTrue(np.array_equal(expected_data_list[data_array_idx].data, retrieved_data_list[data_array_idx].data))\n\n def test_import_winspect(self):\n \"\"\"Verify importing datasets\"\"\"\n output_basename, ext = os.path.splitext(self.sample_data_file)\n amp_dest_file = os.path.join(pathfinder.data_path(),\n os.path.basename(output_basename) + \"_ampdata0\" + ext + \".hdf5\")\n waveform_dest_file = 
os.path.join(pathfinder.data_path(),\n os.path.basename(output_basename) + \"_waveformdata0\" + ext + \".hdf5\")\n self.scan_reader.import_winspect()\n data_reader = dataio.WinspectDataFile(self.sample_data_file)\n data_reader.read_data()\n expected_data_list = data_reader.datasets\n for dataset in expected_data_list:\n if \"amplitude\" in dataset.data_type:\n dest_file = amp_dest_file\n elif \"waveform\" in dataset.data_type:\n dest_file = waveform_dest_file\n with h5py.File(dest_file, \"r\") as fidin:\n root, ext = os.path.splitext(os.path.basename(dest_file))\n for key in fidin.keys():\n if key.startswith(root):\n read_data = fidin[key][...]\n self.assertTrue(np.array_equal(dataset.data, read_data))\n try:\n if os.path.exists(dest_file):\n os.remove(dest_file)\n except WindowsError: # file in use\n pass\n\n\nif __name__ == \"__main__\":\n random.seed()\n unittest.main()",
"\"\"\"test_preview_window_model.py - tests the preview_window_model\r\n\r\nChris R. Coughlin (TRI/Austin, Inc.)\r\n\"\"\"\r\n\r\n__author__ = 'Chris R. Coughlin'\r\n\r\nfrom models import preview_window_model\r\nimport h5py\r\nimport numpy as np\r\nimport os\r\nimport random\r\nimport unittest\r\n\r\nclass TestPreviewWindowModel(unittest.TestCase):\r\n \"\"\"Tests the PreviewWindowModel\"\"\"\r\n\r\n def setUp(self):\r\n self.mock_ctrl = \" \"\r\n self.sample_data = np.array(self.random_data())\r\n self.sample_data_file = os.path.normpath(os.path.join(os.path.dirname(__file__),\r\n \"sample.dat\"))\r\n #np.savetxt(self.sample_data_file, self.sample_data)\r\n with h5py.File(self.sample_data_file, 'w') as fidout:\r\n fidout.create_dataset(os.path.basename(self.sample_data_file), data=self.sample_data)\r\n\r\n def random_data(self):\r\n \"\"\"Generates a random list of data\"\"\"\r\n return np.array([random.uniform(-100, 100) for i in range(25)])\r\n\r\n def random3D_data(self):\r\n \"\"\"Generates a random 3D array of data\"\"\"\r\n raw_array = np.array([random.uniform(-100, 100) for i in range(24)])\r\n three_d_array = raw_array.reshape((3, 2, 4))\r\n return three_d_array\r\n\r\n def test_init(self):\r\n \"\"\"Verify instantiation and initial settings\"\"\"\r\n a_model = preview_window_model.PreviewWindowModel(self.mock_ctrl, self.sample_data_file)\r\n self.assertEqual(self.sample_data_file, a_model.data_file)\r\n self.assertIsNone(a_model.data)\r\n\r\n def test_load_data(self):\r\n \"\"\"Verify load_data method returns numpy data array\"\"\"\r\n a_model = preview_window_model.PreviewWindowModel(self.mock_ctrl, self.sample_data_file)\r\n a_model.load_data()\r\n self.assertTrue(np.array_equal(self.sample_data, a_model.data))\r\n\r\n def test_slice_data(self):\r\n \"\"\"Verify a 3D array is replaced by a 2D slice\"\"\"\r\n a_model = preview_window_model.PreviewWindowModel(self.mock_ctrl, self.sample_data_file)\r\n three_d_array = self.random3D_data()\r\n a_model.data = three_d_array\r\n slice_idx = random.choice(range(three_d_array.shape[2]))\r\n expected_data = three_d_array[:, :, slice_idx]\r\n a_model.slice_data(slice_idx)\r\n self.assertTrue(np.array_equal(expected_data, a_model.data))\r\n\r\n def tearDown(self):\r\n if os.path.exists(self.sample_data_file + \".hdf5\"):\r\n os.remove(self.sample_data_file + \".hdf5\")\r\n\r\nif __name__ == \"__main__\":\r\n random.seed()\r\n unittest.main()"
] | [
[
"numpy.fromfile",
"numpy.array_equal",
"numpy.genfromtxt",
"numpy.loadtxt",
"numpy.load",
"numpy.empty"
],
[
"numpy.array_equal"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
Kohulan/Decimer-Python | [
"17373e02faedb28ba94742f61001bb3c6b015798"
] | [
"Networks/4_layer_net_Parameter_optimization.py"
] | [
"'''\r\n * This Software is under the MIT License\r\n * Refer to LICENSE or https://opensource.org/licenses/MIT for more information\r\n * Written by Kohulan Rajan\r\n * © 2019\r\n'''\r\n#Parallelized datareading network\r\n\r\nimport tensorflow as tf\r\nimport os\r\nimport sys\r\nimport numpy as np\r\nimport matplotlib as mpl\r\nimport csv\r\nmpl.use('Agg')\r\nimport matplotlib.pyplot as plt\r\nfrom datetime import datetime\r\nfrom numpy import array\r\nimport pickle\r\nimport lz4.frame as lz\r\nimport multiprocessing\r\nnp.set_printoptions(threshold=np.nan)\r\n\r\n#Set the Desired Gpu from the cluster\r\nos.environ[\"CUDA_DEVICE_ORDER\"]=\"PCI_BUS_ID\"\r\nos.environ[\"CUDA_VISIBLE_DEVICES\"]=\"0\"\r\n\r\n#Set Hidden neurons count\r\nhidden_neurons_list_I = [2,4,8,16,32,64,128,512,1024,2048,4096]\r\nhidden_neurons_list_II = [2,4,8,16,32,64,128,512,1024,2048,4096]\r\n\r\n#Set Batch Size\r\nbatch_sizer_list = [500,1000]\r\n\r\n#Set Learning rate\r\nlearning_rate_list = [0.001,0.003,0.005,0.007,0.009,0.01]\r\n\r\n#Paramter Optimizing loops\r\nfor hidden_neurons_I in range(len(hidden_neurons_list_I)):\r\n\tfor hidden_neurons_II in range(len(hidden_neurons_list_II)):\r\n\t\tfor batch_sizer in range(len(batch_sizer_list)):\r\n\t\t\tfor learning_rate_ in range(len(learning_rate_list)):\r\n\t\t\t\tf = open(\"/Results/Batch Size_{}_learning_rate_{}_hidden_neurons_{}_x_{}.txt\".format(batch_sizer_list[batch_sizer],learning_rate_list[learning_rate_],hidden_neurons_list_I[hidden_neurons_I],hidden_neurons_list_II[hidden_neurons_II]), 'w',0)\r\n\t\t\t\tsys.stdout = f\r\n\t\t\t\tprint (datetime.now().strftime('%Y/%m/%d %H:%M:%S'),\"Network Started\")\r\n\r\n\t\t\t\t#Data input from image data\r\n\r\n\t\t\t\t#labels\r\n\t\t\t\tdef label_data(is_test=False):\r\n\t\t\t\t\tdata_path = \"train\"\r\n\t\t\t\t\tif is_test:\r\n\t\t\t\t\t\tdata_path = \"test\"\r\n\t\t\t\t\tmyFile = open('/Data/Potential'+data_path+'_labels.csv',\"r\")\r\n\t\t\t\t\tlabels = []\r\n\t\t\t\t\tfor row in myFile:\r\n\t\t\t\t\t\tx = int(row.strip().split(\",\")[1])\r\n\t\t\t\t\t\tlabels.append(x)\r\n\t\t\t\t\tmyFile.close()\r\n\t\t\t\t\treturn np.asarray(labels)\r\n\r\n\t\t\t\ty_train = label_data()\r\n\t\t\t\ty_test = label_data(is_test=True)\r\n\r\n\t\t\t\tprint (datetime.now().strftime('%Y/%m/%d %H:%M:%S'),\"Labels loaded !!\")\r\n\r\n\t\t\t\t#Image array data\r\n\t\t\t\tTrain_Images = pickle.load( open(\"/Data/train_compressed.txt\",\"rb\"))\r\n\t\t\t\tTest_Images = pickle.load( open(\"/Data/test_compressed.txt\",\"rb\"))\r\n\t\t\t\ttrain_items = Train_Images.items()\r\n\t\t\t\ttest_items = Test_Images.items()\r\n\t\t\t\tprint (datetime.now().strftime('%Y/%m/%d %H:%M:%S'),\"Loading done! Train\",len(train_items))\r\n\t\t\t\tprint (datetime.now().strftime('%Y/%m/%d %H:%M:%S'),\"Loading done! 
Test\",len(test_items))\r\n\r\n\t\t\t\t#one hot vector transformation\r\n\t\t\t\tdef one_hot(y, n_labels):\r\n\t\t\t\t\tmat = np.zeros((len(y), n_labels))\r\n\t\t\t\t\tfor i, val in enumerate(y):\r\n\t\t\t\t\t\tmat[i, val] = 1\r\n\t\t\t\t\treturn mat\r\n\r\n\t\t\t\t# Parameters\r\n\t\t\t\tlearning_rate = learning_rate_list[learning_rate_]\r\n\t\t\t\ttraining_epochs = 20\r\n\t\t\t\tbatch_size = batch_sizer_list[batch_sizer]\r\n\t\t\t\tdisplay_step = 1\r\n\t\t\t\ttestbatch_size = 1000\r\n\t\t\t\ttotaltrain_batch = len(train_items)/batch_size\r\n\t\t\t\ttotaltest_batch = len(test_items)/testbatch_size\r\n\r\n\t\t\t\t# Network Parameters\r\n\t\t\t\tn_hidden_1 = hidden_neurons_list_I[hidden_neurons_I] # 1st layer number of neurons\r\n\t\t\t\tn_hidden_2 = hidden_neurons_list_II[hidden_neurons_II] # 1st layer number of neurons\r\n\t\t\t\tn_input = 256*256 # Data input (Image shape: 1024 * 1024)\r\n\t\t\t\tn_classes = 36 # Bond_Count\r\n\r\n\t\t\t\t# tf Graph input\r\n\t\t\t\tX = tf.placeholder(\"float\", [None, n_input])\r\n\t\t\t\tY = tf.placeholder(\"float\", [None, n_classes])\r\n\r\n\t\t\t\t# Store layers weight & bias\r\n\t\t\t\tweights = {\r\n\t\t\t\t\t'h1': tf.Variable(tf.random_normal([n_input, n_hidden_1])),\r\n\t\t\t\t\t'h2': tf.Variable(tf.random_normal([n_hidden_1, n_hidden_2])),\r\n\t\t\t\t\t'out': tf.Variable(tf.random_normal([n_hidden_2, n_classes]))\r\n\t\t\t\t}\r\n\t\t\t\tbiases = {\r\n\t\t\t\t\t'b1': tf.Variable(tf.random_normal([n_hidden_1])),\r\n\t\t\t\t\t'b2': tf.Variable(tf.random_normal([n_hidden_2])),\r\n\t\t\t\t\t'out': tf.Variable(tf.random_normal([n_classes]))\r\n\t\t\t\t}\r\n\r\n\t\t\t\t# Create model\r\n\t\t\t\tdef multilayer_perceptron(x):\r\n\t\t\t\t\t# Fully Connected Hidden Layers\r\n\t\t\t\t\tlayer_1 = tf.add(tf.matmul(x, weights['h1']), biases['b1'])\r\n\t\t\t\t\tlayer_1 = tf.nn.relu(layer_1)\r\n\t\t\t\t\t\r\n\t\t\t\t\tlayer_2 = tf.add(tf.matmul(layer_1, weights['h2']), biases['b2'])\r\n\t\t\t\t\tlayer_2 = tf.nn.relu(layer_2)\r\n\r\n\t\t\t\t\t# Output fully connected layer with a neuron for each class\r\n\t\t\t\t\tout_layer = tf.matmul(layer_2, weights['out']) + biases['out']\r\n\t\t\t\t\treturn out_layer\r\n\r\n\t\t\t\t# Construct model\r\n\t\t\t\tlogits = multilayer_perceptron(X)\r\n\r\n\t\t\t\t# Define loss and optimizer\r\n\t\t\t\tloss_op = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits=logits, labels=Y))\r\n\t\t\t\toptimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)\r\n\t\t\t\ttrain_op = optimizer.minimize(loss_op)\r\n\r\n\t\t\t\t# Initializing the variables\r\n\t\t\t\tinit = tf.global_variables_initializer()\r\n\r\n\t\t\t\t# encoding labels to one_hot vectors\r\n\t\t\t\ty_data_enc = one_hot(y_train, n_classes)\r\n\t\t\t\ty_test_enc = one_hot(y_test, n_classes)\r\n\r\n\t\t\t\t# Evaluate model (with test logits, for dropout to be disabled)\r\n\t\t\t\tcorrect_prediction = tf.equal(tf.argmax(logits, 1), tf.argmax(Y, 1))\r\n\t\t\t\taccuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\r\n\r\n\t\t\t\t# Evaluate the errors, mean,median and maximum errors\r\n\t\t\t\tpred = tf.argmax(logits, 1)\r\n\t\t\t\tpred_difference = tf.subtract(tf.argmax(Y, 1),tf.argmax(logits, 1))\r\n\t\t\t\tmean_error=[]\r\n\t\t\t\tmedian_error=[]\r\n\t\t\t\tmaximum_error=[]\r\n\t\t\t\t\r\n\t\t\t\t#Initiating data for plots\r\n\t\t\t\tloss_history = []\r\n\t\t\t\tacc_history = []\r\n\t\t\t\tvalid_history = []\r\n\t\t\t\tacc_valid_history = []\r\n\t\t\t\tdifference_history = []\r\n\t\t\t\ttest_loss_history = []\r\n\t\t\t\ttest_accuracy_history = 
[]\r\n\r\n\t\t\t\tprint (\"Data decompression for test batch started!\")\r\n\r\n\t\t\t\t#-----------------------------------------------------------------------------------------------------------------\r\n\t\t\t\tprint (\"Total available threads for multiprocessing: \",multiprocessing.cpu_count())\r\n\r\n\t\t\t\t#Decompressing Lines Test\r\n\t\t\t\tdef decomp_test(k):\r\n\t\t\t\t\tstrarraytest = (lz.decompress(Test_Images.values()[k]))\r\n\t\t\t\t\tfloatarray_test = np.fromstring(strarraytest, dtype=float, sep=',')\r\n\t\t\t\t\tfloatarray32_test = np.array(floatarray_test).astype(np.float32)\r\n\t\t\t\t\tencoded_array_test=(1.0-floatarray32_test/255.0)\r\n\t\t\t\t\treturn encoded_array_test\r\n\r\n\t\t\t\tpool_test = multiprocessing.Pool()\r\n\r\n\t\t\t\tdef decomp_train(j):\r\n\t\t\t\t\tstrarray = (lz.decompress(Train_Images.values()[j]))\r\n\t\t\t\t\tfloatarray = np.fromstring(strarray, dtype=float, sep=',')\r\n\t\t\t\t\tfloatarray32 = np.array(floatarray).astype(np.float32)\r\n\t\t\t\t\tencoded_array=(1.0-floatarray32/255.0)\r\n\t\t\t\t\treturn encoded_array\r\n\t\t\t\t\r\n\t\t\t\tpool_train = multiprocessing.Pool()\r\n\t\t\t\t\r\n\t\t\t\t#Network training\r\n\t\t\t\tprint (datetime.now().strftime('%Y/%m/%d %H:%M:%S'),\"Training Started\")\r\n\t\t\t\tconfig = tf.ConfigProto(allow_soft_placement=True)\r\n\t\t\t\tconfig.gpu_options.allow_growth = True\r\n\t\t\t\tconfig.gpu_options.allocator_type = 'BFC'\r\n\r\n\t\t\t\twith tf.Session(config=config) as sess:\r\n\t\t\t\t\tsess.run(init)\r\n\r\n\t\t\t\t\t# Training cycle\r\n\t\t\t\t\tfor epoch in range(training_epochs):\r\n\t\t\t\t\t\tavg_cost = 0\r\n\t\t\t\t\t\tprint (\"total batch\",totaltrain_batch)\r\n\t\t\t\t\t\tcounter=0\r\n\t\t\t\t\t\ttotal_correct_preds = 0\r\n\t\t\t\t\t\tTrain_loss_per_batch = 0\r\n\t\t\t\t\t\t\r\n\t\t\t\t\t\t# Loop over all batches\r\n\t\t\t\t\t\tfor l in range(totaltrain_batch):\r\n\t\t\t\t\t\t\tprint (\"bathc\",l)\r\n\t\t\t\t\t\t\tprint (\"tests\",\"count\",counter,\"batchsize\",counter+batch_size)\r\n\t\t\t\t\t\t\ttrain_batchX = pool_train.map(decomp_train,range(counter,counter+batch_size))\r\n\t\t\t\t\t\t\tbatch_x=train_batchX\r\n\t\t\t\t\t\t\tbatch_y=y_data_enc[counter:(counter+len(train_batchX))]\r\n\t\t\t\t\t\t\t_, c = sess.run([train_op, loss_op], feed_dict={X: batch_x,Y: batch_y})\r\n\t\t\t\t\t\t\tTrain_loss_per_batch += c \r\n\t\t\t\t\t\t\t\r\n\t\t\t\t\r\n\t\t\t\t\t\t\t#Validation and calculating training accuracy\r\n\t\t\t\t\t\t\t_, accu_train = sess.run([loss_op, accuracy], feed_dict={X: batch_x,Y: batch_y})\r\n\t\t\t\t\t\t\tvalid_history.append(accu_train)\r\n\t\t\t\t\t\t\ttotal_correct_preds += accu_train\r\n\t\t\t\t\t\t\tprint (datetime.now().strftime('%Y/%m/%d %H:%M:%S'),\"train Accuracy:\",accu_train)\r\n\t\t\t\t\r\n\t\t\t\t\t\t\tprint (datetime.now().strftime('%Y/%m/%d %H:%M:%S'),counter,\"batch over\")\r\n\t\t\t\t\t\t\tcounter += len(train_batchX)\r\n\t\t\t\t\t\t\t\r\n\t\t\t\t\t\tvalidation_accuracy = total_correct_preds/totaltrain_batch\r\n\t\t\t\t\t\tprint (datetime.now().strftime('%Y/%m/%d %H:%M:%S'),\"Train accuracy:\",validation_accuracy)\r\n\t\t\t\t\t\tacc_valid_history.append(validation_accuracy)\r\n\t\t\t\t\t\tloss_history.append(Train_loss_per_batch/totaltrain_batch)\r\n\t\t\t\t\t\t\r\n\t\t\t\t\t\t#Testing\r\n\t\t\t\t\t\tcounter_test = 0\r\n\t\t\t\t\t\tAll_test_loss = 0\r\n\t\t\t\t\t\tAll_error = 0\r\n\t\t\t\t\t\ttest_accuracy_perbatch = 0\r\n\t\t\t\t\t\tfor test_set in range(totaltest_batch):\r\n\t\t\t\t\t\t\tX_test = 
pool_test.map(decomp_test,range(counter_test,counter_test+testbatch_size))\r\n\t\t\t\t\t\t\tY_test = y_test_enc[counter_test:(counter_test+len(X_test))]\r\n\t\t\t\t\r\n\t\t\t\t\t\t\ttest_acc = accuracy.eval({X: X_test, Y: Y_test})\r\n\t\t\t\t\t\t\tprint (datetime.now().strftime('%Y/%m/%d %H:%M:%S'),\"Accuracy:\", test_acc)\r\n\t\t\t\t\t\t\ttest_accuracy_perbatch += test_acc\r\n\t\t\t\t\t\t\ttest_loss_batch,predict,error = sess.run([loss_op,pred,pred_difference], feed_dict={X: X_test, Y: Y_test})\r\n\t\t\t\t\t\t\tAll_test_loss += test_loss_batch\r\n\t\t\t\t\t\t\tAll_error += error\r\n\t\t\t\t\t\t\t#print(predict)\r\n\t\t\t\t\t\t\tcounter_test += len(X_test)\r\n\t\t\t\t\t\t\t\t\r\n\t\t\t\t\r\n\t\t\t\t\t\t\t\r\n\t\t\t\t\t\t#Statistics\t\r\n\t\t\t\t\t\tprint (datetime.now().strftime('%Y/%m/%d %H:%M:%S'),\"Final Test Accuracy:\",test_accuracy_perbatch/totaltest_batch)\t\r\n\t\t\t\t\t\tmean_error.append(np.absolute(np.mean(All_error/totaltest_batch)))\t\r\n\t\t\t\t\t\tmedian_error.append(np.absolute(np.median(All_error/totaltest_batch)))\t\r\n\t\t\t\t\t\tmaximum_error.append(np.absolute(np.amax(All_error/totaltest_batch)))\t\r\n\t\t\t\t\t\ttest_loss_history.append(All_test_loss/totaltest_batch)\t\r\n\t\t\t\t\t\ttest_accuracy_history.append(test_accuracy_perbatch/totaltest_batch)\t\r\n\t\t\t\t\t\t\t\r\n\t\t\t\t\t\t# Display logs per epoch step\t\r\n\t\t\t\t\t\tif epoch % display_step == 0:\t\r\n\t\t\t\t\t\t\tprint (datetime.now().strftime('%Y/%m/%d %H:%M:%S'),\"Epoch:\", '%04d' % (epoch+1))\t\r\n\t\t\t\t\tprint (datetime.now().strftime('%Y/%m/%d %H:%M:%S'),\"Optimization Finished!\")\t\r\n\t\t\t\t\tprint (datetime.now().strftime('%Y/%m/%d %H:%M:%S'),\"Network completed\")\t\r\n\t\t\t\t\tf.close()\t\r\n\t\t\t\t\tpool_train.close()\t\r\n\t\t\t\t\t\t\r\n\t\t\t\t\t\r\n\t\t\t\t\t\t\r\n\t\t\t\t\t\r\n\t\t\t\t\t# Final results for various bond counts\r\n\t\t\t\t\tfile_append = open('/Results/Final_Report.txt' , 'a+')\r\n\t\t\t\t\tsys.stdout = file_append\r\n\t\t\t\t\tprint(\"\\n---------------------------------------------------------------------------------------------------------------------------------------------------------------------\\n\")\r\n\t\t\t\t\tprint(\"Batch Size_{}_learning_rate_{}_hidden_neurons_{}_x_{}.txt\".format(batch_sizer_list[batch_sizer],learning_rate_list[learning_rate_],hidden_neurons_list_I[hidden_neurons_I],hidden_neurons_list_II[hidden_neurons_II]))\r\n\t\t\t\t\tprint (datetime.now().strftime('%Y/%m/%d %H:%M:%S'),\"Final Train accuracy:\",validation_accuracy)\r\n\t\t\t\t\tprint (datetime.now().strftime('%Y/%m/%d %H:%M:%S'),\"Final Test Accuracy:\",test_accuracy_perbatch/totaltest_batch)\r\n\t\t\t\t\tcounter_test_x = 0\r\n\t\t\t\t\tprediction_difference = 0\r\n\t\t\t\t\tfor testing in range(totaltest_batch):\r\n\t\t\t\t\t\tX_test = pool_test.map(decomp_test,range(counter_test_x,counter_test_x+testbatch_size))\r\n\t\t\t\t\t\tY_test = y_test_enc[counter_test_x:(counter_test_x+len(X_test))]\r\n\t\t\t\t\t\t_, predict,prediction_difference_batch = sess.run([loss_op,pred,pred_difference], feed_dict={X: X_test, Y: Y_test})\r\n\t\t\t\t\t\tprediction_difference += prediction_difference_batch\r\n\t\t\t\t\t\tcounter_test_x += len(X_test)\r\n\t\t\t\t\t\t\r\n\t\t\t\t\tprediction_window = np.absolute(prediction_difference)\r\n\t\t\t\t\tpool_test.close()\r\n\t\t\t\t\tfor j in range(10):\r\n\t\t\t\t\t\tcount_error = 0\r\n\t\t\t\t\t\tfor i in prediction_window:\r\n\t\t\t\t\t\t\tif i<=j: \r\n\t\t\t\t\t\t\t\tcount_error+=1\r\n\t\t\t\t\t\tWindow_accuracy = 
float(count_error)/len(test_items)*100\r\n\t\t\t\t\t\tprint(\"Currectly predicted bond count with error less than\",j,\"bonds, Accuracy ={:.2f}\".format(Window_accuracy))\r\n\t\t\t\tfile_append.close()\r\n \r\n #Matplot plot depiction\r\n\t\t\t\tplt.subplot(3,1,1)\r\n\t\t\t\tplt.plot(loss_history, '-o', label='Train Loss value')\r\n\t\t\t\tplt.title('Training & Tesing Loss')\r\n\t\t\t\tplt.xlabel('Epoch x Batches')\r\n\t\t\t\tplt.ylabel('Loss Value')\r\n\t\t\t\tplt.plot(test_loss_history, '-o', label='Test Loss value')\r\n\t\t\t\tplt.xlabel('Epoch x Batches')\r\n\t\t\t\tplt.ylabel('Loss Value')\r\n\t\t\t\tplt.legend(ncol=2, loc='upper right')\r\n\t\t\t\tplt.subplot(3,1,2)\r\n\t\t\t\tplt.gca().set_ylim([0,1.0])\r\n\t\t\t\tplt.plot(acc_valid_history, '-o', label='Train Accuracy value')\r\n\t\t\t\tplt.plot(test_accuracy_history, '-o', label='Test Accuracy value')\r\n\t\t\t\t#plt.plot(difference_history, '-o', label='Train-Test Accuracy')\r\n\t\t\t\tplt.title('Train & Test Accuracy')\r\n\t\t\t\tplt.xlabel('Batches')\r\n\t\t\t\tplt.ylabel('Accuracy')\r\n\t\t\t\tplt.legend(ncol=2, loc='lower right')\r\n\t\t\t\tplt.subplot(3,1,3)\r\n\t\t\t\tplt.plot(mean_error, '-o', label='Mean of error')\r\n\t\t\t\tplt.plot(median_error, '-o', label='Median of error')\r\n\t\t\t\tplt.plot(maximum_error, '-o', label='Maximum error')\r\n\t\t\t\tplt.xlabel('Batches')\r\n\t\t\t\tplt.ylabel('Error')\r\n\t\t\t\tplt.legend(ncol=2, loc='lower right')\r\n\t\t\t\tplt.gcf().set_size_inches(15, 30)\r\n\t\t\t\tplt.savefig(\"/Results/Batch Size_{}_learning_rate_{}_hidden_neurons_{}_x_{}.png\".format(batch_sizer_list[batch_sizer],learning_rate_list[learning_rate_],hidden_neurons_list_I[hidden_neurons_I],hidden_neurons_list_II[hidden_neurons_II]))\r\n\t\t\t\tplt.close()"
] | [
[
"matplotlib.pyplot.legend",
"numpy.amax",
"numpy.asarray",
"tensorflow.cast",
"matplotlib.pyplot.plot",
"numpy.mean",
"tensorflow.train.AdamOptimizer",
"matplotlib.pyplot.gca",
"matplotlib.pyplot.gcf",
"tensorflow.ConfigProto",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.close",
"tensorflow.Session",
"tensorflow.argmax",
"tensorflow.matmul",
"matplotlib.pyplot.title",
"numpy.median",
"tensorflow.placeholder",
"tensorflow.global_variables_initializer",
"numpy.array",
"matplotlib.pyplot.ylabel",
"tensorflow.nn.relu",
"numpy.absolute",
"matplotlib.use",
"numpy.set_printoptions",
"numpy.fromstring",
"tensorflow.nn.softmax_cross_entropy_with_logits_v2",
"matplotlib.pyplot.xlabel",
"tensorflow.random_normal"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
}
] |
PyJedi/quantum | [
"3f4a3c320e048b8a8faf3a10339975d2d5366fb6"
] | [
"tensorflow_quantum/core/ops/batch_util_test.py"
] | [
"# Copyright 2020 The TensorFlow Quantum Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Test parallel Cirq simulations.\"\"\"\nimport numpy as np\nimport tensorflow as tf\nfrom absl.testing import parameterized\nfrom scipy import stats\nimport cirq\n\nfrom tensorflow_quantum.core.ops import batch_util\nfrom tensorflow_quantum.python import util\n\nBATCH_SIZE = 12\nN_QUBITS = 5\nPAULI_LENGTH = 3\nSYMBOLS = ['alpha', 'beta', 'gamma']\n\n\ndef _get_mixed_batch(qubits, symbols, size):\n circuit1, resolver1 = util.random_circuit_resolver_batch(qubits, size // 2)\n circuit2, resolver2 = util.random_symbol_circuit_resolver_batch(\n qubits, symbols, size // 2)\n return circuit1 + circuit2, resolver1 + resolver2\n\n\ndef _pad_state(sim, state, n):\n if isinstance(sim, cirq.sim.sparse_simulator.Simulator):\n state = state.final_state\n if isinstance(sim, cirq.DensityMatrixSimulator):\n state = state.final_density_matrix\n return np.pad(state, (0, (1 << n) - state.shape[-1]),\n 'constant',\n constant_values=-2)\n\n\ndef _expectation_helper(sim, circuit, params, op):\n if isinstance(sim, cirq.sim.sparse_simulator.Simulator):\n state = sim.simulate(circuit, params).final_state.astype(np.complex128)\n return [\n op.expectation_from_wavefunction(\n state,\n dict(\n zip(sorted(circuit.all_qubits()),\n (j for j in range(len(circuit.all_qubits())))))).real\n ]\n if isinstance(sim, cirq.DensityMatrixSimulator):\n state = sim.simulate(circuit, params).final_density_matrix\n return [\n sum(\n x._expectation_from_density_matrix_no_validation(\n state,\n dict(\n zip(sorted(circuit.all_qubits()), (\n j\n for j in range(len(circuit.all_qubits()))))))\n for x in op)\n ]\n\n return NotImplemented\n\n\ndef _sample_helper(sim, state, n_qubits, n_samples):\n if isinstance(sim, cirq.sim.sparse_simulator.Simulator):\n return cirq.sample_state_vector(state.final_state,\n list(range(n_qubits)),\n repetitions=n_samples)\n if isinstance(sim, cirq.DensityMatrixSimulator):\n return cirq.sample_density_matrix(state.final_density_matrix,\n list(range(n_qubits)),\n repetitions=n_samples)\n\n return NotImplemented\n\n\nclass BatchUtilTest(tf.test.TestCase, parameterized.TestCase):\n \"\"\"Test cases for BatchUtils main functions.\"\"\"\n\n @parameterized.parameters([{\n 'sim': cirq.DensityMatrixSimulator()\n }, {\n 'sim': cirq.sim.sparse_simulator.Simulator()\n }])\n def test_batch_simulate_state(self, sim):\n \"\"\"Test variable sized wavefunction output.\"\"\"\n circuit_batch, resolver_batch = _get_mixed_batch(\n cirq.GridQubit.rect(1, N_QUBITS), SYMBOLS, BATCH_SIZE)\n results = batch_util.batch_calculate_state(circuit_batch,\n resolver_batch, sim)\n\n for circuit, resolver, result in zip(circuit_batch, resolver_batch,\n results):\n r = _pad_state(sim, sim.simulate(circuit, resolver), N_QUBITS)\n self.assertAllClose(r, result, rtol=1e-5, atol=1e-5)\n\n self.assertDTypeEqual(results, np.complex64)\n\n 
@parameterized.parameters([{\n 'sim': cirq.DensityMatrixSimulator()\n }, {\n 'sim': cirq.sim.sparse_simulator.Simulator()\n }])\n def test_batch_expectation(self, sim):\n \"\"\"Test expectation.\"\"\"\n qubits = cirq.GridQubit.rect(1, N_QUBITS)\n circuit_batch, resolver_batch = _get_mixed_batch(\n qubits + [cirq.GridQubit(9, 9)], SYMBOLS, BATCH_SIZE)\n ops = util.random_pauli_sums(qubits, PAULI_LENGTH, BATCH_SIZE)\n\n results = batch_util.batch_calculate_expectation(\n circuit_batch, resolver_batch, [[x] for x in ops], sim)\n\n for circuit, resolver, result, op in zip(circuit_batch, resolver_batch,\n results, ops):\n r = _expectation_helper(sim, circuit, resolver, op)\n self.assertAllClose(r, result, rtol=1e-5, atol=1e-5)\n\n self.assertDTypeEqual(results, np.float32)\n\n @parameterized.parameters([{\n 'sim': cirq.DensityMatrixSimulator()\n }, {\n 'sim': cirq.sim.sparse_simulator.Simulator()\n }])\n def test_batch_sampled_expectation(self, sim):\n \"\"\"Test expectation.\"\"\"\n qubits = cirq.GridQubit.rect(1, N_QUBITS)\n circuit_batch, resolver_batch = _get_mixed_batch(\n qubits + [cirq.GridQubit(9, 9)], SYMBOLS, BATCH_SIZE)\n\n ops = util.random_pauli_sums(qubits, PAULI_LENGTH, BATCH_SIZE)\n n_samples = [[1000] for _ in range(len(ops))]\n\n results = batch_util.batch_calculate_sampled_expectation(\n circuit_batch, resolver_batch, [[x] for x in ops], n_samples, sim)\n\n for circuit, resolver, result, op in zip(circuit_batch, resolver_batch,\n results, ops):\n r = _expectation_helper(sim, circuit, resolver, op)\n self.assertAllClose(r, result, rtol=1.0, atol=1e-1)\n\n self.assertDTypeEqual(results, np.float32)\n\n @parameterized.parameters([{\n 'sim': cirq.DensityMatrixSimulator()\n }, {\n 'sim': cirq.sim.sparse_simulator.Simulator()\n }])\n def test_batch_sample_basic(self, sim):\n \"\"\"Test sampling.\"\"\"\n n_samples = 1\n n_qubits = 8\n qubits = cirq.GridQubit.rect(1, n_qubits)\n circuit = cirq.Circuit(*cirq.Z.on_each(*qubits[:n_qubits // 2]),\n *cirq.X.on_each(*qubits[n_qubits // 2:]))\n\n test_results = batch_util.batch_sample([circuit],\n [cirq.ParamResolver({})],\n n_samples, sim)\n\n state = sim.simulate(circuit, cirq.ParamResolver({}))\n expected_results = _sample_helper(sim, state, len(qubits), n_samples)\n\n self.assertAllEqual(expected_results, test_results[0])\n self.assertDTypeEqual(test_results, np.int32)\n\n @parameterized.parameters([{\n 'sim': cirq.DensityMatrixSimulator()\n }, {\n 'sim': cirq.sim.sparse_simulator.Simulator()\n }])\n def test_batch_sample(self, sim):\n \"\"\"Test sampling.\"\"\"\n n_samples = 2000 * (2**N_QUBITS)\n\n circuit_batch, resolver_batch = _get_mixed_batch(\n cirq.GridQubit.rect(1, N_QUBITS), SYMBOLS, BATCH_SIZE)\n\n results = batch_util.batch_sample(circuit_batch, resolver_batch,\n n_samples, sim)\n\n tfq_histograms = []\n for r in results:\n tfq_histograms.append(\n np.histogram(r.dot(1 << np.arange(r.shape[-1] - 1, -1, -1)),\n range=(0, 2**N_QUBITS),\n bins=2**N_QUBITS)[0])\n\n cirq_histograms = []\n for circuit, resolver in zip(circuit_batch, resolver_batch):\n state = sim.simulate(circuit, resolver)\n r = _sample_helper(sim, state, len(circuit.all_qubits()), n_samples)\n cirq_histograms.append(\n np.histogram(r.dot(1 << np.arange(r.shape[-1] - 1, -1, -1)),\n range=(0, 2**N_QUBITS),\n bins=2**N_QUBITS)[0])\n\n for a, b in zip(tfq_histograms, cirq_histograms):\n self.assertLess(stats.entropy(a + 1e-8, b + 1e-8), 0.005)\n\n self.assertDTypeEqual(results, np.int32)\n\n @parameterized.parameters([{\n 'sim': cirq.DensityMatrixSimulator()\n }, 
{\n 'sim': cirq.sim.sparse_simulator.Simulator()\n }])\n def test_empty_circuits(self, sim):\n \"\"\"Test functions with empty circuits.\"\"\"\n # Common preparation\n resolver_batch = [cirq.ParamResolver({}) for _ in range(BATCH_SIZE)]\n circuit_batch = [cirq.Circuit() for _ in range(BATCH_SIZE)]\n qubits = cirq.GridQubit.rect(1, N_QUBITS)\n ops = util.random_pauli_sums(qubits, PAULI_LENGTH, BATCH_SIZE)\n n_samples = [[1000] for _ in range(len(ops))]\n # If there is no op on a qubit, the expectation answer is -2.0\n true_expectation = (-2.0,)\n\n # (1) Test expectation\n results = batch_util.batch_calculate_expectation(\n circuit_batch, resolver_batch, [[x] for x in ops], sim)\n\n for _, _, result, _ in zip(circuit_batch, resolver_batch, results, ops):\n self.assertAllClose(true_expectation, result, rtol=1e-5, atol=1e-5)\n\n self.assertDTypeEqual(results, np.float32)\n\n # (2) Test sampled_expectation\n results = batch_util.batch_calculate_sampled_expectation(\n circuit_batch, resolver_batch, [[x] for x in ops], n_samples, sim)\n\n for _, _, result, _ in zip(circuit_batch, resolver_batch, results, ops):\n self.assertAllClose(true_expectation, result, rtol=1.0, atol=1e-1)\n\n self.assertDTypeEqual(results, np.float32)\n\n # (3) Test state\n results = batch_util.batch_calculate_state(circuit_batch,\n resolver_batch, sim)\n\n for circuit, resolver, result in zip(circuit_batch, resolver_batch,\n results):\n r = _pad_state(sim, sim.simulate(circuit, resolver), 0)\n self.assertAllClose(r, result, rtol=1e-5, atol=1e-5)\n\n self.assertDTypeEqual(results, np.complex64)\n\n # (4) Test sampling\n n_samples = 2000 * (2**N_QUBITS)\n results = batch_util.batch_sample(circuit_batch, resolver_batch,\n n_samples, sim)\n\n for circuit, resolver, a in zip(circuit_batch, resolver_batch, results):\n state = sim.simulate(circuit, resolver)\n r = _sample_helper(sim, state, len(circuit.all_qubits()), n_samples)\n self.assertAllClose(r, a, atol=1e-5)\n\n self.assertDTypeEqual(results, np.int32)\n\n\nif __name__ == '__main__':\n tf.test.main()\n"
] | [
[
"numpy.arange",
"tensorflow.test.main",
"numpy.pad",
"scipy.stats.entropy"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"1.6",
"0.14",
"1.10",
"0.15",
"1.4",
"0.16",
"1.9",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"0.12",
"1.0",
"0.17",
"1.3",
"1.8"
],
"tensorflow": []
}
] |
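A minimal sketch of the histogram check in the sampling test above: bit-string samples are folded into integer state indices, binned, and the two distributions compared with scipy.stats.entropy (the KL divergence). The random bits and the seeded NumPy generator are assumptions of this example, standing in for real simulator output:

import numpy as np
from scipy import stats

n_qubits = 5
rng = np.random.default_rng(0)
a = rng.integers(0, 2, size=(2000, n_qubits))   # stand-in for sampler A bit strings
b = rng.integers(0, 2, size=(2000, n_qubits))   # stand-in for sampler B bit strings

weights = 1 << np.arange(n_qubits - 1, -1, -1)  # bits -> integer state index
hist_a = np.histogram(a.dot(weights), range=(0, 2**n_qubits), bins=2**n_qubits)[0]
hist_b = np.histogram(b.dot(weights), range=(0, 2**n_qubits), bins=2**n_qubits)[0]

# stats.entropy(p, q) is the KL divergence D(p || q); the small constant avoids log(0)
print(stats.entropy(hist_a + 1e-8, hist_b + 1e-8))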
maxgreat/dsve-loc | [
"dd6807d02c0d5fd3e215be8e5c7a88e73102e561"
] | [
"text_features_extraction.py"
] | [
"\"\"\"\r\n****************** COPYRIGHT AND CONFIDENTIALITY INFORMATION ******************\r\nCopyright (c) 2018 [Thomson Licensing]\r\nAll Rights Reserved\r\nThis program contains proprietary information which is a trade secret/business \\\r\nsecret of [Thomson Licensing] and is protected, even if unpublished, under \\\r\napplicable Copyright laws (including French droit d'auteur) and/or may be \\\r\nsubject to one or more patent(s).\r\nRecipient is to retain this program in confidence and is not permitted to use \\\r\nor make copies thereof other than as permitted in a written agreement with \\\r\n[Thomson Licensing] unless otherwise expressly allowed by applicable laws or \\\r\nby [Thomson Licensing] under express agreement.\r\nThomson Licensing is a company of the group TECHNICOLOR\r\n*******************************************************************************\r\nThis scripts permits one to reproduce training and experiments of:\r\n Engilberge, M., Chevallier, L., Pérez, P., & Cord, M. (2018, April).\r\n Finding beans in burgers: Deep semantic-visual embedding with localization.\r\n In Proceedings of CVPR (pp. 3984-3993)\r\n\r\nAuthor: Martin Engilberge\r\n\"\"\"\r\n\r\nimport argparse\r\nimport time\r\n\r\nimport numpy as np\r\nimport torch\r\n\r\nfrom misc.dataset import TextDataset\r\nfrom misc.model import joint_embedding\r\nfrom misc.utils import save_obj, collate_fn_cap_padded\r\nfrom torch.utils.data import DataLoader\r\n\r\n\r\ndevice = torch.device(\"cuda\")\r\n# device = torch.device(\"cpu\") # uncomment to run with cpu\r\n\r\n\r\nif __name__ == '__main__':\r\n\r\n parser = argparse.ArgumentParser(description='Extract embedding representation for images')\r\n parser.add_argument(\"-p\", '--path', dest=\"model_path\", help='Path to the weights of the model to evaluate', required=True)\r\n parser.add_argument(\"-d\", '--data', dest=\"data_path\", help='path to the file containing the sentence to embed')\r\n parser.add_argument(\"-o\", '--output', dest=\"output_path\", help='path of the output file', default=\"./text_embedding\")\r\n parser.add_argument(\"-bs\", \"--batch_size\", help=\"The size of the batches\", type=int, default=1)\r\n\r\n args = parser.parse_args()\r\n\r\n print(\"Loading model from:\", args.model_path)\r\n checkpoint = torch.load(args.model_path, map_location=lambda storage, loc: storage)\r\n\r\n join_emb = joint_embedding(checkpoint['args_dict'])\r\n join_emb.load_state_dict(checkpoint[\"state_dict\"])\r\n\r\n for param in join_emb.parameters():\r\n param.requires_grad = False\r\n\r\n join_emb.to(device)\r\n join_emb.eval()\r\n\r\n dataset = TextDataset(args.data_path)\r\n print(\"Dataset size: \", len(dataset))\r\n\r\n dataset_loader = DataLoader(dataset, batch_size=args.batch_size, num_workers=3, pin_memory=True, collate_fn=collate_fn_cap_padded)\r\n\r\n caps_enc = list()\r\n\r\n print(\"### Starting sentence embedding ###\")\r\n end = time.time()\r\n for i, (caps, length) in enumerate(dataset_loader, 0):\r\n\r\n input_caps = caps.to(device)\r\n\r\n with torch.no_grad():\r\n _, output_emb = join_emb(None, input_caps, length)\r\n\r\n caps_enc.append(output_emb.cpu().data.numpy())\r\n\r\n if i % 100 == 99:\r\n print(str((i + 1) * args.batch_size) + \"/\" + str(len(dataset)) + \" captions encoded - Time per batch: \" + str((time.time() - end)) + \"s\")\r\n\r\n end = time.time()\r\n\r\n print(\"Processing done -> saving\")\r\n caps_stack = np.vstack(caps_enc)\r\n\r\n save_obj(caps_stack, args.output_path)\r\n print(\"The data has been save to \", 
args.output_path)\r\n"
] | [
[
"torch.load",
"torch.utils.data.DataLoader",
"torch.no_grad",
"torch.device",
"numpy.vstack"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
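A minimal sketch of the extraction loop pattern in text_features_extraction.py above (DataLoader batches, torch.no_grad at inference, numpy.vstack to assemble the output). The Linear layer and random tensors are placeholders assumed for this example, not the real joint-embedding model:

import numpy as np
import torch
from torch.utils.data import DataLoader, TensorDataset

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model = torch.nn.Linear(300, 128).to(device).eval()        # placeholder encoder
loader = DataLoader(TensorDataset(torch.randn(64, 300)), batch_size=16)

chunks = []
with torch.no_grad():                                       # no gradients needed at inference time
    for (batch,) in loader:
        chunks.append(model(batch.to(device)).cpu().numpy())

embeddings = np.vstack(chunks)                              # (64, 128) array, ready to save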
MIRCen/brukerapi-python | [
"5455800895924c69bf839fa621fa7a06d343b4ff"
] | [
"test/test_jcampdx.py"
] | [
"from brukerapi.jcampdx import JCAMPDX\nimport numpy as np\nfrom pathlib import Path\nimport pytest\n\[email protected](reason=\"in progress\")\ndef test_jcampdx(test_jcampdx_data):\n\n j = JCAMPDX(Path(test_jcampdx_data[1]) / test_jcampdx_data[0]['path'])\n for key, ref in test_jcampdx_data[0]['parameters'].items():\n parameter_test = j.get_parameter(key)\n size_test= parameter_test.size\n value_test= parameter_test.value\n type_test = value_test.__class__\n\n value_ref = ref['value']\n size_ref = ref['size']\n type_ref = ref['type']\n\n #test SIZE\n if size_ref == 'None':\n size_ref = None\n if isinstance(size_ref, list):\n size_ref = tuple(size_ref)\n elif isinstance(size_ref, int):\n size_ref = (size_ref,)\n assert size_ref == size_test\n\n #test TYPE\n assert type_ref == type_test.__name__\n\n #test VALUE\n if isinstance(value_test, np.ndarray):\n value_ref = np.array(value_ref)\n assert np.array_equal(value_ref, value_test)\n elif isinstance(value_test, list):\n assert value_test == value_ref\n else:\n assert value_ref == value_test\n\n"
] | [
[
"numpy.array",
"numpy.array_equal"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
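A short sketch of the value comparison in test_jcampdx.py above: array-valued parameters are checked with numpy.array_equal, everything else with plain equality. The sample values are hypothetical, chosen only for illustration:

import numpy as np

value_ref = [1.0, 2.0, 3.0]                 # reference value (hypothetical)
value_test = np.array([1.0, 2.0, 3.0])      # parsed parameter value (hypothetical)

if isinstance(value_test, np.ndarray):
    assert np.array_equal(np.array(value_ref), value_test)
else:
    assert value_test == value_ref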
lkelvinm/OpenAeroStruct | [
"395075d28783c1b99b4ab25ddf034000caf9cd0d",
"395075d28783c1b99b4ab25ddf034000caf9cd0d"
] | [
"openaerostruct/structures/section_properties_tube.py",
"openaerostruct/structures/wingbox_geometry.py"
] | [
"from __future__ import division, print_function\nimport numpy as np\n\nfrom openmdao.api import ExplicitComponent\n\nclass SectionPropertiesTube(ExplicitComponent):\n \"\"\"\n Compute geometric properties for a tube element.\n The thicknesses are added to the interior of the element, so the\n 'radius' value is the outer radius of the tube.\n\n parameters\n ----------\n radius : numpy array\n Outer radii for each FEM element.\n thickness : numpy array\n Tube thickness for each FEM element.\n\n Returns\n -------\n A : numpy array\n Cross-sectional area for each FEM element.\n Iy : numpy array\n Area moment of inertia around the y-axis for each FEM element.\n Iz : numpy array\n Area moment of inertia around the z-axis for each FEM element.\n J : numpy array\n Polar moment of inertia for each FEM element.\n \"\"\"\n\n def initialize(self):\n self.options.declare('surface', types=dict)\n\n def setup(self):\n self.surface = surface = self.options['surface']\n\n self.ny = surface['num_y']\n\n self.add_input('radius', val=np.ones((self.ny - 1)), units='m')\n self.add_input('thickness', val=np.ones((self.ny - 1)) * .1, units='m')\n self.add_output('A', val=np.zeros((self.ny - 1)), units='m**2')\n self.add_output('Iy', val=np.zeros((self.ny - 1)), units='m**4')\n self.add_output('Iz', val=np.zeros((self.ny - 1)), units='m**4')\n self.add_output('J', val=np.zeros((self.ny - 1)), units='m**4')\n\n a = np.arange((self.ny - 1))\n self.declare_partials('*', '*', rows=a, cols=a)\n self.set_check_partial_options(wrt='*', method='cs')\n\n def compute(self, inputs, outputs):\n pi = np.pi\n\n # Add thickness to the interior of the radius.\n # The outer radius is the inputs['radius'] amount.\n r1 = inputs['radius'] - inputs['thickness']\n r2 = inputs['radius']\n\n # Compute the area, area moments of inertia, and polar moment of inertia\n outputs['A'] = pi * (r2**2 - r1**2)\n outputs['Iy'] = pi * (r2**4 - r1**4) / 4.\n outputs['Iz'] = pi * (r2**4 - r1**4) / 4.\n outputs['J'] = pi * (r2**4 - r1**4) / 2.\n\n def compute_partials(self, inputs, partials):\n pi = np.pi\n radius = inputs['radius'].real\n t = inputs['thickness'].real\n r1 = radius - t\n r2 = radius\n\n dr1_dr = 1.\n dr2_dr = 1.\n dr1_dt = -1.\n dr2_dt = 0.\n\n r1_3 = r1**3\n r2_3 = r2**3\n\n partials['A', 'radius'] = 2 * pi * (r2 * dr2_dr - r1 * dr1_dr)\n partials['A', 'thickness'] = 2 * pi * (r2 * dr2_dt - r1 * dr1_dt)\n partials['Iy', 'radius'] = pi * (r2_3 * dr2_dr - r1_3 * dr1_dr)\n partials['Iy', 'thickness'] = pi * (r2_3 * dr2_dt - r1_3 * dr1_dt)\n partials['Iz', 'radius'] = pi * (r2_3 * dr2_dr - r1_3 * dr1_dr)\n partials['Iz', 'thickness'] = pi * (r2_3 * dr2_dt - r1_3 * dr1_dt)\n partials['J', 'radius'] = 2 * pi * (r2_3 * dr2_dr - r1_3 * dr1_dr)\n partials['J', 'thickness'] = 2 * pi * (r2_3 * dr2_dt - r1_3 * dr1_dt)\n",
"from __future__ import division, print_function\nimport numpy as np\n\nfrom openmdao.api import ExplicitComponent\nfrom openaerostruct.structures.utils import norm\n\nclass WingboxGeometry(ExplicitComponent):\n \"\"\"\n OpenMDAO component that performs mesh manipulation functions. It reads in\n the initial mesh from the surface dictionary and outputs the altered\n mesh based on the geometric design variables.\n\n Depending on the design variables selected or the supplied geometry information,\n only some of the follow parameters will actually be given to this component.\n If parameters are not active (they do not deform the mesh), then\n they will not be given to this component.\n\n Parameters\n ----------\n sweep : float\n Shearing sweep angle in degrees.\n dihedral : float\n Dihedral angle in degrees.\n twist[ny] : numpy array\n 1-D array of rotation angles for each wing slice in degrees.\n chord_dist[ny] : numpy array\n Chord length for each panel edge.\n taper : float\n Taper ratio for the wing; 1 is untapered, 0 goes to a point at the tip.\n\n Returns\n -------\n mesh[nx, ny, 3] : numpy array\n Modified mesh based on the initial mesh in the surface dictionary and\n the geometric design variables.\n \"\"\"\n\n def initialize(self):\n self.options.declare('surface', types=dict)\n\n def setup(self):\n self.surface = self.options['surface']\n nx, ny = self.surface['num_x'], self.surface['num_y']\n\n self.add_input('mesh', val=np.zeros((nx, ny, 3)))\n\n self.add_output('streamwise_chords', val=np.ones((ny - 1)))\n self.add_output('fem_chords', val=np.ones((ny - 1)))\n self.add_output('fem_twists', val=np.ones((ny - 1)))\n\n self.declare_partials('*', '*', method='fd')\n\n def compute(self, inputs, outputs):\n mesh = inputs['mesh']\n vectors = mesh[-1, :, :] - mesh[0, :, :]\n streamwise_chords = np.sqrt(np.sum(vectors**2, axis=1))\n streamwise_chords = 0.5 * streamwise_chords[:-1] + 0.5 * streamwise_chords[1:]\n\n # Chord lengths for the panel strips at the panel midpoint\n outputs['streamwise_chords'] = streamwise_chords.copy()\n\n fem_twists = np.zeros(streamwise_chords.shape)\n fem_chords = streamwise_chords.copy()\n\n surface = self.surface\n\n # Gets the shear center by looking at the four corners.\n # Assumes same spar thickness for front and rear spar.\n w = (surface['data_x_upper'][0] *(surface['data_y_upper'][0]-surface['data_y_lower'][0]) + \\\n surface['data_x_upper'][-1]*(surface['data_y_upper'][-1]-surface['data_y_lower'][-1])) / \\\n ( (surface['data_y_upper'][0]-surface['data_y_lower'][0]) + (surface['data_y_upper'][-1]-surface['data_y_lower'][-1]))\n\n # TODO: perhaps replace this or link with existing nodes computation\n nodes = (1-w) * mesh[0, :, :] + w * mesh[-1, :, :]\n\n mesh_vectors = mesh[-1, :, :] - mesh[0, :, :]\n\n # Loop over spanwise elements\n for ielem in range(mesh.shape[1] - 1):\n\n # Obtain the element nodes\n P0 = nodes[ielem, :]\n P1 = nodes[ielem+1, :]\n\n elem_vec = (P1 - P0) # vector along element\n temp_vec = elem_vec.copy()\n temp_vec[0] = 0. 
# vector along element without x component\n\n # This is used to get chord length normal to FEM element.\n # To be clear, this 3D angle sweep measure.\n # This is the projection to the wing orthogonal to the FEM direction.\n cos_theta_fe_sweep = norm(temp_vec) / norm(elem_vec)\n fem_chords[ielem] = fem_chords[ielem] * cos_theta_fe_sweep\n\n outputs['fem_chords'] = fem_chords\n\n # Loop over spanwise elements\n for ielem in range(mesh.shape[1] - 1):\n\n # The following is used to approximate the twist angle for the section normal to the FEM element\n mesh_vec_0 = mesh_vectors[ielem]\n temp_mesh_vectors_0 = mesh_vec_0.copy()\n temp_mesh_vectors_0[2] = 0.\n\n cos_twist_0 = norm(temp_mesh_vectors_0) / norm(mesh_vec_0)\n\n if cos_twist_0 > 1.:\n theta_0 = 0. # to prevent nan in case value for arccos is greater than 1 due to machine precision\n else:\n theta_0 = np.arccos(cos_twist_0)\n\n mesh_vec_1 = mesh_vectors[ielem + 1]\n temp_mesh_vectors_1 = mesh_vec_1.copy()\n temp_mesh_vectors_1[2] = 0.\n\n cos_twist_1 = norm(temp_mesh_vectors_1) / norm(mesh_vec_1)\n\n if cos_twist_1 > 1.:\n theta_1 = 0. # to prevent nan in case value for arccos is greater than 1 due to machine precision\n else:\n theta_1 = np.arccos(cos_twist_1)\n\n fem_twists[ielem] = (theta_0 + theta_1) / 2 * streamwise_chords[ielem] / fem_chords[ielem]\n outputs['fem_twists'] = fem_twists\n"
] | [
[
"numpy.arange",
"numpy.zeros",
"numpy.ones"
],
[
"numpy.arccos",
"numpy.zeros",
"numpy.sum",
"numpy.ones"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
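The tube formulas in section_properties_tube.py above reduce to a few vectorized numpy expressions; this standalone sketch uses made-up radii and wall thicknesses:

import numpy as np

radius = np.ones(4) * 0.05           # outer radius per FEM element, m (example values)
thickness = np.ones(4) * 0.002       # wall thickness per FEM element, m (example values)

r2 = radius
r1 = radius - thickness              # thickness is added to the interior of the tube

A  = np.pi * (r2**2 - r1**2)         # cross-sectional area
Iy = np.pi * (r2**4 - r1**4) / 4.0   # area moment of inertia about y
Iz = np.pi * (r2**4 - r1**4) / 4.0   # area moment of inertia about z
J  = np.pi * (r2**4 - r1**4) / 2.0   # polar moment of inertia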
mo-cmyk/wbgapi | [
"a0f8658b7a74ec79256d7b66ff58cb95726e89aa"
] | [
"wbgapi/data.py"
] | [
"\n'''Access World Bank API data\n'''\n\nimport wbgapi as w\ntry:\n import numpy as np\n import pandas as pd\nexcept ImportError:\n np = None\n pd = None\n\ndef fetch(series, economy='all', time='all', mrv=None, mrnev=None, skipBlanks=False, labels=False, skipAggs=False, numericTimeKeys=False, params={}, db=None, **dimensions):\n '''Retrieve rows of data for the current database\n\n Arguments:\n series: a series identifier or list-like, e.g., SP.POP.TOTL\n\n economy: an economy identifier or list-like, e.g., 'BRA' or ['USA', 'CAN', 'MEX']\n\n time: a time identifier or list-like, e.g., 'YR2015' or range(2010,2020).\n Both element keys and values are acceptable\n\n mrv: return only the specified number of most recent values (same time period for all economies)\n\n mrnev: return only the specified number of non-empty most recent values (time period varies)\n\n skipBlanks: skip empty observations\n\n labels: include both dimension id and name (e.g., ZWE & Zimbabwe, not just ZWE)\n\n skipAggs: skip aggregates\n\n numericTimeKeys: store the time object by value (e.g., 2014) instead of key ('YR2014') if value is numeric\n\n params: extra query parameters to pass to the API\n\n dimensions: extra dimensions, database specific (e.g., version)\n\n Returns:\n A generator object\n\n Examples:\n # print name and population of all economies for all available years\n for elem in wbgapi.data.fetch('SP.POP.TOTL',labels=True):\n print(elem['economy']['value'], elem['time']['value'], elem['value'])\n\n # fetch data for Brazil for odd-numbered years\n for elem in wbgapi.data.fetch('NY.GDP.PCAP.CD', 'BRA', range(2011,2020,2)):\n print(elem['value'])\n\n # most recent poverty rates for all LAC countries\n for elem in wbgapi.data.fetch('SI.POV.NAHC', economy=wb.region.members('LAC'), mrnev=1):\n print(elem['economy'], elem['time'], elem['value'])\n\n # dict of most recent population data for economies over 100000\n popData = {i['economy']: i['value'] for i in wbgapi.data.fetch('SP.POP.TOTL', mrnev=1, skipAggs=True) if i['value'] > 100000}\n \n '''\n\n if db is None:\n db = w.db\n\n concepts = w.source.concepts(db)\n concept_keys = {v['key']: k for k,v in concepts.items()}\n params_ = {}\n params_.update(params)\n if mrv:\n params_['mrv'] = mrv\n elif mrnev:\n params_['mrnev'] = mrnev\n\n # you can thus pass series, economy, and time in the dimensions array, and those will overwrite the explicit parameters\n dimensions_ = {'series': series, 'economy': economy, 'time': time}\n dimensions_.update(dimensions)\n\n url = 'sources/{}'.format(db)\n keys = ['series', 'economy', 'time']\n values = {}\n for k,v in dimensions_.items():\n if k not in concepts:\n raise KeyError('{} is not a concept in database {}'.format(k, db))\n\n if k not in keys:\n keys.append(k)\n\n url += '/{}/{}'.format(concepts[k]['key'], '{' + k + '}')\n values[k] = w.queryParam(v, concept=k, db=db)\n\n aggs = w.economy.aggregates()\n\n for row in w.refetch(url, keys, params=params_, **values):\n if skipBlanks and row['value'] is None:\n continue\n\n skip = False\n\n x = {'value': row['value']}\n for elem in row['variable']:\n key = concept_keys[elem['concept'].lower()]\n if key == 'economy' and skipAggs and elem['id'] in aggs:\n skip = True\n break\n\n if not skip:\n if labels:\n del(elem['concept'])\n x[key] = elem\n if key == 'economy':\n x[key]['aggregate'] = elem['id'] in aggs\n elif key == 'time' and numericTimeKeys and elem['value'].isdigit():\n x[key]['id'] = int(elem['value'])\n else:\n x[key] = elem['id']\n if key == 'economy':\n 
x['aggregate'] = elem['id'] in aggs\n elif key == 'time' and numericTimeKeys and elem['value'].isdigit():\n x[key] = int(elem['value'])\n\n if not skip:\n yield x\n\ndef FlatFrame(series, economy='all', time='all', mrv=None, mrnev=None, skipBlanks=False, labels=False, skipAggs=False, params={}, db=None, **dimensions):\n '''Retrieve a flat pandas dataframe (1 row per observation)\n\n Arguments:\n series: a series identifier or list-like, e.g., SP.POP.TOTL\n\n economy: an economy identifier or list-like, e.g., 'BRA' or ['USA', 'CAN', 'MEX']\n\n time: a time identifier or list-like, e.g., 'YR2015' or range(2010,2020).\n Both element keys and values are acceptable\n\n mrv: return only the specified number of most recent values (same time period for all economies)\n\n mrnev: return only the specified number of non-empty most recent values (time period varies)\n\n skipBlanks: skip empty observations\n\n labels: return the dimension name instead of the identifier\n\n skipAggs: skip aggregates\n\n params: extra query parameters to pass to the API\n\n dimensions: extra dimensions, database specific (e.g., version)\n \n Returns:\n a pandas DataFrame\n\n Notes:\n values in the time column are numeric if possible (2015 not 'YR2015')\n '''\n\n if pd is None:\n raise ModuleNotFoundError('you must install pandas to use this feature')\n\n key = 'value' if labels else 'id'\n df = None\n\n # we set numericTimeKeys=True so that time values will always be numeric if possible\n for row in fetch(series, economy, time, mrv=mrv, mrnev=mrnev, skipBlanks=skipBlanks, labels=True, numericTimeKeys=True, skipAggs=skipAggs, params=params, db=db, **dimensions):\n if df is None:\n # this assumes that the API returns the same object structure in every row, so we can use the first as a template\n columns = row.keys()\n df = pd.DataFrame(columns=columns)\n\n df.loc[len(df)] = [row[i][key] if type(row[i]) is dict else row[i] for i in columns]\n\n return df\n\ndef DataFrame(series, economy='all', time='all', index=None, columns=None, mrv=None, mrnev=None, skipBlanks=False, labels=False, skipAggs=False, numericTimeKeys=False, timeColumns=False, params={}, db=None, **dimensions):\n '''Retrieve a 2-dimensional pandas dataframe. \n \n Arguments:\n series: a series identifier or list-like, e.g., SP.POP.TOTL\n\n economy: an economy identifier or list-like, e.g., 'BRA' or ['USA', 'CAN', 'MEX']\n\n time: a time identifier or list-like, e.g., 'YR2015' or range(2010,2020).\n Both element keys and values are acceptable\n\n index: name or list of dimensions for the DataFrame's index, e.g., 'economy'. If None then the function\n will define the index based on your request. Note: to get a dataframe with no index\n (i.e., 0-based integers) call `reset_index()` with on the return value of this function.\n\n columns: name of the dimension for the DataFrame's columns, e.g., 'series'. 
If None then the function\n will define columns based on your request.\n\n mrv: return only the specified number of most recent values (same time period for all economies)\n\n mrnev: return only the specified number of non-empty most recent values (time period varies)\n\n skipBlanks: skip empty observations\n\n labels: include the dimension name for rows\n\n skipAggs: skip aggregates\n\n numericTimeKeys: store the time object by value (e.g., 2014) instead of key ('YR2014') if value is numeric\n\n timeColumns: add extra columns to show the time dimension for each series/economy\n If 'auto' then the function will guess based on other parameters\n\n params: extra query parameters to pass to the API\n\n dimensions: extra dimensions, database specific (e.g., version)\n \n Returns:\n a pandas DataFrame\n\n Examples:\n # 5 years of population data (with economy names)\n wbgapi.data.DataFrame('SP.POP.TOTL', time=range(2010,2020),labels=True)\n\n # Most recent poverty and income data for LAC\n wbgapi.data.DataFrame(['SI.POV.NAHC', 'NY.GDP.PCAP.CD'], economy=wb.region.members('LAC'),mrnev=1,timeColumns=True)\n\n # Fetch most recent CO2 emissions for each country and merge its income group\n wbgapi.data.DataFrame('EN.ATM.CO2E.PC',mrnev=1).join(wbgapi.economy.DataFrame()['incomeLevel'])\n\n # Top 10 emitters per capita\n wbgapi.data.DataFrame('EN.ATM.CO2E.PC',mrnev=1,labels=True).sort_values('EN.ATM.CO2E.PC',ascending=False).head(10)\n\n Notes:\n timeColumns currently defaults to False so that the default column composition is consistent. This may change to 'auto'\n at some point, so that mrv behavior is more intuitive for data discovery\n '''\n\n def frame(index):\n\n if len(index) > 1:\n i = [[]] * len(index)\n return pd.DataFrame(index=pd.MultiIndex(levels=i, codes=i, names=tuple(index)))\n\n df = pd.DataFrame()\n df.index.name = index[0]\n return df\n\n def is_single(x):\n\n if type(x) is str:\n if x == 'all':\n return False\n elif x == 'mrv':\n return True\n\n # not necessary to pass db since we don't actually care about the parameters just the count of them\n return len(w.queryParam(x).split(';')) == 1\n\n if pd is None:\n raise ModuleNotFoundError('you must install pandas to use this feature')\n\n # set up the axes by looking at the index/column parameters\n concepts = ['economy','series','time']\n for k,v in w.source.concepts(db).items():\n if k not in concepts:\n concepts.insert(0, k)\n\n if type(index) is str:\n index = [index]\n\n if index is None or columns is None:\n # we need to infer at least one dimension\n\n dimensions_ = {'series': series, 'economy': economy, 'time': time}\n dimensions_.update(dimensions)\n\n axes = concepts.copy()\n\n # now we reduce axes by eliminating any dimension consisting of \n # one element not defined in the calling parameters, with a stop\n # if we reduce to 2 dimensions\n x = concepts.copy()\n x.reverse()\n for k in x:\n if len(axes) == 2:\n break\n\n if k == columns or (type(index) is list and k in index):\n continue\n\n values = dimensions_.get(k, 'all')\n if k == 'time' and (mrv == 1 or mrnev == 1 or is_single(values)):\n axes.remove(k)\n if timeColumns == 'auto' and (mrv == 1 or mrnev == 1):\n timeColumns = True\n\n elif is_single(values):\n axes.remove(k)\n\n if columns is None and index is None:\n columns = axes.pop(-1)\n index = axes\n elif columns is None:\n # try to guess a column based on what index doesn't define\n x = list(filter(lambda x: x not in index, axes))\n if len(x) > 0:\n columns = x[-1]\n elif (set(concepts) - set(list)) > 0:\n # 
index has claimed all non-singular dimensions, so set columns from the full concepts list\n x = list(filter(lambda x: x not in index, concepts))\n columns = x[-1]\n else:\n # index is the same as the concepts list. That's not allowed\n raise ValueError('one dimension must be a column')\n\n elif index is None:\n axes.remove(columns)\n index = axes\n\n # sanity checks\n if type(columns) is not str or columns not in concepts:\n raise ValueError('columns must be None or a dimension')\n\n if type(index) is not list or len(set(index) - set(concepts)) > 0:\n raise ValueError('index must be None or a dimension list')\n\n if columns in index:\n raise ValueError('columns cannot be an element in index')\n\n if columns == 'time' or 'time' in index or timeColumns == 'auto':\n timeColumns = False\n\n # for now let's see if it works to build the dataframe dynamically\n df = frame(index)\n dummy = pd.Series() # empty series - never assigned actual values\n ts_suffix = ':T'\n concepts = w.source.concepts(db)\n if labels:\n # create a separate dataframe for labels so that we can control the column position below\n df2 = frame(index)\n\n for row in fetch(series, economy, time, mrv=mrv, mrnev=mrnev, skipBlanks=skipBlanks, labels=True, skipAggs=skipAggs, numericTimeKeys=numericTimeKeys, params=params, db=db, **dimensions):\n column_key = row[columns]['id']\n if len(index) == 1:\n index_key = row[index[0]]['id']\n else:\n index_key = tuple(map(lambda x: row[x]['id'], index))\n\n # this logic only assigns values to locations that don't yet exist. First observations thus take precedent over subsequent ones\n if pd.isna(df.get(column_key, dummy).get(index_key)):\n df.loc[index_key, column_key] = np.nan if row['value'] is None else row['value']\n if timeColumns:\n df.loc[index_key, column_key + ts_suffix] = row['time']['value']\n\n if labels:\n for i in index:\n df2.loc[index_key, concepts[i]['value']] = row[i]['value']\n \n df.sort_index(axis=0,inplace=True)\n df.sort_index(axis=1,inplace=True)\n if labels:\n return df2.join(df)\n # return pd.concat([df2,df], axis=1, sort=False)\n \n return df\n \n\ndef get(series, economy, time='all', mrv=None, mrnev=None, labels=False, numericTimeKeys=False, db=None, **dimensions):\n '''Retrieve a single data point for the current database\n\n Arguments:\n series: a series identifier\n\n economy: an economy identifier\n\n time: a time identifier. Both element keys and values are acceptable\n\n mrv: return only the specified number of most recent values (same time period for all economies)\n\n mrnev: return only the specified number of non-empty most recent values (time period varies)\n\n labels: include both dimension id and name (e.g., ZWE & Zimbabwe, not just ZWE)\n\n numericTimeKeys: store the time object by value (e.g., 2014) instead of key ('YR2014') if value is numeric\n\n dimensions: extra dimensions, database specific (e.g., version)\n\n Returns:\n a data observation\n\n Notes:\n This function simply calls fetch() and returns the first result. 
Hence, you should set mrv or mrnev to 1, or set\n time to a single value to get predictable results.\n\n Example:\n # print the last population estimate for France\n print(wbgapi.data.get('SP.POP.TOTL', 'FRA', mrnev=1)['value'])\n '''\n\n for row in fetch(series, economy, time, mrv=mrv, mrnev=mrnev, labels=labels, numericTimeKeys=numericTimeKeys, params={'per_page': 1}, db=db, **dimensions):\n return row\n\ndef footnote(series, economy, time, db=None):\n '''Return the footnote for a single data point, if any\n\n Arguments:\n series: a series identifier\n\n economy: an economy identifier\n\n time: a time identifier. Both element keys and values are acceptable\n\n Returns:\n footnote text, or None\n\n Example:\n print(wbgapi.data.footnote('SP.POP.TOTL', 'FRA', 2015))\n '''\n\n if db is None:\n db = w.db\n\n # note that this only supports singular footnote references at this point, although the interface suggests otherwise\n url = 'sources/{source}/footnote/{economy}~{series}~{time}/metadata'\n try:\n for row in w.metadata(url, ['series'], source=db, series=series, economy=economy, time=w.queryParam(time, 'time', db=db)):\n return row.metadata['FootNote']\n except:\n pass # will return None then\n\n"
] | [
[
"pandas.Series",
"pandas.DataFrame"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"0.19",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
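A minimal sketch of the row-by-row DataFrame construction used by FlatFrame in wbgapi/data.py above; the hard-coded observations are placeholders for API responses, not fetched data:

import pandas as pd

columns = ['economy', 'series', 'time', 'value']
df = pd.DataFrame(columns=columns)

observations = [
    ['BRA', 'SP.POP.TOTL', 2019, 211049527],    # example values only
    ['CAN', 'SP.POP.TOTL', 2019, 37589262],
]
for row in observations:
    df.loc[len(df)] = row                       # append one observation per iteration

print(df)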
kpoeppel/pytorch_probgraph | [
"b78595ab03bbe92595ad2f6b35f5dd8bf84d6da0"
] | [
"examples/Model_HM_RWS.py"
] | [
"\nimport site\nsite.addsitedir('..')\n\nimport torch\nfrom pytorch_probgraph import BernoulliLayer\nfrom pytorch_probgraph import InteractionLinear\nfrom pytorch_probgraph import HelmholtzMachine\nfrom itertools import chain\nfrom tqdm import tqdm\n\nclass Model_HM_RWS(torch.nn.Module):\n def __init__(self):\n super().__init__()\n layer0 = BernoulliLayer(torch.nn.Parameter(torch.zeros([1, 1, 28, 28]), requires_grad=True))\n layer1 = BernoulliLayer(torch.nn.Parameter(torch.zeros([1, 200]), requires_grad=True))\n layer2 = BernoulliLayer(torch.nn.Parameter(torch.zeros([1, 200]), requires_grad=True))\n\n interactionUp1 = InteractionLinear(layer0.bias.shape[1:], layer1.bias.shape[1:])\n interactionDown1 = InteractionLinear(layer1.bias.shape[1:], layer0.bias.shape[1:])\n interactionUp2 = InteractionLinear(layer1.bias.shape[1:], layer2.bias.shape[1:])\n interactionDown2 = InteractionLinear(layer2.bias.shape[1:], layer1.bias.shape[1:])\n\n parameters = chain(*[m.parameters() for m in [layer0, layer1, layer2, interactionUp1, interactionUp2, interactionDown1, interactionDown2]])\n opt = torch.optim.Adam(parameters)\n\n self.model = HelmholtzMachine([layer0, layer1, layer2],\n [interactionUp1, interactionUp2],\n [interactionDown1, interactionDown2],\n optimizer=opt)\n #print(interaction.weight.shape)\n\n def train(self, data, epochs=1, device=None):\n for epoch in range(epochs):\n for dat in data:\n self.model.trainReweightedWS(dat.to(device), ksamples=5)\n if isinstance(data, tqdm):\n data = tqdm(data)\n #print(torch.sum(self.model.interaction.weight))\n\n def loglikelihood(self, data):\n return self.model.loglikelihood(data, ksamples=100).cpu().detach()\n\n def generate(self, N=1):\n return self.model.sampleAll(N=N)[0][0].cpu()\n"
] | [
[
"torch.optim.Adam",
"torch.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
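A sketch of the optimizer wiring in Model_HM_RWS above: bias tensors built with torch.zeros, wrapped as Parameters, and chained into one Adam optimizer. The shapes and the placeholder objective are assumptions of this example:

from itertools import chain
import torch

bias_visible = torch.nn.Parameter(torch.zeros(1, 28 * 28), requires_grad=True)
bias_hidden = torch.nn.Parameter(torch.zeros(1, 200), requires_grad=True)

# a single optimizer over the parameters of several model pieces
opt = torch.optim.Adam(chain([bias_visible], [bias_hidden]))

loss = (bias_visible.sum() + bias_hidden.sum()) ** 2    # placeholder objective
loss.backward()
opt.step()
opt.zero_grad()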
acmore/ray | [
"9f0f54266064e203b0bdcc9d3fa947cb4518ebc0",
"9f0f54266064e203b0bdcc9d3fa947cb4518ebc0",
"9f0f54266064e203b0bdcc9d3fa947cb4518ebc0"
] | [
"rllib/utils/torch_ops.py",
"rllib/utils/memory.py",
"rllib/agents/es/es_torch_policy.py"
] | [
"import numpy as np\n\nfrom ray.rllib.utils import try_import_tree\nfrom ray.rllib.utils.framework import try_import_torch\n\ntorch, _ = try_import_torch()\ntree = try_import_tree()\n\n\ndef explained_variance(y, pred):\n y_var = torch.var(y, dim=[0])\n diff_var = torch.var(y - pred, dim=[0])\n min_ = torch.Tensor([-1.0])\n return torch.max(\n min_.to(device=torch.device(\"cuda\"))\n if torch.cuda.is_available() else min_,\n 1 - (diff_var / y_var))\n\n\ndef global_norm(tensors):\n \"\"\"Returns the global L2 norm over a list of tensors.\n\n output = sqrt(SUM(t ** 2 for t in tensors)),\n where SUM reduces over all tensors and over all elements in tensors.\n\n Args:\n tensors (List[torch.Tensor]): The list of tensors to calculate the\n global norm over.\n \"\"\"\n # List of single tensors' L2 norms: SQRT(SUM(xi^2)) over all xi in tensor.\n single_l2s = [\n torch.pow(torch.sum(torch.pow(t, 2.0)), 0.5) for t in tensors\n ]\n # Compute global norm from all single tensors' L2 norms.\n return torch.pow(sum(torch.pow(l2, 2.0) for l2 in single_l2s), 0.5)\n\n\ndef huber_loss(x, delta=1.0):\n \"\"\"Reference: https://en.wikipedia.org/wiki/Huber_loss\"\"\"\n return torch.where(\n torch.abs(x) < delta,\n torch.pow(x, 2.0) * 0.5, delta * (torch.abs(x) - 0.5 * delta))\n\n\ndef l2_loss(x):\n \"\"\"Computes half the L2 norm of a tensor without the sqrt.\n\n output = sum(x ** 2) / 2\n \"\"\"\n return torch.sum(torch.pow(x, 2.0)) / 2.0\n\n\ndef reduce_mean_ignore_inf(x, axis):\n \"\"\"Same as torch.mean() but ignores -inf values.\"\"\"\n mask = torch.ne(x, float(\"-inf\"))\n x_zeroed = torch.where(mask, x, torch.zeros_like(x))\n return torch.sum(x_zeroed, axis) / torch.sum(mask.float(), axis)\n\n\ndef minimize_and_clip(optimizer, clip_val=10):\n \"\"\"Clips gradients found in `optimizer.param_groups` to given value.\n\n Ensures the norm of the gradients for each variable is clipped to\n `clip_val`\n \"\"\"\n for param_group in optimizer.param_groups:\n for p in param_group[\"params\"]:\n if p.grad is not None:\n torch.nn.utils.clip_grad_norm_(p.grad, clip_val)\n\n\ndef sequence_mask(lengths, maxlen=None, dtype=None):\n \"\"\"Offers same behavior as tf.sequence_mask for torch.\n\n Thanks to Dimitris Papatheodorou\n (https://discuss.pytorch.org/t/pytorch-equivalent-for-tf-sequence-mask/\n 39036).\n \"\"\"\n if maxlen is None:\n maxlen = lengths.max()\n\n mask = ~(torch.ones((len(lengths), maxlen)).to(\n lengths.device).cumsum(dim=1).t() > lengths).t()\n mask.type(dtype or torch.bool)\n\n return mask\n\n\ndef convert_to_non_torch_type(stats):\n \"\"\"Converts values in `stats` to non-Tensor numpy or python types.\n\n Args:\n stats (any): Any (possibly nested) struct, the values in which will be\n converted and returned as a new struct with all torch tensors\n being converted to numpy types.\n\n Returns:\n Any: A new struct with the same structure as `stats`, but with all\n values converted to non-torch Tensor types.\n \"\"\"\n\n # The mapping function used to numpyize torch Tensors.\n def mapping(item):\n if isinstance(item, torch.Tensor):\n return item.cpu().item() if len(item.size()) == 0 else \\\n item.cpu().detach().numpy()\n else:\n return item\n\n return tree.map_structure(mapping, stats)\n\n\ndef convert_to_torch_tensor(stats, device=None):\n \"\"\"Converts any struct to torch.Tensors.\n\n stats (any): Any (possibly nested) struct, the values in which will be\n converted and returned as a new struct with all leaves converted\n to torch tensors.\n\n Returns:\n Any: A new struct with the same structure as 
`stats`, but with all\n values converted to torch Tensor types.\n \"\"\"\n\n def mapping(item):\n if torch.is_tensor(item):\n return item if device is None else item.to(device)\n tensor = torch.from_numpy(np.asarray(item))\n # Floatify all float64 tensors.\n if tensor.dtype == torch.double:\n tensor = tensor.float()\n return tensor if device is None else tensor.to(device)\n\n return tree.map_structure(mapping, stats)\n\n\ndef atanh(x):\n return 0.5 * torch.log((1 + x) / (1 - x))\n",
"import numpy as np\n\n\ndef aligned_array(size, dtype, align=64):\n \"\"\"Returns an array of a given size that is 64-byte aligned.\n\n The returned array can be efficiently copied into GPU memory by TensorFlow.\n \"\"\"\n\n n = size * dtype.itemsize\n empty = np.empty(n + (align - 1), dtype=np.uint8)\n data_align = empty.ctypes.data % align\n offset = 0 if data_align == 0 else (align - data_align)\n if n == 0:\n # stop np from optimising out empty slice reference\n output = empty[offset:offset + 1][0:0].view(dtype)\n else:\n output = empty[offset:offset + n].view(dtype)\n\n assert len(output) == size, len(output)\n assert output.ctypes.data % align == 0, output.ctypes.data\n return output\n\n\ndef concat_aligned(items):\n \"\"\"Concatenate arrays, ensuring the output is 64-byte aligned.\n\n We only align float arrays; other arrays are concatenated as normal.\n\n This should be used instead of np.concatenate() to improve performance\n when the output array is likely to be fed into TensorFlow.\n \"\"\"\n\n if len(items) == 0:\n return []\n elif len(items) == 1:\n # we assume the input is aligned. In any case, it doesn't help\n # performance to force align it since that incurs a needless copy.\n return items[0]\n elif (isinstance(items[0], np.ndarray)\n and items[0].dtype in [np.float32, np.float64, np.uint8]):\n dtype = items[0].dtype\n flat = aligned_array(sum(s.size for s in items), dtype)\n batch_dim = sum(s.shape[0] for s in items)\n new_shape = (batch_dim, ) + items[0].shape[1:]\n output = flat.reshape(new_shape)\n assert output.ctypes.data % 64 == 0, output.ctypes.data\n np.concatenate(items, out=output)\n return output\n else:\n return np.concatenate(items)\n",
"# Code in this file is adapted from:\n# https://github.com/openai/evolution-strategies-starter.\n\nimport gym\nimport numpy as np\n\nimport ray\nfrom ray.rllib.models import ModelCatalog\nfrom ray.rllib.policy.sample_batch import SampleBatch\nfrom ray.rllib.policy.torch_policy_template import build_torch_policy\nfrom ray.rllib.utils import try_import_tree\nfrom ray.rllib.utils.filter import get_filter\nfrom ray.rllib.utils.framework import try_import_torch\nfrom ray.rllib.utils.spaces.space_utils import get_base_struct_from_space, \\\n unbatch\nfrom ray.rllib.utils.torch_ops import convert_to_torch_tensor\n\ntorch, _ = try_import_torch()\ntree = try_import_tree()\n\n\ndef before_init(policy, observation_space, action_space, config):\n policy.action_noise_std = config[\"action_noise_std\"]\n policy.action_space_struct = get_base_struct_from_space(action_space)\n policy.preprocessor = ModelCatalog.get_preprocessor_for_space(\n observation_space)\n policy.observation_filter = get_filter(config[\"observation_filter\"],\n policy.preprocessor.shape)\n policy.single_threaded = config.get(\"single_threaded\", False)\n\n def _set_flat_weights(policy, theta):\n pos = 0\n theta_dict = policy.model.state_dict()\n new_theta_dict = {}\n\n for k in sorted(theta_dict.keys()):\n shape = policy.param_shapes[k]\n num_params = int(np.prod(shape))\n new_theta_dict[k] = torch.from_numpy(\n np.reshape(theta[pos:pos + num_params], shape))\n pos += num_params\n policy.model.load_state_dict(new_theta_dict)\n\n def _get_flat_weights(policy):\n # Get the parameter tensors.\n theta_dict = policy.model.state_dict()\n # Flatten it into a single np.ndarray.\n theta_list = []\n for k in sorted(theta_dict.keys()):\n theta_list.append(torch.reshape(theta_dict[k], (-1, )))\n cat = torch.cat(theta_list, dim=0)\n return cat.numpy()\n\n type(policy).set_flat_weights = _set_flat_weights\n type(policy).get_flat_weights = _get_flat_weights\n\n def _compute_actions(policy,\n obs_batch,\n add_noise=False,\n update=True,\n **kwargs):\n # Batch is given as list -> Try converting to numpy first.\n if isinstance(obs_batch, list) and len(obs_batch) == 1:\n obs_batch = obs_batch[0]\n observation = policy.preprocessor.transform(obs_batch)\n observation = policy.observation_filter(\n observation[None], update=update)\n\n observation = convert_to_torch_tensor(observation)\n dist_inputs, _ = policy.model({\n SampleBatch.CUR_OBS: observation\n }, [], None)\n dist = policy.dist_class(dist_inputs, policy.model)\n action = dist.sample()\n\n def _add_noise(single_action, single_action_space):\n single_action = single_action.detach().numpy()\n if add_noise and isinstance(single_action_space, gym.spaces.Box):\n single_action += np.random.randn(*single_action.shape) * \\\n policy.action_noise_std\n return single_action\n\n action = tree.map_structure(_add_noise, action,\n policy.action_space_struct)\n action = unbatch(action)\n return action\n\n type(policy).compute_actions = _compute_actions\n\n\ndef after_init(policy, observation_space, action_space, config):\n state_dict = policy.model.state_dict()\n policy.param_shapes = {\n k: tuple(state_dict[k].size())\n for k in sorted(state_dict.keys())\n }\n policy.num_params = sum(np.prod(s) for s in policy.param_shapes.values())\n\n\ndef make_model_and_action_dist(policy, observation_space, action_space,\n config):\n # Policy network.\n dist_class, dist_dim = ModelCatalog.get_action_dist(\n action_space,\n config[\"model\"], # model_options\n dist_type=\"deterministic\",\n framework=\"torch\")\n model = 
ModelCatalog.get_model_v2(\n policy.preprocessor.observation_space,\n action_space,\n num_outputs=dist_dim,\n model_config=config[\"model\"],\n framework=\"torch\")\n # Make all model params not require any gradients.\n for p in model.parameters():\n p.requires_grad = False\n return model, dist_class\n\n\nESTorchPolicy = build_torch_policy(\n name=\"ESTorchPolicy\",\n loss_fn=None,\n get_default_config=lambda: ray.rllib.agents.es.es.DEFAULT_CONFIG,\n before_init=before_init,\n after_init=after_init,\n make_model_and_action_dist=make_model_and_action_dist)\n"
] | [
[
"numpy.asarray"
],
[
"numpy.concatenate",
"numpy.empty"
],
[
"numpy.reshape",
"numpy.random.randn",
"numpy.prod"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
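A standalone sketch of the 64-byte alignment trick in rllib/utils/memory.py above: over-allocate a uint8 buffer, step to the next aligned address, and view that slice with the target dtype. The helper name aligned_empty is introduced here for illustration and is not from the repository:

import numpy as np

def aligned_empty(size, dtype, align=64):
    dtype = np.dtype(dtype)
    n = size * dtype.itemsize
    buf = np.empty(n + align - 1, dtype=np.uint8)     # over-allocate raw bytes
    offset = (-buf.ctypes.data) % align               # bytes to the next aligned address
    out = buf[offset:offset + n].view(dtype)
    assert out.ctypes.data % align == 0
    return out

arr = aligned_empty(1024, np.float32)
print(arr.ctypes.data % 64)    # 0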
kanekosh/openconcept | [
"7878e5725eed78a023136b58250361531c7c7654"
] | [
"openconcept/analysis/performance/solver_phases.py"
] | [
"from __future__ import division\nfrom openmdao.api import Group, ExplicitComponent, IndepVarComp, BalanceComp, ImplicitComponent\nimport openconcept.api as oc\nfrom openconcept.analysis.atmospherics.compute_atmos_props import ComputeAtmosphericProperties\nfrom openconcept.analysis.aerodynamics import Lift, StallSpeed\nfrom openconcept.utilities.math import ElementMultiplyDivideComp, AddSubtractComp\nfrom openconcept.utilities.math.integrals import Integrator\nfrom openconcept.utilities.linearinterp import LinearInterpolator\nfrom openconcept.utilities.math.integrals import Integrator\nimport numpy as np\nimport copy\n\nclass ClimbAngleComp(ExplicitComponent):\n \"\"\"\n Computes steady climb angle based on excess thrust.\n\n This is a helper function\n and shouldn't be instantiated in the top-level model directly.\n\n Inputs\n ------\n drag : float\n Aircraft drag at v2 (climb out) flight condition (scalar, N)\n weight : float\n Takeoff weight (scalar, kg)\n thrust : float\n Thrust at the v2 (climb out) flight condition (scalar, N)\n\n Outputs\n -------\n gamma : float\n Climb out flight path angle (scalar, rad)\n\n Options\n -------\n num_nodes : int\n Number of points to run\n \"\"\"\n def initialize(self):\n self.options.declare('num_nodes', default=1)\n\n def setup(self):\n nn = self.options['num_nodes']\n self.add_input('drag', units='N',shape=(nn,))\n self.add_input('weight', units='kg', shape=(nn,))\n self.add_input('thrust', units='N',shape=(nn,))\n self.add_output('gamma', units='rad',shape=(nn,))\n\n self.declare_partials(['gamma'], ['weight','thrust','drag'], cols=np.arange(0,nn), rows=np.arange(0,nn))\n\n def compute(self, inputs, outputs):\n g = 9.80665 #m/s^2\n outputs['gamma'] = np.arcsin((inputs['thrust']-inputs['drag'])/inputs['weight']/g)\n\n def compute_partials(self, inputs, J):\n g = 9.80665 #m/s^2\n interior_qty = (inputs['thrust']-inputs['drag'])/inputs['weight']/g\n d_arcsin = 1/np.sqrt(1-interior_qty**2)\n J['gamma','thrust'] = d_arcsin/inputs['weight']/g\n J['gamma','drag'] = -d_arcsin/inputs['weight']/g\n J['gamma','weight'] = -d_arcsin*(inputs['thrust']-inputs['drag'])/inputs['weight']**2/g\n\n\nclass FlipVectorComp(ExplicitComponent):\n \"\"\"\n Reverses the order of an OpenMDAO vector\n\n This is a helper function\n and shouldn't be instantiated in the top-level model directly.\n\n Inputs\n ------\n vec_in : float\n Incoming vector in forward order\n\n Outputs\n -------\n vec_out : float\n Reversed order version of vec_in\n\n Options\n -------\n num_nodes : int\n Number of points to run\n negative : boolean\n Whether to apply a negative scaler. Default False preserves vector values.\n True returns all values with negative sign.\n units : string or None\n Units for vec_in and vec_out (Default None)\n Specify as an OpenMDAO unit string (e.g. 
'kg')\n \"\"\"\n def initialize(self):\n self.options.declare('num_nodes',default=1)\n self.options.declare('negative',default=False)\n self.options.declare('units',default=None)\n\n def setup(self):\n nn = self.options['num_nodes']\n units = self.options['units']\n self.add_input('vec_in', units=units, shape=(nn,))\n self.add_output('vec_out', units=units, shape=(nn,))\n negative = self.options['negative']\n if negative:\n scaler = -1\n else:\n scaler = 1\n self.declare_partials(['vec_out'],['vec_in'],rows=np.arange(nn-1,-1,-1),cols=np.arange(0,nn,1),val=scaler*np.ones((nn,)))\n\n def compute(self, inputs, outputs):\n negative = self.options['negative']\n if negative:\n scaler = -1\n else:\n scaler = 1\n outputs['vec_out'] = scaler * np.flip(inputs['vec_in'], 0)\n\n\nclass BFLImplicitSolve(ImplicitComponent):\n \"\"\"\n Computes a residual equation so Newton solver can set v1 to analyze balanced field length\n\n This residual is equal to zero if:\n - The rejected takeoff and engine-out takeoff distances are equal, or:\n - V1 is equal to VR and the engine out takeoff distance is longer than the RTO distance\n\n Since this is a discontinous function, the partial derivatives are written in a special way\n to 'coax' the V1 value into the right setting with a Newton step. It's kind of a hack.\n\n Inputs\n ------\n distance_continue : float\n Engine-out takeoff distance (scalar, m)\n distance_abort : float\n Distance to full-stop when takeoff is rejected at V1 (scalar, m)\n takeoff|vr : float\n Rotation speed (scalar, m/s)\n\n Outputs\n -------\n takeoff|v1 : float\n Decision speed (scalar, m/s)\n\n \"\"\"\n def setup(self):\n self.add_input('distance_continue', units='m')\n self.add_input('distance_abort', units='m')\n self.add_input('takeoff|vr', units='m/s')\n self.add_output('takeoff|v1', units='m/s',val=20,lower=10,upper=150)\n self.declare_partials('takeoff|v1',['distance_continue','distance_abort','takeoff|v1','takeoff|vr'])\n\n def apply_nonlinear(self, inputs, outputs, residuals):\n speedtol = 1e-1\n disttol = 0\n #force the decision speed to zero\n if inputs['takeoff|vr'] < outputs['takeoff|v1'] + speedtol:\n residuals['takeoff|v1'] = inputs['takeoff|vr'] - outputs['takeoff|v1']\n else:\n residuals['takeoff|v1'] = inputs['distance_continue'] - inputs['distance_abort']\n\n #if you are within vtol on the correct side but the stopping distance bigger, use the regular mode\n if inputs['takeoff|vr'] >= outputs['takeoff|v1'] and inputs['takeoff|vr'] - outputs['takeoff|v1'] < speedtol and (inputs['distance_abort'] - inputs['distance_continue']) > disttol:\n residuals['takeoff|v1'] = inputs['distance_continue'] - inputs['distance_abort']\n\n\n def linearize(self, inputs, outputs, partials):\n speedtol = 1e-1\n disttol = 0\n\n if inputs['takeoff|vr'] < outputs['takeoff|v1'] + speedtol:\n partials['takeoff|v1','distance_continue'] = 0\n partials['takeoff|v1','distance_abort'] = 0\n partials['takeoff|v1','takeoff|vr'] = 1\n partials['takeoff|v1','takeoff|v1'] = -1\n else:\n partials['takeoff|v1','distance_continue'] = 1\n partials['takeoff|v1','distance_abort'] = -1\n partials['takeoff|v1','takeoff|vr'] = 0\n partials['takeoff|v1','takeoff|v1'] = 0\n\n if inputs['takeoff|vr'] >= outputs['takeoff|v1'] and inputs['takeoff|vr'] - outputs['takeoff|v1'] < speedtol and (inputs['distance_abort'] - inputs['distance_continue']) > disttol:\n partials['takeoff|v1','distance_continue'] = 1\n partials['takeoff|v1','distance_abort'] = -1\n partials['takeoff|v1','takeoff|vr'] = 0\n 
partials['takeoff|v1','takeoff|v1'] = 0\n\nclass Groundspeeds(ExplicitComponent):\n \"\"\"\n Computes groundspeed for vectorial true airspeed and true vertical speed.\n\n This is a helper function for the main mission analysis routines\n and shouldn't be instantiated directly.\n\n Inputs\n ------\n fltcond|vs : float\n Vertical speed for all mission phases (vector, m/s)\n fltcond|Utrue : float\n True airspeed for all mission phases (vector, m/s)\n\n Outputs\n -------\n fltcond|groundspeed : float\n True groundspeed for all mission phases (vector, m/s)\n fltcond|cosgamma : float\n Cosine of the flght path angle for all mission phases (vector, dimensionless)\n fltcond|singamma : float\n Sine of the flight path angle for all mission phases (vector, dimensionless)\n\n Options\n -------\n num_nodes : int\n Number of points to run\n \"\"\"\n def initialize(self):\n\n self.options.declare('num_nodes',default=1,desc=\"Number of Simpson intervals to use per seg (eg. climb, cruise, descend). Number of analysis points is 2N+1\")\n\n def setup(self):\n nn = self.options['num_nodes']\n self.add_input('fltcond|vs', units='m/s',shape=(nn,))\n self.add_input('fltcond|Utrue', units='m/s',shape=(nn,))\n self.add_output('fltcond|groundspeed', units='m/s',shape=(nn,))\n self.add_output('fltcond|cosgamma', shape=(nn,), desc='Cosine of the flight path angle')\n self.add_output('fltcond|singamma', shape=(nn,), desc='sin of the flight path angle' )\n self.declare_partials(['fltcond|groundspeed','fltcond|cosgamma','fltcond|singamma'], ['fltcond|vs','fltcond|Utrue'], rows=range(nn), cols=range(nn))\n\n def compute(self, inputs, outputs):\n\n nn = self.options['num_nodes']\n #compute the groundspeed on climb and desc\n inside = inputs['fltcond|Utrue']**2-inputs['fltcond|vs']**2\n groundspeed = np.sqrt(inside)\n groundspeed_fixed = np.sqrt(np.where(np.less(inside, 0.0), 0.01, inside))\n #groundspeed = np.sqrt(inputs['fltcond|Utrue']**2-inputs['fltcond|vs']**2)\n #groundspeed_fixed= np.where(np.isnan(groundspeed),0,groundspeed)\n outputs['fltcond|groundspeed'] = groundspeed_fixed\n outputs['fltcond|singamma'] = np.where(np.isnan(groundspeed),1,inputs['fltcond|vs'] / inputs['fltcond|Utrue'])\n outputs['fltcond|cosgamma'] = groundspeed_fixed / inputs['fltcond|Utrue']\n\n def compute_partials(self, inputs, J):\n inside = inputs['fltcond|Utrue']**2-inputs['fltcond|vs']**2\n groundspeed = np.sqrt(inside)\n groundspeed_fixed = np.sqrt(np.where(np.less(inside, 0.0), 0.01, inside))\n J['fltcond|groundspeed','fltcond|vs'] = np.where(np.isnan(groundspeed),0,(1/2) / groundspeed_fixed * (-2) * inputs['fltcond|vs'])\n J['fltcond|groundspeed','fltcond|Utrue'] = np.where(np.isnan(groundspeed),0, (1/2) / groundspeed_fixed * 2 * inputs['fltcond|Utrue'])\n J['fltcond|singamma','fltcond|vs'] = np.where(np.isnan(groundspeed), 0, 1 / inputs['fltcond|Utrue'])\n J['fltcond|singamma','fltcond|Utrue'] = np.where(np.isnan(groundspeed), 0, - inputs['fltcond|vs'] / inputs['fltcond|Utrue'] ** 2)\n J['fltcond|cosgamma','fltcond|vs'] = J['fltcond|groundspeed','fltcond|vs'] / inputs['fltcond|Utrue']\n J['fltcond|cosgamma','fltcond|Utrue'] = (J['fltcond|groundspeed','fltcond|Utrue'] * inputs['fltcond|Utrue'] - groundspeed_fixed) / inputs['fltcond|Utrue']**2\n\nclass HorizontalAcceleration(ExplicitComponent):\n \"\"\"\n Computes acceleration during takeoff run and effectively forms the T-D residual.\n\n Inputs\n ------\n weight : float\n Aircraft weight (scalar, kg)\n drag : float\n Aircraft drag at each analysis point (vector, N)\n lift : float\n 
Aircraft lift at each analysis point (vector, N)\n thrust : float\n Thrust at each TO analysis point (vector, N)\n fltcond|singamma : float\n The sine of the flight path angle gamma (vector, dimensionless)\n braking : float\n Effective rolling friction multiplier at each point (vector, dimensionless)\n\n Outputs\n -------\n accel_horiz : float\n Aircraft horizontal acceleration (vector, m/s**2)\n\n Options\n -------\n num_nodes : int\n Number of analysis points to run\n \"\"\"\n def initialize(self):\n self.options.declare('num_nodes',default=1)\n\n def setup(self):\n nn = self.options['num_nodes']\n g = 9.80665 #m/s^2\n self.add_input('weight', units='kg', shape=(nn,))\n self.add_input('drag', units='N',shape=(nn,))\n self.add_input('lift', units='N',shape=(nn,))\n self.add_input('thrust', units='N',shape=(nn,))\n self.add_input('fltcond|singamma',shape=(nn,))\n self.add_input('braking',shape=(nn,))\n\n self.add_output('accel_horiz', units='m/s**2', shape=(nn,))\n arange=np.arange(nn)\n self.declare_partials(['accel_horiz'], ['weight','drag','lift','thrust','braking'], rows=arange, cols=arange)\n self.declare_partials(['accel_horiz'], ['fltcond|singamma'], rows=arange, cols=arange, val=-g*np.ones((nn,)))\n\n\n def compute(self, inputs, outputs):\n nn = self.options['num_nodes']\n g = 9.80665 #m/s^2\n m = inputs['weight']\n floor_vec = np.where(np.less((g-inputs['lift']/m),0.0),0.0,1.0)\n accel = inputs['thrust']/m - inputs['drag']/m - floor_vec*inputs['braking']*(g-inputs['lift']/m) - g*inputs['fltcond|singamma']\n outputs['accel_horiz'] = accel\n\n def compute_partials(self, inputs, J):\n g = 9.80665 #m/s^2\n m = inputs['weight']\n floor_vec = np.where(np.less((g-inputs['lift']/m),0.0),0.0,1.0)\n J['accel_horiz','thrust'] = 1/m\n J['accel_horiz','drag'] = -1/m\n J['accel_horiz','braking'] = -floor_vec*(g-inputs['lift']/m)\n J['accel_horiz','lift'] = floor_vec*inputs['braking']/m\n J['accel_horiz','weight'] = (inputs['drag']-inputs['thrust']-floor_vec*inputs['braking']*inputs['lift'])/m**2\n\nclass VerticalAcceleration(ExplicitComponent):\n \"\"\"\n Computes acceleration during takeoff run in the vertical plane.\n Only used during full unsteady takeoff performance analysis due to stability issues\n\n Inputs\n ------\n weight : float\n Aircraft weight (scalar, kg)\n drag : float\n Aircraft drag at each analysis point (vector, N)\n lift : float\n Aircraft lift at each analysis point (vector, N)\n thrust : float\n Thrust at each TO analysis point (vector, N)\n fltcond|singamma : float\n The sine of the flight path angle gamma (vector, dimensionless)\n fltcond|cosgamma : float\n The sine of the flight path angle gamma (vector, dimensionless)\n\n Outputs\n -------\n accel_vert : float\n Aircraft horizontal acceleration (vector, m/s**2)\n\n Options\n -------\n num_nodes : int\n Number of analysis points to run\n \"\"\"\n def initialize(self):\n self.options.declare('num_nodes',default=1)\n\n def setup(self):\n nn = self.options['num_nodes']\n g = 9.80665 #m/s^2\n self.add_input('weight', units='kg', shape=(nn,))\n self.add_input('drag', units='N',shape=(nn,))\n self.add_input('lift', units='N',shape=(nn,))\n self.add_input('thrust', units='N',shape=(nn,))\n self.add_input('fltcond|singamma',shape=(nn,))\n self.add_input('fltcond|cosgamma',shape=(nn,))\n\n self.add_output('accel_vert', units='m/s**2', shape=(nn,),upper=2.5*g,lower=-1*g)\n arange=np.arange(nn)\n self.declare_partials(['accel_vert'], ['weight','drag','lift','thrust','fltcond|singamma','fltcond|cosgamma'], rows=arange, 
cols=arange)\n\n\n def compute(self, inputs, outputs):\n nn = self.options['num_nodes']\n g = 9.80665 #m/s^2\n cosg = inputs['fltcond|cosgamma']\n sing = inputs['fltcond|singamma']\n accel = (inputs['lift']*cosg + (inputs['thrust']-inputs['drag'])*sing - g*inputs['weight'])/inputs['weight']\n accel = np.clip(accel, -g, 2.5*g)\n outputs['accel_vert'] = accel\n\n def compute_partials(self, inputs, J):\n g = 9.80665 #m/s^2\n m = inputs['weight']\n cosg = inputs['fltcond|cosgamma']\n sing = inputs['fltcond|singamma']\n\n J['accel_vert','thrust'] = sing / m\n J['accel_vert','drag'] = -sing / m\n J['accel_vert','lift'] = cosg / m\n J['accel_vert','fltcond|singamma'] = (inputs['thrust']-inputs['drag']) / m\n J['accel_vert','fltcond|cosgamma'] = inputs['lift'] / m\n J['accel_vert','weight'] = -(inputs['lift']*cosg + (inputs['thrust']-inputs['drag'])*sing)/m**2\n\nclass SteadyFlightCL(ExplicitComponent):\n \"\"\"\n Computes lift coefficient at each analysis point\n\n This is a helper function for the main mission analysis routine\n and shouldn't be instantiated directly.\n\n Inputs\n ------\n weight : float\n Aircraft weight at each analysis point (vector, kg)\n fltcond|q : float\n Dynamic pressure at each analysis point (vector, Pascal)\n ac|geom|wing|S_ref : float\n Reference wing area (scalar, m**2)\n fltcond|cosgamma : float\n Cosine of the flght path angle for all mission phases (vector, dimensionless)\n\n Outputs\n -------\n fltcond|CL : float\n Lift coefficient (vector, dimensionless)\n\n Options\n -------\n num_nodes : int\n Number of analysis nodes to run\n mission_segments : list\n The list of mission segments to track\n \"\"\"\n def initialize(self):\n\n self.options.declare('num_nodes',default=5,desc=\"Number of Simpson intervals to use per seg (eg. climb, cruise, descend). 
Number of analysis points is 2N+1\")\n self.options.declare('mission_segments',default=['climb','cruise','descent'])\n def setup(self):\n nn = self.options['num_nodes']\n arange = np.arange(nn)\n self.add_input('weight', units='kg', shape=(nn,))\n self.add_input('fltcond|q', units='N * m**-2', shape=(nn,))\n self.add_input('ac|geom|wing|S_ref', units='m **2')\n self.add_input('fltcond|cosgamma', val=1.0, shape=(nn,))\n self.add_output('fltcond|CL',shape=(nn,))\n self.declare_partials(['fltcond|CL'], ['weight','fltcond|q',\"fltcond|cosgamma\"], rows=arange, cols=arange)\n self.declare_partials(['fltcond|CL'], ['ac|geom|wing|S_ref'], rows=arange, cols=np.zeros(nn))\n\n def compute(self, inputs, outputs):\n g = 9.80665 #m/s^2\n outputs['fltcond|CL'] = inputs['fltcond|cosgamma']*g*inputs['weight']/inputs['fltcond|q']/inputs['ac|geom|wing|S_ref']\n\n def compute_partials(self, inputs, J):\n g = 9.80665 #m/s^2\n J['fltcond|CL','weight'] = inputs['fltcond|cosgamma']*g/inputs['fltcond|q']/inputs['ac|geom|wing|S_ref']\n J['fltcond|CL','fltcond|q'] = - inputs['fltcond|cosgamma']*g*inputs['weight'] / inputs['fltcond|q']**2 / inputs['ac|geom|wing|S_ref']\n J['fltcond|CL','ac|geom|wing|S_ref'] = - inputs['fltcond|cosgamma']*g*inputs['weight'] / inputs['fltcond|q'] / inputs['ac|geom|wing|S_ref']**2\n J['fltcond|CL','fltcond|cosgamma'] = g*inputs['weight']/inputs['fltcond|q']/inputs['ac|geom|wing|S_ref']\n\nclass GroundRollPhase(oc.PhaseGroup):\n \"\"\"\n This component group models the ground roll phase of a takeoff (acceleration before flight)\n User-settable parameters include:\n throttle (default 100 percent)\n rolling friction coeff (default 0.03 for accelerating segments and 0.4 for braking)\n propulsor_active (default 1 for v0 to v1, 0 for v1 to vr and braking) to model engine failure\n altitude (fltcond|h)\n\n The BaseAircraftGroup object is passed in.\n The BaseAircraftGroup should be built to accept the following inputs\n and return the following outputs.\n The outputs should be promoted to the top level in the component.\n\n Inputs\n ------\n range : float\n Total distance travelled (vector, m)\n fltcond|h : float\n Altitude (vector, m)\n fltcond|vs : float\n Vertical speed (vector, m/s)\n fltcond|Ueas : float\n Equivalent airspeed (vector, m/s)\n fltcond|Utrue : float\n True airspeed (vector, m/s)\n fltcond|p : float\n Pressure (vector, Pa)\n fltcond|rho : float\n Density (vector, kg/m3)\n fltcond|T : float\n Temperature (vector, K)\n fltcond|q : float\n Dynamic pressure (vector, Pa)\n fltcond|CL : float\n Lift coefficient (vector, dimensionless)\n throttle : float\n Motor / propeller throttle setting scaled from 0 to 1 or slightly more (vector, dimensionless)\n propulsor_active : float\n If a multi-propulsor airplane, a failure condition should be modeled in the propulsion model by multiplying throttle by propulsor_active.\n It will generally be 1.0 unless a failure condition is being modeled, in which case it will be 0 (vector, dimensionless)\n braking : float\n Brake friction coefficient (default 0.4 for dry runway braking, 0.03 for resistance unbraked)\n Should not be applied in the air or nonphysical effects will result (vector, dimensionless)\n lift : float\n Lift force (vector, N)\n\n Outputs\n -------\n thrust : float\n Total thrust force produced by all propulsors (vector, N)\n drag : float\n Total drag force in the airplane axis produced by all sources of drag (vector, N)\n weight : float\n Weight (mass, really) of the airplane at each point in time. 
(vector, kg)\n ac|geom|wing|S_ref\n Wing reference area (scalar, m**2)\n ac|aero|CLmax_TO\n CLmax with flaps in max takeoff position (scalar, dimensionless)\n ac|weights|MTOW\n Maximum takeoff weight (scalar, kg)\n \"\"\"\n\n def initialize(self):\n self.options.declare('num_nodes',default=1)\n self.options.declare('flight_phase',default=None,desc='Phase of flight e.g. v0v1, cruise')\n self.options.declare('aircraft_model',default=None)\n\n def setup(self):\n nn = self.options['num_nodes']\n ivcomp = self.add_subsystem('const_settings', IndepVarComp(), promotes_outputs=[\"*\"])\n # set CL = 0.1 for the ground roll per Raymer's book\n ivcomp.add_output('fltcond|CL', val=np.ones((nn,))*0.1)\n ivcomp.add_output('vr_vstall_mult',val=1.1)\n ivcomp.add_output('fltcond|h',val=np.zeros((nn,)),units='m')\n ivcomp.add_output('fltcond|vs',val=np.zeros((nn,)),units='m/s')\n ivcomp.add_output('zero_speed',val=2,units='m/s')\n\n\n flight_phase = self.options['flight_phase']\n if flight_phase == 'v0v1':\n ivcomp.add_output('braking',val=np.ones((nn,))*0.03)\n ivcomp.add_output('propulsor_active',val=np.ones((nn,)))\n ivcomp.add_output('throttle',val=np.ones((nn,)))\n zero_start = True\n elif flight_phase == 'v1vr':\n ivcomp.add_output('braking',val=np.ones((nn,))*0.03)\n ivcomp.add_output('propulsor_active',val=np.zeros((nn,)))\n ivcomp.add_output('throttle',val=np.ones((nn,)))\n zero_start = False\n\n elif flight_phase == 'v1v0':\n ivcomp.add_output('braking',val=0.4*np.ones((nn,)))\n ivcomp.add_output('propulsor_active',val=np.zeros((nn,)))\n ivcomp.add_output('throttle',val=np.zeros((nn,)))\n zero_start=False\n\n self.add_subsystem('atmos', ComputeAtmosphericProperties(num_nodes=nn, true_airspeed_in=True), promotes_inputs=['*'], promotes_outputs=['*'])\n self.add_subsystem('gs',Groundspeeds(num_nodes=nn),promotes_inputs=['*'],promotes_outputs=['*'])\n # add the user-defined aircraft model\n self.add_subsystem('acmodel',self.options['aircraft_model'](num_nodes=nn,flight_phase=self.options['flight_phase']),promotes_inputs=['*'],promotes_outputs=['*'])\n\n self.add_subsystem('lift',Lift(num_nodes=nn), promotes_inputs=['*'],promotes_outputs=['*'])\n self.add_subsystem('stall',StallSpeed(),promotes_inputs=[('CLmax','ac|aero|CLmax_TO'),('weight','ac|weights|MTOW'),'ac|geom|wing|S_ref'],promotes_outputs=['*'])\n self.add_subsystem('vrspeed',ElementMultiplyDivideComp(output_name='takeoff|vr',input_names=['Vstall_eas','vr_vstall_mult'],input_units=['m/s',None]),promotes_inputs=['*'],promotes_outputs=['*'])\n\n\n self.add_subsystem('haccel',HorizontalAcceleration(num_nodes=nn), promotes_inputs=['*'],promotes_outputs=['*'])\n if flight_phase == 'v1v0':\n #unfortunately need to shoot backwards to avoid negative airspeeds\n #reverse the order of the accelerations so the last one is first (and make them negative)\n self.add_subsystem('flipaccel', FlipVectorComp(num_nodes=nn, units='m/s**2', negative=True), promotes_inputs=[('vec_in','accel_horiz')])\n #integrate the timesteps in reverse from near zero speed.\n ode_integ = self.add_subsystem('ode_integ', Integrator(num_nodes=nn, method='simpson', diff_units='s',time_setup='duration'), promotes_inputs=['*'], promotes_outputs=['*'])\n ode_integ.add_integrand('vel_q', units='m/s', rate_name='vel_dqdt', start_name='zero_speed', end_name='fltcond|Utrue_initial', lower=1.5) \n self.connect('flipaccel.vec_out','vel_dqdt')\n #flip the result of the reverse integration again so the flight condition is forward and consistent with everythign else\n 
self.add_subsystem('flipvel', FlipVectorComp(num_nodes=nn, units='m/s', negative=False), promotes_outputs=[('vec_out','fltcond|Utrue')])\n self.connect('vel_q','flipvel.vec_in')\n # now set the time step so that backwards shooting results in the correct 'initial' segment airspeed\n self.add_subsystem('v0constraint',BalanceComp(name='duration',units='s',eq_units='m/s',rhs_name='fltcond|Utrue_initial',lhs_name='takeoff|v1',val=10.,upper=100.,lower=1.),\n promotes_inputs=['*'],promotes_outputs=['duration'])\n else:\n # forward shooting for these acceleration segmentes\n ode_integ = self.add_subsystem('ode_integ', Integrator(num_nodes=nn, method='simpson', diff_units='s',time_setup='duration'), promotes_inputs=['*'], promotes_outputs=['*'])\n ode_integ.add_integrand('fltcond|Utrue', units='m/s', rate_name='accel_horiz', start_name='fltcond|Utrue_initial', end_name='fltcond|Utrue_final', lower=1.5)\n if flight_phase == 'v0v1':\n self.connect('zero_speed','fltcond|Utrue_initial')\n self.add_subsystem('v1constraint',BalanceComp(name='duration',units='s',eq_units='m/s',rhs_name='fltcond|Utrue_final',lhs_name='takeoff|v1',val=10.,upper=100.,lower=1.),\n promotes_inputs=['*'],promotes_outputs=['duration'])\n elif flight_phase == 'v1vr':\n self.add_subsystem('vrconstraint',BalanceComp(name='duration',units='s',eq_units='m/s',rhs_name='fltcond|Utrue_final',lhs_name='takeoff|vr',val=5.,upper=12.,lower=0.0),\n promotes_inputs=['*'],promotes_outputs=['duration'])\n\n if zero_start:\n ode_integ.add_integrand('range', rate_name='fltcond|groundspeed', units='m', zero_start=True)\n else:\n ode_integ.add_integrand('range', rate_name='fltcond|groundspeed', units='m')\n\nclass RotationPhase(oc.PhaseGroup):\n \"\"\"\n This group models the transition from ground roll to climb out during a takeoff\n using force balance in the vertical and horizontal directions.\n\n User-settable parameters include:\n throttle (default 100 percent)\n rolling friction coeff (default 0.03 for accelerating segments and 0.4 for braking)\n propulsor_active (default 1 for v0 to v1, 0 for v1 to vr and braking) to model engine failure\n altitude (fltcond|h)\n obstacle clearance hight (h_obs) default 35 feet per FAR 25\n Rotation CL/CLmax ratio (default 0.83)\n\n The BaseAircraftGroup object is passed in.\n The BaseAircraftGroup should be built to accept the following inputs\n and return the following outputs.\n The outputs should be promoted to the top level in the component.\n\n Inputs\n ------\n range : float\n Total distance travelled (vector, m)\n fltcond|h : float\n Altitude (vector, m)\n fltcond|vs : float\n Vertical speed (vector, m/s)\n fltcond|Ueas : float\n Equivalent airspeed (vector, m/s)\n fltcond|Utrue : float\n True airspeed (vector, m/s)\n fltcond|p : float\n Pressure (vector, Pa)\n fltcond|rho : float\n Density (vector, kg/m3)\n fltcond|T : float\n Temperature (vector, K)\n fltcond|q : float\n Dynamic pressure (vector, Pa)\n fltcond|CL : float\n Lift coefficient (vector, dimensionless)\n throttle : float\n Motor / propeller throttle setting scaled from 0 to 1 or slightly more (vector, dimensionless)\n propulsor_active : float\n If a multi-propulsor airplane, a failure condition should be modeled in the propulsion model by multiplying throttle by propulsor_active.\n It will generally be 1.0 unless a failure condition is being modeled, in which case it will be 0 (vector, dimensionless)\n braking : float\n Percentage brakes applied, from 0 to 1. 
Should not be applied in the air or nonphysical effects will result (vector, dimensionless)\n lift : float\n Lift force (vector, N)\n\n Outputs\n -------\n thrust : float\n Total thrust force produced by all propulsors (vector, N)\n drag : float\n Total drag force in the airplane axis produced by all sources of drag (vector, N)\n weight : float\n Weight (mass, really) of the airplane at each point in time. Generally will need to be integrated by Dymos as a state with a rate source (vector, kg)\n ac|geom|wing|S_ref\n Wing reference area (scalar, m**2)\n ac|aero|CLmax_TO\n CLmax with flaps in max takeoff position (scalar, dimensionless)\n ac|weights|MTOW\n Maximum takeoff weight (scalar, kg)\n \"\"\"\n\n def initialize(self):\n self.options.declare('num_nodes',default=1)\n self.options.declare('flight_phase',default=None)\n self.options.declare('aircraft_model',default=None)\n\n def setup(self):\n nn = self.options['num_nodes']\n ivcomp = self.add_subsystem('const_settings', IndepVarComp(), promotes_outputs=[\"*\"])\n ivcomp.add_output('CL_rotate_mult', val=np.ones((nn,))*0.83)\n ivcomp.add_output('h_obs', val=35, units='ft')\n flight_phase = self.options['flight_phase']\n if flight_phase == 'rotate':\n ivcomp.add_output('braking',val=np.zeros((nn,)))\n ivcomp.add_output('propulsor_active',val=np.zeros((nn,)))\n ivcomp.add_output('throttle',val=np.ones((nn,)))\n\n self.add_subsystem('atmos', ComputeAtmosphericProperties(num_nodes=nn, true_airspeed_in=True), promotes_inputs=['*'], promotes_outputs=['*'])\n self.add_subsystem('gs',Groundspeeds(num_nodes=nn),promotes_inputs=['*'],promotes_outputs=['*'])\n clcomp = self.add_subsystem('clcomp',ElementMultiplyDivideComp(output_name='fltcond|CL', input_names=['CL_rotate_mult','ac|aero|CLmax_TO'],\n vec_size=[nn,1], length=1),\n promotes_inputs=['*'], promotes_outputs=['*'])\n self.add_subsystem('acmodel',self.options['aircraft_model'](num_nodes=nn,flight_phase=self.options['flight_phase']),promotes_inputs=['*'],promotes_outputs=['*'])\n\n\n self.add_subsystem('lift',Lift(num_nodes=nn), promotes_inputs=['*'],promotes_outputs=['*'])\n self.add_subsystem('haccel',HorizontalAcceleration(num_nodes=nn), promotes_inputs=['*'],promotes_outputs=['*'])\n self.add_subsystem('vaccel',VerticalAcceleration(num_nodes=nn), promotes_inputs=['*'],promotes_outputs=['*'])\n \n # TODO always starts from zero altitude\n self.add_subsystem('clear_obstacle',BalanceComp(name='duration',units='s',val=1,eq_units='m',rhs_name='fltcond|h_final',lhs_name='h_obs',lower=0.1,upper=15),\n promotes_inputs=['*'],promotes_outputs=['duration'])\n int1 = self.add_subsystem('intvelocity', Integrator(num_nodes=nn, method='simpson',diff_units='s',time_setup='duration'), promotes_outputs=['*'], promotes_inputs=['*'])\n int1.add_integrand('fltcond|Utrue', rate_name='accel_horiz', units='m/s', lower=0.1)\n int2 = self.add_subsystem('intrange', Integrator(num_nodes=nn, method='simpson',diff_units='s',time_setup='duration'), promotes_outputs=['*'], promotes_inputs=['*'])\n int2.add_integrand('range', rate_name='fltcond|groundspeed', units='m') \n int3 = self.add_subsystem('intvs', Integrator(num_nodes=nn, method='simpson',diff_units='s',time_setup='duration'), promotes_outputs=['*'], promotes_inputs=['*'])\n int3.add_integrand('fltcond|vs', rate_name='accel_vert', units='m/s', zero_start=True) \n int4 = self.add_subsystem('inth', Integrator(num_nodes=nn, method='simpson',diff_units='s',time_setup='duration'), promotes_outputs=['*'], promotes_inputs=['*'])\n int4.add_integrand('fltcond|h', 
rate_name='fltcond|vs', units='m', zero_start=True) \n\nclass SteadyFlightPhase(oc.PhaseGroup):\n \"\"\"\n This component group models steady flight conditions.\n Settable mission parameters include:\n Airspeed (fltcond|Ueas)\n Vertical speed (fltcond|vs)\n Duration of the segment (duration)\n\n Throttle is set automatically to ensure steady flight\n\n The BaseAircraftGroup object is passed in.\n The BaseAircraftGroup should be built to accept the following inputs\n and return the following outputs.\n The outputs should be promoted to the top level in the component.\n\n Inputs\n ------\n range : float\n Total distance travelled (vector, m)\n fltcond|h : float\n Altitude (vector, m)\n fltcond|vs : float\n Vertical speed (vector, m/s)\n fltcond|Ueas : float\n Equivalent airspeed (vector, m/s)\n fltcond|Utrue : float\n True airspeed (vector, m/s)\n fltcond|p : float\n Pressure (vector, Pa)\n fltcond|rho : float\n Density (vector, kg/m3)\n fltcond|T : float\n Temperature (vector, K)\n fltcond|q : float\n Dynamic pressure (vector, Pa)\n fltcond|CL : float\n Lift coefficient (vector, dimensionless)\n throttle : float\n Motor / propeller throttle setting scaled from 0 to 1 or slightly more (vector, dimensionless)\n propulsor_active : float\n If a multi-propulsor airplane, a failure condition should be modeled in the propulsion model by multiplying throttle by propulsor_active.\n It will generally be 1.0 unless a failure condition is being modeled, in which case it will be 0 (vector, dimensionless)\n braking : float\n Brake friction coefficient (default 0.4 for dry runway braking, 0.03 for resistance unbraked)\n Should not be applied in the air or nonphysical effects will result (vector, dimensionless)\n lift : float\n Lift force (vector, N)\n\n Outputs\n -------\n thrust : float\n Total thrust force produced by all propulsors (vector, N)\n drag : float\n Total drag force in the airplane axis produced by all sources of drag (vector, N)\n weight : float\n Weight (mass, really) of the airplane at each point in time. (vector, kg)\n ac|geom|wing|S_ref\n Wing reference area (scalar, m**2)\n ac|aero|CLmax_TO\n CLmax with flaps in max takeoff position (scalar, dimensionless)\n ac|weights|MTOW\n Maximum takeoff weight (scalar, kg)\n \"\"\"\n def initialize(self):\n self.options.declare('num_nodes',default=1)\n self.options.declare('flight_phase',default=None,desc='Phase of flight e.g. 
v0v1, cruise')\n self.options.declare('aircraft_model',default=None)\n\n def setup(self):\n nn = self.options['num_nodes']\n ivcomp = self.add_subsystem('const_settings', IndepVarComp(), promotes_outputs=[\"*\"])\n ivcomp.add_output('propulsor_active', val=np.ones(nn))\n ivcomp.add_output('braking', val=np.zeros(nn))\n ivcomp.add_output('fltcond|Ueas',val=np.ones((nn,))*90, units='m/s')\n ivcomp.add_output('fltcond|vs',val=np.ones((nn,))*1, units='m/s')\n ivcomp.add_output('zero_accel',val=np.zeros((nn,)),units='m/s**2')\n \n integ = self.add_subsystem('ode_integ', Integrator(num_nodes=nn, diff_units='s', time_setup='duration', method='simpson'), promotes_inputs=['fltcond|vs', 'fltcond|groundspeed'], promotes_outputs=['fltcond|h', 'range'])\n integ.add_integrand('fltcond|h', rate_name='fltcond|vs', val=1.0, units='m')\n self.add_subsystem('atmos', ComputeAtmosphericProperties(num_nodes=nn, true_airspeed_in=False), promotes_inputs=['*'], promotes_outputs=['*'])\n self.add_subsystem('gs',Groundspeeds(num_nodes=nn),promotes_inputs=['*'],promotes_outputs=['*'])\n # add the user-defined aircraft model\n self.add_subsystem('acmodel',self.options['aircraft_model'](num_nodes=nn, flight_phase=self.options['flight_phase']),promotes_inputs=['*'],promotes_outputs=['*'])\n self.add_subsystem('clcomp',SteadyFlightCL(num_nodes=nn), promotes_inputs=['*'],promotes_outputs=['*'])\n self.add_subsystem('lift',Lift(num_nodes=nn), promotes_inputs=['*'],promotes_outputs=['*'])\n self.add_subsystem('haccel',HorizontalAcceleration(num_nodes=nn), promotes_inputs=['*'],promotes_outputs=['*'])\n integ.add_integrand('range', rate_name='fltcond|groundspeed', val=1.0, units='m')\n self.add_subsystem('steadyflt',BalanceComp(name='throttle',val=np.ones((nn,))*0.5,lower=0.01,upper=2.0,units=None,normalize=False,eq_units='m/s**2',rhs_name='accel_horiz',lhs_name='zero_accel',rhs_val=np.zeros((nn,))),\n promotes_inputs=['accel_horiz','zero_accel'],promotes_outputs=['throttle'])\n\n# class OldSteadyFlightPhase(Group):\n# \"\"\"\n# This component group models steady flight conditions.\n# Settable mission parameters include:\n# Airspeed (fltcond|Ueas)\n# Vertical speed (fltcond|vs)\n# Duration of the segment (duration)\n\n# Throttle is set automatically to ensure steady flight\n\n# The BaseAircraftGroup object is passed in.\n# The BaseAircraftGroup should be built to accept the following inputs\n# and return the following outputs.\n# The outputs should be promoted to the top level in the component.\n\n# Inputs\n# ------\n# range : float\n# Total distance travelled (vector, m)\n# fltcond|h : float\n# Altitude (vector, m)\n# fltcond|vs : float\n# Vertical speed (vector, m/s)\n# fltcond|Ueas : float\n# Equivalent airspeed (vector, m/s)\n# fltcond|Utrue : float\n# True airspeed (vector, m/s)\n# fltcond|p : float\n# Pressure (vector, Pa)\n# fltcond|rho : float\n# Density (vector, kg/m3)\n# fltcond|T : float\n# Temperature (vector, K)\n# fltcond|q : float\n# Dynamic pressure (vector, Pa)\n# fltcond|CL : float\n# Lift coefficient (vector, dimensionless)\n# throttle : float\n# Motor / propeller throttle setting scaled from 0 to 1 or slightly more (vector, dimensionless)\n# propulsor_active : float\n# If a multi-propulsor airplane, a failure condition should be modeled in the propulsion model by multiplying throttle by propulsor_active.\n# It will generally be 1.0 unless a failure condition is being modeled, in which case it will be 0 (vector, dimensionless)\n# braking : float\n# Brake friction coefficient (default 0.4 for dry runway 
braking, 0.03 for resistance unbraked)\n# Should not be applied in the air or nonphysical effects will result (vector, dimensionless)\n# lift : float\n# Lift force (vector, N)\n\n# Outputs\n# -------\n# thrust : float\n# Total thrust force produced by all propulsors (vector, N)\n# drag : float\n# Total drag force in the airplane axis produced by all sources of drag (vector, N)\n# weight : float\n# Weight (mass, really) of the airplane at each point in time. (vector, kg)\n# ac|geom|wing|S_ref\n# Wing reference area (scalar, m**2)\n# ac|aero|CLmax_TO\n# CLmax with flaps in max takeoff position (scalar, dimensionless)\n# ac|weights|MTOW\n# Maximum takeoff weight (scalar, kg)\n# \"\"\"\n# def initialize(self):\n# self.options.declare('num_nodes',default=1)\n# self.options.declare('flight_phase',default=None,desc='Phase of flight e.g. v0v1, cruise')\n# self.options.declare('aircraft_model',default=None)\n\n# def setup(self):\n# nn = self.options['num_nodes']\n# ivcomp = self.add_subsystem('const_settings', IndepVarComp(), promotes_outputs=[\"*\"])\n# ivcomp.add_output('propulsor_active', val=np.ones(nn))\n# ivcomp.add_output('braking', val=np.zeros(nn))\n# ivcomp.add_output('fltcond|Ueas',val=np.ones((nn,))*90, units='m/s')\n# ivcomp.add_output('fltcond|vs',val=np.ones((nn,))*1, units='m/s')\n# ivcomp.add_output('zero_accel',val=np.zeros((nn,)),units='m/s**2')\n \n# self.add_subsystem('inth',Integrator(num_nodes=nn, method='simpson', quantity_units='m', diff_units='s', time_setup='duration'),\n# promotes_inputs=[('dqdt','fltcond|vs'),'duration',('q_initial','fltcond|h_initial')],promotes_outputs=[('q','fltcond|h'),('q_final','fltcond|h_final')])\n# self.add_subsystem('atmos', ComputeAtmosphericProperties(num_nodes=nn, true_airspeed_in=False), promotes_inputs=['*'], promotes_outputs=['*'])\n# self.add_subsystem('gs',Groundspeeds(num_nodes=nn),promotes_inputs=['*'],promotes_outputs=['*'])\n# # add the user-defined aircraft model\n# self.add_subsystem('acmodel',self.options['aircraft_model'](num_nodes=nn, flight_phase=self.options['flight_phase']),promotes_inputs=['*'],promotes_outputs=['*'])\n# self.add_subsystem('clcomp',SteadyFlightCL(num_nodes=nn), promotes_inputs=['*'],promotes_outputs=['*'])\n# self.add_subsystem('lift',Lift(num_nodes=nn), promotes_inputs=['*'],promotes_outputs=['*'])\n# self.add_subsystem('haccel',HorizontalAcceleration(num_nodes=nn), promotes_inputs=['*'],promotes_outputs=['*'])\n\n# self.add_subsystem('intrange',Integrator(num_nodes=nn, method='simpson', quantity_units='m', diff_units='s', time_setup='duration'),\n# promotes_inputs=[('dqdt','fltcond|groundspeed'),'duration',('q_initial','range_initial')],promotes_outputs=[('q','range'),('q_final','range_final')])\n\n\n# self.add_subsystem('steadyflt',BalanceComp(name='throttle',val=np.ones((nn,))*0.5,lower=0.01,upper=2.0,units=None,normalize=False,eq_units='m/s**2',rhs_name='accel_horiz',lhs_name='zero_accel',rhs_val=np.zeros((nn,))),\n# promotes_inputs=['accel_horiz','zero_accel'],promotes_outputs=['throttle'])\n\nclass ClimbAnglePhase(Group):\n \"\"\"\n This component checks the climb angle for a\n single flight condition at the V2 speed. 
No integration is performed.\n\n User settable parameter includes the V2/Vstall multiple (default 1.2)\n\n Useful for ensuring all-engine climb gradients in optimization.\n Choose flight_phase = AllEngineClimbAngle or EngineOutClimbAngle\n to set the propulsor_active property correctly.\n\n Inputs\n ------\n range : float\n Total distance travelled (vector, m)\n fltcond|h : float\n Altitude (vector, m)\n fltcond|vs : float\n Vertical speed (vector, m/s)\n fltcond|Ueas : float\n Equivalent airspeed (vector, m/s)\n fltcond|Utrue : float\n True airspeed (vector, m/s)\n fltcond|p : float\n Pressure (vector, Pa)\n fltcond|rho : float\n Density (vector, kg/m3)\n fltcond|T : float\n Temperature (vector, K)\n fltcond|q : float\n Dynamic pressure (vector, Pa)\n fltcond|CL : float\n Lift coefficient (vector, dimensionless)\n throttle : float\n Motor / propeller throttle setting scaled from 0 to 1 or slightly more (vector, dimensionless)\n propulsor_active : float\n If a multi-propulsor airplane, a failure condition should be modeled in the propulsion model by multiplying throttle by propulsor_active.\n It will generally be 1.0 unless a failure condition is being modeled, in which case it will be 0 (vector, dimensionless)\n lift : float\n Lift force (vector, N)\n\n Outputs\n -------\n thrust : float\n Total thrust force produced by all propulsors (vector, N)\n drag : float\n Total drag force in the airplane axis produced by all sources of drag (vector, N)\n weight : float\n Weight (mass, really) of the airplane at each point in time. Generally will need to be integrated by Dymos as a state with a rate source (vector, kg)\n ac|geom|wing|S_ref\n Wing reference area (scalar, m**2)\n ac|aero|CLmax_TO\n CLmax with flaps in max takeoff position (scalar, dimensionless)\n ac|weights|MTOW\n Maximum takeoff weight (scalar, kg)\n \"\"\"\n\n def initialize(self):\n self.options.declare('num_nodes',default=1)\n self.options.declare('flight_phase',default=None,desc='Phase of flight e.g. 
v0v1, cruise')\n self.options.declare('aircraft_model',default=None)\n\n def setup(self):\n nn = self.options['num_nodes']\n ivcomp = self.add_subsystem('const_settings', IndepVarComp(), promotes_outputs=[\"*\"])\n ivcomp.add_output('v2_vstall_mult',val=1.2)\n ivcomp.add_output('fltcond|h',val=np.zeros((nn,)),units='m')\n ivcomp.add_output('fltcond|cosgamma', val=np.ones((nn,)))\n\n flight_phase = self.options['flight_phase']\n if flight_phase == 'AllEngineClimbAngle':\n ivcomp.add_output('propulsor_active',val=np.ones((nn,)))\n ivcomp.add_output('throttle',val=np.ones((nn,)))\n elif flight_phase == 'EngineOutClimbAngle':\n ivcomp.add_output('propulsor_active',val=np.zeros((nn,)))\n ivcomp.add_output('throttle',val=np.ones((nn,)))\n self.add_subsystem('stall',StallSpeed(),promotes_inputs=[('CLmax','ac|aero|CLmax_TO'),('weight','ac|weights|MTOW'),'ac|geom|wing|S_ref'],promotes_outputs=['*'])\n self.add_subsystem('vrspeed',ElementMultiplyDivideComp(output_name='takeoff|v2',input_names=['Vstall_eas','v2_vstall_mult'],input_units=['m/s',None]),promotes_inputs=['*'],promotes_outputs=['*'])\n self.add_subsystem('atmos', ComputeAtmosphericProperties(num_nodes=nn, true_airspeed_in=False), promotes_inputs=['*'], promotes_outputs=['*'])\n self.add_subsystem('clcomp',SteadyFlightCL(num_nodes=nn), promotes_inputs=[('weight','ac|weights|MTOW'),'fltcond|*','ac|*'],promotes_outputs=['*'])\n self.connect('takeoff|v2','fltcond|Ueas')\n # the aircraft model needs to provide thrust and drag\n self.add_subsystem('acmodel',self.options['aircraft_model'](num_nodes=nn,flight_phase=self.options['flight_phase']),promotes_inputs=['*'],promotes_outputs=['*'])\n self.add_subsystem('climbangle',ClimbAngleComp(num_nodes=nn),promotes_inputs=['drag',('weight','ac|weights|MTOW'),'thrust'],promotes_outputs=['gamma'])\n\nclass TakeoffTransition(ExplicitComponent):\n \"\"\"\n Computes distance and altitude at end of circular transition.\n\n Based on TO distance analysis method in Raymer book.\n Obstacle clearance height set for GA / Part 23 aircraft\n Override for analyzing Part 25 aircraft\n\n Inputs\n ------\n fltcond|Utrue\n Transition true airspeed (generally avg of vr and v2) (scalar, m/s)\n gamma : float\n Climb out flight path angle (scalar, rad)\n\n Outputs\n -------\n s_transition : float\n Horizontal distance during transition to v2 climb out (scalar, m)\n h_transition : float\n Altitude at transition point (scalar, m)\n t_transition : float\n Elapsed time in transition (scalar, s)\n\n Options\n -------\n h_obstacle : float\n Obstacle height to clear (in **meters**) (default 10.66, equiv. 
35 ft)\n load_factor : float\n Load factor during rotation and transition (default 1.2 from Raymer book)\n \"\"\"\n\n def initialize(self):\n self.options.declare('h_obstacle',default=10.66,desc='Obstacle clearance height in m')\n self.options.declare('load_factor', default=1.2, desc='Load factor during circular arc transition')\n def setup(self):\n self.add_input('fltcond|Utrue', units='m/s', src_indices=0)\n self.add_input('gamma', units='rad', src_indices=0)\n self.add_output('s_transition', units='m')\n self.add_output('h_transition', units='m')\n self.add_output('t_transition',units='s')\n self.declare_partials(['s_transition','h_transition','t_transition'], ['fltcond|Utrue','gamma'])\n\n def compute(self, inputs, outputs):\n hobs = self.options['h_obstacle']\n nfactor = self.options['load_factor'] - 1\n g = 9.80665 #m/s^2\n gam = inputs['gamma']\n ut = inputs['fltcond|Utrue']\n\n R = ut**2/nfactor/g\n st = R*np.sin(gam)\n ht = R*(1-np.cos(gam))\n #alternate formula if the obstacle is cleared during transition\n if ht > hobs:\n st = np.sqrt(R**2-(R-hobs)**2)\n ht = hobs\n outputs['s_transition'] = st\n outputs['h_transition'] = ht\n outputs['t_transition'] = st / ut\n\n def compute_partials(self, inputs, J):\n hobs = self.options['h_obstacle']\n nfactor = self.options['load_factor'] - 1\n g = 9.80665 #m/s^2\n gam = inputs['gamma']\n ut = inputs['fltcond|Utrue']\n R = ut**2/nfactor/g\n dRdut = 2*ut/nfactor/g\n st = R*np.sin(gam)\n ht = R*(1-np.cos(gam))\n #alternate formula if the obstacle is cleared during transition\n if ht > hobs:\n st = np.sqrt(R**2-(R-hobs)**2)\n dstdut = 1/2/np.sqrt(R**2-(R-hobs)**2) * (2*R*dRdut - 2*(R-hobs)*dRdut)\n dstdgam = 0\n dhtdut = 0\n dhtdgam = 0\n else:\n dhtdut = dRdut*(1-np.cos(gam))\n dhtdgam = R*np.sin(gam)\n dstdut = dRdut*np.sin(gam)\n dstdgam = R*np.cos(gam)\n J['s_transition','gamma'] = dstdgam\n J['s_transition','fltcond|Utrue'] = dstdut\n J['h_transition','gamma'] = dhtdgam\n J['h_transition','fltcond|Utrue'] = dhtdut\n J['t_transition','gamma'] = dstdgam / ut\n J['t_transition','fltcond|Utrue'] = (dstdut * ut - st) / ut ** 2\n\nclass TakeoffClimb(ExplicitComponent):\n \"\"\"\n Computes ground distance from end of transition until obstacle is cleared.\n\n Analysis based on Raymer book.\n\n Inputs\n ------\n gamma : float\n Climb out flight path angle (scalar, rad)\n h_transition : float\n Altitude at transition point (scalar, m)\n\n Outputs\n -------\n s_climb : float\n Horizontal distance from end of transition until obstacle is cleared (scalar, m)\n\n Options\n -------\n h_obstacle : float\n Obstacle height to clear (in **meters**) (default 10.66, equiv. 
35 ft)\n \"\"\"\n\n def initialize(self):\n self.options.declare('h_obstacle',default=10.66,desc='Obstacle clearance height in m')\n def setup(self):\n self.add_input('h_transition', units='m')\n self.add_input('gamma', units='rad',src_indices=-1)\n self.add_input('fltcond|Utrue', units='m/s',src_indices=-1)\n\n self.add_output('s_climb', units='m')\n self.add_output('t_climb', units='s')\n self.declare_partials(['s_climb'], ['h_transition','gamma'])\n self.declare_partials(['t_climb'], ['h_transition','gamma','fltcond|Utrue'])\n\n def compute(self, inputs, outputs):\n hobs = self.options['h_obstacle']\n gam = inputs['gamma']\n ht = inputs['h_transition']\n ut = inputs['fltcond|Utrue']\n sc = (hobs-ht)/np.tan(gam)\n outputs['s_climb'] = sc\n outputs['t_climb'] = sc / ut\n\n def compute_partials(self, inputs, J):\n hobs = self.options['h_obstacle']\n gam = inputs['gamma']\n ht = inputs['h_transition']\n ut = inputs['fltcond|Utrue']\n sc = (hobs-ht)/np.tan(gam)\n J['s_climb','gamma'] = -(hobs-ht)/np.tan(gam)**2 * (1/np.cos(gam))**2\n J['s_climb','h_transition'] = -1/np.tan(gam)\n J['t_climb','gamma'] = J['s_climb','gamma'] / ut\n J['t_climb','h_transition'] = J['s_climb','h_transition'] / ut\n J['t_climb','fltcond|Utrue'] = - sc / ut ** 2\n\n\nclass RobustRotationPhase(oc.PhaseGroup):\n \"\"\"\n This adds general mission analysis capabilities to an existing airplane model.\n The BaseAircraftGroup object is passed in. It should be built to accept the following inputs and return the following outputs.\n The outputs should be promoted to the top level in the component.\n\n Inputs\n ------\n range : float\n Total distance travelled (vector, m)\n fltcond|h : float\n Altitude (vector, m)\n fltcond|vs : float\n Vertical speed (vector, m/s)\n fltcond|Ueas : float\n Equivalent airspeed (vector, m/s)\n fltcond|Utrue : float\n True airspeed (vector, m/s)\n fltcond|p : float\n Pressure (vector, Pa)\n fltcond|rho : float\n Density (vector, kg/m3)\n fltcond|T : float\n Temperature (vector, K)\n fltcond|q : float\n Dynamic pressure (vector, Pa)\n fltcond|CL : float\n Lift coefficient (vector, dimensionless)\n throttle : float\n Motor / propeller throttle setting scaled from 0 to 1 or slightly more (vector, dimensionless)\n propulsor_active : float\n If a multi-propulsor airplane, a failure condition should be modeled in the propulsion model by multiplying throttle by propulsor_active.\n It will generally be 1.0 unless a failure condition is being modeled, in which case it will be 0 (vector, dimensionless)\n braking : float\n Percentage brakes applied, from 0 to 1. Should not be applied in the air or nonphysical effects will result (vector, dimensionless)\n lift : float\n Lift force (vector, N)\n\n Outputs\n -------\n thrust : float\n Total thrust force produced by all propulsors (vector, N)\n drag : float\n Total drag force in the airplane axis produced by all sources of drag (vector, N)\n weight : float\n Weight (mass, really) of the airplane at each point in time. Generally will need to be integrated by Dymos as a state with a rate source (vector, kg)\n ac|geom|wing|S_ref\n Wing reference area (scalar, m**2)\n ac|aero|CLmax_TO\n CLmax with flaps in max takeoff position (scalar, dimensionless)\n ac|weights|MTOW\n Maximum takeoff weight (scalar, kg)\n \"\"\"\n\n def initialize(self):\n self.options.declare('num_nodes',default=1)\n self.options.declare('flight_phase',default=None,desc='Phase of flight e.g. 
v0v1, cruise')\n self.options.declare('aircraft_model',default=None)\n self.options.declare('h_obstacle',default=10.66, )\n\n def setup(self):\n nn = self.options['num_nodes']\n ivcomp = self.add_subsystem('const_settings', IndepVarComp(), promotes_outputs=[\"*\"])\n flight_phase = self.options['flight_phase']\n if flight_phase == 'rotate':\n ivcomp.add_output('braking',val=np.zeros((nn,)))\n ivcomp.add_output('propulsor_active',val=np.zeros((nn,)))\n ivcomp.add_output('throttle',val=np.ones((nn,)))\n # flight conditions are sea level takeoff, transition speed\n # split off a single node to compute climb angle\n # compute the transition distance and add it to range_initial\n # compute the transition time as a function of the groundspeed\n # provide transition time as duration\n ivcomp.add_output('v2_vstall_mult',val=1.2)\n ivcomp.add_output('vr_vstall_mult',val=1.1)\n ivcomp.add_output('fltcond|vs', val=np.zeros((nn,)),units='m/s')\n ivcomp.add_output('fltcond|cosgamma', val=np.ones((nn,)),units=None)\n\n\n\n ivcomp.add_output('h_obstacle',val=35,units='ft')\n\n self.add_subsystem('altitudes',LinearInterpolator(num_nodes=nn, units='m'),promotes_inputs=[('start_val','h_initial')],promotes_outputs=[('vec','fltcond|h')])\n self.connect('h_obstacle','altitudes.end_val')\n\n self.add_subsystem('stall',StallSpeed(),promotes_inputs=[('CLmax','ac|aero|CLmax_TO'),('weight','ac|weights|MTOW'),'ac|geom|wing|S_ref'],promotes_outputs=['*'])\n self.add_subsystem('vrspeed',ElementMultiplyDivideComp(output_name='takeoff|vr',input_names=['Vstall_eas','vr_vstall_mult'],input_units=['m/s',None]),promotes_inputs=['*'],promotes_outputs=['*'])\n self.add_subsystem('v2speed',ElementMultiplyDivideComp(output_name='takeoff|v2',input_names=['Vstall_eas','v2_vstall_mult'],input_units=['m/s',None]),promotes_inputs=['*'],promotes_outputs=['*'])\n self.add_subsystem('speeds',LinearInterpolator(num_nodes=nn,units='kn'),promotes_inputs=[('start_val','takeoff|vr'),('end_val','takeoff|v2')],promotes_outputs=[('vec','fltcond|Ueas')])\n self.add_subsystem('atmos', ComputeAtmosphericProperties(num_nodes=nn, true_airspeed_in=False), promotes_inputs=['*'], promotes_outputs=['*'])\n # pretty confident there's a simpler closed form multiple for CL at v2\n self.add_subsystem('clcomp',SteadyFlightCL(num_nodes=nn), promotes_inputs=['weight','fltcond|*','ac|*'],promotes_outputs=['*'])\n # the aircraft model needs to provide thrust and drag\n self.add_subsystem('acmodel',self.options['aircraft_model'](num_nodes=nn,flight_phase=self.options['flight_phase']),promotes_inputs=['*'],promotes_outputs=['*'])\n self.add_subsystem('climbangle',ClimbAngleComp(num_nodes=nn),promotes_inputs=['drag','weight','thrust'],promotes_outputs=['gamma'])\n self.add_subsystem('transition',TakeoffTransition(),promotes_inputs=['fltcond|Utrue','gamma'],promotes_outputs=['h_transition','s_transition','t_transition'])\n self.add_subsystem('v2climb',TakeoffClimb(),promotes_inputs=['h_transition','gamma','fltcond|Utrue'],promotes_outputs=['s_climb','t_climb'])\n self.add_subsystem('tod_final',AddSubtractComp(output_name='range_final',input_names=['range_initial','s_transition','s_climb'],units='m'),promotes_inputs=['*'],promotes_outputs=['*'])\n self.add_subsystem('duration',AddSubtractComp(output_name='duration',input_names=['t_transition','t_climb'],units='s'),promotes_inputs=['*'],promotes_outputs=['*'])\n 
self.add_subsystem('h_final',AddSubtractComp(output_name='fltcond|h_final',input_names=['h_obstacle'],units='m'),promotes_inputs=['*'],promotes_outputs=['*'])\n self.add_subsystem('ranges',LinearInterpolator(num_nodes=nn,units='m'),promotes_inputs=[('start_val','range_initial'),('end_val','range_final')],promotes_outputs=[('vec','range')])\n\n\n\n"
] | [
[
"numpy.sqrt",
"numpy.clip",
"numpy.arcsin",
"numpy.arange",
"numpy.isnan",
"numpy.less",
"numpy.cos",
"numpy.sin",
"numpy.tan",
"numpy.ones",
"numpy.flip",
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
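The takeoff/mission-analysis code in the row above includes a TakeoffTransition component whose compute() follows the circular-arc transition geometry cited there from Raymer's book: arc radius R = V^2 / ((n - 1) g), horizontal distance R sin(gamma), height gained R (1 - cos(gamma)), and the alternate formula sqrt(R^2 - (R - h_obs)^2) once the obstacle height is cleared within the arc. The following is a minimal standalone sketch of that same arithmetic in plain NumPy, outside the OpenMDAO component; the function name transition_distances and the 55 m/s / 5 degree sample inputs are illustrative assumptions, not values from the dataset.

import numpy as np

def transition_distances(v_true, gamma, h_obstacle=10.66, load_factor=1.2, g=9.80665):
    """Circular-arc takeoff transition, mirroring the TakeoffTransition compute() above."""
    R = v_true ** 2 / ((load_factor - 1.0) * g)   # arc radius, m
    s_t = R * np.sin(gamma)                       # horizontal distance covered in the arc, m
    h_t = R * (1.0 - np.cos(gamma))               # altitude gained in the arc, m
    if h_t > h_obstacle:                          # obstacle cleared before the arc ends
        s_t = np.sqrt(R ** 2 - (R - h_obstacle) ** 2)
        h_t = h_obstacle
    return s_t, h_t, s_t / v_true                 # distance, height, elapsed time

# hypothetical transition speed and climb-out angle; in the dataset's code these
# come from the upstream flight-condition and ClimbAngleComp outputs
print(transition_distances(55.0, np.radians(5.0)))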
jialeli1/From-Voxel-to-Point | [
"b4dba9c4e9cd83e04199d9224f6ec7bf06b71f93"
] | [
"pcdet/utils/loss_utils.py"
] | [
"import numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\n\nfrom . import box_utils\nfrom . import center_utils\n\ntry:\n from itertools import ifilterfalse\nexcept ImportError: # py3k\n from itertools import filterfalse as ifilterfalse\n\n\n\nclass SigmoidFocalClassificationLoss(nn.Module):\n \"\"\"\n Sigmoid focal cross entropy loss.\n \"\"\"\n\n def __init__(self, gamma: float = 2.0, alpha: float = 0.25):\n \"\"\"\n Args:\n gamma: Weighting parameter to balance loss for hard and easy examples.\n alpha: Weighting parameter to balance loss for positive and negative examples.\n \"\"\"\n super(SigmoidFocalClassificationLoss, self).__init__()\n self.alpha = alpha\n self.gamma = gamma\n\n @staticmethod\n def sigmoid_cross_entropy_with_logits(input: torch.Tensor, target: torch.Tensor):\n \"\"\" PyTorch Implementation for tf.nn.sigmoid_cross_entropy_with_logits:\n max(x, 0) - x * z + log(1 + exp(-abs(x))) in\n https://www.tensorflow.org/api_docs/python/tf/nn/sigmoid_cross_entropy_with_logits\n\n Args:\n input: (B, #anchors, #classes) float tensor.\n Predicted logits for each class\n target: (B, #anchors, #classes) float tensor.\n One-hot encoded classification targets\n\n Returns:\n loss: (B, #anchors, #classes) float tensor.\n Sigmoid cross entropy loss without reduction\n \"\"\"\n loss = torch.clamp(input, min=0) - input * target + \\\n torch.log1p(torch.exp(-torch.abs(input)))\n return loss\n\n def forward(self, input: torch.Tensor, target: torch.Tensor, weights: torch.Tensor):\n \"\"\"\n Args:\n input: (B, #anchors, #classes) float tensor.\n Predicted logits for each class\n target: (B, #anchors, #classes) float tensor.\n One-hot encoded classification targets\n weights: (B, #anchors) float tensor.\n Anchor-wise weights.\n\n Returns:\n weighted_loss: (B, #anchors, #classes) float tensor after weighting.\n \"\"\"\n pred_sigmoid = torch.sigmoid(input)\n alpha_weight = target * self.alpha + (1 - target) * (1 - self.alpha)\n pt = target * (1.0 - pred_sigmoid) + (1.0 - target) * pred_sigmoid\n focal_weight = alpha_weight * torch.pow(pt, self.gamma)\n\n bce_loss = self.sigmoid_cross_entropy_with_logits(input, target)\n\n loss = focal_weight * bce_loss\n\n if weights.shape.__len__() == 2 or \\\n (weights.shape.__len__() == 1 and target.shape.__len__() == 2):\n weights = weights.unsqueeze(-1)\n\n assert weights.shape.__len__() == loss.shape.__len__()\n\n return loss * weights\n\n\nclass WeightedSmoothL1Loss(nn.Module):\n \"\"\"\n Code-wise Weighted Smooth L1 Loss modified based on fvcore.nn.smooth_l1_loss\n https://github.com/facebookresearch/fvcore/blob/master/fvcore/nn/smooth_l1_loss.py\n | 0.5 * x ** 2 / beta if abs(x) < beta\n smoothl1(x) = |\n | abs(x) - 0.5 * beta otherwise,\n where x = input - target.\n \"\"\"\n def __init__(self, beta: float = 1.0 / 9.0, code_weights: list = None):\n \"\"\"\n Args:\n beta: Scalar float.\n L1 to L2 change point.\n For beta values < 1e-5, L1 loss is computed.\n code_weights: (#codes) float list if not None.\n Code-wise weights.\n \"\"\"\n super(WeightedSmoothL1Loss, self).__init__()\n self.beta = beta\n if code_weights is not None:\n self.code_weights = np.array(code_weights, dtype=np.float32)\n self.code_weights = torch.from_numpy(self.code_weights).cuda()\n\n @staticmethod\n def smooth_l1_loss(diff, beta):\n if beta < 1e-5:\n loss = torch.abs(diff)\n else:\n n = torch.abs(diff)\n loss = torch.where(n < beta, 0.5 * n ** 2 / beta, n - 0.5 * beta)\n\n return loss\n\n def forward(self, input: 
torch.Tensor, target: torch.Tensor, weights: torch.Tensor = None):\n \"\"\"\n Args:\n input: (B, #anchors, #codes) float tensor.\n Ecoded predicted locations of objects.\n target: (B, #anchors, #codes) float tensor.\n Regression targets.\n weights: (B, #anchors) float tensor if not None.\n\n Returns:\n loss: (B, #anchors) float tensor.\n Weighted smooth l1 loss without reduction.\n \"\"\"\n target = torch.where(torch.isnan(target), input, target) # ignore nan targets\n\n diff = input - target\n # code-wise weighting\n if self.code_weights is not None:\n diff = diff * self.code_weights.view(1, 1, -1)\n\n loss = self.smooth_l1_loss(diff, self.beta)\n\n # anchor-wise weighting\n if weights is not None:\n assert weights.shape[0] == loss.shape[0] and weights.shape[1] == loss.shape[1]\n loss = loss * weights.unsqueeze(-1)\n\n return loss\n\n\nclass WeightedL1Loss(nn.Module):\n def __init__(self, code_weights: list = None):\n \"\"\"\n Args:\n code_weights: (#codes) float list if not None.\n Code-wise weights.\n \"\"\"\n super(WeightedL1Loss, self).__init__()\n if code_weights is not None:\n self.code_weights = np.array(code_weights, dtype=np.float32)\n self.code_weights = torch.from_numpy(self.code_weights).cuda()\n\n def forward(self, input: torch.Tensor, target: torch.Tensor, weights: torch.Tensor = None):\n \"\"\"\n Args:\n input: (B, #anchors, #codes) float tensor.\n Ecoded predicted locations of objects.\n target: (B, #anchors, #codes) float tensor.\n Regression targets.\n weights: (B, #anchors) float tensor if not None.\n\n Returns:\n loss: (B, #anchors) float tensor.\n Weighted smooth l1 loss without reduction.\n \"\"\"\n target = torch.where(torch.isnan(target), input, target) # ignore nan targets\n\n diff = input - target\n # code-wise weighting\n if self.code_weights is not None:\n diff = diff * self.code_weights.view(1, 1, -1)\n\n loss = torch.abs(diff)\n\n # anchor-wise weighting\n if weights is not None:\n assert weights.shape[0] == loss.shape[0] and weights.shape[1] == loss.shape[1]\n loss = loss * weights.unsqueeze(-1)\n\n return loss\n\n\nclass WeightedCrossEntropyLoss(nn.Module):\n \"\"\"\n Transform input to fit the fomation of PyTorch offical cross entropy loss\n with anchor-wise weighting.\n \"\"\"\n def __init__(self):\n super(WeightedCrossEntropyLoss, self).__init__()\n\n def forward(self, input: torch.Tensor, target: torch.Tensor, weights: torch.Tensor):\n \"\"\"\n Args:\n input: (B, #anchors, #classes) float tensor.\n Predited logits for each class.\n target: (B, #anchors, #classes) float tensor.\n One-hot classification targets.\n weights: (B, #anchors) float tensor.\n Anchor-wise weights.\n\n Returns:\n loss: (B, #anchors) float tensor.\n Weighted cross entropy loss without reduction\n \"\"\"\n input = input.permute(0, 2, 1)\n target = target.argmax(dim=-1)\n loss = F.cross_entropy(input, target, reduction='none') * weights\n return loss\n\n\ndef get_corner_loss_lidar(pred_bbox3d: torch.Tensor, gt_bbox3d: torch.Tensor):\n \"\"\"\n Args:\n pred_bbox3d: (N, 7) float Tensor.\n gt_bbox3d: (N, 7) float Tensor.\n\n Returns:\n corner_loss: (N) float Tensor.\n \"\"\"\n assert pred_bbox3d.shape[0] == gt_bbox3d.shape[0]\n\n pred_box_corners = box_utils.boxes_to_corners_3d(pred_bbox3d)\n gt_box_corners = box_utils.boxes_to_corners_3d(gt_bbox3d)\n\n # 这里flip的目的应该是忽略朝向,但实际上呢把朝向也纳入整体更好还是说它会造成不稳定呢?\n gt_bbox3d_flip = gt_bbox3d.clone()\n gt_bbox3d_flip[:, 6] += np.pi\n gt_box_corners_flip = box_utils.boxes_to_corners_3d(gt_bbox3d_flip)\n # (N, 8)\n corner_dist = 
torch.min(torch.norm(pred_box_corners - gt_box_corners, dim=2),\n torch.norm(pred_box_corners - gt_box_corners_flip, dim=2))\n # (N, 8)\n corner_loss = WeightedSmoothL1Loss.smooth_l1_loss(corner_dist, beta=1.0)\n\n return corner_loss.mean(dim=1)\n\n\n\n\ndef get_corner_loss_mse(pred_bbox3d: torch.Tensor, gt_bbox3d: torch.Tensor):\n \"\"\"\n Args:\n pred_bbox3d: (N, 7) float Tensor.\n gt_bbox3d: (N, 7) float Tensor.\n\n Returns:\n corner_loss: (1,) float scaler\n \"\"\"\n assert pred_bbox3d.shape[0] == gt_bbox3d.shape[0]\n\n # (N, 8, 3)\n pred_box_corners = box_utils.boxes_to_corners_3d(pred_bbox3d)\n gt_box_corners = box_utils.boxes_to_corners_3d(gt_bbox3d)\n # print('==> pred_box_corners[0, :, :]')\n # print(pred_box_corners[0,:,:])\n # print('==> gt_box_corners[0, :, :]')\n # print(gt_box_corners[0,:,:])\n # print('==> pred_box_corners[10, :, :]')\n # print(pred_box_corners[10,:,:])\n # print('==> gt_box_corners[10, :, :]')\n # print(gt_box_corners[10,:,:])\n # print('==> pred_box_corners[100, :, :]')\n # print(pred_box_corners[100,:,:])\n # print('==> gt_box_corners[100, :, :]')\n # print(gt_box_corners[100,:,:])\n\n # for each box, mean by 8 corners.\n corner_loss_x = F.mse_loss(input=pred_box_corners[:,:,0], target=gt_box_corners[:,:,0]) # (N, 8) -> (N)\n corner_loss_y = F.mse_loss(input=pred_box_corners[:,:,1], target=gt_box_corners[:,:,1]) # (N, 8) -> (N)\n corner_loss_z = F.mse_loss(input=pred_box_corners[:,:,2], target=gt_box_corners[:,:,2]) # (N, 8) -> (N)\n\n # xyz之间求和\n corner_loss = corner_loss_x + corner_loss_y + corner_loss_z\n\n return corner_loss \n\n\ndef get_iouscore_loss_bce(iou_preds, iou_gts, iou_fg_thresh=0.75, iou_bg_thresh=0.25):\n \"\"\"\n Args:\n iou_preds: (N,)\n iou_gts: (N, )\n Returns:\n loss_iouscore:\n \"\"\"\n # prepare the labels\n # now only for car class, 08132020\n\n # iou_preds = iou_preds.view(-1)\n # iou_gts = iou_gts.view(-1)\n\n # print('==> iou_preds.size()')\n # print(iou_preds.size())\n # print(torch.sigmoid(iou_preds))\n # print('==> iou_gts.size()')\n # print(iou_gts.size())\n # print(iou_gts)\n\n # CLS_FG_THRESH: 0.75\n # CLS_BG_THRESH: 0.25\n # iou_bg_thresh = self.roi_sampler_cfg.CLS_BG_THRESH\n # iou_fg_thresh = self.roi_sampler_cfg.CLS_FG_THRESH\n # iou_bg_thresh = 0.25\n # iou_fg_thresh = 0.75\n\n fg_mask = iou_gts > iou_fg_thresh\n bg_mask = iou_gts < iou_bg_thresh\n interval_mask = (fg_mask == 0) & (bg_mask == 0)\n \n iou_cls_labels = (fg_mask > 0).float()\n iou_cls_labels[interval_mask] = \\\n (iou_gts[interval_mask] - iou_bg_thresh) / (iou_fg_thresh - iou_bg_thresh)\n\n # print('==> iou_cls_labels')\n # print(iou_cls_labels.size())\n # print(iou_cls_labels[:50])\n \n # 这里CE是计算的整个范围的iou,但是最后求和的时候只计算了iou>=0这部分的。\n # 条件 iou_cls_labels >= 0 选出来了那些iou >= 0 的候选框。\n loss_ioucls = F.binary_cross_entropy(torch.sigmoid(iou_preds), iou_cls_labels.float(), reduction='none')\n cls_valid_mask = (iou_cls_labels >= 0).float()\n loss_iouscore = (loss_ioucls * cls_valid_mask).sum() / torch.clamp(cls_valid_mask.sum(), min=1.0)\n\n return loss_iouscore\n\n\n\ndef get_rot_binres_loss(pred_reg, reg_label, num_head_bin, get_ry_fine=False):\n \"\"\"\n Bin-based 3D bounding boxes regression loss. 
See https://arxiv.org/abs/1812.04244 for more details.\n \n :param pred_reg: (N, C)\n :param reg_label: (N, 1), ry\n :param num_head_bin: constant\n :param get_ry_fine: False\n :return:\n \"\"\"\n # print('==> pred_reg.size()')\n # print(pred_reg.size()) # should be (N, 24)\n\n reg_loss_dict = {}\n # angle loss\n start_offset = 0\n ry_bin_l, ry_bin_r = start_offset, start_offset + num_head_bin\n ry_res_l, ry_res_r = ry_bin_r, ry_bin_r + num_head_bin\n start_offset = ry_res_r\n ry_label = reg_label.squeeze(dim=-1)\n # print('==> reg_label[] in encode')\n # print(reg_label.size()) # should be (N, C)\n # print(reg_label[100:150])\n # print('==> ry_label[] in encode')\n # print(ry_label.size()) # should be (N,)\n # print(ry_label[100:150])\n if get_ry_fine:\n assert False, \"one-stage should not get_ry_fine.\"\n\n # divide pi/2 into several bins\n angle_per_class = (np.pi / 2) / num_head_bin\n\n ry_label = ry_label % (2 * np.pi) # 0 ~ 2pi\n opposite_flag = (ry_label > np.pi * 0.5) & (ry_label < np.pi * 1.5)\n ry_label[opposite_flag] = (ry_label[opposite_flag] + np.pi) % (2 * np.pi) # (0 ~ pi/2, 3pi/2 ~ 2pi)\n shift_angle = (ry_label + np.pi * 0.5) % (2 * np.pi) # (0 ~ pi)\n\n shift_angle = torch.clamp(shift_angle - np.pi * 0.25, min=1e-3, max=np.pi * 0.5 - 1e-3) # (0, pi/2)\n\n # bin center is (5, 10, 15, ..., 85)\n ry_bin_label = (shift_angle / angle_per_class).floor().long()\n ry_res_label = shift_angle - (ry_bin_label.float() * angle_per_class + angle_per_class / 2)\n ry_res_norm_label = ry_res_label / (angle_per_class / 2)\n\n else:\n # divide 2pi into several bins\n angle_per_class = (2 * np.pi) / num_head_bin\n heading_angle = ry_label % (2 * np.pi) # 0 ~ 2pi\n # print('==> heading_angle[] in encode')\n # print(heading_angle.size())\n # print(heading_angle[100:150])\n\n shift_angle = (heading_angle + angle_per_class / 2) % (2 * np.pi)\n ry_bin_label = (shift_angle / angle_per_class).floor().long()\n ry_res_label = shift_angle - (ry_bin_label.float() * angle_per_class + angle_per_class / 2)\n ry_res_norm_label = ry_res_label / (angle_per_class / 2)\n # print('==> ry_bin_label in encode')\n # print(ry_bin_label.size())\n # print(ry_bin_label[100:150])\n\n\n ry_bin_onehot = torch.cuda.FloatTensor(ry_bin_label.size(0), num_head_bin).zero_()\n ry_bin_onehot.scatter_(1, ry_bin_label.view(-1, 1).long(), 1)\n loss_ry_bin = F.cross_entropy(pred_reg[:, ry_bin_l:ry_bin_r], ry_bin_label)\n loss_ry_res = F.smooth_l1_loss((pred_reg[:, ry_res_l: ry_res_r] * ry_bin_onehot).sum(dim=1), ry_res_norm_label)\n\n reg_loss_dict['loss_ry_bin'] = loss_ry_bin.item()\n reg_loss_dict['loss_ry_res'] = loss_ry_res.item()\n angle_loss = loss_ry_bin + loss_ry_res\n # Total regression loss\n reg_loss_dict['loss_angle'] = angle_loss\n\n return angle_loss, reg_loss_dict\n\n\n\nclass CenterNetFocalLoss(nn.Module):\n '''nn.Module warpper for focal loss'''\n def __init__(self, gamma=4, alpha=2):\n super(CenterNetFocalLoss, self).__init__()\n # self.neg_loss = _neg_loss\n self.gamma = gamma\n self.alpha = alpha\n\n def _sigmoid(self, x):\n # y = torch.clamp(x.sigmoid_(), min=1e-4, max=1 - 1e-4)\n # dnnt use the replace version!\n y = torch.clamp(torch.sigmoid(x), min=1e-4, max=1 - 1e-4)\n\n # too small will cause loss nan.\n # y = torch.clamp(x.sigmoid_(), min=1e-12, max=1 - 1e-12)\n return y\n\n def _neg_loss(self, pred, gt):\n ''' Modified focal loss. Exactly the same as CornerNet.\n Runs faster and costs a little bit more memory\n Arguments:\n pred: (batch x c x h x w), do some clamp or not?. 
should be clampped already.\n gt: (batch x c x h x w)\n '''\n pos_inds = gt.eq(1).float()\n neg_inds = gt.lt(1).float()\n\n # neg_weights = torch.pow(1 - gt, 4)\n neg_weights = torch.pow(1 - gt, self.gamma)\n\n loss = 0\n\n # pos_loss = torch.log(pred) * torch.pow(1 - pred, 2) * pos_inds\n # neg_loss = torch.log(1 - pred) * torch.pow(pred, 2) * neg_weights * neg_inds\n pos_loss = torch.log(pred) * torch.pow(1 - pred, self.alpha) * pos_inds\n neg_loss = torch.log(1 - pred) * torch.pow(pred, self.alpha) * neg_weights * neg_inds\n\n num_pos = pos_inds.float().sum()\n\n pos_loss = pos_loss.sum()\n neg_loss = neg_loss.sum()\n\n if num_pos == 0:\n loss = loss - neg_loss\n else:\n loss = loss - (pos_loss + neg_loss) / num_pos\n return loss\n\n\n def forward(self, out, target):\n out_norm = self._sigmoid(out)\n\n return self._neg_loss(out_norm, target)\n\n\nclass CenterNetResLoss(nn.Module):\n def __init__(self, cfg):\n super(CenterNetResLoss, self).__init__()\n self.res_func_type = cfg['res_func']\n\n def forward(self, output, mask, ind, target):\n \"\"\"\n Args:\n output: torch.Size([B, C, 152, 152])\n mask: torch.Size([B, max_objs])\n ind: torch.Size([B, max_objs])\n target: torch.Size([B, max_objs, C])\n Returns:\n reduced and weighted loss term.\n \"\"\"\n pred = center_utils._transpose_and_gather_feat(output, ind) # (B, max_objs, C)\n\n # print('==> (ind != 0).float().sum(): ', (ind != 0).float().sum() )\n # print('==> mask.sum(): ', mask.sum() )\n\n if mask.sum():\n # 1. flatten.\n pred_flat = pred.view(-1, pred.shape[-1]) #(B*max_objs, C)\n target_flat = target.view(-1, target.shape[-1]) #(B*max_objs, C)\n mask_flat = mask.view(-1).bool() #(B*max_objs)\n # 2. valid select\n pred_valid = pred_flat[mask_flat] #(num_valid, C)\n target_valid = target_flat[mask_flat] #(num_valid, C)\n # 3. un-reduced loss term\n if self.res_func_type == 'smooth-l1':\n loss = F.smooth_l1_loss(pred_valid, target_valid, reduction='none')\n elif self.res_func_type == 'l1':\n loss = F.l1_loss(pred_valid, target_valid, reduction='none') \n elif self.res_func_type == 'balanced_l1':\n loss = get_balanced_l1_loss(pred_valid, target_valid)\n else:\n raise NotImplementedError \n\n # mean for num_obj_dims, sum for channel_dims\n # (num_valid, C) -> (C) -> ()\n loss = loss.mean(dim=0).sum() \n else:\n loss = 0.\n\n return loss\n\nclass CenterNetRotBinResLoss(nn.Module):\n def __init__(self, cfg):\n super(CenterNetRotBinResLoss, self).__init__()\n\n self.num_head_bin = cfg['num_bins']\n\n def forward(self, output, mask, ind, target):\n \"\"\"\n Args:\n output: torch.Size([B, C, 152, 152])\n mask: torch.Size([B, max_objs])\n ind: torch.Size([B, max_objs])\n target: torch.Size([B, max_objs, C])\n Returns:\n reduced and weighted loss term.\n \"\"\"\n pred = center_utils._transpose_and_gather_feat(output, ind) # torch.Size([1, 500, 2])\n\n if mask.sum():\n # 1. flatten\n pred_flat = pred.view(-1, pred.shape[-1]) # (B*max_objs, C)\n target_flat = target.view(-1, target.shape[-1]) # (B*max_objs, 1)\n mask_flat = mask.view(-1).bool() # (B*max_objs)\n # 2. valid select\n pred_valid = pred_flat[mask_flat] # (num_valid, C)\n target_valid = target_flat[mask_flat] # (num_valid, 1)\n\n # 3. 
return the reduced rot loss term.\n loss, _ = get_rot_binres_loss(pred_valid, target_valid, num_head_bin=self.num_head_bin)\n \n else:\n loss = 0.\n\n # print('==> loss in rot')\n # print(loss)\n return loss\n\n\n\n\ndef lovasz_softmax(probas, labels, classes='present', per_image=False, ignore=None):\n \"\"\"\n Multi-class Lovasz-Softmax loss\n NOTE probas should be applied with softmax.\n probas: [B, C, H, W] Variable, class probabilities at each prediction (between 0 and 1).\n Interpreted as binary (sigmoid) output with outputs of size [B, H, W].\n labels: [B, H, W] Tensor, ground truth labels (between 0 and C - 1)\n classes: 'all' for all, 'present' for classes present in labels, or a list of classes to average.\n per_image: compute the loss per image instead of per batch\n ignore: void class labels\n \"\"\"\n # print('==> lovasz_softmax, classes: ', classes)\n # print('==> lovasz_softmax, per_image: ', per_image)\n # print('==> lovasz_softmax, ignore: ', ignore)\n\n if per_image:\n loss = mean(lovasz_softmax_flat(*flatten_probas(prob.unsqueeze(0), lab.unsqueeze(0), ignore), classes=classes)\n for prob, lab in zip(probas, labels))\n else:\n loss = lovasz_softmax_flat(*flatten_probas(probas, labels, ignore), classes=classes)\n return loss\n\n\n\ndef lovasz_softmax_flat(probas, labels, classes='present'):\n \"\"\"\n Multi-class Lovasz-Softmax loss\n probas: [P, C] Variable, class probabilities at each prediction (between 0 and 1)\n labels: [P] Tensor, ground truth labels (between 0 and C - 1)\n classes: 'all' for all, 'present' for classes present in labels, or a list of classes to average.\n \"\"\"\n if probas.numel() == 0:\n # only void pixels, the gradients should be 0\n return probas * 0.\n C = probas.size(1)\n losses = []\n class_to_sum = list(range(C)) if classes in ['all', 'present'] else classes\n for c in class_to_sum:\n fg = (labels == c).float() # foreground for class c\n if (classes is 'present' and fg.sum() == 0):\n continue\n if C == 1:\n if len(classes) > 1:\n raise ValueError('Sigmoid output possible only with 1 class')\n class_pred = probas[:, 0]\n else:\n class_pred = probas[:, c]\n errors = (Variable(fg) - class_pred).abs()\n errors_sorted, perm = torch.sort(errors, 0, descending=True)\n perm = perm.data\n fg_sorted = fg[perm]\n losses.append(torch.dot(errors_sorted, Variable(lovasz_grad(fg_sorted))))\n return mean(losses)\n\n\ndef lovasz_grad(gt_sorted):\n \"\"\"\n Computes gradient of the Lovasz extension w.r.t sorted errors\n See Alg. 1 in paper\n \"\"\"\n p = len(gt_sorted)\n gts = gt_sorted.sum()\n intersection = gts - gt_sorted.float().cumsum(0)\n union = gts + (1 - gt_sorted).float().cumsum(0)\n jaccard = 1. 
- intersection / union\n if p > 1: # cover 1-pixel case\n jaccard[1:p] = jaccard[1:p] - jaccard[0:-1]\n return jaccard\n\n\ndef flatten_probas(probas, labels, ignore=None):\n \"\"\"\n Flattens predictions in the batch\n \"\"\"\n if probas.dim() == 2: \n # do nothing, 3D segmentation for sparse tensor\n pass\n elif probas.dim() == 3:\n # assumes output of a sigmoid layer\n B, H, W = probas.size()\n probas = probas.view(B, 1, H, W)\n probas = probas.permute(0, 2, 3, 1).contiguous().view(-1, C) # B * H * W, C = P, C\n elif probas.dim() == 5:\n # 3D segmentation for dense tensor\n B, C, L, H, W = probas.size()\n probas = probas.contiguous().view(B, C, L, H*W)\n probas = probas.permute(0, 2, 3, 1).contiguous().view(-1, C) # B * H * W, C = P, C\n\n\n labels = labels.view(-1)\n if ignore is not None:\n valid = (labels != ignore)\n # vprobas = probas[valid.nonzero().squeeze()]\n # for newer pytorch\n vprobas = probas[torch.nonzero(valid, as_tuple=False).squeeze()]\n vlabels = labels[valid]\n return vprobas, vlabels\n else: \n return probas, labels\n\n\n# --------------------------- HELPER FUNCTIONS ---------------------------\ndef isnan(x):\n return x != x\n \n \ndef mean(l, ignore_nan=False, empty=0):\n \"\"\"\n nanmean compatible with generators.\n \"\"\"\n l = iter(l)\n if ignore_nan:\n l = ifilterfalse(isnan, l)\n try:\n n = 1\n acc = next(l)\n except StopIteration:\n if empty == 'raise':\n raise ValueError('Empty mean')\n return empty\n for n, v in enumerate(l, 2):\n acc += v\n if n == 1:\n return acc\n return acc / n\n\n"
] | [
[
"torch.abs",
"torch.sigmoid",
"torch.norm",
"torch.isnan",
"torch.nn.functional.l1_loss",
"torch.nn.functional.cross_entropy",
"torch.from_numpy",
"torch.nn.functional.mse_loss",
"torch.log",
"torch.sort",
"torch.where",
"torch.nn.functional.smooth_l1_loss",
"torch.nonzero",
"torch.clamp",
"numpy.array",
"torch.pow",
"torch.autograd.Variable"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
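The Lovász-Softmax helpers embedded in the record above appear to carry two portability issues: `flatten_probas` references an undefined `C` in its 3-D (sigmoid) branch, and `mean` calls `ifilterfalse`, which is the Python 2 name for `itertools.filterfalse`. Below is a minimal, self-contained sketch of those two helpers with both points resolved; treat the resolutions as assumptions about the intended behaviour, not as the record's exact code.

# Hedged sketch of the mean()/flatten_probas() helpers from the record above,
# with the Python-3 filterfalse import and an explicit C for the 3-D branch.
from itertools import filterfalse
import torch

def mean(values, ignore_nan=False, empty=0):
    """nan-aware mean that also works on generators (mirrors the helper above)."""
    values = iter(values)
    if ignore_nan:
        values = filterfalse(lambda x: x != x, values)   # x != x is True only for NaN
    try:
        acc = next(values)
    except StopIteration:
        if empty == 'raise':
            raise ValueError('Empty mean')
        return empty
    n = 1
    for n, v in enumerate(values, 2):
        acc += v
    return acc / n

def flatten_probas(probas, labels, ignore=None):
    """Flatten dense (B, C, H, W) or sigmoid (B, H, W) probabilities to (P, C), labels to (P,).
    Only the dense 2-D-image cases are covered in this sketch."""
    if probas.dim() == 3:                      # sigmoid output: (B, H, W) -> (B, 1, H, W)
        probas = probas.unsqueeze(1)
    C = probas.size(1)
    probas = probas.permute(0, 2, 3, 1).contiguous().view(-1, C)
    labels = labels.view(-1)
    if ignore is None:
        return probas, labels
    valid = labels != ignore
    vprobas = probas[torch.nonzero(valid, as_tuple=False).squeeze()]
    return vprobas, labels[valid]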
Bharat-Runwal/path2vec | [
"f99188b882752ff9aa2c87334979b75483940ae0"
] | [
"wsd/graph_wsd_test_v1.py"
] | [
"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon May 7 17:13:25 2018\n\n@author: dorgham\n\"\"\"\n\nimport networkx as nx\nfrom nltk.corpus import wordnet as wn\nfrom nltk.corpus import wordnet_ic\nfrom nltk.stem import WordNetLemmatizer\nimport matplotlib.pyplot as plt\nimport xml.etree.ElementTree as ET\nfrom collections import OrderedDict\nimport codecs\nimport string\nfrom nltk.corpus import stopwords\nfrom sklearn.metrics import f1_score, precision_score, recall_score\n\n#algorithm parameters\nUSE_POS_INFO = True\nUSE_LESK = False\nUSE_PAGERANK = True\nAVG_METHOD = 'micro'\nMAX_DEPTH = 3\nLESK_NORM_FACTOR = 20 #this value is emperical\nsenseval_fpath = 'WSD_Unified_Evaluation_Datasets/senseval2/senseval2.data.xml'\ngold_tags_fpath = 'WSD_Unified_Evaluation_Datasets/senseval2/senseval2.gold.key.txt'\n\ninfo_content = wordnet_ic.ic('ic-semcor.dat')\nwnlemmatizer = WordNetLemmatizer()\npywsd_stopwords = [u\"'s\", u\"``\", u\"`\"]\nSTOPWORDS = set(stopwords.words('english') + list(string.punctuation) + pywsd_stopwords)\n\n\ndef lch_similarity(synset1, synset2):\n return wn.lch_similarity(synset1, synset2)\n \ndef jcn_similarity(synset1, synset2):\n return wn.jcn_similarity(synset1, synset2, info_content)\n \ndef lesk_similarity(synset1, synset2):\n str1 = str(synset1.definition()).translate(str.maketrans('','',string.punctuation))\n for example in synset1.examples():\n str1 += ' ' + str(example).translate(str.maketrans('','',string.punctuation))\n lemmatized_str1=''\n for word in set(str1.split()):\n lemmatized_str1 += wnlemmatizer.lemmatize(word) + ' '\n for lemma in synset1.lemma_names():\n lemmatized_str1 += ' ' + lemma\n hyper_hypo = set(synset1.hyponyms() + synset1.hypernyms() + synset1.instance_hyponyms() + synset1.instance_hypernyms())\n for hh in hyper_hypo:\n for lemma in hh.lemma_names():\n lemmatized_str1 += ' ' + lemma\n current_set = set(lemmatized_str1.split())\n current_set = set(cs.lower() for cs in current_set)\n current_set = current_set.difference(STOPWORDS)\n #print (current_set)\n str2 = str(synset2.definition()).translate(str.maketrans('','',string.punctuation))\n for example in synset2.examples():\n str2 += ' ' + str(example).translate(str.maketrans('','',string.punctuation))\n lemmatized_str2=''\n for word in set(str2.split()):\n lemmatized_str2 += wnlemmatizer.lemmatize(word) + ' '\n for lemma in synset2.lemma_names():\n lemmatized_str2 += ' ' + lemma\n hyper_hypo = set(synset2.hyponyms() + synset2.hypernyms() + synset2.instance_hyponyms() + synset2.instance_hypernyms())\n for hh in hyper_hypo:\n for lemma in hh.lemma_names():\n lemmatized_str2 += ' ' + lemma\n neighbor_set = set(lemmatized_str2.split())\n neighbor_set = set(ns.lower() for ns in neighbor_set)\n neighbor_set = neighbor_set.difference(STOPWORDS)\n #print (neighbor_set)\n return len(current_set.intersection(neighbor_set))\n\ndef convert_to_wordnet_pos(senseval_pos):\n if senseval_pos == 'VERB':\n return wn.VERB\n elif senseval_pos == 'NOUN':\n return wn.NOUN\n elif senseval_pos == 'ADV':\n return wn.ADV\n elif senseval_pos == 'ADJ':\n return wn.ADJ\n else:\n return None\n\ndef sentence_wsd(sentences, poses):\n counter=0\n output_dict = dict()\n for sentence in sentences:\n G=nx.Graph()\n sent_len = len(sentence.keys())\n G_pos = dict() #used for aligning the nodes when drawing the graph\n pos_idx=1\n token_nodeNames_map = dict()\n pos_dict = poses[counter]\n \n #construct the nodes of the graph\n for i, _id in enumerate(sentence.keys()):\n if USE_POS_INFO: #restrict the retrieved snysets from wordnet 
to the target pos\n wn_pos = convert_to_wordnet_pos(pos_dict[_id])\n else:\n wn_pos = None\n \n synsets_list = list(wn.synsets(sentence[_id], pos=wn_pos))\n if len(synsets_list) > 0:\n node_names = []\n for synset in synsets_list:\n node_name = str(i) + ' ' + synset.name()\n #adding the index to the node name is important in the case of \n #having a word that is repeated in the sentence but with \n #different sense each time, so we want unique node for each one.\n G.add_node(node_name)\n node_names.append(node_name)\n token_nodeNames_map[_id] = node_names\n G_pos.update( (label, (pos_idx, j)) for j, label in enumerate(node_names) ) \n pos_idx+=1\n \n #compute word similarity\n ids_list = list(sentence.keys())\n lch_sim_dict = dict()\n jcn_sim_dict = dict()\n lesk_sim_dict = dict()\n #print sentence.values()\n for idx, key in enumerate(ids_list):\n if USE_POS_INFO:\n wn_pos = convert_to_wordnet_pos(pos_dict[ids_list[idx]])\n else:\n wn_pos = None\n synsets_list = list(wn.synsets(sentence[ids_list[idx]], pos=wn_pos))\n if len(synsets_list) > 0:\n i = 1\n while i<=MAX_DEPTH and idx+i<sent_len:\n if USE_POS_INFO:\n wn_pos = convert_to_wordnet_pos(pos_dict[ids_list[idx+i]])\n else:\n wn_pos = None\n \n next_synsets_list = list(wn.synsets(sentence[ids_list[idx+i]], pos=wn_pos))\n if len(next_synsets_list) > 0:\n for current_synset in synsets_list:\n for neighbor_synset in next_synsets_list:\n nodes = str(idx) + ' ' + current_synset.name() + ';'\n nodes += str(idx+i) + ' ' + neighbor_synset.name()\n if current_synset.pos() == 'v' and neighbor_synset.pos() == 'v':\n sim_weight = lch_similarity(current_synset, neighbor_synset)\n lch_sim_dict[nodes] = sim_weight\n elif current_synset.pos() == 'n' and neighbor_synset.pos() == 'n':\n sim_weight = jcn_similarity(current_synset, neighbor_synset)\n jcn_sim_dict[nodes] = sim_weight\n elif USE_LESK:\n sim_weight = lesk_similarity(current_synset, neighbor_synset)\n lesk_sim_dict[nodes] = sim_weight\n i+=1\n \n #normalize the similarity weights and build edges\n if lch_sim_dict:\n max_lch_score = max(lch_sim_dict.values())\n for key in lch_sim_dict:\n nodeIds = key.split(';')\n G.add_edge(nodeIds[0],nodeIds[1], weight=(lch_sim_dict[key]/max_lch_score))\n if jcn_sim_dict:\n max_jcn_score = max(jcn_sim_dict.values())\n for key in jcn_sim_dict:\n nodeIds = key.split(';')\n G.add_edge(nodeIds[0],nodeIds[1], weight=(jcn_sim_dict[key]/max_jcn_score))\n if USE_LESK:\n if lesk_sim_dict:\n max_lesk_score = max(lesk_sim_dict.values())\n if max_lesk_score > 0:\n for key in lesk_sim_dict:\n nodeIds = key.split(';')\n G.add_edge(nodeIds[0],nodeIds[1], weight=(lesk_sim_dict[key]/LESK_NORM_FACTOR))\n \n \n #compute graph centrality\n node_scores = dict()\n if USE_PAGERANK:\n node_scores = nx.pagerank(G)\n else:\n node_scores = G.degree(G.nodes(), \"weight\")\n \n for token_id in ids_list:\n nodeNames = token_nodeNames_map.get(token_id)\n scores = []\n max_label = \"\"\n wordnet_key = \"\"\n if nodeNames:\n for nodeName in nodeNames:\n scores.append(node_scores[nodeName])\n if scores:\n max_index = max(range(len(scores)), key=scores.__getitem__)\n max_label = nodeNames[max_index]\n if max_label:\n i = max_label.find(' ')\n lemmas = wn.synset(max_label[i+1:]).lemmas()\n for lemma in lemmas:\n wordnet_key += lemma.key()+';'\n wordnet_key = wordnet_key[0:-1]\n output_dict[token_id] = wordnet_key\n \n #add the weight as attribute to the nodes of the graph\n #for node in node_scores.keys():\n # G.node[node]['weight']=node_scores[node]\n \n counter += 1\n if counter==1: #draw the 
graph of the first sentence\n plt.close()\n nx.draw(G, pos=G_pos, with_labels = True)\n plt.show()\n G.clear()\n \n return output_dict\n\n\ndef load_senseval_data(file_path):\n tokens_dict = OrderedDict()\n pos_dict = OrderedDict()\n sentences = []\n pos_list = []\n tree = ET.parse(file_path)\n root = tree.getroot()\n for text in root:\n for sentence in text:\n for word in sentence:\n if word.tag == 'instance' and word.attrib['id']: #only include words with the <instance> tag\n tokens_dict[word.attrib['id']] = word.text\n pos_dict[word.attrib['id']] = word.attrib['pos']\n if tokens_dict:\n sentences.append(tokens_dict)\n pos_list.append(pos_dict)\n tokens_dict = dict()\n pos_dict = dict()\n \n return sentences, pos_list\n\n\n\nif __name__ == \"__main__\":\n sents, poses = load_senseval_data(senseval_fpath)\n output_dict = sentence_wsd(sents, poses)\n #load the gold results\n with codecs.open(gold_tags_fpath, 'r', 'utf-8') as f:\n lines = f.readlines()\n wsd_output = []\n gold_output = []\n for line in lines:\n id_key_pair = line.split()\n predicted_keys = output_dict[id_key_pair[0]].split(';')\n gold_keys_set = set(id_key_pair[1:])\n predected_keys_set = set(predicted_keys)\n if len(predected_keys_set.intersection(gold_keys_set)) > 0:\n wsd_output.append(predicted_keys[0])\n gold_output.append(predicted_keys[0])\n else:\n wsd_output.append(predicted_keys[0])\n gold_output.append(id_key_pair[1])\n \n assert len(wsd_output) == len(gold_output)\n\n f1 = f1_score(gold_output, wsd_output, average=AVG_METHOD)\n precision = precision_score(gold_output, wsd_output, average=AVG_METHOD)\n recall = recall_score(gold_output, wsd_output, average=AVG_METHOD)\n \n print ('F-score: %1.4f' % f1, ' Precision: %1.4f' % precision, ' Recall: %1.4f' % recall)\n \n \n \n"
] | [
[
"sklearn.metrics.precision_score",
"matplotlib.pyplot.close",
"sklearn.metrics.f1_score",
"matplotlib.pyplot.show",
"sklearn.metrics.recall_score"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
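The path2vec record above disambiguates word senses by building a graph whose nodes are candidate WordNet synsets per token and whose edge weights come from lch/jcn/Lesk similarities, then ranking nodes with PageRank (or weighted degree) and keeping the top-scoring sense per token. The toy below reproduces only that centrality-and-argmax step with made-up node names and weights, so it runs without NLTK/WordNet data; it illustrates the selection logic, not the script itself.

# Hedged sketch of the graph-centrality step: candidate senses are nodes,
# pairwise similarities are weighted edges, highest PageRank score wins.
import networkx as nx

candidates = {
    "t1": ["t1 bank.n.01", "t1 bank.n.09"],        # e.g. institution vs. river bank
    "t2": ["t2 deposit.v.02", "t2 deposit.n.04"],
}
edges = [                                          # invented similarity weights
    ("t1 bank.n.01", "t2 deposit.v.02", 0.9),
    ("t1 bank.n.09", "t2 deposit.v.02", 0.2),
    ("t1 bank.n.01", "t2 deposit.n.04", 0.6),
]

G = nx.Graph()
for token_nodes in candidates.values():
    G.add_nodes_from(token_nodes)
G.add_weighted_edges_from(edges)

scores = nx.pagerank(G)                            # same call as USE_PAGERANK=True above
best = {tok: max(nodes, key=scores.get) for tok, nodes in candidates.items()}
print(best)   # with these weights the institution sense of 'bank' and the verb 'deposit' should win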
oshapoval/WarpX | [
"84d687da21ee93db67fdc43efec8a9cc80d0e6f9",
"84d687da21ee93db67fdc43efec8a9cc80d0e6f9"
] | [
"Examples/Tests/PythonWrappers/PICMI_inputs_2d.py",
"Examples/Modules/laser_injection/analysis_2d.py"
] | [
"import numpy as np\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.axes_grid1.axes_divider import make_axes_locatable\nfrom pywarpx import picmi\n\n# Number of time steps\nmax_steps = 100\n\n# Grid\nnx = 128\nnz = 128\n\n# Domain\nxmin = 0.e-6\nzmin = 0.e-6\nxmax = 50.e-6\nzmax = 50.e-6\n\n# Cell size\ndx = (xmax - xmin) / nx\ndz = (zmax - zmin) / nz\n\n# Domain decomposition\nmax_grid_size_x = 64\nmax_grid_size_z = 64\n\n# PML\nnxpml = 10\nnzpml = 10\nfield_boundary = ['open', 'open']\n\n# Spectral order\nnox = 8\nnoz = 8\n\n# Guard cells\nnxg = 8\nnzg = 8\n\n# Initialize grid\ngrid = picmi.Cartesian2DGrid(number_of_cells = [nx,nz],\n lower_bound = [xmin,zmin],\n upper_bound = [xmax,zmax],\n lower_boundary_conditions = field_boundary,\n upper_boundary_conditions = field_boundary,\n guard_cells = [nxg,nzg],\n moving_window_velocity = [0.,0.,0],\n warpx_max_grid_size_x = max_grid_size_x,\n warpx_max_grid_size_y = max_grid_size_z)\n\n# Initialize field solver\nsolver = picmi.ElectromagneticSolver(grid=grid, cfl=0.95, method='PSATD',\n stencil_order = [nox,noz],\n divE_cleaning = 1,\n divB_cleaning = 1,\n pml_divE_cleaning = 1,\n pml_divB_cleaning = 1,\n warpx_psatd_update_with_rho = True)\n\n# Initialize diagnostics\ndiag_field_list = [\"E\", \"B\"]\nfield_diag = picmi.FieldDiagnostic(name = 'diag1',\n grid = grid,\n period = 10,\n write_dir = '.',\n warpx_file_prefix = 'Python_wrappers_plt',\n data_list = diag_field_list)\n\n# Initialize simulation\nsim = picmi.Simulation(solver = solver,\n max_steps = max_steps,\n verbose = 1,\n particle_shape = 'cubic',\n warpx_current_deposition_algo = 'direct',\n warpx_particle_pusher_algo = 'boris',\n warpx_field_gathering_algo = 'energy-conserving',\n warpx_use_filter = 1)\n\n# Add diagnostics to simulation\nsim.add_diagnostic(field_diag)\n\n# Write input file to run with compiled version\nsim.write_input_file(file_name = 'inputs_2d')\n\n# Whether to include guard cells in data returned by Python wrappers\ninclude_ghosts = 1\n\n# Compute min and max of fields data\ndef compute_minmax(data):\n vmax = np.abs(data).max()\n vmin = -vmax\n return vmin, vmax\n\n# Plot fields data either in valid domain or in PML\ndef plot_data(data, pml, title, name):\n fig, ax = plt.subplots(nrows = 1, ncols = 1, gridspec_kw = dict(wspace = 0.5), figsize = [6,5])\n cax = make_axes_locatable(ax).append_axes('right', size='5%', pad='5%')\n lw = 0.8\n ls = '--'\n if pml:\n # Draw PMLs and ghost regions\n ax.axvline(x = 0 , linewidth = lw, linestyle = ls)\n ax.axvline(x = 0+nxg , linewidth = lw, linestyle = ls)\n ax.axvline(x = -nxpml , linewidth = lw, linestyle = ls)\n ax.axvline(x = nx , linewidth = lw, linestyle = ls)\n ax.axvline(x = nx-nxg , linewidth = lw, linestyle = ls)\n ax.axvline(x = nx+nxpml, linewidth = lw, linestyle = ls)\n ax.axhline(y = 0 , linewidth = lw, linestyle = ls)\n ax.axhline(y = 0+nzg , linewidth = lw, linestyle = ls)\n ax.axhline(y = -nzpml , linewidth = lw, linestyle = ls)\n ax.axhline(y = nz , linewidth = lw, linestyle = ls)\n ax.axhline(y = nz-nzg , linewidth = lw, linestyle = ls)\n ax.axhline(y = nz+nzpml, linewidth = lw, linestyle = ls)\n # Annotations\n ax.annotate('PML', xy = (-nxpml//2,nz//2), rotation = 'vertical', ha = 'center', va = 'center')\n ax.annotate('PML', xy = (nx+nxpml//2,nz//2), rotation = 'vertical', ha = 'center', va = 'center')\n ax.annotate('PML', xy = (nx//2,-nzpml//2), rotation = 'horizontal', ha = 'center', va = 'center')\n ax.annotate('PML', xy = (nx//2,nz+nzpml//2), rotation = 'horizontal', ha = 'center', va = 
'center')\n ax.annotate('PML ghost', xy = (nxg//2,nz//2), rotation = 'vertical', ha = 'center', va = 'center')\n ax.annotate('PML ghost', xy = (-nxpml-nxg//2,nz//2), rotation = 'vertical', ha = 'center', va = 'center')\n ax.annotate('PML ghost', xy = (nx-nxg//2,nz//2), rotation = 'vertical', ha = 'center', va = 'center')\n ax.annotate('PML ghost', xy = (nx+nxpml+nxg//2,nz//2), rotation = 'vertical', ha = 'center', va = 'center')\n ax.annotate('PML ghost', xy = (nx//2,nzg//2), rotation = 'horizontal', ha = 'center', va = 'center')\n ax.annotate('PML ghost', xy = (nx//2,-nzpml-nzg//2), rotation = 'horizontal', ha = 'center', va = 'center')\n ax.annotate('PML ghost', xy = (nx//2,nz-nzg//2), rotation = 'horizontal', ha = 'center', va = 'center')\n ax.annotate('PML ghost', xy = (nx//2,nz+nzpml+nzg//2), rotation = 'horizontal', ha = 'center', va = 'center')\n # Set extent and sliced data\n extent = np.array([-nxg-nxpml, nx+nxpml+nxg, -nzg-nzpml, nz+nzpml+nzg])\n else:\n # Draw ghost regions\n ax.axvline(x = 0 , linewidth = lw, linestyle = ls)\n ax.axvline(x = nx, linewidth = lw, linestyle = ls)\n ax.axhline(y = 0 , linewidth = lw, linestyle = ls)\n ax.axhline(y = nz, linewidth = lw, linestyle = ls)\n # Annotations\n ax.annotate('ghost', xy = (-nxg//2,nz//2), rotation = 'vertical', ha = 'center', va = 'center')\n ax.annotate('ghost', xy = (nx+nxg//2,nz//2), rotation = 'vertical', ha = 'center', va = 'center')\n ax.annotate('ghost', xy = (nx//2,-nzg//2), rotation = 'horizontal', ha = 'center', va = 'center')\n ax.annotate('ghost', xy = (nx//2,nz+nzg//2), rotation = 'horizontal', ha = 'center', va = 'center')\n # Set extent and sliced data\n extent = np.array([-nxg, nx+nxg, -nzg, nz+nzg])\n X = data[:,:].transpose()\n # Min and max for colorbar\n vmin, vmax = compute_minmax(X)\n # Display data as image\n im = ax.imshow(X = X, origin = 'lower', extent = extent, vmin = vmin, vmax = vmax, cmap = 'seismic')\n # Add colorbar to plot\n fig.colorbar(im, cax = cax)\n # Set label for x- and y-axis, set title\n ax.set_xlabel('x')\n ax.set_ylabel('z')\n ax.set_title(title)\n # Set plot title\n suptitle = 'PML in (x,z), 4 grids 64 x 64'\n plt.suptitle(suptitle)\n # Save figure\n figname = 'figure_' + name + '.png'\n fig.savefig(figname, dpi = 100)\n\n# Initialize fields data (unit pulse) and apply smoothing\ndef init_data(data):\n impulse_1d = np.array([1./4., 1./2., 1./4.])\n impulse = np.outer(impulse_1d, impulse_1d)\n data[nx//2-1:nx//2+2,nz//2-1:nz//2+2] = impulse\n\n# Initialize inputs and WarpX instance\nsim.initialize_inputs()\nsim.initialize_warpx()\n\n# Get fields data using Python wrappers\nimport pywarpx.fields as pwxf\nEx = pwxf.ExFPWrapper(include_ghosts = include_ghosts)\nEy = pwxf.EyFPWrapper(include_ghosts = include_ghosts)\nEz = pwxf.EzFPWrapper(include_ghosts = include_ghosts)\nBx = pwxf.BxFPWrapper(include_ghosts = include_ghosts)\nBy = pwxf.ByFPWrapper(include_ghosts = include_ghosts)\nBz = pwxf.BzFPWrapper(include_ghosts = include_ghosts)\nF = pwxf.FFPWrapper(include_ghosts = include_ghosts)\nG = pwxf.GFPWrapper(include_ghosts = include_ghosts)\nExpml = pwxf.ExFPPMLWrapper(include_ghosts = include_ghosts)\nEypml = pwxf.EyFPPMLWrapper(include_ghosts = include_ghosts)\nEzpml = pwxf.EzFPPMLWrapper(include_ghosts = include_ghosts)\nBxpml = pwxf.BxFPPMLWrapper(include_ghosts = include_ghosts)\nBypml = pwxf.ByFPPMLWrapper(include_ghosts = include_ghosts)\nBzpml = pwxf.BzFPPMLWrapper(include_ghosts = include_ghosts)\nFpml = pwxf.FFPPMLWrapper(include_ghosts = include_ghosts)\nGpml = 
pwxf.GFPPMLWrapper(include_ghosts = include_ghosts)\n\n# Initialize fields data in valid domain\ninit_data(Ex)\ninit_data(Ey)\ninit_data(Ez)\ninit_data(Bx)\ninit_data(By)\ninit_data(Bz)\ninit_data(F)\ninit_data(G)\n\n# Advance simulation until last time step\nsim.step(max_steps)\n\n# Plot E\nplot_data(Ex, pml = False, title = 'Ex', name = 'Ex')\nplot_data(Ey, pml = False, title = 'Ey', name = 'Ey')\nplot_data(Ez, pml = False, title = 'Ez', name = 'Ez')\n\n# Plot B\nplot_data(Bx, pml = False, title = 'Bx', name = 'Bx')\nplot_data(By, pml = False, title = 'By', name = 'By')\nplot_data(Bz, pml = False, title = 'Bz', name = 'Bz')\n\n# F and G\nplot_data(F, pml = False, title = 'F', name = 'F')\nplot_data(G, pml = False, title = 'G', name = 'G')\n\n# Plot E in PML\nplot_data(Expml[:,:,0], pml = True, title = 'Exy in PML', name = 'Exy')\nplot_data(Expml[:,:,1], pml = True, title = 'Exz in PML', name = 'Exz')\nplot_data(Expml[:,:,2], pml = True, title = 'Exx in PML', name = 'Exx')\nplot_data(Eypml[:,:,0], pml = True, title = 'Eyz in PML', name = 'Eyz')\nplot_data(Eypml[:,:,1], pml = True, title = 'Eyx in PML', name = 'Eyx')\nplot_data(Eypml[:,:,2], pml = True, title = 'Eyy in PML', name = 'Eyy') # zero\nplot_data(Ezpml[:,:,0], pml = True, title = 'Ezx in PML', name = 'Ezx')\nplot_data(Ezpml[:,:,1], pml = True, title = 'Ezy in PML', name = 'Ezy') # zero\nplot_data(Ezpml[:,:,2], pml = True, title = 'Ezz in PML', name = 'Ezz')\n\n# Plot B in PML\nplot_data(Bxpml[:,:,0], pml = True, title = 'Bxy in PML', name = 'Bxy')\nplot_data(Bxpml[:,:,1], pml = True, title = 'Bxz in PML', name = 'Bxz')\nplot_data(Bxpml[:,:,2], pml = True, title = 'Bxx in PML', name = 'Bxx')\nplot_data(Bypml[:,:,0], pml = True, title = 'Byz in PML', name = 'Byz')\nplot_data(Bypml[:,:,1], pml = True, title = 'Byx in PML', name = 'Byx')\nplot_data(Bypml[:,:,2], pml = True, title = 'Byy in PML', name = 'Byy') # zero\nplot_data(Bzpml[:,:,0], pml = True, title = 'Bzx in PML', name = 'Bzx')\nplot_data(Bzpml[:,:,1], pml = True, title = 'Bzy in PML', name = 'Bzy') # zero\nplot_data(Bzpml[:,:,2], pml = True, title = 'Bzz in PML', name = 'Bzz')\n\n# Plot F and G in PML\nplot_data(Fpml[:,:,0], pml = True, title = 'Fx in PML', name = 'Fx')\nplot_data(Fpml[:,:,1], pml = True, title = 'Fy in PML', name = 'Fy')\nplot_data(Fpml[:,:,2], pml = True, title = 'Fz in PML', name = 'Fz')\nplot_data(Gpml[:,:,0], pml = True, title = 'Gx in PML', name = 'Gx')\nplot_data(Gpml[:,:,1], pml = True, title = 'Gy in PML', name = 'Gy')\nplot_data(Gpml[:,:,2], pml = True, title = 'Gz in PML', name = 'Gz')\n\n# Check values with benchmarks (precomputed from the same Python arrays)\ndef check_values(benchmark, data, rtol, atol):\n passed = np.allclose(benchmark, np.sum(np.abs(data[:,:])), rtol = rtol, atol = atol)\n assert(passed)\n\nrtol = 1e-09\natol = 1e-12\n\n# E\ncheck_values(1013263608.6369569, Ex[:,:], rtol, atol)\ncheck_values(717278253.4505507 , Ey[:,:], rtol, atol)\ncheck_values(717866566.5718911 , Ez[:,:], rtol, atol)\n# B\ncheck_values(3.0214509313437636, Bx[:,:], rtol, atol)\ncheck_values(3.0242765102729985, By[:,:], rtol, atol)\ncheck_values(3.0214509326970465, Bz[:,:], rtol, atol)\n# F and G\ncheck_values(3.0188584528062377, F[:,:], rtol, atol)\ncheck_values(1013672631.8764204, G[:,:], rtol, atol)\n# E in PML\ncheck_values(364287936.1526477 , Expml[:,:,0], rtol, atol)\ncheck_values(183582351.3212558 , Expml[:,:,1], rtol, atol)\ncheck_values(190065766.41491824, Expml[:,:,2], rtol, atol)\ncheck_values(440581905.9336025 , Eypml[:,:,0], rtol, 
atol)\ncheck_values(178117293.6629357 , Eypml[:,:,1], rtol, atol)\ncheck_values(0.0 , Eypml[:,:,2], rtol, atol)\ncheck_values(430277101.26568377, Ezpml[:,:,0], rtol, atol)\ncheck_values(0.0 , Ezpml[:,:,1], rtol, atol)\ncheck_values(190919663.2167449 , Ezpml[:,:,2], rtol, atol)\n# B in PML\ncheck_values(1.0565189315366146 , Bxpml[:,:,0], rtol, atol)\ncheck_values(0.4618191395098556 , Bxpml[:,:,1], rtol, atol)\ncheck_values(0.6849858273929585 , Bxpml[:,:,2], rtol, atol)\ncheck_values(1.7228584190213505 , Bypml[:,:,0], rtol, atol)\ncheck_values(0.47697331996765685, Bypml[:,:,1], rtol, atol)\ncheck_values(0.0 , Bypml[:,:,2], rtol, atol)\ncheck_values(1.5183380774611628 , Bzpml[:,:,0], rtol, atol)\ncheck_values(0.0 , Bzpml[:,:,1], rtol, atol)\ncheck_values(0.6849858291863835 , Bzpml[:,:,2], rtol, atol)\n# F and G in PML\ncheck_values(1.7808748509425263, Fpml[:,:,0], rtol, atol)\ncheck_values(0.0 , Fpml[:,:,1], rtol, atol)\ncheck_values(0.4307845604625681, Fpml[:,:,2], rtol, atol)\ncheck_values(536552745.42701197, Gpml[:,:,0], rtol, atol)\ncheck_values(0.0 , Gpml[:,:,1], rtol, atol)\ncheck_values(196016270.97767758, Gpml[:,:,2], rtol, atol)\n",
"#! /usr/bin/env python\n\n# Copyright 2019 Andrew Myers, Jean-Luc Vay, Maxence Thevenet\n# Remi Lehe, Weiqun Zhang, Luca Fedeli\n#\n# This file is part of WarpX.\n#\n# License: BSD-3-Clause-LBNL\n\n# This file is part of the WarpX automated test suite. Its purpose is to test the\n# injection of a Gaussian laser pulse from an antenna in a 2D simulation.\n# In order to avoid privileged directions, the laser is injected at\n# approximately 27 degrees with respect to the x axis. Moreover the polarization axis is neither\n# parallel nor perpendicular to the xz plane. Finally moving window along the\n# x axis is enabled.\n# The test calculates the envelope of each component of the laser pulse at the end of\n# the simulation and it compares it with theory. It also checks that the\n# central frequency of the Fourier transform is the expected one.\n\nimport yt\nimport sys\nimport matplotlib\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom scipy.signal import hilbert\nfrom mpl_toolkits.axes_grid1 import make_axes_locatable\nsys.path.insert(1, '../../../../warpx/Regression/Checksum/')\nimport checksumAPI\n\n# Maximum acceptable error for this test\nrelative_error_threshold = 0.05\n\n# A small number\nsmall_num = 1.0e-8\n\n# Physical parameters\num = 1.e-6\nfs = 1.e-15\nc = 299792458\n\n# Parameters of the gaussian beam\nwavelength = 1.*um\nw0 = 5.*um\ntt = 10.*fs\nx_c = 10.*um\nt_c = 24.*fs\n# foc_dist = 13.109*um (not actually used)\nE_max = 4e12\n\n# laser direction\ndir_vector = np.array([2.,0,1.0])\ndir_vector /= np.linalg.norm(dir_vector)\n\nrot_angle = np.arctan(dir_vector[2]/dir_vector[0])\n\n# polarization vector\npol_vector = np.array([1.0,1.0,-2.0])\npol_vector /= np.linalg.norm(pol_vector)\n\n# Calculates the envelope of a Gaussian beam\ndef gauss_env(T,XX,ZZ):\n '''Function to compute the theory for the envelope\n '''\n\n Z = np.cos(rot_angle)*(XX-x_c) + np.sin(rot_angle)*ZZ\n X = -np.sin(rot_angle)*(XX-x_c) + np.cos(rot_angle)*ZZ\n\n inv_tau2 = 1./tt/tt\n inv_w_2 = 1.0/(w0*w0)\n exp_arg = - (X*X)*inv_w_2 - inv_tau2 / c/c * (Z-T*c)*(Z-T*c)\n return E_max * np.real(np.exp(exp_arg))\n\n# Checks envelope and central frequency for a given laser component\ndef check_component(data, component, t_env_theory, coeff, X,Z,dx,dz):\n print(\"*** Checking \" + component + \" ***\")\n field = data['boxlib', component].v.squeeze()\n env = abs(hilbert(field))\n\n env_theory = t_env_theory*np.abs(coeff)\n\n # Plot results\n fig = plt.figure(figsize=(12,6))\n\n ax1 = fig.add_subplot(221, aspect='equal')\n ax1.set_title('PIC field')\n p1 = ax1.pcolormesh(X,Z,field)\n cax1 = make_axes_locatable(ax1).append_axes('right', size='5%', pad=0.05)\n fig.colorbar(p1, cax=cax1, orientation='vertical')\n\n ax2 = fig.add_subplot(222, aspect='equal')\n ax2.set_title('PIC envelope')\n p2 = ax2.pcolormesh(X,Z,env)\n cax2 = make_axes_locatable(ax2).append_axes('right', size='5%', pad=0.05)\n fig.colorbar(p2, cax=cax2, orientation='vertical')\n\n ax3 = fig.add_subplot(223, aspect='equal')\n ax3.set_title('Theory envelope')\n p3 = ax3.pcolormesh(X,Z,env_theory)\n cax3 = make_axes_locatable(ax3).append_axes('right', size='5%', pad=0.05)\n fig.colorbar(p3, cax=cax3, orientation='vertical')\n\n ax4 = fig.add_subplot(224, aspect='equal')\n ax4.set_title('Difference')\n p4 = ax4.pcolormesh(X,Z,env-env_theory)\n cax4 = make_axes_locatable(ax4).append_axes('right', size='5%', pad=0.05)\n fig.colorbar(p4, cax=cax4, orientation='vertical')\n\n plt.tight_layout()\n plt.savefig(\"plt_\" + component + 
\".png\", bbox_inches='tight')\n\n if(np.abs(coeff) < small_num):\n is_field_zero = np.sum(np.abs(env)) < small_num\n if is_field_zero :\n print(\"[OK] Field component expected to be 0 is ~ 0\")\n else :\n print(\"[FAIL] Field component expected to be 0 is NOT ~ 0\")\n assert(is_field_zero)\n print(\"******\\n\")\n return\n\n relative_error_env = np.sum(np.abs(env-env_theory)) / np.sum(np.abs(env_theory))\n is_env_ok = relative_error_env < relative_error_threshold\n if is_env_ok :\n print(\"[OK] Relative error envelope: {:6.3f} %\".format(relative_error_env*100))\n else :\n print(\"[FAIL] Relative error envelope: {:6.3f} %\".format(relative_error_env*100))\n assert(is_env_ok)\n\n fft_field = np.fft.fft2(field)\n\n freq_rows = np.fft.fftfreq(fft_field.shape[0],dx/c)\n freq_cols = np.fft.fftfreq(fft_field.shape[1],dz/c)\n\n pos_max = np.unravel_index(np.abs(fft_field).argmax(), fft_field.shape)\n\n freq = np.sqrt((freq_rows[pos_max[0]])**2 + (freq_cols[pos_max[1]]**2))\n exp_freq = c/wavelength\n\n relative_error_freq = np.abs(freq-exp_freq)/exp_freq\n is_freq_ok = relative_error_freq < relative_error_threshold\n if is_freq_ok :\n print(\"[OK] Relative error frequency: {:6.3f} %\".format(relative_error_freq*100))\n else :\n print(\"[FAIL] Relative error frequency: {:6.3f} %\".format(relative_error_freq*100))\n assert(is_freq_ok)\n\n print(\"******\\n\")\n\ndef check_laser(filename):\n ds = yt.load(filename)\n\n # yt 4.0+ has rounding issues with our domain data:\n # RuntimeError: yt attempted to read outside the boundaries\n # of a non-periodic domain along dimension 0.\n if 'force_periodicity' in dir(ds): ds.force_periodicity()\n\n x = np.linspace(\n ds.domain_left_edge[0].v,\n ds.domain_right_edge[0].v,\n ds.domain_dimensions[0])\n\n dx = (ds.domain_right_edge[0].v-ds.domain_left_edge[0].v)/(ds.domain_dimensions[0]-1)\n\n z = np.linspace(\n ds.domain_left_edge[1].v,\n ds.domain_right_edge[1].v,\n ds.domain_dimensions[1])\n\n dz = (ds.domain_right_edge[1].v-ds.domain_left_edge[1].v)/(ds.domain_dimensions[1]-1)\n\n X, Z = np.meshgrid(x, z, indexing='ij')\n\n # Compute the theory for envelope\n env_theory = gauss_env(+t_c-ds.current_time.to_value(),X,Z)+gauss_env(-t_c+ds.current_time.to_value(),X,Z)\n\n # Read laser field in PIC simulation, and compute envelope\n all_data_level_0 = ds.covering_grid(level=0, left_edge=ds.domain_left_edge, dims=ds.domain_dimensions)\n\n b_vector = np.cross(dir_vector, pol_vector)\n\n components = [\"Ex\", \"Ey\", \"Ez\", \"Bx\", \"By\", \"Bz\"]\n coeffs = [\n pol_vector[0],\n pol_vector[1],\n pol_vector[2],\n b_vector[0],\n b_vector[1],\n b_vector[2]]\n\n field_facts = [1, 1, 1, 1/c, 1/c, 1/c]\n\n for comp, coeff, field_fact in zip(components, coeffs, field_facts):\n check_component(all_data_level_0, comp, field_fact*env_theory, coeff, X, Z, dx, dz)\n\ndef main():\n filename_end = sys.argv[1]\n\n check_laser(filename_end)\n\n test_name = filename_end[:-9] # Could also be os.path.split(os.getcwd())[1]\n checksumAPI.evaluate_checksum(test_name, filename_end)\n\nif __name__ == \"__main__\":\n main()\n"
] | [
[
"numpy.outer",
"numpy.array",
"matplotlib.pyplot.suptitle",
"numpy.abs"
],
[
"numpy.fft.fft2",
"numpy.cross",
"matplotlib.pyplot.tight_layout",
"numpy.sqrt",
"numpy.arctan",
"numpy.linspace",
"numpy.meshgrid",
"matplotlib.use",
"numpy.abs",
"numpy.linalg.norm",
"matplotlib.pyplot.savefig",
"numpy.cos",
"numpy.sin",
"scipy.signal.hilbert",
"numpy.fft.fftfreq",
"numpy.exp",
"numpy.array",
"matplotlib.pyplot.figure"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.7",
"1.0",
"0.10",
"1.2",
"0.14",
"0.19",
"1.5",
"0.12",
"0.17",
"0.13",
"1.6",
"1.4",
"1.9",
"1.3",
"1.10",
"0.15",
"0.18",
"0.16",
"1.8"
],
"tensorflow": []
}
] |
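The second WarpX script in the record above validates the injected laser in two ways: it compares the Hilbert-transform envelope of each field component against the analytic Gaussian envelope, and it checks that the 2-D FFT peak sits at c/wavelength. The 1-D toy below exercises exactly those two numerical tricks on a synthetic pulse; all pulse parameters here are illustrative and unrelated to the test's checksums.

# Hedged 1-D sketch of the envelope and carrier-frequency checks used above.
import numpy as np
from scipy.signal import hilbert

c = 299792458.0
wavelength = 1.0e-6                           # 1 um carrier, as in the test
t = np.linspace(0.0, 200e-15, 4000)           # 200 fs window
carrier = np.cos(2 * np.pi * c / wavelength * t)
envelope_true = np.exp(-((t - 100e-15) / 20e-15) ** 2)
field = envelope_true * carrier

envelope_est = np.abs(hilbert(field))         # same trick as check_component()
freq_axis = np.fft.fftfreq(t.size, d=t[1] - t[0])
freq_est = np.abs(freq_axis[np.argmax(np.abs(np.fft.fft(field)))])

# Envelope error should be tiny; frequency error is limited by the FFT bin width,
# expected to land well under the 5 % threshold used in the test.
print(np.max(np.abs(envelope_est - envelope_true)))
print(abs(freq_est - c / wavelength) / (c / wavelength))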
hlebars/YoutubeDataAnalysis | [
"0845effcdfdf6ab3281adc25840ed090e47498c8"
] | [
"Script/test.py"
] | [
"import pandas as pd\nimport datetime\nimport numpy as np\nimport os\nimport re\nimport matplotlib.pyplot as plot\n\nimport pytz\n# @timeit (repeat=3,number=10)\n\ndef EclatedSubPlot(SerieAfterGrpBy,ActivatePlotting,ListOfDateAndTime,Abbreviation):\n\n\n DicoDayOfWeek={\n \"00\":('Mon','Monday'), \"01\":('Tue','Tuesday'), \"02\":('Wed','Wednesday'), \"03\":('Thu','Thursday'),\n \"04\":('Fri','Friday'), \"05\":('Sat','Saturday'), \"06\":('Sun','Sunday')\n }\n \n DicoMonthOfTheYear = {\n \"01\":(\"Jan\", \"January\"),\"02\":(\"Feb\",\"February\"),\"03\":(\"Mar\",\"March\"),\"04\":(\"Apr\",\"April\"),\"05\":(\"May\",\"May\"),\n \"06\":(\"Jun\",\"June\"),\"07\":(\"Jul\",\"July\"),\"08\":(\"Aug\",\"August\"),\"09\":(\"Sep\",\"September\"),\"10\":(\"Oct\",\"October\"),\n \"11\":(\"Nov\",\"November\"),\"12\":(\"Dec\",\"December\")\n }\n\n df_unstack=SerieAfterGrpBy.unstack(level=0)\n\n nblevels = df_unstack.index.nlevels \n \n \n if nblevels!=1:\n for ColumnsName in ListOfDateAndTime:\n\n ListMultiIndexName=df_unstack.index.names\n\n if ColumnsName in ListMultiIndexName:\n level_index=ListMultiIndexName.index(ColumnsName)\n \n if Abbreviation==True:\n if ColumnsName==\"WeekDay\":\n df_unstack.index = df_unstack.index.set_levels(df_unstack.index.levels[level_index].map(lambda x : DicoDayOfWeek[x][0],DicoDayOfWeek), level=level_index)\n elif ColumnsName==\"M\":\n df_unstack.index = df_unstack.index.set_levels(df_unstack.index.levels[level_index].map(lambda x : DicoMonthOfTheYear[x][0],DicoDayOfWeek), level=level_index)\n elif Abbreviation==False:\n if ColumnsName==\"WeekDay\":\n df_unstack.index = df_unstack.index.set_levels(df_unstack.index.levels[level_index].map(lambda x : DicoDayOfWeek[x][1],DicoDayOfWeek), level=level_index)\n elif ColumnsName==\"M\":\n df_unstack.index = df_unstack.index.set_levels(df_unstack.index.levels[level_index].map(lambda x : DicoMonthOfTheYear[x][1],DicoDayOfWeek), level=level_index)\n else:\n\n if Abbreviation==True:\n if ColumnsName==\"WeekDay\":\n df_unstack.columns = df_unstack.columns.map(lambda x : DicoDayOfWeek[x][0],DicoDayOfWeek)\n elif ColumnsName==\"M\":\n df_unstack.columns = df_unstack.columns.map(lambda x : DicoMonthOfTheYear[x][0],DicoMonthOfTheYear)\n elif Abbreviation==False:\n if ColumnsName==\"WeekDay\":\n df_unstack.columns = df_unstack.columns.map(lambda x : DicoDayOfWeek[x][1],DicoDayOfWeek)\n elif ColumnsName==\"M\":\n df_unstack.columns = df_unstack.columns.map(lambda x : DicoMonthOfTheYear[x][1],DicoMonthOfTheYear)\n\n else:\n\n if \"WeekDay\" in ListOfDateAndTime and \"WeekDay\"==ListOfDateAndTime[0]:\n if Abbreviation==True:\n df_unstack.columns = df_unstack.columns.map(lambda x : DicoDayOfWeek[x][0],DicoDayOfWeek)\n else:\n df_unstack.columns = df_unstack.columns.map(lambda x : DicoDayOfWeek[x][1],DicoDayOfWeek)\n\n if \"M\" in ListOfDateAndTime and \"M\"==ListOfDateAndTime[0]:\n if Abbreviation==True:\n df_unstack.columns = df_unstack.columns.map(lambda x : DicoMonthOfTheYear[x][0],DicoMonthOfTheYear)\n elif Abbreviation==False:\n df_unstack.columns = df_unstack.columns.map(lambda x : DicoMonthOfTheYear[x][1],DicoMonthOfTheYear)\n \n\n DicoConfigRowColumsSubPlot={\"Y\":(4,3),\"M\":(4,3),\"W\":(13,4),\"D\":(8,4),\"WeekDay\":(4,2),\"h\":(6,4),\"m\":(10,6),\"s\":(10,6)}\n fig=df_unstack.plot(subplots=True,figsize=(70, 60), layout=DicoConfigRowColumsSubPlot[ListOfDateAndTime[0]],kind=\"bar\",sharex=True,sharey=True,legend=False,)#.flatten()#.map(set_xlabel=(\"toto\"))#**kwargs)\n\n\n # Add Legend for axis in function of the dimention of the 
subplot\n for Row in range(DicoConfigRowColumsSubPlot[ListOfDateAndTime[0]][0]):\n\n FigRow=fig[Row].flatten()\n\n if DicoConfigRowColumsSubPlot[ListOfDateAndTime[0]][0]%2!=0 and Row%3==1 and Row!=DicoConfigRowColumsSubPlot[ListOfDateAndTime[0]][0]:\n FigRow[0].set_ylabel(\"Nb. Video Trending\")\n elif DicoConfigRowColumsSubPlot[ListOfDateAndTime[0]][0]%2==0 and Row%2==1 and Row!=DicoConfigRowColumsSubPlot[ListOfDateAndTime[0]][0]:\n FigRow[0].set_ylabel(\"Nb. Video Trending\") \n elif DicoConfigRowColumsSubPlot[ListOfDateAndTime[0]][0]==4:\n FigRow[0].set_ylabel(\"Nb. Video Trending\")\n \n for Column in range(len(FigRow)):\n FigRow[Column].set_xlabel(\"Time\")\n\n plot.subplots_adjust(left=None, bottom=None, right=None, top=None, wspace=0.2, hspace=0.5)\n plot.show()\n\n return df_unstack\n\n\n\n\ndef testtemps():\n print(pytz.country_timezones('JP'))\n # Hours=[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23]\n # Hours=pd.date_range('17:30:00', '21:00:00',freq='15T').strftime('%H:%M').tolist()\n # pd.to_datetime(Hours,format='%H:%M')\n # print(Hours)\n Hours=pd.date_range('00:00:00', '23:59:00',freq=str(30)+'T').time\n\n \n df_NumberHours=pd.DataFrame(0,index=Hours,columns=[\"Number\",\"Label\"])\n # df_NumberHours[\"Label\"]=HoursForLabels\n\n # print(df_NumberHours[\"Label\"].head(3))\n\n Country=\"FRA\"\n PathToInputData=os.path.join(\"Script\",\"Data\",\"Data_IN\",\"Youtube_CSV__And_JSON\",Country+\"videos.csv\")\n\n \n\n\n df=pd.read_csv(PathToInputData)#,engine=\"python\") \n\n #'video_id','title',\n\n df=df.drop(columns=['channel_title','category_id','tags','thumbnail_link','comments_disabled','ratings_disabled','video_error_or_removed','description'])\n\n #get the plublish time and put in the column publish time\n df['publish_time'] = pd.to_datetime(df['publish_time'], format='%Y-%m-%dT%H:%M:%S.%fZ')\n # print(df['publish_time'])\n\n\n\n # [\"JPN\",\n LocalTime=False\n\n if LocalTime==True:\n if Country==\"USA\":\n df['publish_time']=pd.DatetimeIndex(df['publish_time']).tz_localize('utc').tz_convert('US/Central')\n elif Country==\"MEX\":\n df['publish_time']=pd.DatetimeIndex(df['publish_time']).tz_localize('utc').tz_convert('America/Mexico_City')\n elif Country==\"FRA\":\n df['publish_time']=pd.DatetimeIndex(df['publish_time']).tz_localize('utc').tz_convert('Europe/Paris')\n elif Country==\"DEU\":\n df['publish_time']=pd.DatetimeIndex(df['publish_time']).tz_localize('utc').tz_convert('Europe/Berlin')\n elif Country==\"GBR\":\n df['publish_time']=pd.DatetimeIndex(df['publish_time']).tz_localize('utc').tz_convert('Europe/London')\n elif Country==\"IND\":\n df['publish_time']=pd.DatetimeIndex(df['publish_time']).tz_localize('utc').tz_convert('Asia/Kolkata')\n elif Country==\"CAN\":\n df['publish_time']=pd.DatetimeIndex(df['publish_time']).tz_localize('utc').tz_convert('America/Winnipeg')\n elif Country==\"KOR\":\n df['publish_time']=pd.DatetimeIndex(df['publish_time']).tz_localize('utc').tz_convert('Asia/Seoul')\n elif Country==\"RUS\":\n df['publish_time']=pd.DatetimeIndex(df['publish_time']).tz_localize('utc').tz_convert('Asia/Krasnoyarsk')\n elif Country==\"JPN\":\n df['publish_time']=pd.DatetimeIndex(df['publish_time']).tz_localize('utc').tz_convert('Asia/Tokyo')\n\n \n\n # filtertime=(df[df.index.time > datetime.time(12),] & df[df.index.time < datetime.time(13)])\n\n #Converting LOcal time to UTC time if LocalToUTCTime==True\n # df=ConvertLocalTimeToUTC(df,Country,LocalToUTCTime)\n print(df[\"video_id\"].nunique())\n df = df.drop_duplicates(subset = 'video_id', 
keep = 'first')\n print(df)\n df.set_index( df['publish_time'], inplace=True)\n # df_FiltResult=\n \n # df=df.groupby([df.index.day_name()],)[\"views\"].count()#,df.index.hour\n\n # df.plot(kind=\"bar\")\n # plot.show()\n\n df_grp=df.groupby([df.index.weekday,df.index.hour])\n ser=df_grp[\"views\"].count()\n\n # print(df_grp[\"views\"].agg([\"count\"])) \n # print(df_grp[\"views\"].agg([\"count\"]).loc[1]) \n # print(df_grp.get_group((1,0)))\n # df.unstack(level=0).plot(kind='bar', subplots=True)\n # plot.show()\n DicoDayOfWeek={\n \"00\":('Mon','Monday'), \"01\":('Tue','Tuesday'), \"02\":('Wed','Wednesday'), \"03\":('Thu','Thursday'),\n \"04\":('Fri','Friday'), \"05\":('Sat','Saturday'), \"06\":('Sun','Sunday')\n }\n # ser.index[0][0] = df.index[0][0].map(lambda x : DicoDayOfWeek[x][1],DicoDayOfWeek)\n # ser.unstack(level=0).plot(subplots=True, figsize=(70, 60), layout=(4, 2),kind=\"bar\",sharex=True,title=ser.index[0][0] )\n # plot.show()\n # for i in range(1,max(df_grp.keys[0])):\n # print(df_grp[\"views\"].agg([\"count\"]).loc[i])\n # df_grp.plot(y=df_grp[\"views\"].agg([\"count\"]).loc[i].count)\n # plot.show()\n # fig, ax = plot.subplots(figsize=(10,4))\n # # ax.plot(df_grp[\"views\"].loc[1], df_grp['views'].count(), label=df_grp[\"views\"].loc[1])\n # for key, grp in df_grp:#df.groupby(ListOfDateAndTime):\n # print(key,grp)\n # ax.plot(grp.groupby(grp.index.hour), grp['views'].count(), label=key)\n\n # ax.legend()\n # plot.show()\n\n # df.plot()\n # plot.show()\n # plot.show()\n # filt=(df.title.str.find(sub)!=-1)\n # filt=None\n # df_FiltResult=df[\"title\"].resample(\"D\")\n #juste le filtre \n # df_FiltResultsample=df[\"title\"][filt].resample(\"M\").count()\n # totalite de la periode \n \n DicoMonthOfTheYear = {\n \"01\":(\"Jan\", \"January\"),\"02\":(\"Feb\",\"February\"),\"03\":(\"Mar\",\"March\"),\"04\":(\"Apr\",\"April\"),\"05\":(\"May\",\"May\"),\n \"06\":(\"Jun\",\"June\"),\"07\":(\"Jul\",\"July\"),\"08\":(\"Aug\",\"August\"),\"09\":(\"Sep\",\"September\"),\"10\":(\"Oct\",\"October\"),\n \"11\":(\"Nov\",\"November\"),\"12\":(\"Dec\",\"December\")\n }\n\n\n # sub=\"\"\n #fictionnary of group by possibilities\n DicoGroubyPossibility={\n \"Y\":df.index.year,\n \"M\":df.index.month,\n \"W\":df.index.week,\n \"D\":df.index.day,\n \"h\":df.index.hour,\n \"m\":df.index.minute,\n \"s\":df.index.second,\n \"time\":df.index.time,\n \"date\":df.index.date,\n \"WeekDay\":df.index.weekday,\n }\n # ListOfDateAndTime=[\"M\",\"D\"]#,\"M\",\"D\"]\n ListOfDateAndTime=[\"WeekDay\"]#,\"M\",\"D\"]\n #test if the list contain more than one parameter for grouby if it is true then it will group by by the composant o the list\n if len(ListOfDateAndTime)==1:\n\n \n \n \n #Create empty list for date and time classification\n ListOfDate=[]\n ListOfTime=[]\n\n #Classify Date and time in the corresponding list in fucntion of it is in upper case or not upper=date low=time\n for i in ListOfDateAndTime:\n if i.isupper() or i==\"date\" or i==\"WeekDay\":\n ListOfDate.append(i)\n else:\n ListOfTime.append(i)\n\n #get the list of all indexes \n SegmentOfDateOrTime=DicoGroubyPossibility[i].astype(str).tolist()\n\n # and add a zero in front of the index string to have 00 h and not 0h or days etc \n for DateOrTime in range(len(SegmentOfDateOrTime)):\n if len(SegmentOfDateOrTime[DateOrTime])==1:\n SegmentOfDateOrTime[DateOrTime]=str(0)+SegmentOfDateOrTime[DateOrTime]\n \n #Place it back in the columns of the date or time correspondant like Y(Year) or h(hour) to get a series grouby with different name\n 
df.loc[:,i]=SegmentOfDateOrTime\n\n\n #grouby in function of the entry in the list of date and time \n # df_grp=df.groupby(ListOfDateAndTime)#[\"views\"].count()\n Abbreviation=True\n\n\n df_grp=df.groupby([df.index.weekday,df.index.hour])#[\"views\"].count()\n\n df=df_grp[\"views\"].count()\n EclatedSubPlot(df,True,ListOfDateAndTime,Abbreviation)\n\n \n\n # Abbreviation=False\n \n\n # # fig, (ax1, ax2) = plot.subplots(2, 1)\n \n # # df.plot(x='Weekday', y='h', ax=ax1, legend=False)\n # # df.sort_values().plot(kind='barh', ax=ax2)\n # ser=df_grp[\"views\"].count()\n \n\n \n\n # df_unstack=ser.unstack(level=0)\n\n # nblevels = df_unstack.index.nlevels \n # print(nblevels)\n \n # if nblevels!=1:\n # for ColumnsName in ListOfDateAndTime:\n\n # ListMultiIndexName=df_unstack.index.names\n\n # if ColumnsName in ListMultiIndexName:\n # level_index=ListMultiIndexName.index(ColumnsName)\n \n # if Abbreviation==True:\n # if ColumnsName==\"WeekDay\":\n # df_unstack.index = df_unstack.index.set_levels(df_unstack.index.levels[level_index].map(lambda x : DicoDayOfWeek[x][0],DicoDayOfWeek), level=level_index)\n # elif ColumnsName==\"M\":\n # df_unstack.index = df_unstack.index.set_levels(df_unstack.index.levels[level_index].map(lambda x : DicoMonthOfTheYear[x][0],DicoDayOfWeek), level=level_index)\n # elif Abbreviation==False:\n # if ColumnsName==\"WeekDay\":\n # df_unstack.index = df_unstack.index.set_levels(df_unstack.index.levels[level_index].map(lambda x : DicoDayOfWeek[x][1],DicoDayOfWeek), level=level_index)\n # elif ColumnsName==\"M\":\n # df_unstack.index = df_unstack.index.set_levels(df_unstack.index.levels[level_index].map(lambda x : DicoMonthOfTheYear[x][1],DicoDayOfWeek), level=level_index)\n # else:\n\n # if Abbreviation==True:\n # if ColumnsName==\"WeekDay\":\n # df_unstack.columns = df_unstack.columns.map(lambda x : DicoDayOfWeek[x][0],DicoDayOfWeek)\n # elif ColumnsName==\"M\":\n # df_unstack.columns = df_unstack.columns.map(lambda x : DicoMonthOfTheYear[x][0],DicoMonthOfTheYear)\n # elif Abbreviation==False:\n # if ColumnsName==\"WeekDay\":\n # df_unstack.columns = df_unstack.columns.map(lambda x : DicoDayOfWeek[x][1],DicoDayOfWeek)\n # elif ColumnsName==\"M\":\n # df_unstack.columns = df_unstack.columns.map(lambda x : DicoMonthOfTheYear[x][1],DicoMonthOfTheYear)\n\n # else:\n\n # if \"WeekDay\" in ListOfDateAndTime and \"WeekDay\"==ListOfDateAndTime[0]:\n # if Abbreviation==True:\n # df_unstack.columns = df_unstack.columns.map(lambda x : DicoDayOfWeek[x][0],DicoDayOfWeek)\n # else:\n # df_unstack.columns = df_unstack.columns.map(lambda x : DicoDayOfWeek[x][1],DicoDayOfWeek)\n # else:\n # if Abbreviation==True:\n # df_unstack.index = df_unstack.index.map(lambda x : DicoDayOfWeek[x][0],DicoDayOfWeek)\n # else:\n # df_unstack.index = df_unstack.index.map(lambda x : DicoDayOfWeek[x][1],DicoDayOfWeek)\n\n # if \"M\" in ListOfDateAndTime and \"M\"==ListOfDateAndTime[0]:\n # if Abbreviation==True:\n # df_unstack.columns = df_unstack.columns.map(lambda x : DicoMonthOfTheYear[x][0],DicoMonthOfTheYear)\n # elif Abbreviation==False:\n # df_unstack.columns = df_unstack.columns.map(lambda x : DicoMonthOfTheYear[x][1],DicoMonthOfTheYear)\n # else:\n # if Abbreviation==True:\n # df_unstack.index = df_unstack.index.map(lambda x : DicoMonthOfTheYear[x][0],DicoMonthOfTheYear)\n # elif Abbreviation==False:\n # df_unstack.index = df_unstack.index.map(lambda x : DicoMonthOfTheYear[x][1],DicoMonthOfTheYear)\n # print(df_unstack.index)\n # # fig, axes=plot.subplots(nrows=4,ncols=2,)\n # # 
axes[0][0].plot(df_unstack)\n # # plot.show()\n # # ax.plot(df_unstack)\n # # fig = plot.figure() # create a figure object\n # # axs = fig.subplots(nrows=4,ncols=2)\n # # fig\n # # for ax in axs:\n # # ax.plot(df_grp[0])\n # # create an axes object in the figure\n # # ax.plot(df_unstack)\n # # ax.set_ylabel('some numbers')\n # # plot.figure(1)\n # # df_unstack.plot()\n # # fig=plot.figure()\n # # ax1=fig.add_subplot(df_unstack)\n\n # DicoConfigRowColumsSubPlot={\"Y\":(4,3),\"M\":(4,3),\"W\":(13,4),\"D\":(8,4),\"WeekDay\":(4,2),\"h\":(6,4),\"m\":(10,6),\"s\":(10,6)}\n # fig=df_unstack.plot(subplots=True,figsize=(70, 60), layout=DicoConfigRowColumsSubPlot[ListOfDateAndTime[0]],kind=\"bar\",sharex=True,sharey=True,legend=False,).flatten()#.map(set_xlabel=(\"toto\"))#**kwargs)\n # fig=fig.flatten()\n # # fig.text(0.5, 0.04, 'common xlabel', ha='center', va='center')\n # # fig.text(0.06, 0.5, 'common ylabel', ha='center', va='center', rotation='vertical')\n # # fig.subplots_adjust(left=None, bottom=None, right=None, top=None, wspace=0.2, hspace=0.2)\n # for i in range(len(fig)):\n \n # fig[i].set_ylabel(\"Nb. Video Trending\")\n # fig[i].set_xlabel(\"Time\")\n\n # plot.subplots_adjust(left=None, bottom=None, right=None, top=None, wspace=0.2, hspace=0.5)\n # plot.show()\n \n # plot.show()\n\n\n # df_unstack[df_unstack.columns[0]].plot(ax=axes[0,0])\n # df_unstack[df_unstack.columns[1]].plot(ax=axes[0,1])\n # plot.show()\n\n # rowlength = df_grp.ngroups//2\n # fig, axs = plot.subplots()\n # df_unstack.plot(subplot=True,layout=(4, 2), figsize=(70, 60),kind=\"bar\",sharex=True,sharey=True,)\n # fig=df_unstack.plot(subplot=True,ax=ax,kind=\"bar\")\n #title of the x axis of the plot\n # ax.set_xlabel('common xlabel')\n # fig.xlabel('common xlabel')\n # fig.ylabel('common ylabel')\n # plot.xlabel(\"hours\")\n\n #title of y axis of the plot\n # plot.ylabel(\"Number Of Video Trending\")\n # plot.(xtitle=\"hours\",ytitle=\"Number Of Video Trending\")\n # plot.tight_layout()\n plot.show()\n # plot.show()\n # fig, ax = plot.subplots(figsize=(10,4))\n # for key, grp in df.groupby(ListOfDateAndTime):\n # ax.plot(grp['WeekDay'], grp['h'], label=key)\n\n # ax.legend()\n # plot.show()\n\n\n #Go from pd series to dataframe with another index\n df=df.to_frame(name = 'Number Of Video Trending').reset_index()\n\n\n \n # fig, axs = plot.subplots(2, 1, sharex=True)\n\n # # gs = df.groupby([\"WeekDay\",\"h\"], axis=1)\n # # df.set_index('WeekDay',inplace=True)\n # gs = df.groupby([\"WeekDay\"], axis=1)\n # for (_, g), ax in zip(gs, axs):\n # g.plot.bar(stacked=True, ax=ax)\n\n # plot.show()\n \n if \"WeekDay\" in ListOfDateAndTime:\n dayOfWeek={\"00\":'Monday', \"01\":'Tuesday', \"02\":'Wednesday', \"03\":'Thursday', \"04\":'Friday', \"05\":'Saturday', \"06\":'Sunday'}\n df['WeekDay'] = df['WeekDay'].map(dayOfWeek)\n\n #create the columns time in function of the date and time in listoftime\n if len(ListOfDate)>0 and len(ListOfTime)>0:\n df['Time'] = df[ListOfDate].astype(str).agg('-'.join, axis=1)+\" \"+df[ListOfTime].astype(str).agg(':'.join, axis=1)\n elif len(ListOfDate)>0 and len(ListOfTime)==0:\n df['Time'] = df[ListOfDate].astype(str).agg('-'.join, axis=1)\n elif len(ListOfDate)==0 and len(ListOfTime)>0:\n df['Time'] = df[ListOfTime].astype(str).agg(':'.join, axis=1)\n \n #Put the column Time in index\n df.set_index( df['Time'], inplace=True)\n\n #add the column Time to ListOfDateAndTime before dropping every columns of ListOfDateAndTime to have a nice dataframe with just the number\n #of videos trending and the 
time index\n ListOfDateAndTime.append('Time')\n df=df.drop(ListOfDateAndTime,axis=1)\n\n else:\n #if their is only one thing in the list\n\n\n #get the list of all indexes \n SegmentOfDateOrTime=DicoGroubyPossibility[ListOfDateAndTime[0]].astype(str).tolist()\n\n # and add a zero in front of the index string to have 00 h and not 0h or days etc \n for DateOrTime in range(len(SegmentOfDateOrTime)):\n if len(SegmentOfDateOrTime[DateOrTime])==1:\n SegmentOfDateOrTime[DateOrTime]=str(0)+SegmentOfDateOrTime[DateOrTime]\n\n #grouby in function of the entry in the list of index \n df=df.groupby(SegmentOfDateOrTime)[\"views\"].count()\n\n #Create a dataframe with the grouby serie\n df=df.to_frame(name = 'Number Of Video Trending')#.reset_index()\n\n # Rename the dataframe index in Time\n df.index=df.index.rename('Time')\n\n \n \n # df1.columns=ListOfDateAndTime.split(\"_\")\n # df1=df1.to_frame(name = 'count').reset_index()\n \n # df=df.loc[:,ListOfTime].join()\n \n\n\n\n\n # df=df.resample(\"60T\").views.count()#, df.index.minute df.index.hour\n # df=df.groupby(pd.Grouper(key='publish_time',freq='30T')).views.count()#, df.index.minute df.index.hour\n # df=df.groupby([df.index.second]).views.count()#df.index.hour,\n # df=df.groupby([df.index.hour,df.index.minute,df.index.second]).views.count()\n # df=df.groupby([df.index.year,df.index.month,df.index.day,df.index.hour,df.index.minute,df.index.second]).views.count()\n # print(df)\n df.plot(kind=\"bar\")\n\n plot.show()\n \n # df_FiltResult=df[\"views\"].resample(\"H\").count()\n # print(df_FiltResult)\n FindText=\" !\"\n filtre=\"Minute\"\n NumberOfVideoTrendingByCountry=\"Number Of Video \"+Country\n DicoResampleAndGraph={\"Year\":(\"Y\",\"%y\"),\"Month\":(\"M\",\"%y/%m\"),\"Day\":(\"D\",\"%y/%m/%d\"),\"Hour\":(\"H\",\"%y/%m/%d %H\"),\"Minute\":(\"m\",\"%y/%m/%d %H:%m\")}\n # filt=(df.index.year==2017) | (df.index.year==2018)\n filt=(df.index.month==12) | (df.index.day==25)\n df=df[filt]\n if FindText!=\"\":\n df[\"result\"]=df[\"title\"].apply(lambda x: 1 if x.find(FindText)!=-1 else 0)\n df_FiltResult=df[\"result\"].resample(DicoResampleAndGraph[filtre][0]).sum()\n \n else:\n df_FiltResult=df[\"views\"].resample(DicoResampleAndGraph[filtre][0]).count()\n df_FiltResult.columns=[\"Label\",NumberOfVideoTrendingByCountry]\n df_FiltResult.index=df_FiltResult.index.strftime(DicoResampleAndGraph[filtre][1])#-%d\n\n # df_FiltResult.index=df_FiltResult.index.strftime(\"%V\")#-%d\n # print(df_FiltResult.index)\n # filt=(df.title.str.find(sub)!=-1)\n # df_FiltResult=df[\"title\"][filt].resample(\"W\").count()\n # df_FiltResult=df[\"title\"].resample(\"W\").count()\n # df_FiltResult.index=df_FiltResult.index.strftime(\"%V\")#-%d\n print(df_FiltResult)\n \n # if df\n # df_FiltResult.loc[\"value\"]=df[\"title\"][filt].count()\n # df.index=pd.to_datetime(df.index,format='%Y-%m-%d')\n # df_FiltResultsample.plot(y=0,kind=\"bar\")\n df_FiltResult.plot(y=0,kind=\"bar\")\n plot.show()\n NumberOfVideoTrendingByCountry=\"Number Of Video \"+Country\n Months=[\"January\",\"February\",\"March\",\"April\",\"May\",\"June\",\"July\",\"August\",\"October\",\"November\",\"December\"]\n Years=[]\n for Year in range(min(df.publish_time.dt.year),max(df.publish_time.dt.year)+1):\n Years.append(Year)\n df_VideoCountForDayOfTheWeek=pd.DataFrame(0,index=Years,columns=[NumberOfVideoTrendingByCountry])\n print(min(df.publish_time.dt.year))\n print(max(df.publish_time.dt.year))\n sub=\" Noël \"\n for Year in Years:\n filtervalue=(df.publish_time.dt.year==Year) & 
(df.title.str.find(sub)!=-1)\n df_VideoCountForDayOfTheWeek.loc[Year,NumberOfVideoTrendingByCountry]=max(df[filtervalue].count())\n print(df_VideoCountForDayOfTheWeek)\n WeekDays=[\"Monday\",\"Tuesday\",\"Wednesday\",\"Thursday\",\"Friday\",\"Saturday\",\"Sunday\"]\n df_VideoCountForDayOfTheWeek=pd.DataFrame(0,index=WeekDays,columns=[\"Number Of Videos\"])\n for WeekDay in WeekDays:\n df_VideoCountForDayOfTheWeek.loc[WeekDay,\"Number Of Videos\"]=max(df[df.publish_time.dt.day_name()==WeekDay].count())\n print(df_VideoCountForDayOfTheWeek)\n\n df_VideoCountForDayOfTheWeek.plot(y=\"Number Of Videos\",kind=\"bar\")\n plot.show()\n #insert publish date in the corresponding columns\n df.insert(5, 'publish_date', df['publish_time'].dt.date)\n\n # convert them into datetime time \n df['publish_time'] = df['publish_time'].dt.time\n\n #convert the trending date string into a datetime format\n df['trending_date'] = pd.to_datetime(df['trending_date'], format='%y.%d.%m')\n\n #Put the trending date in the same format before soustracting them to \n # get the time before trending\n df[\"trending_date\"]=df[\"trending_date\"].values.astype('datetime64[D]')\n df[\"publish_date\"]=df[\"publish_date\"].values.astype('datetime64[D]')\n\n\n\n # functionning from 1 s tp 24h \n IntervalMinute=1/60\n\n if IntervalMinute==1/60:\n\n \n counttotal=0\n countindex=0\n \n HoursForLabels=pd.date_range('00:00:00', '23:59:59',freq=str(IntervalMinute)+'T').strftime('%H:%M:%S').tolist()\n\n NumberOfVideoTrendingByCountry=\"Number Of Video \"+Country\n df_NumberHours=pd.DataFrame(0,index=HoursForLabels,columns=[\"Label\",NumberOfVideoTrendingByCountry])\n df_NumberHours[\"Label\"]=HoursForLabels\n\n\n\n for index in range(len(HoursForLabels)):\n if index<(len(HoursForLabels)-1):\n df_NumberHours.loc[HoursForLabels[index],NumberOfVideoTrendingByCountry]=df[\"views\"].between_time(start_time=HoursForLabels[index],end_time=HoursForLabels[index+1],include_end=False).count()\n else:\n df_NumberHours.loc[HoursForLabels[index],NumberOfVideoTrendingByCountry]=df[\"views\"].between_time(start_time=HoursForLabels[index],end_time=\"23:59:59\",include_start=True,include_end=True).count()\n\n else:\n #insert publish date in the corresponding columns\n df.insert(5, 'publish_date', df['publish_time'].dt.date)\n\n # convert them into datetime time \n df['publish_time'] = df['publish_time'].dt.time\n\n #convert the trending date string into a datetime format\n df['trending_date'] = pd.to_datetime(df['trending_date'], format='%y.%d.%m')\n\n #Put the trending date in the same format before soustracting them to \n # get the time before trending\n df[\"trending_date\"]=df[\"trending_date\"].values.astype('datetime64[D]')\n df[\"publish_date\"]=df[\"publish_date\"].values.astype('datetime64[D]')\n\n\n #Get all time data in function of the day of the week if DayOfTheWeek==\"All\" skip this to have all day of the dataframe\n df[\"weekday_publish_date\"] = df[\"publish_date\"].dt.day_name()\n # df=GetDFFromWeekDay(df,DayOfTheWeek)\n \n\n\n\n # get the time before trending\n df[\"Time_Before_Trending\"]=df[\"trending_date\"].sub(df[\"publish_date\"],axis=0)\n\n\n\n # count the number of video publish in the same time \n df_NumberHours=df['publish_time'].value_counts()\n df_NumberHours.sort_values(0,ascending=True)\n # df_NumberHours.index=sorted(df_NumberHours.index,key=)\n df_NumberHours=df_NumberHours.sort_index()\n HoursForLabels=pd.date_range('00:00:00', '23:59:59',freq=str(IntervalMinute)+'T').strftime('%H:%M:%S').tolist()\n for time in 
HoursForLabels:\n if time not in df_NumberHours.index:\n df_NumberHours.set_value(time,0)\n df_NumberHours.index=df_NumberHours.index.time\n #Supres the last row of the df for interval and video publish in the interval \n # because it is 23:59:59 but is empty cause every thing goes to 00:00:00\n df_NumberHours.drop(df_NumberHours.tail(1).index,inplace=True)\n\n # print(df_NumberHours)\n # print(len(df))\n # print(df_NumberHours[NumberOfVideoTrendingByCountry].sum())\n\n\n # df_NumberHours.plot(y=NumberOfVideoTrendingByCountry,kind=\"bar\")\n # plot.show()\n\n ##############################################################################################################################\n # x=2\n # print(df)\n # print(df[\"views\"].between_time(start_time=\"00:00:00\",end_time=\"23:59:59\").count())\n # print(df[\"views\"].count())\n # print(len(df[\"views\"]))\n\n # df_NumberHours.loc[\"23:59\",[\"Label\",NumberOfVideoTrendingByCountry]] = \"23:59\",0\n # print(df_NumberHours)\n # for index in range(len(HoursForLabels)+1):\n # if index<(len(HoursForLabels)-1):\n # # if HoursForLabels[index]==\"23:30\":\n # # x=1\n # df_NumberHours.loc[HoursForLabels[index],NumberOfVideoTrendingByCountry]=df[\"views\"].between_time(start_time=HoursForLabels[index],end_time=HoursForLabels[index+1],include_end=False).count()\n # elif index==(len(HoursForLabels)-1):\n # df_NumberHours.loc[HoursForLabels[-1],NumberOfVideoTrendingByCountry]=df[\"views\"].between_time(start_time=HoursForLabels[index-1],end_time=HoursForLabels[-1],include_end=False).count()\n # else:\n # df_NumberHours.loc[\"23:59\",NumberOfVideoTrendingByCountry]=df[\"views\"].between_time(start_time=HoursForLabels[-1],end_time=\"23:59:59\",include_start=True,include_end=True).count()\n\n\n # df_NumberHours.set_index(\"Label\",inplace=True)\n\n\n # for index in range(len(HoursForLabels)):\n # if index<(len(HoursForLabels)-1):\n # df_NumberHours.loc[HoursForLabels[index],NumberOfVideoTrendingByCountry]=df[\"views\"].between_time(start_time=HoursForLabels[index],end_time=HoursForLabels[index+1],include_end=False).count()\n # elif index==len(HoursForLabels)-1:\n # df_NumberHours.loc[HoursForLabels[-1],NumberOfVideoTrendingByCountry]=df[\"views\"].between_time(start_time=HoursForLabels[-1],end_time=\"23:59:59\",include_end=True).count()\n # df_NumberHours.loc[\"23:59\",NumberOfVideoTrendingByCountry]=df[\"views\"].between_time(start_time=HoursForLabels[-1],end_time=\"23:59:59\",include_start=True,include_end=True).count()\n # elif index==len(HoursForLabels):\n \n # print(df_NumberHours[NumberOfVideoTrendingByCountry].sum())\n\n #0 a 03 \n\n\n\n\n\n\ndef anepasutiliser():\n \n print(df_NumberHours[NumberOfVideoTrendingByCountry].sum())\n\n print(df_NumberHours)\n\n\n\n\n df_NumberHours=pd.DataFrame(0,index=HoursForLabels,columns=[\"Label\",NumberOfVideoTrendingByCountry])\n df.insert(5, 'publish_date', df['publish_time'].dt.date)\n\n #convert them into datetime time \n # df['publish_time'] = df['publish_time'].dt.time\n # df['publish_time'] =df['publish_time'] .astype('datetime64[D]')\n df['publish_time'] = pd.DatetimeIndex(df['publish_time'])\n df['publish_time']=df['publish_time'].dt.time\n print(df['publish_time'])\n # count the number of video publish in the same time \n df[\"Count\"]=df['publish_time'].value_counts()\n df.sort_values('Count',ascending=True)\n print(df)\n pd.to_timedelta(df['publish_time'])\n\n df.set_index(pd.to_datetime(df['publish_time'],\"hh:mm:ss\"), inplace=True)\n\n print(df.index.time)\n\n\n # 
df.set_index(pd.DatetimeIndex(df['publish_time']), inplace=True)\n\n\n print(df.index)\n\n\n\n\n\n\n print(df['views'].resample('T').sum())\n\n\n\n\n df['publish_time'] = df['publish_time']\n\n\n\n #convert the trending date string into a datetime format\n df['trending_date'] = pd.to_datetime(df['trending_date'], format='%y.%d.%m')\n\n\n\n #Put the trending date in the same format before soustracting them to \n # get the time before trending\n df[\"trending_date\"]=df[\"trending_date\"].values.astype('datetime64[D]')\n df[\"publish_date\"]=df[\"publish_date\"].values.astype('datetime64[D]')\n\n\n df[\"weekday_publish_date\"] = df[\"publish_date\"].dt.day_name()\n # df=df[df.weekday_publish_date==DayOfTheWeek]\n\n\n\n print(df)\n\n # get the time before trending\n df[\"Time_Before_Trending\"]=df[\"trending_date\"].sub(df[\"publish_date\"],axis=0)\n\n\n\n # count the number of video publish in the same time \n Df_TimeAndNumberOfPublication=df['publish_time'].value_counts()\n Df_TimeAndNumberOfPublication.sort_values(0,ascending=True)\n\n # print(datetime.time(hour=,minute=-30,second=40))\n print(df_NumberHours.tail(5))\n #40562 via fonction via tableau 40723 \n #il faut que les valeur centrer entre 16:30 avec 15 min a gauche 15 min a droite soit increment/2 \n\n\n print(df_NumberHours[\"Number Of Video\"].sum())\n #et si les minutes sont egales a zero alors il faut retirer une heure\n # \n # df_NumberHours.plot(x=\"Label\",y=NumberOfVideoTrendingByCountry, kind='bar')\n\n # #title of the plot\n # plot.title(\"Number of Video Trending in \" +Country +\" by publication time\")\n\n # #title of the x axis of the plot\n # plot.xlabel('Time')\n\n # #title of y axis of the plot\n # plot.ylabel('Number of Video Trending')\n\n # #show the graph\n # plot.show()\n\ntesttemps()\n\n\n\ndef NumberOfVideoFilterByPublishTime(df,Country,IntervalMinute):\n\n if IntervalMinute!=1/60:\n df.set_index( df['publish_time'], inplace=True)\n counttotal=0\n countindex=0\n IntervalMinute=1/60\n HoursForLabels=pd.date_range('00:00:00', '23:59:59',freq=str(IntervalMinute)+'T').strftime('%H:%M:%S').tolist()\n\n NumberOfVideoTrendingByCountry=\"Number Of Video \"+Country\n df_NumberHours=pd.DataFrame(0,index=HoursForLabels,columns=[\"Label\",NumberOfVideoTrendingByCountry])\n df_NumberHours[\"Label\"]=HoursForLabels\n\n\n\n for index in range(len(HoursForLabels)):\n if index<(len(HoursForLabels)-1):\n df_NumberHours.loc[HoursForLabels[index],NumberOfVideoTrendingByCountry]=df[\"views\"].between_time(start_time=HoursForLabels[index],end_time=HoursForLabels[index+1],include_end=False).count()\n else:\n df_NumberHours.loc[HoursForLabels[index],NumberOfVideoTrendingByCountry]=df[\"views\"].between_time(start_time=HoursForLabels[index],end_time=\"23:59:59\",include_start=True,include_end=True).count()\n else:\n\n\n #insert publish date in the corresponding columns\n df.insert(5, 'publish_date', df['publish_time'].dt.date)\n\n # convert them into datetime time \n df['publish_time'] = df['publish_time'].dt.time\n\n #convert the trending date string into a datetime format\n df['trending_date'] = pd.to_datetime(df['trending_date'], format='%y.%d.%m')\n\n #Put the trending date in the same format before soustracting them to \n # get the time before trending\n df[\"trending_date\"]=df[\"trending_date\"].values.astype('datetime64[D]')\n df[\"publish_date\"]=df[\"publish_date\"].values.astype('datetime64[D]')\n\n\n #Get all time data in function of the day of the week if DayOfTheWeek==\"All\" skip this to have all day of the 
dataframe\n df[\"weekday_publish_date\"] = df[\"publish_date\"].dt.day_name()\n df=GetDFFromWeekDay(df,DayOfTheWeek)\n \n\n\n\n # get the time before trending\n df[\"Time_Before_Trending\"]=df[\"trending_date\"].sub(df[\"publish_date\"],axis=0)\n\n\n\n # count the number of video publish in the same time \n df_NumberHours=df['publish_time'].value_counts()\n # df_NumberHours.sort_values(0,ascending=True)\n\n\n \n #Supres the last row of the df for interval and video publish in the interval \n # because it is 23:59:59 but is empty cause every thing goes to 00:00:00\n df_NumberHours.drop(df_NumberHours.tail(1).index,inplace=True)\n \n\n return df_NumberHours"
] | [
[
"pandas.to_datetime",
"pandas.read_csv",
"pandas.DataFrame",
"pandas.DatetimeIndex",
"matplotlib.pyplot.subplots_adjust",
"pandas.to_timedelta",
"matplotlib.pyplot.show"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
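The snippet in the record above experiments with several ways of counting trending videos per publication weekday and per time-of-day interval. As a clearer point of reference, here is a minimal sketch of the weekday count it appears to be aiming for; the CSV file name and the column names ('publish_time', 'trending_date' with format '%y.%d.%m') are assumptions taken from the snippet itself, not verified against the original dataset.

import pandas as pd
import matplotlib.pyplot as plt

# "USvideos.csv" is a placeholder name for the YouTube trending export used above.
df = pd.read_csv("USvideos.csv")
df["publish_time"] = pd.to_datetime(df["publish_time"])
df["trending_date"] = pd.to_datetime(df["trending_date"], format="%y.%d.%m")

weekdays = ["Monday", "Tuesday", "Wednesday", "Thursday",
            "Friday", "Saturday", "Sunday"]
# Count videos by the weekday they were published, keeping calendar order.
per_weekday = (df["publish_time"].dt.day_name()
               .value_counts()
               .reindex(weekdays, fill_value=0))
print(per_weekday)

per_weekday.plot(kind="bar")
plt.ylabel("Number of videos")
plt.show()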
binnietom/py21cmmc_wv-1 | [
"2d5405700c1d99bd5f22c762999aea89d1ca1c23"
] | [
"devel/test_wv.py"
] | [
"from py21cmmc_wv import morlet\nimport numpy as np\n\nbw = 50.0\nnumin = 130.0\nN = 736\nnu = np.arange(N) * bw/N + numin\nmid = (nu[0] + nu[-1])/2\n\nspectrum = np.exp(-(nu-mid)**2/ (2*4.0**2))\n\ntrnsc, fc, _ = morlet.morlet_transform_c(spectrum, nu)\ntrnsc = np.abs(trnsc)**2\n"
] | [
[
"numpy.arange",
"numpy.exp",
"numpy.abs"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
vgp314/Udacity-Arvato-Identify-Customer-Segments | [
"6be1d4f1eeac391c17c70fdf584bdc4813f80fd8"
] | [
"cluster.py"
] | [
"from sklearn.cluster import KMeans\nfrom sklearn.cluster import MiniBatchKMeans\nimport matplotlib.pyplot as plt\n\n\ndef plot_clustering(data):\n\t'''\n\t\tDefinition:\n\t\t\tThis function plot the squared error for the clustered points\n\t\targs:\n\t\t\tdata to be clusterd\n\t\treturns:\n\t\t\tNone\n\t\t\n\t'''\t\n\tcost =[] \n\tmax_clusters = 20\n\tfor i in range(2, max_clusters):\n\t print(\"Analysing \", i, \" clusters\")\n\t KM = MiniBatchKMeans(n_clusters = i,batch_size=20000) \n\t KM.fit(data) \n\t cost.append(KM.inertia_) \n\t \n\n\tplt.plot(range(2, max_clusters), cost, color ='g', linewidth ='3') \n\tplt.xlabel(\"Number of Clusters\") \n\tplt.ylabel(\"Squared Error (Cost)\") \n\tplt.show()\n\t \n\ndef do_clustering(data,number_clusters):\n\t'''\n\t\tDefinition:\n\t\t\tThis function initizalize KMeans with number_clusters and fit to data\n\t\targs:\n\t\t\tdata to be clustered, number_clusters\n\t\treturns:\n\t\t\tfitted K-Means mdel\n\t\t\n\t'''\t\n\t\n\tkmeans = KMeans(number_clusters)\n\tfitted_model_k_means = kmeans.fit(data)\n\treturn fitted_model_k_means\n\n"
] | [
[
"sklearn.cluster.KMeans",
"matplotlib.pyplot.xlabel",
"sklearn.cluster.MiniBatchKMeans",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylabel"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
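A possible driver for the two helpers in the cluster.py record above; synthetic blobs stand in for the real customer data, so make_blobs, the feature count, and number_clusters=4 are purely illustrative assumptions.

import numpy as np
from sklearn.datasets import make_blobs
from cluster import plot_clustering, do_clustering  # module name taken from the file path in the record

# Synthetic data as a stand-in for the Arvato customer features (illustrative only).
X, _ = make_blobs(n_samples=5000, centers=4, n_features=10, random_state=42)

plot_clustering(X)                          # elbow plot: squared error vs. number of clusters
model = do_clustering(X, number_clusters=4) # fitted KMeans instance
labels = model.predict(X)                   # cluster assignment per row
print(np.bincount(labels))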
tkhe/tkdetection | [
"54e6c112ef2930e755f457e38449736f5743a9ea",
"54e6c112ef2930e755f457e38449736f5743a9ea",
"54e6c112ef2930e755f457e38449736f5743a9ea"
] | [
"projects/PointRend/point_rend/coarse_mask_head.py",
"projects/PointRend/train_net.py",
"tkdet/structures/instances.py"
] | [
"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom tkdet.layers import Conv2d\nfrom tkdet.layers import ShapeSpec\nfrom tkdet.models.roi_head.mask_head import MASK_HEAD_REGISTRY\nfrom tkdet.utils import weight_init\n\n__all__ = [\"CoarseMaskHead\"]\n\n\n@MASK_HEAD_REGISTRY.register()\nclass CoarseMaskHead(nn.Module):\n def __init__(self, cfg, input_shape: ShapeSpec):\n super(CoarseMaskHead, self).__init__()\n\n self.num_classes = cfg.MODEL.NUM_CLASSES\n conv_dim = cfg.MODEL.ROI_MASK_HEAD.CONV_DIM\n self.fc_dim = cfg.MODEL.ROI_MASK_HEAD.FC_DIM\n num_fc = cfg.MODEL.ROI_MASK_HEAD.NUM_FC\n self.output_side_resolution = cfg.MODEL.ROI_MASK_HEAD.OUTPUT_SIDE_RESOLUTION\n self.input_channels = input_shape.channels\n self.input_h = input_shape.height\n self.input_w = input_shape.width\n\n self.conv_layers = []\n if self.input_channels > conv_dim:\n self.reduce_channel_dim_conv = Conv2d(\n self.input_channels,\n conv_dim,\n kernel_size=1,\n activation=\"ReLU\"\n )\n self.conv_layers.append(self.reduce_channel_dim_conv)\n\n self.reduce_spatial_dim_conv = Conv2d(\n conv_dim,\n conv_dim,\n kernel_size=2,\n stride=2,\n padding=0,\n bias=True,\n activation=\"ReLU\"\n )\n self.conv_layers.append(self.reduce_spatial_dim_conv)\n\n input_dim = conv_dim * self.input_h * self.input_w\n input_dim //= 4\n\n self.fcs = []\n for k in range(num_fc):\n fc = nn.Linear(input_dim, self.fc_dim)\n self.add_module(\"coarse_mask_fc{}\".format(k + 1), fc)\n self.fcs.append(fc)\n input_dim = self.fc_dim\n\n output_dim = self.num_classes * self.output_side_resolution * self.output_side_resolution\n\n self.prediction = nn.Linear(self.fc_dim, output_dim)\n nn.init.normal_(self.prediction.weight, std=0.001)\n nn.init.constant_(self.prediction.bias, 0)\n\n for layer in self.conv_layers:\n weight_init.c2_msra_fill(layer)\n for layer in self.fcs:\n weight_init.c2_xavier_fill(layer)\n\n def forward(self, x):\n N = x.shape[0]\n x = x.view(N, self.input_channels, self.input_h, self.input_w)\n for layer in self.conv_layers:\n x = layer(x)\n x = torch.flatten(x, start_dim=1)\n for layer in self.fcs:\n x = F.relu(layer(x))\n return self.prediction(x).view(\n N,\n self.num_classes,\n self.output_side_resolution,\n self.output_side_resolution\n )\n",
"import os\n\nimport torch\n\nimport tkdet.utils.comm as comm\nfrom tkdet.checkpoint import DetectionCheckpointer\nfrom tkdet.config import get_cfg\nfrom tkdet.data import MetadataCatalog\nfrom tkdet.engine import DefaultTrainer\nfrom tkdet.engine import default_argument_parser\nfrom tkdet.engine import default_setup\nfrom tkdet.engine import launch\nfrom tkdet.evaluation import CityscapesInstanceEvaluator\nfrom tkdet.evaluation import COCOEvaluator\nfrom tkdet.evaluation import DatasetEvaluators\nfrom tkdet.evaluation import LVISEvaluator\nfrom tkdet.evaluation import verify_results\n\nfrom point_rend import add_pointrend_config\n\n\nclass Trainer(DefaultTrainer):\n @classmethod\n def build_evaluator(cls, cfg, dataset_name, output_folder=None):\n if output_folder is None:\n output_folder = os.path.join(cfg.OUTPUT_DIR, \"inference\")\n evaluator_list = []\n evaluator_type = MetadataCatalog.get(dataset_name).evaluator_type\n if evaluator_type == \"lvis\":\n return LVISEvaluator(dataset_name, cfg, True, output_folder)\n if evaluator_type == \"coco\":\n return COCOEvaluator(dataset_name, cfg, True, output_folder)\n if evaluator_type == \"cityscapes\":\n assert torch.cuda.device_count() >= comm.get_rank(), \\\n \"CityscapesEvaluator currently do not work with multiple machines.\"\n return CityscapesInstanceEvaluator(dataset_name)\n if len(evaluator_list) == 0:\n raise NotImplementedError(\n f\"no Evaluator for the dataset {dataset_name} with the type {evaluator_type}\"\n )\n if len(evaluator_list) == 1:\n return evaluator_list[0]\n return DatasetEvaluators(evaluator_list)\n\n\ndef setup(args):\n cfg = get_cfg()\n add_pointrend_config(cfg)\n cfg.merge_from_file(args.config)\n cfg.merge_from_list(args.opts)\n cfg.freeze()\n default_setup(cfg, args)\n return cfg\n\n\ndef main(args):\n cfg = setup(args)\n\n if args.eval_only:\n model = Trainer.build_model(cfg)\n DetectionCheckpointer(model, save_dir=cfg.OUTPUT_DIR).resume_or_load(\n cfg.MODEL.WEIGHTS, resume=args.resume\n )\n res = Trainer.test(cfg, model)\n if comm.is_main_process():\n verify_results(cfg, res)\n return res\n\n trainer = Trainer(cfg)\n trainer.resume_or_load(resume=args.resume)\n return trainer.train()\n\n\nif __name__ == \"__main__\":\n args = default_argument_parser().parse_args()\n print(\"Command Line Args:\", args)\n launch(\n main,\n args.num_gpus,\n num_machines=args.num_machines,\n machine_rank=args.machine_rank,\n dist_url=args.dist_url,\n args=(args,),\n )\n",
"import itertools\nfrom typing import Any\nfrom typing import Dict\nfrom typing import List\nfrom typing import Tuple\nfrom typing import Union\n\nimport torch\n\n__all__ = [\"Instances\"]\n\n\nclass Instances(object):\n def __init__(self, image_size: Tuple[int, int], **kwargs: Any):\n self._image_size = image_size\n self._fields: Dict[str, Any] = {}\n for k, v in kwargs.items():\n self.set(k, v)\n\n @property\n def image_size(self) -> Tuple[int, int]:\n return self._image_size\n\n def __setattr__(self, name: str, val: Any) -> None:\n if name.startswith(\"_\"):\n super().__setattr__(name, val)\n else:\n self.set(name, val)\n\n def __getattr__(self, name: str) -> Any:\n if name == \"_fields\" or name not in self._fields:\n raise AttributeError(f\"Cannot find field '{name}' in the given Instances!\")\n return self._fields[name]\n\n def set(self, name: str, value: Any) -> None:\n data_len = len(value)\n if len(self._fields):\n assert len(self) == data_len, \\\n f\"Adding a field of length {data_len} to a Instances of length {len(self)}\"\n self._fields[name] = value\n\n def has(self, name: str) -> bool:\n return name in self._fields\n\n def remove(self, name: str) -> None:\n del self._fields[name]\n\n def get(self, name: str) -> Any:\n return self._fields[name]\n\n def get_fields(self) -> Dict[str, Any]:\n return self._fields\n\n def to(self, *args: Any, **kwargs: Any) -> \"Instances\":\n ret = Instances(self._image_size)\n for k, v in self._fields.items():\n if hasattr(v, \"to\"):\n v = v.to(*args, **kwargs)\n ret.set(k, v)\n return ret\n\n def __getitem__(self, item: Union[int, slice, torch.BoolTensor]) -> \"Instances\":\n if type(item) == int:\n if item >= len(self) or item < -len(self):\n raise IndexError(\"Instances index out of range!\")\n else:\n item = slice(item, None, len(self))\n\n ret = Instances(self._image_size)\n for k, v in self._fields.items():\n ret.set(k, v[item])\n return ret\n\n def __len__(self) -> int:\n for v in self._fields.values():\n return len(v)\n raise NotImplementedError(\"Empty Instances does not support __len__!\")\n\n def __iter__(self):\n raise NotImplementedError(\"`Instances` object is not iterable!\")\n\n @staticmethod\n def cat(instance_lists: List[\"Instances\"]) -> \"Instances\":\n assert all(isinstance(i, Instances) for i in instance_lists)\n assert len(instance_lists) > 0\n\n if len(instance_lists) == 1:\n return instance_lists[0]\n\n image_size = instance_lists[0].image_size\n for i in instance_lists[1:]:\n assert i.image_size == image_size\n\n ret = Instances(image_size)\n for k in instance_lists[0]._fields.keys():\n values = [i.get(k) for i in instance_lists]\n v0 = values[0]\n if isinstance(v0, torch.Tensor):\n values = torch.cat(values, dim=0)\n elif isinstance(v0, list):\n values = list(itertools.chain(*values))\n elif hasattr(type(v0), \"cat\"):\n values = type(v0).cat(values)\n else:\n raise ValueError(f\"Unsupported type {type(v0)} for concatenation\")\n ret.set(k, values)\n return ret\n\n def __str__(self) -> str:\n s = self.__class__.__name__ + \"(\"\n s += \"num_instances={}, \".format(len(self))\n s += \"image_height={}, \".format(self._image_size[0])\n s += \"image_width={}, \".format(self._image_size[1])\n s += \"fields=[{}])\".format(\", \".join((f\"{k}: {v}\" for k, v in self._fields.items())))\n return s\n\n __repr__ = __str__\n"
] | [
[
"torch.nn.init.constant_",
"torch.nn.Linear",
"torch.nn.init.normal_",
"torch.flatten"
],
[
"torch.cuda.device_count"
],
[
"torch.cat"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
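A small usage sketch for the Instances container shown in the tkdet/structures/instances.py record above; the field names and image size are illustrative, and the import path simply mirrors the file layout in the record.

import torch
from tkdet.structures.instances import Instances  # path mirrors the file listing above

inst = Instances((480, 640))
inst.scores = torch.tensor([0.9, 0.75, 0.4])   # any equally sized per-instance fields work
inst.pred_classes = torch.tensor([1, 0, 3])

print(len(inst))                      # 3: the common length of every stored field
keep = inst[inst.scores > 0.5]        # boolean-mask indexing returns a new Instances
merged = Instances.cat([inst, keep])  # fields are concatenated tensor-wise
print(merged)                         # num_instances=5, image_height=480, image_width=640, ...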
Chicoryn/dream-go | [
"6a4b71d7e1fcc28110ba859c0a2b59c10041c083"
] | [
"contrib/trainer/dream_tf/layers/policy_head.py"
] | [
"# Copyright (c) 2019 Karl Sundequist Blomdahl <[email protected]>\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\nimport numpy as np\nimport tensorflow as tf\n\nfrom .batch_norm import batch_norm_conv2d\nfrom .dense import dense\nfrom .recompute_grad import recompute_grad\n\n\ndef policy_head(x, mode, params):\n \"\"\"\n The policy head attached after the residual blocks as described by DeepMind:\n\n 1. A convolution of 8 filters of kernel size 3 × 3 with stride 1\n 2. Batch normalisation\n 3. A rectifier non-linearity\n 4. A fully connected linear layer that outputs a vector of size 19²+1 = 362\n corresponding to logit probabilities for all intersections and the pass\n move\n \"\"\"\n num_channels = params['num_channels']\n num_samples = params['num_samples']\n\n def _forward(x, is_recomputing=False):\n \"\"\" Returns the result of the forward inference pass on `x` \"\"\"\n y = batch_norm_conv2d(x, 'conv_1', (3, 3, num_channels, num_samples), mode, params, is_recomputing=is_recomputing)\n y = tf.nn.relu(y)\n\n y = tf.reshape(y, (-1, 361 * num_samples))\n y = dense(y, 'linear_1', (361 * num_samples, 362), policy_offset_op, mode, params, is_recomputing=is_recomputing)\n\n return tf.cast(y, tf.float32)\n\n return recompute_grad(_forward)(x)\n\n\ndef policy_offset_op(shape, dtype=None, partition_info=None):\n \"\"\" Initial value for the policy offset, this should roughly correspond to\n the log probability of each move being played. 
\"\"\"\n return np.array([\n -7.93991e+00, -6.91853e+00, -6.86255e+00, -6.78094e+00, -6.79361e+00, -6.75976e+00,\n -6.88288e+00, -6.90817e+00, -6.93508e+00, -6.92374e+00, -6.91856e+00, -6.91075e+00,\n -6.87607e+00, -6.75246e+00, -6.79823e+00, -6.80791e+00, -6.86863e+00, -6.89708e+00,\n -7.93729e+00, -6.95779e+00, -6.11830e+00, -5.85974e+00, -5.83566e+00, -5.81966e+00,\n -5.84875e+00, -5.90686e+00, -5.97848e+00, -5.99648e+00, -5.99342e+00, -5.99524e+00,\n -5.96306e+00, -5.88135e+00, -5.83725e+00, -5.81963e+00, -5.84671e+00, -5.85574e+00,\n -6.07402e+00, -6.89741e+00, -6.91472e+00, -5.87616e+00, -5.51456e+00, -5.48398e+00,\n -5.55522e+00, -5.49329e+00, -5.70271e+00, -5.65749e+00, -5.70621e+00, -5.68975e+00,\n -5.69774e+00, -5.66463e+00, -5.68246e+00, -5.43859e+00, -5.59398e+00, -5.44977e+00,\n -5.45890e+00, -5.81432e+00, -6.85663e+00, -6.83055e+00, -5.84429e+00, -5.40160e+00,\n -5.34049e+00, -5.66119e+00, -5.62512e+00, -5.71932e+00, -5.72455e+00, -5.70309e+00,\n -5.69903e+00, -5.70189e+00, -5.71451e+00, -5.68138e+00, -5.59716e+00, -5.64521e+00,\n -5.29867e+00, -5.42794e+00, -5.80074e+00, -6.80807e+00, -6.81930e+00, -5.82896e+00,\n -5.63177e+00, -5.67078e+00, -5.93261e+00, -5.78339e+00, -5.80250e+00, -5.78522e+00,\n -5.79703e+00, -5.79409e+00, -5.79848e+00, -5.78746e+00, -5.77879e+00, -5.76154e+00,\n -5.94899e+00, -5.67992e+00, -5.59753e+00, -5.78787e+00, -6.79474e+00, -6.79318e+00,\n -5.85460e+00, -5.47365e+00, -5.60804e+00, -5.79080e+00, -5.80699e+00, -5.80015e+00,\n -5.81436e+00, -5.81617e+00, -5.80918e+00, -5.81150e+00, -5.80510e+00, -5.77611e+00,\n -5.78804e+00, -5.76476e+00, -5.58303e+00, -5.41241e+00, -5.83056e+00, -6.78050e+00,\n -6.88840e+00, -5.91061e+00, -5.69064e+00, -5.71108e+00, -5.79579e+00, -5.80311e+00,\n -5.81472e+00, -5.81526e+00, -5.81671e+00, -5.81616e+00, -5.81570e+00, -5.80513e+00,\n -5.79622e+00, -5.77254e+00, -5.77513e+00, -5.67571e+00, -5.67228e+00, -5.89279e+00,\n -6.86025e+00, -6.91154e+00, -5.97718e+00, -5.66273e+00, -5.72542e+00, -5.78770e+00,\n -5.81699e+00, -5.81516e+00, -5.81869e+00, -5.81941e+00, -5.81940e+00, -5.81482e+00,\n -5.80754e+00, -5.79365e+00, -5.78832e+00, -5.75882e+00, -5.70202e+00, -5.63253e+00,\n -5.94600e+00, -6.88401e+00, -6.91774e+00, -5.99960e+00, -5.70958e+00, -5.70386e+00,\n -5.80010e+00, -5.81106e+00, -5.81648e+00, -5.81789e+00, -5.81997e+00, -5.81948e+00,\n -5.81279e+00, -5.80583e+00, -5.80135e+00, -5.78998e+00, -5.77203e+00, -5.68193e+00,\n -5.67815e+00, -5.96948e+00, -6.88898e+00, -6.91699e+00, -5.99684e+00, -5.69323e+00,\n -5.68440e+00, -5.79516e+00, -5.81060e+00, -5.81611e+00, -5.81406e+00, -5.81620e+00,\n -5.80901e+00, -5.81298e+00, -5.80653e+00, -5.79696e+00, -5.78196e+00, -5.76473e+00,\n -5.65428e+00, -5.66398e+00, -5.96876e+00, -6.89641e+00, -6.92151e+00, -5.99694e+00,\n -5.71110e+00, -5.71325e+00, -5.79821e+00, -5.80778e+00, -5.81212e+00, -5.81205e+00,\n -5.81020e+00, -5.81116e+00, -5.80801e+00, -5.79830e+00, -5.79276e+00, -5.78653e+00,\n -5.77101e+00, -5.68899e+00, -5.69274e+00, -5.97098e+00, -6.90131e+00, -6.89817e+00,\n -5.95772e+00, -5.64660e+00, -5.72654e+00, -5.77678e+00, -5.80212e+00, -5.80607e+00,\n -5.80127e+00, -5.80551e+00, -5.80743e+00, -5.80042e+00, -5.79346e+00, -5.79025e+00,\n -5.78733e+00, -5.75338e+00, -5.69506e+00, -5.63437e+00, -5.95747e+00, -6.88818e+00,\n -6.86408e+00, -5.86964e+00, -5.67686e+00, -5.70769e+00, -5.79369e+00, -5.78719e+00,\n -5.79913e+00, -5.80025e+00, -5.80054e+00, -5.80132e+00, -5.79529e+00, -5.78667e+00,\n -5.78821e+00, -5.76922e+00, -5.76675e+00, -5.69570e+00, -5.68074e+00, -5.90285e+00,\n 
-6.86338e+00, -6.76061e+00, -5.80263e+00, -5.41706e+00, -5.58843e+00, -5.78328e+00,\n -5.79366e+00, -5.78934e+00, -5.79841e+00, -5.79591e+00, -5.79041e+00, -5.79060e+00,\n -5.78705e+00, -5.78000e+00, -5.77674e+00, -5.75681e+00, -5.57623e+00, -5.50113e+00,\n -5.85626e+00, -6.78012e+00, -6.79139e+00, -5.80594e+00, -5.58041e+00, -5.65286e+00,\n -5.94338e+00, -5.77647e+00, -5.78968e+00, -5.77167e+00, -5.78232e+00, -5.76841e+00,\n -5.77241e+00, -5.75895e+00, -5.78530e+00, -5.76951e+00, -5.88238e+00, -5.64461e+00,\n -5.61617e+00, -5.82903e+00, -6.80791e+00, -6.81286e+00, -5.84175e+00, -5.48596e+00,\n -5.28293e+00, -5.71807e+00, -5.60505e+00, -5.71724e+00, -5.70963e+00, -5.68757e+00,\n -5.65039e+00, -5.67046e+00, -5.68983e+00, -5.69079e+00, -5.58636e+00, -5.60082e+00,\n -5.39104e+00, -5.38788e+00, -5.85818e+00, -6.81584e+00, -6.83461e+00, -5.85197e+00,\n -5.47331e+00, -5.40193e+00, -5.63715e+00, -5.47135e+00, -5.68295e+00, -5.64977e+00,\n -5.67997e+00, -5.64680e+00, -5.67367e+00, -5.61327e+00, -5.67216e+00, -5.50078e+00,\n -5.53072e+00, -5.40751e+00, -5.52960e+00, -5.87713e+00, -6.89602e+00, -6.89446e+00,\n -6.07997e+00, -5.83860e+00, -5.78284e+00, -5.77460e+00, -5.81606e+00, -5.88522e+00,\n -5.95163e+00, -5.97232e+00, -5.95954e+00, -5.96527e+00, -5.94048e+00, -5.88465e+00,\n -5.82810e+00, -5.82003e+00, -5.84255e+00, -5.88531e+00, -6.11968e+00, -6.92480e+00,\n -7.88397e+00, -6.89418e+00, -6.83908e+00, -6.78821e+00, -6.75784e+00, -6.75053e+00,\n -6.85545e+00, -6.88249e+00, -6.88945e+00, -6.88525e+00, -6.88876e+00, -6.86828e+00,\n -6.83631e+00, -6.75981e+00, -6.76317e+00, -6.74771e+00, -6.86408e+00, -6.90874e+00,\n -7.91371e+00, -6.27113e+00\n ])\n"
] | [
[
"tensorflow.cast",
"tensorflow.nn.relu",
"numpy.array",
"tensorflow.reshape"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
}
] |
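The policy_offset_op in the record above hard-codes per-move log-probabilities that bias the 362-way policy output (19x19 intersections plus the pass move). As a hedged illustration of where such numbers could come from, the numpy-only sketch below derives offsets from move frequencies; the counts are synthetic placeholders, not data from the project.

import numpy as np

# Placeholder move counts over the 19*19 intersections plus the pass move.
rng = np.random.default_rng(0)
move_counts = rng.integers(1, 10_000, size=19 * 19 + 1).astype(float)

offsets = np.log(move_counts / move_counts.sum())   # log-probability per move
assert np.isclose(np.exp(offsets).sum(), 1.0)       # exponentiating recovers a distribution
print(offsets.shape, offsets[-1])                   # (362,); last entry is the pass move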
eblur/newdust | [
"7e843ae2604a844826606ea04c459694fdd5c178"
] | [
"newdust/graindist/composition/cmdrude.py"
] | [
"import numpy as np\nfrom newdust import constants as c\n\n__all__ = ['CmDrude']\n\nRHO_DRUDE = 3.0 # g cm^-3\nLAM_MAX = c.hc / 0.01 # maximal wavelength that we will allow for RG-Drude\n\nclass CmDrude(object):\n \"\"\"\n | **ATTRIBUTES**\n | cmtype : 'Drude'\n | rho : grain density [g cm^-3]\n | citation : A string containing citation to original work\n |\n | *functions*\n | rp(lam, unit='kev') : Returns real part (unit='kev'|'angs')\n | ip(lam, unit='kev') : Returns imaginary part (always 0.0)\n | cm(lam, unit='kev') : Complex index of refraction of dtype='complex'\n | plot(lam, unit='kev') : Plots Re(m-1)\n \"\"\"\n def __init__(self, rho=RHO_DRUDE): # Returns a CM using the Drude approximation\n self.cmtype = 'Drude'\n self.rho = rho\n self.citation = \"Using the Drude approximation.\\nBohren, C. F. & Huffman, D. R., 1983, Absorption and Scattering of Light by Small Particles (New York: Wiley)\"\n\n def rp(self, lam, unit='kev'):\n assert unit in c.ALLOWED_LAM_UNITS\n lam_cm = c._lam_cm(lam, unit)\n\n mm1 = self.rho / (2.0*c.m_p) * c.r_e/(2.0*np.pi) * np.power(lam_cm, 2)\n return mm1 + 1.0\n\n '''# Returns 1 if the wavelength supplied is too low energy (i.e. inappropriate for applying Drude)\n mm1 = np.zeros(np.size(lam_cm))\n if (np.size(lam_cm) == 1):\n if lam_cm >= LAM_MAX:\n pass\n else:\n mm1 = self.rho / (2.0*c.m_p) * c.r_e/(2.0*np.pi) * np.power(lam_cm, 2)\n else:\n ii = (lam_cm <= LAM_MAX)\n mm1[ii] = self.rho / (2.0*c.m_p) * c.r_e/(2.0*np.pi) * np.power(lam_cm[ii], 2)\n return mm1 + 1.0'''\n\n def ip(self, lam, unit='kev'):\n if np.size(lam) > 1:\n return np.zeros(np.size(lam))\n else:\n return 0.0\n\n def cm(self, lam, unit='kev'):\n return self.rp(lam, unit=unit) + 0j\n\n def plot(self, ax, lam, unit='kev', **kwargs):\n assert unit in c.ALLOWED_LAM_UNITS\n rp = self.rp(lam, unit=unit)\n ax.plot(lam, rp-1.0, **kwargs)\n ax.set_ylabel(\"m-1\")\n if unit == 'kev':\n ax.set_xlabel(\"Energy (keV)\")\n if unit == 'angs':\n ax.set_xlabel(\"Wavelength (Angstroms)\")\n"
] | [
[
"numpy.size",
"numpy.power"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
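A brief usage sketch for the CmDrude composition in the newdust record above; the import path follows the file listing in the record and the keV grid is an illustrative choice.

import numpy as np
import matplotlib.pyplot as plt
from newdust.graindist.composition.cmdrude import CmDrude  # path follows the file listing above

drude = CmDrude()                        # default grain density 3.0 g cm^-3
energy_kev = np.linspace(0.5, 10.0, 200) # illustrative X-ray energy grid

m = drude.cm(energy_kev, unit='kev')     # complex index of refraction; Im(m) is 0 for Drude
print(m[:3])

fig, ax = plt.subplots()
drude.plot(ax, energy_kev, unit='kev')   # plots Re(m) - 1 against energy
plt.show()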
luvrpg/cleverhans | [
"1f2ee7a04cff1ec54c96dcba5294f6e2d7780d42",
"1f2ee7a04cff1ec54c96dcba5294f6e2d7780d42"
] | [
"examples/nips17_adversarial_competition/dev_toolkit/sample_defenses/ens_adv_inception_resnet_v2/defense.py",
"cleverhans/attacks.py"
] | [
"\"\"\"Implementation of sample defense.\n\nThis defense loads inception resnet v2 checkpoint and classifies all images\nusing loaded checkpoint.\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\n\nimport numpy as np\nfrom scipy.misc import imread\n\nimport tensorflow as tf\n\nimport inception_resnet_v2\n\nslim = tf.contrib.slim\n\n\ntf.flags.DEFINE_string(\n 'master', '', 'The address of the TensorFlow master to use.')\n\ntf.flags.DEFINE_string(\n 'checkpoint_path', '', 'Path to checkpoint for inception network.')\n\ntf.flags.DEFINE_string(\n 'input_dir', '', 'Input directory with images.')\n\ntf.flags.DEFINE_string(\n 'output_file', '', 'Output file to save labels.')\n\ntf.flags.DEFINE_integer(\n 'image_width', 299, 'Width of each input images.')\n\ntf.flags.DEFINE_integer(\n 'image_height', 299, 'Height of each input images.')\n\ntf.flags.DEFINE_integer(\n 'batch_size', 16, 'How many images process at one time.')\n\nFLAGS = tf.flags.FLAGS\n\n\ndef load_images(input_dir, batch_shape):\n \"\"\"Read png images from input directory in batches.\n\n Args:\n input_dir: input directory\n batch_shape: shape of minibatch array, i.e. [batch_size, height, width, 3]\n\n Yields:\n filenames: list file names without path of each image\n Lenght of this list could be less than batch_size, in this case only\n first few images of the result are elements of the minibatch.\n images: array with all images from this batch\n \"\"\"\n images = np.zeros(batch_shape)\n filenames = []\n idx = 0\n batch_size = batch_shape[0]\n for filepath in tf.gfile.Glob(os.path.join(input_dir, '*.png')):\n with tf.gfile.Open(filepath) as f:\n image = imread(f, mode='RGB').astype(np.float) / 255.0\n # Images for inception classifier are normalized to be in [-1, 1] interval.\n images[idx, :, :, :] = image * 2.0 - 1.0\n filenames.append(os.path.basename(filepath))\n idx += 1\n if idx == batch_size:\n yield filenames, images\n filenames = []\n images = np.zeros(batch_shape)\n idx = 0\n if idx > 0:\n yield filenames, images\n\n\ndef main(_):\n batch_shape = [FLAGS.batch_size, FLAGS.image_height, FLAGS.image_width, 3]\n num_classes = 1001\n\n tf.logging.set_verbosity(tf.logging.INFO)\n\n with tf.Graph().as_default():\n # Prepare graph\n x_input = tf.placeholder(tf.float32, shape=batch_shape)\n\n with slim.arg_scope(inception_resnet_v2.inception_resnet_v2_arg_scope()):\n _, end_points = inception_resnet_v2.inception_resnet_v2(\n x_input, num_classes=num_classes, is_training=False)\n\n predicted_labels = tf.argmax(end_points['Predictions'], 1)\n\n # Run computation\n saver = tf.train.Saver(slim.get_model_variables())\n session_creator = tf.train.ChiefSessionCreator(\n scaffold=tf.train.Scaffold(saver=saver),\n checkpoint_filename_with_path=FLAGS.checkpoint_path,\n master=FLAGS.master)\n\n with tf.train.MonitoredSession(session_creator=session_creator) as sess:\n with tf.gfile.Open(FLAGS.output_file, 'w') as out_file:\n for filenames, images in load_images(FLAGS.input_dir, batch_shape):\n labels = sess.run(predicted_labels, feed_dict={x_input: images})\n for filename, label in zip(filenames, labels):\n out_file.write('{0},{1}\\n'.format(filename, label))\n\n\nif __name__ == '__main__':\n tf.app.run()\n",
"from abc import ABCMeta\nimport numpy as np\nfrom six.moves import xrange\nimport warnings\nimport collections\n\nimport cleverhans.utils as utils\nfrom cleverhans.model import Model, CallableModelWrapper\n\n_logger = utils.create_logger(\"cleverhans.attacks\")\n\n\nclass Attack(object):\n\n \"\"\"\n Abstract base class for all attack classes.\n \"\"\"\n __metaclass__ = ABCMeta\n\n def __init__(self, model, back='tf', sess=None):\n \"\"\"\n :param model: An instance of the cleverhans.model.Model class.\n :param back: The backend to use. Currently 'tf' is the only option.\n :param sess: The tf session to run graphs in\n \"\"\"\n if not(back == 'tf'):\n raise ValueError(\"Backend argument must either be 'tf'.\")\n\n if back == 'tf' and sess is None:\n import tensorflow as tf\n sess = tf.get_default_session()\n\n if not isinstance(model, Model):\n raise ValueError(\"The model argument should be an instance of\"\n \" the cleverhans.model.Model class.\")\n\n # Prepare attributes\n self.model = model\n self.back = back\n self.sess = sess\n\n # We are going to keep track of old graphs and cache them.\n self.graphs = {}\n\n # When calling generate_np, arguments in the following set should be\n # fed into the graph, as they are not structural items that require\n # generating a new graph.\n # This dict should map names of arguments to the types they should\n # have.\n # (Usually, the target class will be a feedable keyword argument.)\n self.feedable_kwargs = {}\n\n # When calling generate_np, arguments in the following set should NOT\n # be fed into the graph, as they ARE structural items that require\n # generating a new graph.\n # This list should contain the names of the structural arguments.\n self.structural_kwargs = []\n\n def generate(self, x, **kwargs):\n \"\"\"\n Generate the attack's symbolic graph for adversarial examples. This\n method should be overriden in any child class that implements an\n attack that is expressable symbolically. 
Otherwise, it will wrap the\n numerical implementation as a symbolic operator.\n\n :param x: The model's symbolic inputs.\n :param **kwargs: optional parameters used by child classes.\n :return: A symbolic representation of the adversarial examples.\n \"\"\"\n\n error = \"Sub-classes must implement generate.\"\n raise NotImplementedError(error)\n\n def construct_graph(self, fixed, feedable, x_val, hash_key):\n \"\"\"\n Construct the graph required to run the attack through generate_np.\n\n :param fixed: Structural elements that require defining a new graph.\n :param feedable: Arguments that can be fed to the same graph when\n they take different values.\n :param x_val: symbolic adversarial example\n :param hash_key: the key used to store this graph in our cache\n \"\"\"\n # try our very best to create a TF placeholder for each of the\n # feedable keyword arguments, and check the types are one of\n # the allowed types\n import tensorflow as tf\n\n class_name = str(self.__class__).split(\".\")[-1][:-2]\n _logger.info(\"Constructing new graph for attack \" + class_name)\n\n # remove the None arguments, they are just left blank\n for k in list(feedable.keys()):\n if feedable[k] is None:\n del feedable[k]\n\n # process all of the rest and create placeholders for them\n new_kwargs = dict(x for x in fixed.items())\n for name, value in feedable.items():\n given_type = self.feedable_kwargs[name]\n if isinstance(value, np.ndarray):\n new_shape = [None] + list(value.shape[1:])\n new_kwargs[name] = tf.placeholder(given_type, new_shape)\n elif isinstance(value, utils.known_number_types):\n new_kwargs[name] = tf.placeholder(given_type, shape=[])\n else:\n raise ValueError(\"Could not identify type of argument \" +\n name + \": \" + str(value))\n\n # x is a special placeholder we always want to have\n x_shape = [None] + list(x_val.shape)[1:]\n x = tf.placeholder(tf.float32, shape=x_shape)\n\n # now we generate the graph that we want\n x_adv = self.generate(x, **new_kwargs)\n\n self.graphs[hash_key] = (x, new_kwargs, x_adv)\n\n if len(self.graphs) >= 10:\n warnings.warn(\"Calling generate_np() with multiple different \"\n \"structural paramaters is inefficient and should\"\n \" be avoided. 
Calling generate() is preferred.\")\n\n def generate_np(self, x_val, **kwargs):\n \"\"\"\n Generate adversarial examples and return them as a NumPy array.\n Sub-classes *should not* implement this method unless they must\n perform special handling of arguments.\n\n :param x_val: A NumPy array with the original inputs.\n :param **kwargs: optional parameters used by child classes.\n :return: A NumPy array holding the adversarial examples.\n \"\"\"\n if self.sess is None:\n raise ValueError(\"Cannot use `generate_np` when no `sess` was\"\n \" provided\")\n\n fixed, feedable, hash_key = self.construct_variables(kwargs)\n\n if hash_key not in self.graphs:\n self.construct_graph(fixed, feedable, x_val, hash_key)\n\n x, new_kwargs, x_adv = self.graphs[hash_key]\n\n feed_dict = {x: x_val}\n\n for name in feedable:\n feed_dict[new_kwargs[name]] = feedable[name]\n\n return self.sess.run(x_adv, feed_dict)\n\n def construct_variables(self, kwargs):\n \"\"\"\n Construct the inputs to the attack graph to be used by generate_np.\n\n :param kwargs: Keyword arguments to generate_np.\n :return: Structural and feedable arguments as well as a unique key\n for the graph given these inputs.\n \"\"\"\n # the set of arguments that are structural properties of the attack\n # if these arguments are different, we must construct a new graph\n fixed = dict((k, v) for k, v in kwargs.items()\n if k in self.structural_kwargs)\n\n # the set of arguments that are passed as placeholders to the graph\n # on each call, and can change without constructing a new graph\n feedable = dict((k, v) for k, v in kwargs.items()\n if k in self.feedable_kwargs)\n\n if len(fixed) + len(feedable) < len(kwargs):\n warnings.warn(\"Supplied extra keyword arguments that are not \"\n \"used in the graph computation. 
They have been \"\n \"ignored.\")\n\n if not all(isinstance(value, collections.Hashable)\n for value in fixed.values()):\n # we have received a fixed value that isn't hashable\n # this means we can't cache this graph for later use,\n # and it will have to be discarded later\n hash_key = None\n else:\n # create a unique key for this set of fixed paramaters\n hash_key = tuple(sorted(fixed.items()))\n\n return fixed, feedable, hash_key\n\n def get_or_guess_labels(self, x, kwargs):\n \"\"\"\n Get the label to use in generating an adversarial example for x.\n The kwargs are fed directly from the kwargs of the attack.\n If 'y' is in kwargs, then assume it's an untargeted attack and\n use that as the label.\n If 'y_target' is in kwargs, then assume it's a targeted attack and\n use that as the label.\n Otherwise, use the model's prediction as the label and perform an\n untargeted attack.\n \"\"\"\n import tensorflow as tf\n\n if 'y' in kwargs and 'y_target' in kwargs:\n raise ValueError(\"Can not set both 'y' and 'y_target'.\")\n elif 'y' in kwargs:\n labels = kwargs['y']\n elif 'y_target' in kwargs:\n labels = kwargs['y_target']\n else:\n preds = self.model.get_probs(x)\n preds_max = tf.reduce_max(preds, 1, keep_dims=True)\n original_predictions = tf.to_float(tf.equal(preds,\n preds_max))\n labels = tf.stop_gradient(original_predictions)\n if isinstance(labels, np.ndarray):\n nb_classes = labels.shape[1]\n else:\n nb_classes = labels.get_shape().as_list()[1]\n return labels, nb_classes\n\n def parse_params(self, params=None):\n \"\"\"\n Take in a dictionary of parameters and applies attack-specific checks\n before saving them as attributes.\n\n :param params: a dictionary of attack-specific parameters\n :return: True when parsing was successful\n \"\"\"\n return True\n\n\nclass FastGradientMethod(Attack):\n\n \"\"\"\n This attack was originally implemented by Goodfellow et al. (2015) with the\n infinity norm (and is known as the \"Fast Gradient Sign Method\"). This\n implementation extends the attack to other norms, and is therefore called\n the Fast Gradient Method.\n Paper link: https://arxiv.org/abs/1412.6572\n \"\"\"\n\n def __init__(self, model, back='tf', sess=None):\n \"\"\"\n Create a FastGradientMethod instance.\n Note: the model parameter should be an instance of the\n cleverhans.model.Model abstraction provided by CleverHans.\n \"\"\"\n if not isinstance(model, Model):\n model = CallableModelWrapper(model, 'probs')\n\n super(FastGradientMethod, self).__init__(model, back, sess)\n self.feedable_kwargs = {'eps': np.float32,\n 'y': np.float32,\n 'y_target': np.float32,\n 'clip_min': np.float32,\n 'clip_max': np.float32}\n self.structural_kwargs = ['ord']\n\n def generate(self, x, **kwargs):\n \"\"\"\n Generate symbolic graph for adversarial examples and return.\n\n :param x: The model's symbolic inputs.\n :param eps: (optional float) attack step size (input variation)\n :param ord: (optional) Order of the norm (mimics NumPy).\n Possible values: np.inf, 1 or 2.\n :param y: (optional) A tensor with the model labels. Only provide\n this parameter if you'd like to use true labels when crafting\n adversarial samples. Otherwise, model predictions are used as\n labels to avoid the \"label leaking\" effect (explained in this\n paper: https://arxiv.org/abs/1611.01236). Default is None.\n Labels should be one-hot-encoded.\n :param y_target: (optional) A tensor with the labels to target. Leave\n y_target=None if y is also set. 
Labels should be\n one-hot-encoded.\n :param clip_min: (optional float) Minimum input component value\n :param clip_max: (optional float) Maximum input component value\n \"\"\"\n # Parse and save attack-specific parameters\n assert self.parse_params(**kwargs)\n\n from .attacks_tf import fgm\n\n labels, nb_classes = self.get_or_guess_labels(x, kwargs)\n\n return fgm(x, self.model.get_probs(x), y=labels, eps=self.eps,\n ord=self.ord, clip_min=self.clip_min,\n clip_max=self.clip_max,\n targeted=(self.y_target is not None))\n\n def parse_params(self, eps=0.3, ord=np.inf, y=None, y_target=None,\n clip_min=None, clip_max=None, **kwargs):\n \"\"\"\n Take in a dictionary of parameters and applies attack-specific checks\n before saving them as attributes.\n\n Attack-specific parameters:\n\n :param eps: (optional float) attack step size (input variation)\n :param ord: (optional) Order of the norm (mimics NumPy).\n Possible values: np.inf, 1 or 2.\n :param y: (optional) A tensor with the model labels. Only provide\n this parameter if you'd like to use true labels when crafting\n adversarial samples. Otherwise, model predictions are used as\n labels to avoid the \"label leaking\" effect (explained in this\n paper: https://arxiv.org/abs/1611.01236). Default is None.\n Labels should be one-hot-encoded.\n :param y_target: (optional) A tensor with the labels to target. Leave\n y_target=None if y is also set. Labels should be\n one-hot-encoded.\n :param clip_min: (optional float) Minimum input component value\n :param clip_max: (optional float) Maximum input component value\n \"\"\"\n # Save attack-specific parameters\n\n self.eps = eps\n self.ord = ord\n self.y = y\n self.y_target = y_target\n self.clip_min = clip_min\n self.clip_max = clip_max\n\n if self.y is not None and self.y_target is not None:\n raise ValueError(\"Must not set both y and y_target\")\n # Check if order of the norm is acceptable given current implementation\n if self.ord not in [np.inf, int(1), int(2)]:\n raise ValueError(\"Norm order must be either np.inf, 1, or 2.\")\n return True\n\n\nclass BasicIterativeMethod(Attack):\n\n \"\"\"\n The Basic Iterative Method (Kurakin et al. 2016). The original paper used\n hard labels for this attack; no label smoothing.\n Paper link: https://arxiv.org/pdf/1607.02533.pdf\n \"\"\"\n\n def __init__(self, model, back='tf', sess=None):\n \"\"\"\n Create a BasicIterativeMethod instance.\n Note: the model parameter should be an instance of the\n cleverhans.model.Model abstraction provided by CleverHans.\n \"\"\"\n if not isinstance(model, Model):\n model = CallableModelWrapper(model, 'probs')\n\n super(BasicIterativeMethod, self).__init__(model, back, sess)\n self.feedable_kwargs = {'eps': np.float32,\n 'eps_iter': np.float32,\n 'y': np.float32,\n 'y_target': np.float32,\n 'clip_min': np.float32,\n 'clip_max': np.float32}\n self.structural_kwargs = ['ord', 'nb_iter']\n\n def generate(self, x, **kwargs):\n \"\"\"\n Generate symbolic graph for adversarial examples and return.\n\n :param x: The model's symbolic inputs.\n :param eps: (required float) maximum distortion of adversarial example\n compared to original input\n :param eps_iter: (required float) step size for each attack iteration\n :param nb_iter: (required int) Number of attack iterations.\n :param y: (optional) A tensor with the model labels.\n :param y_target: (optional) A tensor with the labels to target. Leave\n y_target=None if y is also set. 
Labels should be\n one-hot-encoded.\n :param ord: (optional) Order of the norm (mimics Numpy).\n Possible values: np.inf, 1 or 2.\n :param clip_min: (optional float) Minimum input component value\n :param clip_max: (optional float) Maximum input component value\n \"\"\"\n import tensorflow as tf\n\n # Parse and save attack-specific parameters\n assert self.parse_params(**kwargs)\n\n # Initialize loop variables\n eta = 0\n\n # Fix labels to the first model predictions for loss computation\n model_preds = self.model.get_probs(x)\n preds_max = tf.reduce_max(model_preds, 1, keep_dims=True)\n if self.y_target is not None:\n y = self.y_target\n targeted = True\n elif self.y is not None:\n y = self.y\n targeted = False\n else:\n y = tf.to_float(tf.equal(model_preds, preds_max))\n y = tf.stop_gradient(y)\n targeted = False\n\n y_kwarg = 'y_target' if targeted else 'y'\n fgm_params = {'eps': self.eps_iter, y_kwarg: y, 'ord': self.ord,\n 'clip_min': self.clip_min, 'clip_max': self.clip_max}\n\n for i in range(self.nb_iter):\n FGM = FastGradientMethod(self.model, back=self.back,\n sess=self.sess)\n # Compute this step's perturbation\n adv_x = FGM.generate(x + eta, **fgm_params)\n\n # Clipping perturbation according to clip_min and clip_max\n if self.clip_min is not None and self.clip_max is not None:\n adv_x = tf.clip_by_value(adv_x, self.clip_min, self.clip_max)\n\n # Clipping perturbation eta to self.ord norm ball\n eta = adv_x - x\n from cleverhans.utils_tf import clip_eta\n eta = clip_eta(eta, self.ord, self.eps)\n\n # Define adversarial example (and clip if necessary)\n adv_x = x + eta\n if self.clip_min is not None and self.clip_max is not None:\n adv_x = tf.clip_by_value(adv_x, self.clip_min, self.clip_max)\n\n return adv_x\n\n def parse_params(self, eps=0.3, eps_iter=0.05, nb_iter=10, y=None,\n ord=np.inf, clip_min=None, clip_max=None,\n y_target=None, **kwargs):\n \"\"\"\n Take in a dictionary of parameters and applies attack-specific checks\n before saving them as attributes.\n\n Attack-specific parameters:\n\n :param eps: (required float) maximum distortion of adversarial example\n compared to original input\n :param eps_iter: (required float) step size for each attack iteration\n :param nb_iter: (required int) Number of attack iterations.\n :param y: (optional) A tensor with the model labels.\n :param y_target: (optional) A tensor with the labels to target. Leave\n y_target=None if y is also set. Labels should be\n one-hot-encoded.\n :param ord: (optional) Order of the norm (mimics Numpy).\n Possible values: np.inf, 1 or 2.\n :param clip_min: (optional float) Minimum input component value\n :param clip_max: (optional float) Maximum input component value\n \"\"\"\n\n # Save attack-specific parameters\n self.eps = eps\n self.eps_iter = eps_iter\n self.nb_iter = nb_iter\n self.y = y\n self.y_target = y_target\n self.ord = ord\n self.clip_min = clip_min\n self.clip_max = clip_max\n\n if self.y is not None and self.y_target is not None:\n raise ValueError(\"Must not set both y and y_target\")\n # Check if order of the norm is acceptable given current implementation\n if self.ord not in [np.inf, 1, 2]:\n raise ValueError(\"Norm order must be either np.inf, 1, or 2.\")\n\n return True\n\n\nclass MomentumIterativeMethod(Attack):\n\n \"\"\"\n The Momentum Iterative Method (Dong et al. 2017). This method won\n the first places in NIPS 2017 Non-targeted Adversarial Attacks and\n Targeted Adversarial Attacks. 
The original paper used hard labels\n for this attack; no label smoothing.\n Paper link: https://arxiv.org/pdf/1710.06081.pdf\n \"\"\"\n\n def __init__(self, model, back='tf', sess=None):\n \"\"\"\n Create a MomentumIterativeMethod instance.\n Note: the model parameter should be an instance of the\n cleverhans.model.Model abstraction provided by CleverHans.\n \"\"\"\n if not isinstance(model, Model):\n model = CallableModelWrapper(model, 'probs')\n\n super(MomentumIterativeMethod, self).__init__(model, back, sess)\n self.feedable_kwargs = {'eps': np.float32,\n 'eps_iter': np.float32,\n 'y': np.float32,\n 'y_target': np.float32,\n 'clip_min': np.float32,\n 'clip_max': np.float32}\n self.structural_kwargs = ['ord', 'nb_iter', 'decay_factor']\n\n def generate(self, x, **kwargs):\n \"\"\"\n Generate symbolic graph for adversarial examples and return.\n\n :param x: The model's symbolic inputs.\n :param eps: (required float) maximum distortion of adversarial example\n compared to original input\n :param eps_iter: (required float) step size for each attack iteration\n :param nb_iter: (required int) Number of attack iterations.\n :param y: (optional) A tensor with the model labels.\n :param y_target: (optional) A tensor with the labels to target. Leave\n y_target=None if y is also set. Labels should be\n one-hot-encoded.\n :param ord: (optional) Order of the norm (mimics Numpy).\n Possible values: np.inf, 1 or 2.\n :param decay_factor: (optional) Decay factor for the momentum term.\n :param clip_min: (optional float) Minimum input component value\n :param clip_max: (optional float) Maximum input component value\n \"\"\"\n import tensorflow as tf\n\n # Parse and save attack-specific parameters\n assert self.parse_params(**kwargs)\n\n # Initialize loop variables\n momentum = 0\n adv_x = x\n\n # Fix labels to the first model predictions for loss computation\n y, nb_classes = self.get_or_guess_labels(x, kwargs)\n y = y / tf.reduce_sum(y, 1, keep_dims=True)\n targeted = (self.y_target is not None)\n\n from . 
import utils_tf\n for i in range(self.nb_iter):\n # Compute loss\n preds = self.model.get_probs(adv_x)\n loss = utils_tf.model_loss(y, preds, mean=False)\n if targeted:\n loss = -loss\n\n # Define gradient of loss wrt input\n grad, = tf.gradients(loss, adv_x)\n\n # Normalize current gradient and add it to the accumulated gradient\n red_ind = list(xrange(1, len(grad.get_shape())))\n avoid_zero_div = tf.cast(1e-12, grad.dtype)\n grad = grad / tf.maximum(avoid_zero_div,\n tf.reduce_mean(tf.abs(grad),\n red_ind,\n keep_dims=True))\n momentum = self.decay_factor * momentum + grad\n\n if self.ord == np.inf:\n normalized_grad = tf.sign(momentum)\n elif self.ord == 1:\n norm = tf.maximum(avoid_zero_div,\n tf.reduce_sum(tf.abs(momentum),\n red_ind,\n keep_dims=True))\n normalized_grad = momentum / norm\n elif self.ord == 2:\n square = tf.reduce_sum(tf.square(momentum),\n red_ind,\n keep_dims=True)\n norm = tf.sqrt(tf.maximum(avoid_zero_div, square))\n normalized_grad = momentum / norm\n else:\n raise NotImplementedError(\"Only L-inf, L1 and L2 norms are \"\n \"currently implemented.\")\n\n # Update and clip adversarial example in current iteration\n scaled_grad = self.eps_iter * normalized_grad\n adv_x = adv_x + scaled_grad\n adv_x = x + utils_tf.clip_eta(adv_x - x, self.ord, self.eps)\n\n if self.clip_min is not None and self.clip_max is not None:\n adv_x = tf.clip_by_value(adv_x, self.clip_min, self.clip_max)\n\n adv_x = tf.stop_gradient(adv_x)\n\n return adv_x\n\n def parse_params(self, eps=0.3, eps_iter=0.06, nb_iter=10, y=None,\n ord=np.inf, decay_factor=1.0,\n clip_min=None, clip_max=None,\n y_target=None, **kwargs):\n \"\"\"\n Take in a dictionary of parameters and applies attack-specific checks\n before saving them as attributes.\n\n Attack-specific parameters:\n\n :param eps: (required float) maximum distortion of adversarial example\n compared to original input\n :param eps_iter: (required float) step size for each attack iteration\n :param nb_iter: (required int) Number of attack iterations.\n :param y: (optional) A tensor with the model labels.\n :param y_target: (optional) A tensor with the labels to target. Leave\n y_target=None if y is also set. Labels should be\n one-hot-encoded.\n :param ord: (optional) Order of the norm (mimics Numpy).\n Possible values: np.inf, 1 or 2.\n :param decay_factor: (optional) Decay factor for the momentum term.\n :param clip_min: (optional float) Minimum input component value\n :param clip_max: (optional float) Maximum input component value\n \"\"\"\n\n # Save attack-specific parameters\n self.eps = eps\n self.eps_iter = eps_iter\n self.nb_iter = nb_iter\n self.y = y\n self.y_target = y_target\n self.ord = ord\n self.decay_factor = decay_factor\n self.clip_min = clip_min\n self.clip_max = clip_max\n\n if self.y is not None and self.y_target is not None:\n raise ValueError(\"Must not set both y and y_target\")\n # Check if order of the norm is acceptable given current implementation\n if self.ord not in [np.inf, 1, 2]:\n raise ValueError(\"Norm order must be either np.inf, 1, or 2.\")\n\n return True\n\n\nclass SaliencyMapMethod(Attack):\n\n \"\"\"\n The Jacobian-based Saliency Map Method (Papernot et al. 
2016).\n Paper link: https://arxiv.org/pdf/1511.07528.pdf\n \"\"\"\n\n def __init__(self, model, back='tf', sess=None):\n \"\"\"\n Create a SaliencyMapMethod instance.\n Note: the model parameter should be an instance of the\n cleverhans.model.Model abstraction provided by CleverHans.\n \"\"\"\n if not isinstance(model, Model):\n model = CallableModelWrapper(model, 'probs')\n\n super(SaliencyMapMethod, self).__init__(model, back, sess)\n\n import tensorflow as tf\n self.feedable_kwargs = {'y_target': tf.float32}\n self.structural_kwargs = ['theta', 'gamma',\n 'clip_max', 'clip_min', 'symbolic_impl']\n\n def generate(self, x, **kwargs):\n \"\"\"\n Generate symbolic graph for adversarial examples and return.\n\n :param x: The model's symbolic inputs.\n :param theta: (optional float) Perturbation introduced to modified\n components (can be positive or negative)\n :param gamma: (optional float) Maximum percentage of perturbed features\n :param clip_min: (optional float) Minimum component value for clipping\n :param clip_max: (optional float) Maximum component value for clipping\n :param y_target: (optional) Target tensor if the attack is targeted\n \"\"\"\n import tensorflow as tf\n\n # Parse and save attack-specific parameters\n assert self.parse_params(**kwargs)\n\n if self.symbolic_impl:\n from .attacks_tf import jsma_symbolic\n\n # Create random targets if y_target not provided\n if self.y_target is None:\n from random import randint\n\n def random_targets(gt):\n result = gt.copy()\n nb_s = gt.shape[0]\n nb_classes = gt.shape[1]\n\n for i in xrange(nb_s):\n result[i, :] = np.roll(result[i, :],\n randint(1, nb_classes-1))\n\n return result\n\n labels, nb_classes = self.get_or_guess_labels(x, kwargs)\n self.y_target = tf.py_func(random_targets, [labels],\n tf.float32)\n self.y_target.set_shape([None, nb_classes])\n\n x_adv = jsma_symbolic(x, model=self.model, y_target=self.y_target,\n theta=self.theta, gamma=self.gamma,\n clip_min=self.clip_min,\n clip_max=self.clip_max)\n else:\n from .attacks_tf import jacobian_graph, jsma_batch\n\n # Define Jacobian graph wrt to this input placeholder\n preds = self.model.get_probs(x)\n nb_classes = preds.get_shape().as_list()[-1]\n grads = jacobian_graph(preds, x, nb_classes)\n\n # Define appropriate graph (targeted / random target labels)\n if self.y_target is not None:\n def jsma_wrap(x_val, y_target):\n return jsma_batch(self.sess, x, preds, grads, x_val,\n self.theta, self.gamma, self.clip_min,\n self.clip_max, nb_classes,\n y_target=y_target)\n\n # Attack is targeted, target placeholder will need to be fed\n x_adv = tf.py_func(jsma_wrap, [x, self.y_target], tf.float32)\n else:\n def jsma_wrap(x_val):\n return jsma_batch(self.sess, x, preds, grads, x_val,\n self.theta, self.gamma, self.clip_min,\n self.clip_max, nb_classes,\n y_target=None)\n\n # Attack is untargeted, target values will be chosen at random\n x_adv = tf.py_func(jsma_wrap, [x], tf.float32)\n\n return x_adv\n\n def parse_params(self, theta=1., gamma=1., nb_classes=None,\n clip_min=0., clip_max=1., y_target=None,\n symbolic_impl=True, **kwargs):\n \"\"\"\n Take in a dictionary of parameters and applies attack-specific checks\n before saving them as attributes.\n\n Attack-specific parameters:\n\n :param theta: (optional float) Perturbation introduced to modified\n components (can be positive or negative)\n :param gamma: (optional float) Maximum percentage of perturbed features\n :param nb_classes: (optional int) Number of model output classes\n :param clip_min: (optional float) Minimum 
component value for clipping\n :param clip_max: (optional float) Maximum component value for clipping\n :param y_target: (optional) Target tensor if the attack is targeted\n \"\"\"\n\n if nb_classes is not None:\n warnings.warn(\"The nb_classes argument is depricated and will \"\n \"be removed on 2018-02-11\")\n self.theta = theta\n self.gamma = gamma\n self.clip_min = clip_min\n self.clip_max = clip_max\n self.y_target = y_target\n self.symbolic_impl = symbolic_impl\n\n return True\n\n\nclass VirtualAdversarialMethod(Attack):\n\n \"\"\"\n This attack was originally proposed by Miyato et al. (2016) and was used\n for virtual adversarial training.\n Paper link: https://arxiv.org/abs/1507.00677\n\n \"\"\"\n\n def __init__(self, model, back='tf', sess=None):\n \"\"\"\n Note: the model parameter should be an instance of the\n cleverhans.model.Model abstraction provided by CleverHans.\n \"\"\"\n if not isinstance(model, Model):\n model = CallableModelWrapper(model, 'logits')\n\n super(VirtualAdversarialMethod, self).__init__(model, back, sess)\n\n import tensorflow as tf\n self.feedable_kwargs = {'eps': tf.float32, 'xi': tf.float32,\n 'clip_min': tf.float32,\n 'clip_max': tf.float32}\n self.structural_kwargs = ['num_iterations']\n\n def generate(self, x, **kwargs):\n \"\"\"\n Generate symbolic graph for adversarial examples and return.\n\n :param x: The model's symbolic inputs.\n :param eps: (optional float ) the epsilon (input variation parameter)\n :param num_iterations: (optional) the number of iterations\n :param xi: (optional float) the finite difference parameter\n :param clip_min: (optional float) Minimum input component value\n :param clip_max: (optional float) Maximum input component value\n \"\"\"\n # Parse and save attack-specific parameters\n assert self.parse_params(**kwargs)\n\n return vatm(self.model, x, self.model.get_logits(x), eps=self.eps,\n num_iterations=self.num_iterations, xi=self.xi,\n clip_min=self.clip_min, clip_max=self.clip_max)\n\n def parse_params(self, eps=2.0, num_iterations=1, xi=1e-6, clip_min=None,\n clip_max=None, **kwargs):\n \"\"\"\n Take in a dictionary of parameters and applies attack-specific checks\n before saving them as attributes.\n\n Attack-specific parameters:\n\n :param eps: (optional float )the epsilon (input variation parameter)\n :param num_iterations: (optional) the number of iterations\n :param xi: (optional float) the finite difference parameter\n :param clip_min: (optional float) Minimum input component value\n :param clip_max: (optional float) Maximum input component value\n \"\"\"\n # Save attack-specific parameters\n self.eps = eps\n self.num_iterations = num_iterations\n self.xi = xi\n self.clip_min = clip_min\n self.clip_max = clip_max\n return True\n\n\nclass CarliniWagnerL2(Attack):\n \"\"\"\n This attack was originally proposed by Carlini and Wagner. It is an\n iterative attack that finds adversarial examples on many defenses that\n are robust to other attacks.\n Paper link: https://arxiv.org/abs/1608.04644\n\n At a high level, this attack is an iterative attack using Adam and\n a specially-chosen loss function to find adversarial examples with\n lower distortion than other attacks. 
This comes at the cost of speed,\n as this attack is often much slower than others.\n \"\"\"\n def __init__(self, model, back='tf', sess=None):\n \"\"\"\n Note: the model parameter should be an instance of the\n cleverhans.model.Model abstraction provided by CleverHans.\n \"\"\"\n if not isinstance(model, Model):\n model = CallableModelWrapper(model, 'logits')\n\n super(CarliniWagnerL2, self).__init__(model, back, sess)\n\n import tensorflow as tf\n self.feedable_kwargs = {'y': tf.float32,\n 'y_target': tf.float32}\n\n self.structural_kwargs = ['batch_size', 'confidence',\n 'targeted', 'learning_rate',\n 'binary_search_steps', 'max_iterations',\n 'abort_early', 'initial_const',\n 'clip_min', 'clip_max']\n\n def generate(self, x, **kwargs):\n \"\"\"\n Return a tensor that constructs adversarial examples for the given\n input. Generate uses tf.py_func in order to operate over tensors.\n\n :param x: (required) A tensor with the inputs.\n :param y: (optional) A tensor with the true labels for an untargeted\n attack. If None (and y_target is None) then use the\n original labels the classifier assigns.\n :param y_target: (optional) A tensor with the target labels for a\n targeted attack.\n :param confidence: Confidence of adversarial examples: higher produces\n examples with larger l2 distortion, but more\n strongly classified as adversarial.\n :param batch_size: Number of attacks to run simultaneously.\n :param learning_rate: The learning rate for the attack algorithm.\n Smaller values produce better results but are\n slower to converge.\n :param binary_search_steps: The number of times we perform binary\n search to find the optimal tradeoff-\n constant between norm of the purturbation\n and confidence of the classification.\n :param max_iterations: The maximum number of iterations. Setting this\n to a larger value will produce lower distortion\n results. Using only a few iterations requires\n a larger learning rate, and will produce larger\n distortion results.\n :param abort_early: If true, allows early aborts if gradient descent\n is unable to make progress (i.e., gets stuck in\n a local minimum).\n :param initial_const: The initial tradeoff-constant to use to tune the\n relative importance of size of the pururbation\n and confidence of classification.\n If binary_search_steps is large, the initial\n constant is not important. 
A smaller value of\n this constant gives lower distortion results.\n :param clip_min: (optional float) Minimum input component value\n :param clip_max: (optional float) Maximum input component value\n \"\"\"\n import tensorflow as tf\n from .attacks_tf import CarliniWagnerL2 as CWL2\n self.parse_params(**kwargs)\n\n labels, nb_classes = self.get_or_guess_labels(x, kwargs)\n\n attack = CWL2(self.sess, self.model, self.batch_size,\n self.confidence, 'y_target' in kwargs,\n self.learning_rate, self.binary_search_steps,\n self.max_iterations, self.abort_early,\n self.initial_const, self.clip_min, self.clip_max,\n nb_classes, x.get_shape().as_list()[1:])\n\n def cw_wrap(x_val, y_val):\n return np.array(attack.attack(x_val, y_val), dtype=np.float32)\n wrap = tf.py_func(cw_wrap, [x, labels], tf.float32)\n\n return wrap\n\n def parse_params(self, y=None, y_target=None, nb_classes=None,\n batch_size=1, confidence=0,\n learning_rate=5e-3,\n binary_search_steps=5, max_iterations=1000,\n abort_early=True, initial_const=1e-2,\n clip_min=0, clip_max=1):\n\n # ignore the y and y_target argument\n if nb_classes is not None:\n warnings.warn(\"The nb_classes argument is depricated and will \"\n \"be removed on 2018-02-11\")\n self.batch_size = batch_size\n self.confidence = confidence\n self.learning_rate = learning_rate\n self.binary_search_steps = binary_search_steps\n self.max_iterations = max_iterations\n self.abort_early = abort_early\n self.initial_const = initial_const\n self.clip_min = clip_min\n self.clip_max = clip_max\n\n\nclass ElasticNetMethod(Attack):\n \"\"\"\n This attack features L1-oriented adversarial examples and includes\n the C&W L2 attack as a special case (when beta is set to 0).\n Adversarial examples attain similar performance to those\n generated by the C&W L2 attack in the white-box case,\n and more importantly, have improved transferability properties\n and complement adversarial training.\n Paper link: https://arxiv.org/abs/1709.04114\n \"\"\"\n def __init__(self, model, back='tf', sess=None):\n \"\"\"\n Note: the model parameter should be an instance of the\n cleverhans.model.Model abstraction provided by CleverHans.\n \"\"\"\n if not isinstance(model, Model):\n model = CallableModelWrapper(model, 'logits')\n\n super(ElasticNetMethod, self).__init__(model, back, sess)\n\n import tensorflow as tf\n self.feedable_kwargs = {'y': tf.float32,\n 'y_target': tf.float32}\n\n self.structural_kwargs = ['fista', 'beta', 'decision_rule',\n 'batch_size', 'confidence',\n 'targeted', 'learning_rate',\n 'binary_search_steps', 'max_iterations',\n 'abort_early', 'initial_const',\n 'clip_min', 'clip_max']\n\n def generate(self, x, **kwargs):\n \"\"\"\n Return a tensor that constructs adversarial examples for the given\n input. Generate uses tf.py_func in order to operate over tensors.\n\n :param x: (required) A tensor with the inputs.\n :param y: (optional) A tensor with the true labels for an untargeted\n attack. If None (and y_target is None) then use the\n original labels the classifier assigns.\n :param y_target: (optional) A tensor with the target labels for a\n targeted attack.\n :param fista: FISTA or ISTA. FISTA has better convergence properties\n but performs an additional query per iteration\n :param beta: Trades off L2 distortion with L1 distortion: higher\n produces examples with lower L1 distortion, at the\n cost of higher L2 (and typically Linf) distortion\n :param decision_rule: EN or L1. 
Select final adversarial example from\n all successful examples based on the least\n elastic-net or L1 distortion criterion.\n :param confidence: Confidence of adversarial examples: higher produces\n examples with larger l2 distortion, but more\n strongly classified as adversarial.\n :param batch_size: Number of attacks to run simultaneously.\n :param learning_rate: The learning rate for the attack algorithm.\n Smaller values produce better results but are\n slower to converge.\n :param binary_search_steps: The number of times we perform binary\n search to find the optimal tradeoff-\n constant between norm of the perturbation\n and confidence of the classification.\n :param max_iterations: The maximum number of iterations. Setting this\n to a larger value will produce lower distortion\n results. Using only a few iterations requires\n a larger learning rate, and will produce larger\n distortion results.\n :param abort_early: If true, allows early abort when the total\n loss starts to increase (greatly speeds up attack,\n but hurts performance, particularly on ImageNet)\n :param initial_const: The initial tradeoff-constant to use to tune the\n relative importance of size of the perturbation\n and confidence of classification.\n If binary_search_steps is large, the initial\n constant is not important. A smaller value of\n this constant gives lower distortion results.\n :param clip_min: (optional float) Minimum input component value\n :param clip_max: (optional float) Maximum input component value\n \"\"\"\n import tensorflow as tf\n self.parse_params(**kwargs)\n\n from .attacks_tf import ElasticNetMethod as EAD\n labels, nb_classes = self.get_or_guess_labels(x, kwargs)\n\n attack = EAD(self.sess, self.model, self.fista, self.beta,\n self.decision_rule, self.batch_size, self.confidence,\n 'y_target' in kwargs, self.learning_rate,\n self.binary_search_steps, self.max_iterations,\n self.abort_early, self.initial_const,\n self.clip_min, self.clip_max,\n nb_classes, x.get_shape().as_list()[1:])\n\n def ead_wrap(x_val, y_val):\n return np.array(attack.attack(x_val, y_val), dtype=np.float32)\n wrap = tf.py_func(ead_wrap, [x, labels], tf.float32)\n\n return wrap\n\n def parse_params(self, y=None, y_target=None,\n nb_classes=None, fista=True, beta=1e-3,\n decision_rule='EN', batch_size=1, confidence=0,\n learning_rate=1e-2,\n binary_search_steps=9, max_iterations=1000,\n abort_early=False, initial_const=1e-3,\n clip_min=0, clip_max=1):\n\n # ignore the y and y_target argument\n if nb_classes is not None:\n warnings.warn(\"The nb_classes argument is depricated and will \"\n \"be removed on 2018-02-11\")\n self.fista = fista\n self.beta = beta\n self.decision_rule = decision_rule\n self.batch_size = batch_size\n self.confidence = confidence\n self.learning_rate = learning_rate\n self.binary_search_steps = binary_search_steps\n self.max_iterations = max_iterations\n self.abort_early = abort_early\n self.initial_const = initial_const\n self.clip_min = clip_min\n self.clip_max = clip_max\n\n\nclass DeepFool(Attack):\n\n \"\"\"\n DeepFool is an untargeted & iterative attack which is based on an\n iterative linearization of the classifier. The implementation here\n is w.r.t. 
the L2 norm.\n Paper link: \"https://arxiv.org/pdf/1511.04599.pdf\"\n \"\"\"\n\n def __init__(self, model, back='tf', sess=None):\n \"\"\"\n Create a DeepFool instance.\n \"\"\"\n if not isinstance(model, Model):\n model = CallableModelWrapper(model, 'logits')\n\n super(DeepFool, self).__init__(model, back, sess)\n\n self.structural_kwargs = ['over_shoot', 'max_iter', 'clip_max',\n 'clip_min', 'nb_candidate']\n\n def generate(self, x, **kwargs):\n \"\"\"\n Generate symbolic graph for adversarial examples and return.\n\n :param x: The model's symbolic inputs.\n :param nb_candidate: The number of classes to test against, i.e.,\n deepfool only consider nb_candidate classes when\n attacking(thus accelerate speed). The nb_candidate\n classes are chosen according to the prediction\n confidence during implementation.\n :param overshoot: A termination criterion to prevent vanishing updates\n :param max_iter: Maximum number of iteration for deepfool\n :param nb_classes: The number of model output classes\n :param clip_min: Minimum component value for clipping\n :param clip_max: Maximum component value for clipping\n \"\"\"\n\n import tensorflow as tf\n from .attacks_tf import jacobian_graph, deepfool_batch\n\n # Parse and save attack-specific parameters\n assert self.parse_params(**kwargs)\n\n # Define graph wrt to this input placeholder\n logits = self.model.get_logits(x)\n self.nb_classes = logits.get_shape().as_list()[-1]\n assert self.nb_candidate <= self.nb_classes,\\\n 'nb_candidate should not be greater than nb_classes'\n preds = tf.reshape(tf.nn.top_k(logits, k=self.nb_candidate)[0],\n [-1, self.nb_candidate])\n # grads will be the shape [batch_size, nb_candidate, image_size]\n grads = tf.stack(jacobian_graph(preds, x, self.nb_candidate), axis=1)\n\n # Define graph\n def deepfool_wrap(x_val):\n return deepfool_batch(self.sess, x, preds, logits, grads, x_val,\n self.nb_candidate, self.overshoot,\n self.max_iter, self.clip_min, self.clip_max,\n self.nb_classes)\n return tf.py_func(deepfool_wrap, [x], tf.float32)\n\n def parse_params(self, nb_candidate=10, overshoot=0.02, max_iter=50,\n nb_classes=None, clip_min=0., clip_max=1., **kwargs):\n \"\"\"\n :param nb_candidate: The number of classes to test against, i.e.,\n deepfool only consider nb_candidate classes when\n attacking(thus accelerate speed). 
The nb_candidate\n classes are chosen according to the prediction\n confidence during implementation.\n :param overshoot: A termination criterion to prevent vanishing updates\n :param max_iter: Maximum number of iteration for deepfool\n :param nb_classes: The number of model output classes\n :param clip_min: Minimum component value for clipping\n :param clip_max: Maximum component value for clipping\n \"\"\"\n if nb_classes is not None:\n warnings.warn(\"The nb_classes argument is depricated and will \"\n \"be removed on 2018-02-11\")\n self.nb_candidate = nb_candidate\n self.overshoot = overshoot\n self.max_iter = max_iter\n self.clip_min = clip_min\n self.clip_max = clip_max\n\n return True\n\n\nclass LBFGS(Attack):\n \"\"\"\n LBFGS is the first adversarial attack for convolutional neural networks,\n and is a target & iterative attack.\n Paper link: \"https://arxiv.org/pdf/1312.6199.pdf\"\n \"\"\"\n def __init__(self, model, back='tf', sess=None):\n \"\"\"\n Note: the model parameter should be an instance of the\n cleverhans.model.Model abstraction provided by CleverHans.\n \"\"\"\n if not isinstance(model, Model):\n model = CallableModelWrapper(model, 'probs')\n\n super(LBFGS, self).__init__(model, back, sess)\n\n import tensorflow as tf\n self.feedable_kwargs = {'y_target': tf.float32}\n self.structural_kwargs = ['batch_size', 'binary_search_steps',\n 'max_iterations', 'initial_const',\n 'clip_min', 'clip_max']\n\n def generate(self, x, **kwargs):\n \"\"\"\n Return a tensor that constructs adversarial examples for the given\n input. Generate uses tf.py_func in order to operate over tensors.\n\n :param x: (required) A tensor with the inputs.\n :param y_target: (required) A tensor with the one-hot target labels.\n :param batch_size: The number of inputs to include in a batch and\n process simultaneously.\n :param binary_search_steps: The number of times we perform binary\n search to find the optimal tradeoff-\n constant between norm of the purturbation\n and cross-entropy loss of classification.\n :param max_iterations: The maximum number of iterations.\n :param initial_const: The initial tradeoff-constant to use to tune the\n relative importance of size of the perturbation\n and cross-entropy loss of the classification.\n :param clip_min: (optional float) Minimum input component value\n :param clip_max: (optional float) Maximum input component value\n \"\"\"\n import tensorflow as tf\n from .attacks_tf import LBFGS_attack\n self.parse_params(**kwargs)\n\n _, nb_classes = self.get_or_guess_labels(x, kwargs)\n\n attack = LBFGS_attack(self.sess, x, self.model.get_probs(x),\n self.y_target, self.binary_search_steps,\n self.max_iterations, self.initial_const,\n self.clip_min, self.clip_max, nb_classes,\n self.batch_size)\n\n def lbfgs_wrap(x_val, y_val):\n return np.array(attack.attack(x_val, y_val), dtype=np.float32)\n wrap = tf.py_func(lbfgs_wrap, [x, self.y_target], tf.float32)\n\n return wrap\n\n def parse_params(self, y_target=None, batch_size=1,\n binary_search_steps=5, max_iterations=1000,\n initial_const=1e-2, clip_min=0, clip_max=1):\n\n self.y_target = y_target\n self.batch_size = batch_size\n self.binary_search_steps = binary_search_steps\n self.max_iterations = max_iterations\n self.initial_const = initial_const\n self.clip_min = clip_min\n self.clip_max = clip_max\n\n\ndef vatm(model, x, logits, eps, back='tf', num_iterations=1, xi=1e-6,\n clip_min=None, clip_max=None):\n \"\"\"\n A wrapper for the perturbation methods used for virtual adversarial\n training : 
https://arxiv.org/abs/1507.00677\n It calls the right function, depending on the\n user's backend.\n\n :param model: the model which returns the network unnormalized logits\n :param x: the input placeholder\n :param logits: the model's unnormalized output tensor\n :param eps: the epsilon (input variation parameter)\n :param num_iterations: the number of iterations\n :param xi: the finite difference parameter\n :param clip_min: optional parameter that can be used to set a minimum\n value for components of the example returned\n :param clip_max: optional parameter that can be used to set a maximum\n value for components of the example returned\n :return: a tensor for the adversarial example\n\n \"\"\"\n assert back == 'tf'\n # Compute VATM using TensorFlow\n from .attacks_tf import vatm as vatm_tf\n return vatm_tf(model, x, logits, eps, num_iterations=num_iterations,\n xi=xi, clip_min=clip_min, clip_max=clip_max)\n\n\nclass MadryEtAl(Attack):\n\n \"\"\"\n The Projected Gradient Descent Attack (Madry et al. 2017).\n Paper link: https://arxiv.org/pdf/1706.06083.pdf\n \"\"\"\n\n def __init__(self, model, back='tf', sess=None):\n \"\"\"\n Create a MadryEtAl instance.\n \"\"\"\n if not isinstance(model, Model):\n model = CallableModelWrapper(model, 'probs')\n\n super(MadryEtAl, self).__init__(model, back, sess)\n self.feedable_kwargs = {'eps': np.float32,\n 'eps_iter': np.float32,\n 'y': np.float32,\n 'y_target': np.float32,\n 'clip_min': np.float32,\n 'clip_max': np.float32}\n self.structural_kwargs = ['ord', 'nb_iter', 'rand_init']\n\n def generate(self, x, **kwargs):\n \"\"\"\n Generate symbolic graph for adversarial examples and return.\n\n :param x: The model's symbolic inputs.\n :param eps: (required float) maximum distortion of adversarial example\n compared to original input\n :param eps_iter: (required float) step size for each attack iteration\n :param nb_iter: (required int) Number of attack iterations.\n :param y: (optional) A tensor with the model labels.\n :param y_target: (optional) A tensor with the labels to target. Leave\n y_target=None if y is also set. Labels should be\n one-hot-encoded.\n :param ord: (optional) Order of the norm (mimics Numpy).\n Possible values: np.inf, 1 or 2.\n :param clip_min: (optional float) Minimum input component value\n :param clip_max: (optional float) Maximum input component value\n :param rand_init: (optional bool) If True, an initial random\n perturbation is added.\n \"\"\"\n\n # Parse and save attack-specific parameters\n assert self.parse_params(**kwargs)\n\n labels, nb_classes = self.get_or_guess_labels(x, kwargs)\n self.targeted = self.y_target is not None\n\n # Initialize loop variables\n adv_x = self.attack(x, labels)\n\n return adv_x\n\n def parse_params(self, eps=0.3, eps_iter=0.01, nb_iter=40, y=None,\n ord=np.inf, clip_min=None, clip_max=None,\n y_target=None, rand_init=True, **kwargs):\n \"\"\"\n Take in a dictionary of parameters and applies attack-specific checks\n before saving them as attributes.\n\n Attack-specific parameters:\n\n :param eps: (required float) maximum distortion of adversarial example\n compared to original input\n :param eps_iter: (required float) step size for each attack iteration\n :param nb_iter: (required int) Number of attack iterations.\n :param y: (optional) A tensor with the model labels.\n :param y_target: (optional) A tensor with the labels to target. Leave\n y_target=None if y is also set. 
Labels should be\n one-hot-encoded.\n :param ord: (optional) Order of the norm (mimics Numpy).\n Possible values: np.inf, 1 or 2.\n :param clip_min: (optional float) Minimum input component value\n :param clip_max: (optional float) Maximum input component value\n :param rand_init: (optional bool) If True, an initial random\n perturbation is added.\n \"\"\"\n\n # Save attack-specific parameters\n self.eps = eps\n self.eps_iter = eps_iter\n self.nb_iter = nb_iter\n self.y = y\n self.y_target = y_target\n self.ord = ord\n self.clip_min = clip_min\n self.clip_max = clip_max\n self.rand_init = rand_init\n\n if self.y is not None and self.y_target is not None:\n raise ValueError(\"Must not set both y and y_target\")\n # Check if order of the norm is acceptable given current implementation\n if self.ord not in [np.inf, 1, 2]:\n raise ValueError(\"Norm order must be either np.inf, 1, or 2.\")\n\n return True\n\n def attack_single_step(self, x, eta, y):\n \"\"\"\n Given the original image and the perturbation computed so far, computes\n a new perturbation.\n\n :param x: A tensor with the original input.\n :param eta: A tensor the same shape as x that holds the perturbation.\n :param y: A tensor with the target labels or ground-truth labels.\n \"\"\"\n import tensorflow as tf\n from cleverhans.utils_tf import model_loss, clip_eta\n\n adv_x = x + eta\n preds = self.model.get_probs(adv_x)\n loss = model_loss(y, preds)\n if self.targeted:\n loss = -loss\n grad, = tf.gradients(loss, adv_x)\n scaled_signed_grad = self.eps_iter * tf.sign(grad)\n adv_x = adv_x + scaled_signed_grad\n if self.clip_min is not None and self.clip_max is not None:\n adv_x = tf.clip_by_value(adv_x, self.clip_min, self.clip_max)\n eta = adv_x - x\n eta = clip_eta(eta, self.ord, self.eps)\n return eta\n\n def attack(self, x, y):\n \"\"\"\n This method creates a symbolic graph that given an input image,\n first randomly perturbs the image. The\n perturbation is bounded to an epsilon ball. Then multiple steps of\n gradient descent is performed to increase the probability of a target\n label or decrease the probability of the ground-truth label.\n\n :param x: A tensor with the input image.\n \"\"\"\n import tensorflow as tf\n from cleverhans.utils_tf import clip_eta\n\n if self.rand_init:\n eta = tf.random_uniform(tf.shape(x), -self.eps, self.eps)\n eta = clip_eta(eta, self.ord, self.eps)\n else:\n eta = tf.zeros_like(x)\n\n for i in range(self.nb_iter):\n eta = self.attack_single_step(x, eta, y)\n\n adv_x = x + eta\n if self.clip_min is not None and self.clip_max is not None:\n adv_x = tf.clip_by_value(adv_x, self.clip_min, self.clip_max)\n\n return adv_x\n\n\nclass FastFeatureAdversaries(Attack):\n \"\"\"\n This is a fast implementation of \"Feature Adversaries\", an attack\n against a target internal representation of a model.\n \"Feature adversaries\" were originally introduced in (Sabour et al. 2016),\n where the optimization was done using LBFGS.\n Paper link: https://arxiv.org/abs/1511.05122\n\n This implementation is similar to \"Basic Iterative Method\"\n (Kurakin et al. 
2016) but applied to the internal representations.\n \"\"\"\n\n def __init__(self, model, back='tf', sess=None):\n \"\"\"\n Create a FastFeatureAdversaries instance.\n \"\"\"\n super(FastFeatureAdversaries, self).__init__(model, back, sess)\n self.feedable_kwargs = {'eps': np.float32,\n 'eps_iter': np.float32,\n 'clip_min': np.float32,\n 'clip_max': np.float32,\n 'layer': str}\n self.structural_kwargs = ['ord', 'nb_iter']\n\n assert isinstance(self.model, Model)\n\n def parse_params(self, layer=None, eps=0.3, eps_iter=0.05, nb_iter=10,\n ord=np.inf, clip_min=None, clip_max=None, **kwargs):\n \"\"\"\n Take in a dictionary of parameters and applies attack-specific checks\n before saving them as attributes.\n\n Attack-specific parameters:\n\n :param layer: (required str) name of the layer to target.\n :param eps: (required float) maximum distortion of adversarial example\n compared to original input\n :param eps_iter: (required float) step size for each attack iteration\n :param nb_iter: (required int) Number of attack iterations.\n :param ord: (optional) Order of the norm (mimics Numpy).\n Possible values: np.inf, 1 or 2.\n :param clip_min: (optional float) Minimum input component value\n :param clip_max: (optional float) Maximum input component value\n \"\"\"\n\n # Save attack-specific parameters\n self.layer = layer\n self.eps = eps\n self.eps_iter = eps_iter\n self.nb_iter = nb_iter\n self.ord = ord\n self.clip_min = clip_min\n self.clip_max = clip_max\n\n # Check if order of the norm is acceptable given current implementation\n if self.ord not in [np.inf, 1, 2]:\n raise ValueError(\"Norm order must be either np.inf, 1, or 2.\")\n\n return True\n\n def attack_single_step(self, x, eta, g_feat):\n \"\"\"\n TensorFlow implementation of the Fast Feature Gradient. 
This is a\n single step attack similar to Fast Gradient Method that attacks an\n internal representation.\n\n :param x: the input placeholder\n :param eta: A tensor the same shape as x that holds the perturbation.\n :param g_feat: model's internal tensor for guide\n :return: a tensor for the adversarial example\n \"\"\"\n import tensorflow as tf\n from cleverhans.utils_tf import clip_eta\n\n adv_x = x + eta\n a_feat = self.model.get_layer(adv_x, self.layer)\n\n # feat.shape = (batch, c) or (batch, w, h, c)\n axis = list(range(1, len(a_feat.shape)))\n\n # Compute loss\n # This is a targeted attack, hence the negative sign\n loss = -tf.reduce_sum(tf.square(a_feat - g_feat), axis)\n\n # Define gradient of loss wrt input\n grad, = tf.gradients(loss, adv_x)\n\n # Multiply by constant epsilon\n scaled_signed_grad = self.eps_iter * tf.sign(grad)\n\n # Add perturbation to original example to obtain adversarial example\n adv_x = adv_x + scaled_signed_grad\n\n # If clipping is needed,\n # reset all values outside of [clip_min, clip_max]\n if (self.clip_min is not None) and (self.clip_max is not None):\n adv_x = tf.clip_by_value(adv_x, self.clip_min, self.clip_max)\n\n adv_x = tf.stop_gradient(adv_x)\n\n eta = adv_x - x\n eta = clip_eta(eta, self.ord, self.eps)\n\n return eta\n\n def generate(self, x, g, **kwargs):\n \"\"\"\n Generate symbolic graph for adversarial examples and return.\n\n :param x: The model's symbolic inputs.\n :param g: The target's symbolic representation.\n :param eps: (required float) maximum distortion of adversarial example\n compared to original input\n :param eps_iter: (required float) step size for each attack iteration\n :param nb_iter: (required int) Number of attack iterations.\n :param ord: (optional) Order of the norm (mimics Numpy).\n Possible values: np.inf, 1 or 2.\n :param clip_min: (optional float) Minimum input component value\n :param clip_max: (optional float) Maximum input component value\n \"\"\"\n import tensorflow as tf\n from cleverhans.utils_tf import clip_eta\n\n # Parse and save attack-specific parameters\n assert self.parse_params(**kwargs)\n\n g_feat = self.model.get_layer(g, self.layer)\n\n # Initialize loop variables\n eta = tf.random_uniform(tf.shape(x), -self.eps, self.eps)\n eta = clip_eta(eta, self.ord, self.eps)\n\n for i in range(self.nb_iter):\n eta = self.attack_single_step(x, eta, g_feat)\n\n # Define adversarial example (and clip if necessary)\n adv_x = x + eta\n if self.clip_min is not None and self.clip_max is not None:\n adv_x = tf.clip_by_value(adv_x, self.clip_min, self.clip_max)\n\n return adv_x\n\n\nclass SPSA(Attack):\n \"\"\"\n This implements the SPSA adversary, as in https://arxiv.org/abs/1802.05666\n (Uesato et al. 2018). SPSA is a gradient-free optimization method, which\n is useful when the model is non-differentiable, or more generally, the\n gradients do not point in useful directions.\n \"\"\"\n\n def __init__(self, model, back='tf', sess=None):\n super(SPSA, self).__init__(model, back, sess)\n assert isinstance(self.model, Model)\n\n def generate(self, x, y=None, y_target=None, epsilon=None, num_steps=None,\n is_targeted=False, early_stop_loss_threshold=None,\n learning_rate=0.01, delta=0.01, batch_size=128, spsa_iters=1,\n is_debug=False):\n \"\"\"\n Generate symbolic graph for adversarial examples.\n\n :param x: The model's symbolic inputs. Must be a batch of size 1.\n :param y: A Tensor or None. The index of the correct label.\n :param y_target: A Tensor or None. 
The index of the target label in a\n targeted attack.\n :param epsilon: The size of the maximum perturbation, measured in the\n L-infinity norm.\n :param num_steps: The number of optimization steps.\n :param is_targeted: Whether to use a targeted or untargeted attack.\n :param early_stop_loss_threshold: A float or None. If specified, the\n attack will end as soon as the loss\n is below `early_stop_loss_threshold`.\n :param learning_rate: Learning rate of ADAM optimizer.\n :param delta: Perturbation size used for SPSA approximation.\n :param batch_size: Number of inputs to evaluate at a single time. Note\n that the true batch size (the number of evaluated\n inputs for each update) is `batch_size * spsa_iters`\n :param spsa_iters: Number of model evaluations before performing an\n update, where each evaluation is on `batch_size`\n different inputs.\n :param is_debug: If True, print the adversarial loss after each update.\n \"\"\"\n from .attacks_tf import SPSAAdam, pgd_attack, margin_logit_loss\n\n optimizer = SPSAAdam(lr=learning_rate, delta=delta,\n num_samples=batch_size, num_iters=spsa_iters)\n\n def loss_fn(x, label):\n logits = self.model.get_logits(x)\n loss_multiplier = 1 if is_targeted else -1\n return loss_multiplier * margin_logit_loss(\n logits, label, num_classes=self.model.num_classes)\n\n y_attack = y_target if is_targeted else y\n adv_x = pgd_attack(\n loss_fn, x, y_attack, epsilon, num_steps=num_steps,\n optimizer=optimizer,\n early_stop_loss_threshold=early_stop_loss_threshold,\n is_debug=is_debug,\n )\n return adv_x\n"
] | [
[
"tensorflow.Graph",
"tensorflow.train.Scaffold",
"tensorflow.gfile.Open",
"tensorflow.flags.DEFINE_string",
"tensorflow.train.MonitoredSession",
"tensorflow.placeholder",
"tensorflow.logging.set_verbosity",
"scipy.misc.imread",
"tensorflow.argmax",
"numpy.zeros",
"tensorflow.flags.DEFINE_integer",
"tensorflow.app.run"
],
[
"tensorflow.get_default_session",
"tensorflow.clip_by_value",
"tensorflow.reduce_max",
"tensorflow.sign",
"tensorflow.shape",
"tensorflow.reduce_sum",
"tensorflow.maximum",
"tensorflow.cast",
"tensorflow.gradients",
"tensorflow.placeholder",
"tensorflow.equal",
"tensorflow.stop_gradient",
"tensorflow.zeros_like",
"tensorflow.nn.top_k",
"tensorflow.abs",
"tensorflow.square",
"tensorflow.py_func"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"0.14",
"0.15",
"1.0",
"0.19",
"0.18",
"1.2",
"0.12",
"0.10",
"0.17",
"0.16"
],
"tensorflow": [
"1.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
}
] |
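Note on the row above: its code field is an adapted copy of the CleverHans attacks module (SaliencyMapMethod, VirtualAdversarialMethod, CarliniWagnerL2, ElasticNetMethod, DeepFool, LBFGS, MadryEtAl, FastFeatureAdversaries, SPSA), and the apis / possible_versions fields list the TensorFlow 1.10-era calls it relies on. The following is a minimal usage sketch for the MadryEtAl (PGD) attack defined in that code field; the stand-in classifier model_fn, the input shape, and the hyperparameter values are illustrative assumptions and not part of the dataset row, and MadryEtAl is assumed to be importable from that module.

import tensorflow as tf

def model_fn(x):
    # Stand-in classifier returning class probabilities; per the code above,
    # plain callables are wrapped via CallableModelWrapper(model, 'probs').
    logits = tf.layers.dense(tf.layers.flatten(x), 10)
    return tf.nn.softmax(logits)

sess = tf.Session()
x = tf.placeholder(tf.float32, shape=(None, 28, 28, 1))
attack = MadryEtAl(model_fn, back='tf', sess=sess)
# eps bounds the total L-infinity distortion, eps_iter is the per-step size,
# nb_iter the number of projected gradient steps (see parse_params above).
adv_x = attack.generate(x, eps=0.3, eps_iter=0.01, nb_iter=40,
                        clip_min=0., clip_max=1.)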
YaoYao1995/mbpo | [
"b9571e469459ce3a632b19dc3fee68c9ac3857b2"
] | [
"mbpo/algorithms/meee.py"
] | [
"## adapted from https://github.com/rail-berkeley/softlearning/blob/master/softlearning/algorithms/sac.py\r\n\r\nimport os\r\nimport math\r\nimport pickle\r\nfrom collections import OrderedDict\r\nfrom numbers import Number\r\nfrom itertools import count\r\nimport gtimer as gt\r\nimport pdb\r\n\r\nimport numpy as np\r\nimport tensorflow as tf\r\nfrom tensorflow.python.training import training_util\r\n\r\nfrom softlearning.algorithms.rl_algorithm import RLAlgorithm\r\nfrom softlearning.replay_pools.simple_replay_pool import WeightedReplayPool\r\n\r\nfrom mbpo.models.constructor import construct_model, format_samples_for_training\r\nfrom mbpo.models.fake_env import FakeEnv\r\nfrom mbpo.utils.writer import Writer\r\nfrom mbpo.utils.visualization import visualize_policy\r\nfrom mbpo.utils.logging import Progress\r\nimport mbpo.utils.filesystem as filesystem\r\n\r\n\r\ndef td_target(reward, discount, next_value):\r\n return reward + discount * next_value\r\n\r\n\r\nclass MEEE(RLAlgorithm):\r\n \"\"\" Model-Ensemble Policy Optimization (MEEE)\r\n \"\"\"\r\n\r\n def __init__(\r\n self,\r\n training_environment,\r\n evaluation_environment,\r\n policy,\r\n Qs,\r\n pool,\r\n static_fns,\r\n plotter=None,\r\n tf_summaries=False,\r\n\r\n lr=3e-4,\r\n reward_scale=1.0,\r\n target_entropy='auto',\r\n discount=0.99,\r\n tau=5e-3,\r\n target_update_interval=1,\r\n action_prior='uniform',\r\n reparameterize=False,\r\n store_extra_policy_info=False,\r\n\r\n deterministic=False,\r\n model_train_freq=250,\r\n num_networks=7,\r\n num_elites=5,\r\n model_retain_epochs=20,\r\n rollout_batch_size=100e3,\r\n real_ratio=0.1,\r\n rollout_schedule=[20,100,1,1],\r\n hidden_dim=200,\r\n max_model_t=None,\r\n **kwargs,\r\n ):\r\n \"\"\"\r\n Args:\r\n env (`SoftlearningEnv`): Environment used for training.\r\n policy: A policy function approximator.\r\n initial_exploration_policy: ('Policy'): A policy that we use\r\n for initial exploration which is not trained by the algorithm.\r\n Qs: Q-function approximators. The min of these\r\n approximators will be used. Usage of at least two Q-functions\r\n improves performance by reducing overestimation bias.\r\n pool (`PoolBase`): Replay pool to add gathered samples to.\r\n plotter (`QFPolicyPlotter`): Plotter instance to be used for\r\n visualizing Q-function during training.\r\n lr (`float`): Learning rate used for the function approximators.\r\n discount (`float`): Discount factor for Q-function updates.\r\n tau (`float`): Soft value function target update weight.\r\n target_update_interval ('int'): Frequency at which target network\r\n updates occur in iterations.\r\n reparameterize ('bool'): If True, we use a gradient estimator for\r\n the policy derived using the reparameterization trick. 
We use\r\n a likelihood ratio based estimator otherwise.\r\n \"\"\"\r\n\r\n super(MEEE, self).__init__(**kwargs)\r\n\r\n obs_dim = np.prod(training_environment.observation_space.shape)\r\n act_dim = np.prod(training_environment.action_space.shape)\r\n self._model = construct_model(obs_dim=obs_dim, act_dim=act_dim, hidden_dim=hidden_dim, num_networks=num_networks, num_elites=num_elites)\r\n self._static_fns = static_fns\r\n self.fake_env = FakeEnv(self._model, self._static_fns)\r\n\r\n self._rollout_schedule = rollout_schedule\r\n self._max_model_t = max_model_t\r\n\r\n # self._model_pool_size = model_pool_size\r\n # print('[ MBPO ] Model pool size: {:.2E}'.format(self._model_pool_size))\r\n # self._model_pool = WeightedReplayPool(pool._observation_space, pool._action_space, self._model_pool_size)\r\n\r\n self._model_retain_epochs = model_retain_epochs\r\n\r\n self._model_train_freq = model_train_freq\r\n self._rollout_batch_size = int(rollout_batch_size)\r\n self._deterministic = deterministic\r\n self._real_ratio = real_ratio\r\n\r\n self._log_dir = os.getcwd()\r\n self._writer = Writer(self._log_dir)\r\n\r\n self._training_environment = training_environment\r\n self._evaluation_environment = evaluation_environment\r\n self._policy = policy\r\n\r\n self._Qs = Qs\r\n self._Q_targets = tuple(tf.keras.models.clone_model(Q) for Q in Qs)\r\n\r\n self._pool = pool\r\n self._plotter = plotter\r\n self._tf_summaries = tf_summaries\r\n\r\n self._policy_lr = lr\r\n self._Q_lr = lr\r\n\r\n self._reward_scale = reward_scale\r\n self._target_entropy = (\r\n -np.prod(self._training_environment.action_space.shape)\r\n if target_entropy == 'auto'\r\n else target_entropy)\r\n print('[ MEEE ] Target entropy: {}'.format(self._target_entropy))\r\n\r\n self._discount = discount\r\n self._tau = tau\r\n self._target_update_interval = target_update_interval\r\n self._action_prior = action_prior\r\n\r\n self._reparameterize = reparameterize\r\n self._store_extra_policy_info = store_extra_policy_info\r\n\r\n observation_shape = self._training_environment.active_observation_shape\r\n action_shape = self._training_environment.action_space.shape\r\n\r\n assert len(observation_shape) == 1, observation_shape\r\n self._observation_shape = observation_shape\r\n assert len(action_shape) == 1, action_shape\r\n self._action_shape = action_shape\r\n\r\n self._build()\r\n\r\n def _build(self):\r\n self._training_ops = {}\r\n\r\n self._init_global_step()\r\n self._init_placeholders()\r\n self._init_actor_update()\r\n self._init_critic_update()\r\n\r\n def _train(self):\r\n \r\n \"\"\"Return a generator that performs RL training.\r\n\r\n Args:\r\n env (`SoftlearningEnv`): Environment used for training.\r\n policy (`Policy`): Policy used for training\r\n initial_exploration_policy ('Policy'): Policy used for exploration\r\n If None, then all exploration is done using policy\r\n pool (`PoolBase`): Sample pool to add samples to\r\n \"\"\"\r\n training_environment = self._training_environment\r\n evaluation_environment = self._evaluation_environment\r\n policy = self._policy\r\n pool = self._pool\r\n model_metrics = {}\r\n\r\n if not self._training_started:\r\n self._init_training()\r\n\r\n self._initial_exploration_hook(\r\n training_environment, self._initial_exploration_policy, pool)\r\n\r\n self.sampler.initialize(training_environment, policy, pool)\r\n\r\n gt.reset_root()\r\n gt.rename_root('RLAlgorithm')\r\n gt.set_def_unique(False)\r\n\r\n self._training_before_hook()\r\n\r\n for self._epoch in 
gt.timed_for(range(self._epoch, self._n_epochs)):\r\n\r\n self._epoch_before_hook()\r\n gt.stamp('epoch_before_hook')\r\n\r\n self._training_progress = Progress(self._epoch_length * self._n_train_repeat)\r\n start_samples = self.sampler._total_samples\r\n for i in count():\r\n samples_now = self.sampler._total_samples\r\n self._timestep = samples_now - start_samples\r\n\r\n if (samples_now >= start_samples + self._epoch_length\r\n and self.ready_to_train):\r\n break\r\n\r\n self._timestep_before_hook()\r\n gt.stamp('timestep_before_hook')\r\n\r\n if self._timestep % self._model_train_freq == 0 and self._real_ratio < 1.0:\r\n self._training_progress.pause()\r\n print('[ MEEE ] log_dir: {} | ratio: {}'.format(self._log_dir, self._real_ratio))\r\n print('[ MEEE ] Training model at epoch {} | freq {} | timestep {} (total: {}) | epoch train steps: {} (total: {})'.format(\r\n self._epoch, self._model_train_freq, self._timestep, self._total_timestep, self._train_steps_this_epoch, self._num_train_steps)\r\n )\r\n\r\n model_train_metrics = self._train_model(batch_size=256, max_epochs=None, holdout_ratio=0.2, max_t=self._max_model_t)\r\n model_metrics.update(model_train_metrics)\r\n gt.stamp('epoch_train_model')\r\n \r\n self._set_rollout_length()\r\n self._reallocate_model_pool()\r\n model_rollout_metrics = self._rollout_model(rollout_batch_size=self._rollout_batch_size, deterministic=self._deterministic)\r\n model_metrics.update(model_rollout_metrics)\r\n \r\n\r\n gt.stamp('epoch_rollout_model')\r\n # self._visualize_model(self._evaluation_environment, self._total_timestep)\r\n self._training_progress.resume()\r\n\r\n # No UCB exploration\r\n #self._do_sampling(timestep=self._total_timestep)\r\n \r\n self._do_sampling(timestep=self._total_timestep, disturb=True, fake_env=self.fake_env, Qs = self._Qs)\r\n #print(\"**exploration**\")\r\n gt.stamp('sample')\r\n\r\n if self.ready_to_train:\r\n self._do_training_repeats(timestep=self._total_timestep)\r\n gt.stamp('train')\r\n\r\n self._timestep_after_hook()\r\n gt.stamp('timestep_after_hook')\r\n\r\n training_paths = self.sampler.get_last_n_paths(\r\n math.ceil(self._epoch_length / self.sampler._max_path_length))\r\n gt.stamp('training_paths')\r\n evaluation_paths = self._evaluation_paths(\r\n policy, evaluation_environment)\r\n gt.stamp('evaluation_paths')\r\n\r\n training_metrics = self._evaluate_rollouts(\r\n training_paths, training_environment)\r\n gt.stamp('training_metrics')\r\n if evaluation_paths:\r\n evaluation_metrics = self._evaluate_rollouts(\r\n evaluation_paths, evaluation_environment)\r\n gt.stamp('evaluation_metrics')\r\n else:\r\n evaluation_metrics = {}\r\n\r\n self._epoch_after_hook(training_paths)\r\n gt.stamp('epoch_after_hook')\r\n\r\n sampler_diagnostics = self.sampler.get_diagnostics()\r\n\r\n diagnostics = self.get_diagnostics(\r\n iteration=self._total_timestep,\r\n batch=self._evaluation_batch(),\r\n training_paths=training_paths,\r\n evaluation_paths=evaluation_paths)\r\n\r\n time_diagnostics = gt.get_times().stamps.itrs\r\n\r\n diagnostics.update(OrderedDict((\r\n *(\r\n (f'evaluation/{key}', evaluation_metrics[key])\r\n for key in sorted(evaluation_metrics.keys())\r\n ),\r\n *(\r\n (f'training/{key}', training_metrics[key])\r\n for key in sorted(training_metrics.keys())\r\n ),\r\n *(\r\n (f'times/{key}', time_diagnostics[key][-1])\r\n for key in sorted(time_diagnostics.keys())\r\n ),\r\n *(\r\n (f'sampler/{key}', sampler_diagnostics[key])\r\n for key in sorted(sampler_diagnostics.keys())\r\n ),\r\n *(\r\n (f'model/{key}', 
model_metrics[key])\r\n for key in sorted(model_metrics.keys())\r\n ),\r\n ('epoch', self._epoch),\r\n ('timestep', self._timestep),\r\n ('timesteps_total', self._total_timestep),\r\n ('train-steps', self._num_train_steps),\r\n )))\r\n\r\n if self._eval_render_mode is not None and hasattr(\r\n evaluation_environment, 'render_rollouts'):\r\n training_environment.render_rollouts(evaluation_paths)\r\n\r\n yield diagnostics\r\n\r\n self.sampler.terminate()\r\n\r\n self._training_after_hook()\r\n\r\n self._training_progress.close()\r\n\r\n yield {'done': True, **diagnostics}\r\n\r\n def train(self, *args, **kwargs):\r\n return self._train(*args, **kwargs)\r\n\r\n def _log_policy(self):\r\n save_path = os.path.join(self._log_dir, 'models')\r\n filesystem.mkdir(save_path)\r\n weights = self._policy.get_weights()\r\n data = {'policy_weights': weights}\r\n full_path = os.path.join(save_path, 'policy_{}.pkl'.format(self._total_timestep))\r\n print('Saving policy to: {}'.format(full_path))\r\n pickle.dump(data, open(full_path, 'wb'))\r\n\r\n def _log_model(self):\r\n save_path = os.path.join(self._log_dir, 'models')\r\n filesystem.mkdir(save_path)\r\n print('Saving model to: {}'.format(save_path))\r\n self._model.save(save_path, self._total_timestep)\r\n\r\n def _set_rollout_length(self):\r\n min_epoch, max_epoch, min_length, max_length = self._rollout_schedule\r\n if self._epoch <= min_epoch:\r\n y = min_length\r\n else:\r\n dx = (self._epoch - min_epoch) / (max_epoch - min_epoch)\r\n dx = min(dx, 1)\r\n y = dx * (max_length - min_length) + min_length\r\n\r\n self._rollout_length = int(y)\r\n print('[ Model Length ] Epoch: {} (min: {}, max: {}) | Length: {} (min: {} , max: {})'.format(\r\n self._epoch, min_epoch, max_epoch, self._rollout_length, min_length, max_length\r\n ))\r\n\r\n def _reallocate_model_pool(self):\r\n obs_space = self._pool._observation_space\r\n act_space = self._pool._action_space\r\n\r\n rollouts_per_epoch = self._rollout_batch_size * self._epoch_length / self._model_train_freq\r\n model_steps_per_epoch = int(self._rollout_length * rollouts_per_epoch)\r\n new_pool_size = self._model_retain_epochs * model_steps_per_epoch\r\n\r\n if not hasattr(self, '_model_pool'):\r\n print('[ MEEE ] Initializing new model pool with size {:.2e}'.format(\r\n new_pool_size\r\n ))\r\n self._model_pool = WeightedReplayPool(obs_space, act_space, new_pool_size)\r\n \r\n elif self._model_pool._max_size != new_pool_size:\r\n print('[ MEEE ] Updating model pool | {:.2e} --> {:.2e}'.format(\r\n self._model_pool._max_size, new_pool_size\r\n ))\r\n samples = self._model_pool.return_all_samples()\r\n new_pool = WeightedReplayPool(obs_space, act_space, new_pool_size)\r\n new_pool.add_samples(samples)\r\n assert self._model_pool.size == new_pool.size\r\n self._model_pool = new_pool\r\n\r\n def _train_model(self, **kwargs):\r\n env_samples = self._pool.return_all_samples()\r\n train_inputs, train_outputs = format_samples_for_training(env_samples)\r\n model_metrics = self._model.train(train_inputs, train_outputs, **kwargs)\r\n return model_metrics\r\n\r\n def _rollout_model(self, rollout_batch_size, **kwargs):\r\n print('[ Model Rollout ] Starting | Epoch: {} | Rollout length: {} | Batch size: {}'.format(\r\n self._epoch, self._rollout_length, rollout_batch_size\r\n ))\r\n batch = self.sampler.random_batch(rollout_batch_size)\r\n obs = batch['observations']\r\n steps_added = []\r\n for i in range(self._rollout_length):\r\n act = self._policy.actions_np(obs)\r\n \r\n next_obs, rew, term, info = 
self.fake_env.step(obs, act, **kwargs)\r\n steps_added.append(len(obs))\r\n\r\n samples = {'observations': obs, 'actions': act, 'next_observations': next_obs, 'rewards': rew, 'terminals': term, 'stds': info['dev'][:,None]}\r\n self._model_pool.add_samples(samples)\r\n\r\n nonterm_mask = ~term.squeeze(-1)\r\n if nonterm_mask.sum() == 0:\r\n print('[ Model Rollout ] Breaking early: {} | {} / {}'.format(i, nonterm_mask.sum(), nonterm_mask.shape))\r\n break\r\n\r\n obs = next_obs[nonterm_mask]\r\n\r\n mean_rollout_length = sum(steps_added) / rollout_batch_size\r\n rollout_stats = {'mean_rollout_length': mean_rollout_length}\r\n print('[ Model Rollout ] Added: {:.1e} | Model pool: {:.1e} (max {:.1e}) | Length: {} | Train rep: {}'.format(\r\n sum(steps_added), self._model_pool.size, self._model_pool._max_size, mean_rollout_length, self._n_train_repeat\r\n ))\r\n return rollout_stats\r\n\r\n def _visualize_model(self, env, timestep):\r\n ## save env state\r\n state = env.unwrapped.state_vector()\r\n qpos_dim = len(env.unwrapped.sim.data.qpos)\r\n qpos = state[:qpos_dim]\r\n qvel = state[qpos_dim:]\r\n\r\n print('[ Visualization ] Starting | Epoch {} | Log dir: {}\\n'.format(self._epoch, self._log_dir))\r\n visualize_policy(env, self.fake_env, self._policy, self._writer, timestep)\r\n print('[ Visualization ] Done')\r\n ## set env state\r\n env.unwrapped.set_state(qpos, qvel)\r\n\r\n def _training_batch(self, batch_size=None):\r\n batch_size = batch_size or self.sampler._batch_size\r\n env_batch_size = int(batch_size*self._real_ratio)\r\n model_batch_size = batch_size - env_batch_size\r\n\r\n ## can sample from the env pool even if env_batch_size == 0\r\n env_batch = self._pool.random_batch(env_batch_size)\r\n\r\n if model_batch_size > 0:\r\n model_batch = self._model_pool.random_batch(model_batch_size)\r\n\r\n keys = env_batch.keys()\r\n batch = {k: np.concatenate((env_batch[k], model_batch[k]), axis=0) for k in keys}\r\n else:\r\n ## if real_ratio == 1.0, no model pool was ever allocated,\r\n ## so skip the model pool sampling\r\n batch = env_batch\r\n return batch\r\n\r\n def _init_global_step(self):\r\n self.global_step = training_util.get_or_create_global_step()\r\n self._training_ops.update({\r\n 'increment_global_step': training_util._increment_global_step(1)\r\n })\r\n\r\n def _init_placeholders(self):\r\n \"\"\"Create input placeholders for the SAC algorithm.\r\n\r\n Creates `tf.placeholder`s for:\r\n - observation\r\n - next observation\r\n - action\r\n - reward\r\n - terminals\r\n - stds\r\n \"\"\"\r\n self._iteration_ph = tf.placeholder(\r\n tf.int64, shape=None, name='iteration')\r\n\r\n self._observations_ph = tf.placeholder(\r\n tf.float32,\r\n shape=(None, *self._observation_shape),\r\n name='observation',\r\n )\r\n\r\n self._next_observations_ph = tf.placeholder(\r\n tf.float32,\r\n shape=(None, *self._observation_shape),\r\n name='next_observation',\r\n )\r\n\r\n self._actions_ph = tf.placeholder(\r\n tf.float32,\r\n shape=(None, *self._action_shape),\r\n name='actions',\r\n )\r\n\r\n self._rewards_ph = tf.placeholder(\r\n tf.float32,\r\n shape=(None, 1),\r\n name='rewards',\r\n )\r\n\r\n self._stds_ph = tf.placeholder(\r\n tf.float32,\r\n shape=(None, 1),\r\n name='stds',\r\n )\r\n\r\n self._terminals_ph = tf.placeholder(\r\n tf.float32,\r\n shape=(None, 1),\r\n name='terminals',\r\n )\r\n\r\n if self._store_extra_policy_info:\r\n self._log_pis_ph = tf.placeholder(\r\n tf.float32,\r\n shape=(None, 1),\r\n name='log_pis',\r\n )\r\n self._raw_actions_ph = tf.placeholder(\r\n 
tf.float32,\r\n shape=(None, *self._action_shape),\r\n name='raw_actions',\r\n )\r\n\r\n def _get_Q_target(self):\r\n next_actions = self._policy.actions([self._next_observations_ph])\r\n next_log_pis = self._policy.log_pis(\r\n [self._next_observations_ph], next_actions)\r\n\r\n next_Qs_values = tuple(\r\n Q([self._next_observations_ph, next_actions])\r\n for Q in self._Q_targets)\r\n\r\n min_next_Q = tf.reduce_min(next_Qs_values, axis=0)\r\n next_value = min_next_Q - self._alpha * next_log_pis\r\n\r\n Q_target = td_target(\r\n reward=self._reward_scale * self._rewards_ph,\r\n discount=self._discount,\r\n next_value=(1 - self._terminals_ph) * next_value)\r\n\r\n return Q_target\r\n\r\n def _init_critic_update(self):\r\n \"\"\"Create minimization operation for critic Q-function.\r\n\r\n Creates a `tf.optimizer.minimize` operation for updating\r\n critic Q-function with gradient descent, and appends it to\r\n `self._training_ops` attribute.\r\n \"\"\"\r\n Q_target = tf.stop_gradient(self._get_Q_target())\r\n\r\n assert Q_target.shape.as_list() == [None, 1]\r\n # weighted critic loss\r\n temperature_critic = 5.0\r\n weight_target_Q = tf.stop_gradient(tf.sigmoid(-self._stds_ph * temperature_critic))\r\n Q_values = self._Q_values = tuple(\r\n Q([self._observations_ph, self._actions_ph])\r\n for Q in self._Qs)\r\n\r\n Q_losses = self._Q_losses = tuple(\r\n tf.losses.mean_squared_error(\r\n labels=Q_target, predictions=Q_value, weights=weight_target_Q)\r\n for Q_value in Q_values)\r\n\r\n self._Q_optimizers = tuple(\r\n tf.train.AdamOptimizer(\r\n learning_rate=self._Q_lr,\r\n name='{}_{}_optimizer'.format(Q._name, i)\r\n ) for i, Q in enumerate(self._Qs))\r\n Q_training_ops = tuple(\r\n tf.contrib.layers.optimize_loss(\r\n Q_loss,\r\n self.global_step,\r\n learning_rate=self._Q_lr,\r\n optimizer=Q_optimizer,\r\n variables=Q.trainable_variables,\r\n increment_global_step=False,\r\n summaries=((\r\n \"loss\", \"gradients\", \"gradient_norm\", \"global_gradient_norm\"\r\n ) if self._tf_summaries else ()))\r\n for i, (Q, Q_loss, Q_optimizer)\r\n in enumerate(zip(self._Qs, Q_losses, self._Q_optimizers)))\r\n\r\n self._training_ops.update({'Q': tf.group(Q_training_ops)})\r\n\r\n def _init_actor_update(self):\r\n \"\"\"Create minimization operations for policy and entropy.\r\n\r\n Creates a `tf.optimizer.minimize` operations for updating\r\n policy and entropy with gradient descent, and adds them to\r\n `self._training_ops` attribute.\r\n \"\"\"\r\n\r\n actions = self._policy.actions([self._observations_ph])\r\n log_pis = self._policy.log_pis([self._observations_ph], actions)\r\n\r\n assert log_pis.shape.as_list() == [None, 1]\r\n\r\n log_alpha = self._log_alpha = tf.get_variable(\r\n 'log_alpha',\r\n dtype=tf.float32,\r\n initializer=0.0)\r\n alpha = tf.exp(log_alpha)\r\n\r\n if isinstance(self._target_entropy, Number):\r\n alpha_loss = -tf.reduce_mean(\r\n log_alpha * tf.stop_gradient(log_pis + self._target_entropy))\r\n\r\n self._alpha_optimizer = tf.train.AdamOptimizer(\r\n self._policy_lr, name='alpha_optimizer')\r\n self._alpha_train_op = self._alpha_optimizer.minimize(\r\n loss=alpha_loss, var_list=[log_alpha])\r\n\r\n self._training_ops.update({\r\n 'temperature_alpha': self._alpha_train_op\r\n })\r\n\r\n self._alpha = alpha\r\n\r\n if self._action_prior == 'normal':\r\n policy_prior = tf.contrib.distributions.MultivariateNormalDiag(\r\n loc=tf.zeros(self._action_shape),\r\n scale_diag=tf.ones(self._action_shape))\r\n policy_prior_log_probs = policy_prior.log_prob(actions)\r\n elif 
self._action_prior == 'uniform':\r\n policy_prior_log_probs = 0.0\r\n\r\n Q_log_targets = tuple(\r\n Q([self._observations_ph, actions])\r\n for Q in self._Qs)\r\n min_Q_log_target = tf.reduce_min(Q_log_targets, axis=0)\r\n\r\n # weighted actor loss\r\n temperature_act = 5.0\r\n weight_actor_Q = tf.stop_gradient(tf.sigmoid(-self._stds_ph * temperature_act) + 0.5)\r\n if self._reparameterize:\r\n policy_kl_losses = (\r\n alpha * log_pis\r\n - min_Q_log_target\r\n - policy_prior_log_probs) * weight_actor_Q\r\n else:\r\n raise NotImplementedError\r\n\r\n assert policy_kl_losses.shape.as_list() == [None, 1]\r\n\r\n policy_loss = tf.reduce_mean(policy_kl_losses)\r\n\r\n self._policy_optimizer = tf.train.AdamOptimizer(\r\n learning_rate=self._policy_lr,\r\n name=\"policy_optimizer\")\r\n policy_train_op = tf.contrib.layers.optimize_loss(\r\n policy_loss,\r\n self.global_step,\r\n learning_rate=self._policy_lr,\r\n optimizer=self._policy_optimizer,\r\n variables=self._policy.trainable_variables,\r\n increment_global_step=False,\r\n summaries=(\r\n \"loss\", \"gradients\", \"gradient_norm\", \"global_gradient_norm\"\r\n ) if self._tf_summaries else ())\r\n\r\n self._training_ops.update({'policy_train_op': policy_train_op})\r\n\r\n def _init_training(self):\r\n self._update_target(tau=1.0)\r\n\r\n def _update_target(self, tau=None):\r\n tau = tau or self._tau\r\n\r\n for Q, Q_target in zip(self._Qs, self._Q_targets):\r\n source_params = Q.get_weights()\r\n target_params = Q_target.get_weights()\r\n Q_target.set_weights([\r\n tau * source + (1.0 - tau) * target\r\n for source, target in zip(source_params, target_params)\r\n ])\r\n\r\n def _do_training(self, iteration, batch):\r\n \"\"\"Runs the operations for updating training and target ops.\"\"\"\r\n\r\n self._training_progress.update()\r\n self._training_progress.set_description()\r\n\r\n feed_dict = self._get_feed_dict(iteration, batch)\r\n\r\n self._session.run(self._training_ops, feed_dict)\r\n\r\n if iteration % self._target_update_interval == 0:\r\n # Run target ops here.\r\n self._update_target()\r\n\r\n def _get_feed_dict(self, iteration, batch):\r\n \"\"\"Construct TensorFlow feed_dict from sample batch.\"\"\"\r\n\r\n feed_dict = {\r\n self._observations_ph: batch['observations'],\r\n self._actions_ph: batch['actions'],\r\n self._next_observations_ph: batch['next_observations'],\r\n self._rewards_ph: batch['rewards'],\r\n self._terminals_ph: batch['terminals'],\r\n self._stds_ph: batch['stds'],\r\n }\r\n\r\n if self._store_extra_policy_info:\r\n feed_dict[self._log_pis_ph] = batch['log_pis']\r\n feed_dict[self._raw_actions_ph] = batch['raw_actions']\r\n\r\n if iteration is not None:\r\n feed_dict[self._iteration_ph] = iteration\r\n\r\n return feed_dict\r\n\r\n def get_diagnostics(self,\r\n iteration,\r\n batch,\r\n training_paths,\r\n evaluation_paths):\r\n \"\"\"Return diagnostic information as ordered dictionary.\r\n\r\n Records mean and standard deviation of Q-function and state\r\n value function, and TD-loss (mean squared Bellman error)\r\n for the sample batch.\r\n\r\n Also calls the `draw` method of the plotter, if plotter defined.\r\n \"\"\"\r\n\r\n feed_dict = self._get_feed_dict(iteration, batch)\r\n\r\n (Q_values, Q_losses, alpha, global_step) = self._session.run(\r\n (self._Q_values,\r\n self._Q_losses,\r\n self._alpha,\r\n self.global_step),\r\n feed_dict)\r\n\r\n diagnostics = OrderedDict({\r\n 'Q-avg': np.mean(Q_values),\r\n 'Q-std': np.std(Q_values),\r\n 'Q_loss': np.mean(Q_losses),\r\n 'alpha': alpha,\r\n })\r\n\r\n 
policy_diagnostics = self._policy.get_diagnostics(\r\n batch['observations'])\r\n diagnostics.update({\r\n f'policy/{key}': value\r\n for key, value in policy_diagnostics.items()\r\n })\r\n\r\n if self._plotter:\r\n self._plotter.draw()\r\n\r\n return diagnostics\r\n\r\n @property\r\n def tf_saveables(self):\r\n saveables = {\r\n '_policy_optimizer': self._policy_optimizer,\r\n **{\r\n f'Q_optimizer_{i}': optimizer\r\n for i, optimizer in enumerate(self._Q_optimizers)\r\n },\r\n '_log_alpha': self._log_alpha,\r\n }\r\n\r\n if hasattr(self, '_alpha_optimizer'):\r\n saveables['_alpha_optimizer'] = self._alpha_optimizer\r\n\r\n return saveables\r\n"
] | [
[
"tensorflow.get_variable",
"tensorflow.python.training.training_util._increment_global_step",
"tensorflow.keras.models.clone_model",
"tensorflow.zeros",
"numpy.concatenate",
"numpy.mean",
"tensorflow.train.AdamOptimizer",
"tensorflow.group",
"tensorflow.stop_gradient",
"numpy.std",
"tensorflow.placeholder",
"tensorflow.exp",
"tensorflow.losses.mean_squared_error",
"tensorflow.reduce_mean",
"tensorflow.python.training.training_util.get_or_create_global_step",
"tensorflow.sigmoid",
"tensorflow.ones",
"tensorflow.reduce_min",
"numpy.prod",
"tensorflow.contrib.layers.optimize_loss"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
}
] |
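Note on the row above: its code field is mbpo/algorithms/meee.py, a SAC-style algorithm in which the critic and actor losses are down-weighted by the model ensemble's predictive std (tf.sigmoid(-stds * temperature)) and Q targets are built with td_target(reward, discount, next_value). Below is a small NumPy sketch of those two pieces; the std and reward values are made up for illustration.

import numpy as np

def td_target(reward, discount, next_value):
    # Same definition as in meee.py above.
    return reward + discount * next_value

def sigmoid(z):
    return 1.0 / (1.0 + np.exp(-z))

# temperature_critic = 5.0 in _init_critic_update; higher model std -> lower weight.
stds = np.array([[0.05], [0.5], [2.0]])
weight_target_Q = sigmoid(-stds * 5.0)

print(td_target(reward=1.0, discount=0.99, next_value=10.0))  # 10.9
print(weight_target_Q.ravel())  # roughly [0.44, 0.076, 4.5e-05]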
RyoTTa/geopm | [
"74246c8ce70ee47f53bc5629638f51c2c391027b"
] | [
"test_integration/geopm_test_integration.py"
] | [
"#!/usr/bin/env python\n#\n# Copyright (c) 2015, 2016, 2017, 2018, 2019, Intel Corporation\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions\n# are met:\n#\n# * Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n#\n# * Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the following disclaimer in\n# the documentation and/or other materials provided with the\n# distribution.\n#\n# * Neither the name of Intel Corporation nor the names of its\n# contributors may be used to endorse or promote products derived\n# from this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n# \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\n# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY LOG OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n#\n\nfrom __future__ import absolute_import\n\nfrom __future__ import division\nfrom future import standard_library\nstandard_library.install_aliases()\nfrom builtins import str\nimport os\nimport sys\nimport unittest\nimport subprocess\nimport time\nimport pandas\nimport collections\nimport socket\nimport shlex\nimport json\n\nsys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))\nfrom test_integration import util\nfrom test_integration import geopm_test_launcher\nimport geopmpy.io\nimport geopmpy.launcher\n\ndef create_frequency_map_policy(min_freq, max_freq, frequency_map, use_env=False):\n \"\"\"Create a frequency map to be consumed by the frequency map agent.\n\n Arguments:\n min_freq: Floor frequency for the agent\n max_freq: Ceiling frequency for the agent\n frequency_map: Dictionary mapping region names to frequencies\n use_env: If true, apply the map to an environment variable, and return\n the policy needed when the environment variable is in use.\n Otherwise, clear the environment variable and return the policy\n needed when the variable is not in use.\n \"\"\"\n policy = {'frequency_min': min_freq, 'frequency_max': max_freq}\n known_hashes = {\n 'dgemm': 0x00000000a74bbf35,\n 'all2all': 0x000000003ddc81bf,\n 'stream': 0x00000000d691da00,\n 'sleep': 0x00000000536c798f,\n 'MPI_Barrier': 0x000000007b561f45,\n 'model-init': 0x00000000644f9787,\n 'unmarked-region': 0x00000000725e8066 }\n\n if use_env:\n os.environ['GEOPM_FREQUENCY_MAP'] = json.dumps(frequency_map)\n else:\n if 'GEOPM_FREQUENCY_MAP' in os.environ:\n os.environ.pop('GEOPM_FREQUENCY_MAP')\n for i, (region_name, frequency) in enumerate(frequency_map.items()):\n region_hash = known_hashes[region_name]\n policy['HASH_{}'.format(i)] = int(region_hash)\n policy['FREQ_{}'.format(i)] = frequency\n\n return policy\n\nclass TestIntegration(unittest.TestCase):\n def setUp(self):\n self.longMessage = True\n self._agent = 'power_governor'\n 
self._options = {'power_budget': 150}\n self._tmp_files = []\n self._output = None\n self._power_limit = geopm_test_launcher.geopmread(\"MSR::PKG_POWER_LIMIT:PL1_POWER_LIMIT board 0\")\n self._frequency = geopm_test_launcher.geopmread(\"MSR::PERF_CTL:FREQ board 0\")\n self._original_freq_map_env = os.environ.get('GEOPM_FREQUENCY_MAP')\n\n def tearDown(self):\n geopm_test_launcher.geopmwrite(\"MSR::PKG_POWER_LIMIT:PL1_POWER_LIMIT board 0 \" + str(self._power_limit))\n geopm_test_launcher.geopmwrite(\"MSR::PERF_CTL:FREQ board 0 \" + str(self._frequency))\n if sys.exc_info() == (None, None, None) and os.getenv('GEOPM_KEEP_FILES') is None:\n if self._output is not None:\n self._output.remove_files()\n for ff in self._tmp_files:\n try:\n os.remove(ff)\n except OSError:\n pass\n if self._original_freq_map_env is None:\n if 'GEOPM_FREQUENCY_MAP' in os.environ:\n os.environ.pop('GEOPM_FREQUENCY_MAP')\n else:\n os.environ['GEOPM_FREQUENCY_MAP'] = self._original_freq_map_env\n\n def assertNear(self, a, b, epsilon=0.05, msg=''):\n denom = a if a != 0 else 1\n if abs((a - b) / denom) >= epsilon:\n self.fail('The fractional difference between {a} and {b} is greater than {epsilon}. {msg}'.format(a=a, b=b, epsilon=epsilon, msg=msg))\n\n def create_progress_df(self, df):\n # Build a df with only the first region entry and the exit.\n df = df.reset_index(drop=True)\n last_index = 0\n filtered_df = pandas.DataFrame()\n row_list = []\n progress_1s = df['REGION_PROGRESS'].loc[df['REGION_PROGRESS'] == 1]\n for index, _ in progress_1s.iteritems():\n row = df.loc[last_index:index].head(1)\n row_list += [row[['TIME', 'REGION_PROGRESS', 'REGION_RUNTIME']]]\n row = df.loc[last_index:index].tail(1)\n row_list += [row[['TIME', 'REGION_PROGRESS', 'REGION_RUNTIME']]]\n last_index = index + 1 # Set the next starting index to be one past where we are\n filtered_df = pandas.concat(row_list)\n return filtered_df\n\n def test_report_and_trace_generation(self):\n name = 'test_report_and_trace_generation'\n report_path = name + '.report'\n trace_path = name + '.trace'\n num_node = 4\n num_rank = 16\n app_conf = geopmpy.io.BenchConf(name + '_app.config')\n self._tmp_files.append(app_conf.get_path())\n app_conf.append_region('sleep', 1.0)\n agent_conf = geopmpy.io.AgentConf(name + '_agent.config', self._agent, self._options)\n self._tmp_files.append(agent_conf.get_path())\n launcher = geopm_test_launcher.TestLauncher(app_conf, agent_conf, report_path, trace_path)\n launcher.set_num_node(num_node)\n launcher.set_num_rank(num_rank)\n launcher.run(name)\n\n self._output = geopmpy.io.AppOutput(report_path, trace_path + '*')\n node_names = self._output.get_node_names()\n self.assertEqual(num_node, len(node_names))\n for nn in node_names:\n report = self._output.get_report_data(node_name=nn)\n self.assertNotEqual(0, len(report))\n trace = self._output.get_trace_data(node_name=nn)\n self.assertNotEqual(0, len(trace))\n\n def test_no_report_and_trace_generation(self):\n name = 'test_no_report_and_trace_generation'\n num_node = 4\n num_rank = 16\n app_conf = geopmpy.io.BenchConf(name + '_app.config')\n self._tmp_files.append(app_conf.get_path())\n app_conf.append_region('sleep', 1.0)\n agent_conf = geopmpy.io.AgentConf(name + '_agent.config', self._agent, self._options)\n self._tmp_files.append(agent_conf.get_path())\n launcher = geopm_test_launcher.TestLauncher(app_conf, agent_conf)\n launcher.set_num_node(num_node)\n launcher.set_num_rank(num_rank)\n launcher.run(name)\n\n @unittest.skipUnless('mr-fusion' in socket.gethostname(), 
\"This test only enabled on known working systems.\")\n def test_report_and_trace_generation_pthread(self):\n name = 'test_report_and_trace_generation_pthread'\n report_path = name + '.report'\n trace_path = name + '.trace'\n num_node = 4\n num_rank = 16\n app_conf = geopmpy.io.BenchConf(name + '_app.config')\n self._tmp_files.append(app_conf.get_path())\n app_conf.append_region('sleep', 1.0)\n agent_conf = geopmpy.io.AgentConf(name + '_agent.config', self._agent, self._options)\n self._tmp_files.append(agent_conf.get_path())\n launcher = geopm_test_launcher.TestLauncher(app_conf, agent_conf, report_path, trace_path)\n launcher.set_num_node(num_node)\n launcher.set_num_rank(num_rank)\n launcher.set_pmpi_ctl('pthread')\n launcher.run(name)\n\n self._output = geopmpy.io.AppOutput(report_path, trace_path + '*')\n node_names = self._output.get_node_names()\n self.assertEqual(num_node, len(node_names))\n for nn in node_names:\n report = self._output.get_report_data(node_name=nn)\n self.assertNotEqual(0, len(report))\n trace = self._output.get_trace_data(node_name=nn)\n self.assertNotEqual(0, len(trace))\n\n @unittest.skipUnless(geopm_test_launcher.detect_launcher() != \"aprun\",\n 'ALPS does not support multi-application launch on the same nodes.')\n @util.skip_unless_batch()\n def test_report_and_trace_generation_application(self):\n name = 'test_report_and_trace_generation_application'\n report_path = name + '.report'\n trace_path = name + '.trace'\n num_node = 4\n num_rank = 16\n app_conf = geopmpy.io.BenchConf(name + '_app.config')\n self._tmp_files.append(app_conf.get_path())\n app_conf.append_region('sleep', 1.0)\n agent_conf = geopmpy.io.AgentConf(name + '_agent.config', self._agent, self._options)\n self._tmp_files.append(agent_conf.get_path())\n launcher = geopm_test_launcher.TestLauncher(app_conf, agent_conf, report_path, trace_path)\n launcher.set_num_node(num_node)\n launcher.set_num_rank(num_rank)\n launcher.set_pmpi_ctl('application')\n launcher.run(name)\n\n self._output = geopmpy.io.AppOutput(report_path, trace_path + '*')\n node_names = self._output.get_node_names()\n self.assertEqual(num_node, len(node_names))\n for nn in node_names:\n report = self._output.get_report_data(node_name=nn)\n self.assertNotEqual(0, len(report))\n trace = self._output.get_trace_data(node_name=nn)\n self.assertNotEqual(0, len(trace))\n\n @unittest.skipUnless(geopm_test_launcher.detect_launcher() == \"srun\" and os.getenv('SLURM_NODELIST') is None,\n 'Requires non-sbatch SLURM session for alloc\\'d and idle nodes.')\n def test_report_generation_all_nodes(self):\n name = 'test_report_generation_all_nodes'\n report_path = name + '.report'\n num_node = 1\n num_rank = 1\n delay = 1.0\n app_conf = geopmpy.io.BenchConf(name + '_app.config')\n self._tmp_files.append(app_conf.get_path())\n app_conf.append_region('sleep', delay)\n agent_conf = geopmpy.io.AgentConf(name + '_agent.config', self._agent, self._options)\n self._tmp_files.append(agent_conf.get_path())\n launcher = geopm_test_launcher.TestLauncher(app_conf, agent_conf, report_path)\n launcher.set_num_node(num_node)\n launcher.set_num_rank(num_rank)\n time.sleep(5) # Wait a moment to finish cleaning-up from a previous test\n idle_nodes = launcher.get_idle_nodes()\n idle_nodes_copy = list(idle_nodes)\n alloc_nodes = launcher.get_alloc_nodes()\n launcher.write_log(name, 'Idle nodes : {nodes}'.format(nodes=idle_nodes))\n launcher.write_log(name, 'Alloc\\'d nodes : {nodes}'.format(nodes=alloc_nodes))\n node_names = []\n for nn in idle_nodes_copy:\n 
launcher.set_node_list(nn.split()) # Hack to convert string to list\n try:\n launcher.run(name)\n node_names += nn.split()\n except subprocess.CalledProcessError as e:\n if e.returncode == 1 and nn not in launcher.get_idle_nodes():\n launcher.write_log(name, '{node} has disappeared from the idle list!'.format(node=nn))\n idle_nodes.remove(nn)\n else:\n launcher.write_log(name, 'Return code = {code}'.format(code=e.returncode))\n raise e\n ao = geopmpy.io.AppOutput(report_path, do_cache=False)\n sleep_data = ao.get_report_data(node_name=nn, region='sleep')\n app_data = ao.get_app_total_data(node_name=nn)\n self.assertNotEqual(0, len(sleep_data))\n self.assertNear(delay, sleep_data['runtime'].item())\n self.assertGreater(app_data['runtime'].item(), sleep_data['runtime'].item())\n self.assertEqual(1, sleep_data['count'].item())\n\n self.assertEqual(len(node_names), len(idle_nodes))\n\n def test_runtime(self):\n name = 'test_runtime'\n report_path = name + '.report'\n num_node = 1\n num_rank = 5\n delay = 3.0\n app_conf = geopmpy.io.BenchConf(name + '_app.config')\n self._tmp_files.append(app_conf.get_path())\n app_conf.append_region('sleep', delay)\n agent_conf = geopmpy.io.AgentConf(name + '_agent.config', self._agent, self._options)\n self._tmp_files.append(agent_conf.get_path())\n launcher = geopm_test_launcher.TestLauncher(app_conf, agent_conf, report_path)\n launcher.set_num_node(num_node)\n launcher.set_num_rank(num_rank)\n launcher.run(name)\n self._output = geopmpy.io.AppOutput(report_path)\n node_names = self._output.get_node_names()\n self.assertEqual(num_node, len(node_names))\n for nn in node_names:\n report = self._output.get_report_data(node_name=nn, region='sleep')\n app_total = self._output.get_app_total_data(node_name=nn)\n self.assertNear(delay, report['runtime'].item())\n self.assertGreater(app_total['runtime'].item(), report['runtime'].item())\n\n def test_runtime_epoch(self):\n name = 'test_runtime_epoch'\n report_path = name + '.report'\n num_node = 1\n num_rank = 5\n delay = 3.0\n app_conf = geopmpy.io.BenchConf(name + '_app.config')\n self._tmp_files.append(app_conf.get_path())\n app_conf.append_region('sleep', delay)\n app_conf.append_region('spin', delay)\n agent_conf = geopmpy.io.AgentConf(name + '_agent.config', self._agent, self._options)\n self._tmp_files.append(agent_conf.get_path())\n launcher = geopm_test_launcher.TestLauncher(app_conf, agent_conf, report_path)\n launcher.set_num_node(num_node)\n launcher.set_num_rank(num_rank)\n launcher.run(name)\n self._output = geopmpy.io.AppOutput(report_path)\n node_names = self._output.get_node_names()\n self.assertEqual(num_node, len(node_names))\n for nn in node_names:\n spin_data = self._output.get_report_data(node_name=nn, region='spin')\n sleep_data = self._output.get_report_data(node_name=nn, region='sleep')\n epoch_data = self._output.get_report_data(node_name=nn, region='epoch')\n total_runtime = sleep_data['runtime'].item() + spin_data['runtime'].item()\n self.assertNear(total_runtime, epoch_data['runtime'].item())\n\n def test_epoch_data_valid(self):\n name = 'test_epoch_data_valid'\n report_path = name + '.report'\n num_node = 1\n num_rank = 1\n big_o = 1.0\n loop_count = 10\n app_conf = geopmpy.io.BenchConf(name + '_app.config')\n self._tmp_files.append(app_conf.get_path())\n app_conf.set_loop_count(loop_count)\n app_conf.append_region('spin-unmarked', big_o)\n agent_conf = geopmpy.io.AgentConf(name + '_agent.config', self._agent, self._options)\n self._tmp_files.append(agent_conf.get_path())\n launcher = 
geopm_test_launcher.TestLauncher(app_conf, agent_conf, report_path)\n launcher.set_num_node(num_node)\n launcher.set_num_rank(num_rank)\n launcher.run(name)\n\n report = geopmpy.io.RawReport(report_path)\n node_names = report.host_names()\n self.assertEqual(num_node, len(node_names))\n for nn in node_names:\n regions = report.region_names(nn)\n self.assertTrue('model-init' not in regions)\n totals = report.raw_totals(nn)\n unmarked = report.raw_region(nn, 'unmarked-region')\n epoch = report.raw_epoch(nn)\n\n # Epoch has valid data\n self.assertGreater(epoch['runtime (sec)'], 0)\n self.assertGreater(epoch['sync-runtime (sec)'], 0)\n self.assertGreater(epoch['package-energy (joules)'], 0)\n self.assertGreater(epoch['dram-energy (joules)'], 0)\n self.assertGreater(epoch['power (watts)'], 0)\n self.assertGreater(epoch['frequency (%)'], 0)\n self.assertGreater(epoch['frequency (Hz)'], 0)\n self.assertEqual(epoch['count'], loop_count)\n\n # Runtime\n self.assertTrue(totals['runtime (sec)'] > unmarked['runtime (sec)'] >= epoch['runtime (sec)'],\n '''The total runtime is NOT > the unmarked runtime or the unmarked runtime is NOT\n >= the Epoch runtime.''')\n\n # Package Energy (joules)\n self.assertTrue(totals['package-energy (joules)'] >\n unmarked['package-energy (joules)'] >=\n epoch['package-energy (joules)'],\n '''The total package energy (joules) is NOT > the unmarked package energy (joules)\n or the unmarked package energy (joules) is NOT >= the Epoch package\n energy (joules).''')\n\n # DRAM Energy\n self.assertTrue(totals['dram-energy (joules)'] >\n unmarked['dram-energy (joules)'] >=\n epoch['dram-energy (joules)'],\n '''The total dram energy is NOT > the unmarked dram energy or the unmarked\n dram energy is NOT >= the Epoch dram energy.''')\n\n # Sync-runtime\n self.assertTrue(unmarked['sync-runtime (sec)'] >= epoch['sync-runtime (sec)'],\n '''The sync-runtime for the unmarked region is NOT >= the Epoch sync-runtime.''')\n\n\n def test_runtime_nested(self):\n name = 'test_runtime_nested'\n report_path = name + '.report'\n num_node = 1\n num_rank = 1\n delay = 1.0\n loop_count = 2\n app_conf = geopmpy.io.BenchConf(name + '_app.config')\n self._tmp_files.append(app_conf.get_path())\n app_conf.set_loop_count(loop_count)\n app_conf.append_region('nested-progress', delay)\n agent_conf = geopmpy.io.AgentConf(name + '_agent.config', self._agent, self._options)\n self._tmp_files.append(agent_conf.get_path())\n launcher = geopm_test_launcher.TestLauncher(app_conf, agent_conf, report_path)\n launcher.set_num_node(num_node)\n launcher.set_num_rank(num_rank)\n launcher.run(name)\n self._output = geopmpy.io.AppOutput(report_path)\n node_names = self._output.get_node_names()\n self.assertEqual(num_node, len(node_names))\n for nn in node_names:\n spin_data = self._output.get_report_data(node_name=nn, region='spin')\n epoch_data = self._output.get_report_data(node_name=nn, region='epoch')\n app_totals = self._output.get_app_total_data(node_name=nn)\n # The spin sections of this region sleep for 'delay' seconds twice per loop.\n self.assertNear(2 * loop_count * delay, spin_data['runtime'].item())\n self.assertNear(spin_data['runtime'].item(), epoch_data['runtime'].item(), epsilon=0.01)\n self.assertGreater(app_totals['network-time'].item(), 0)\n self.assertGreater(0.1, app_totals['network-time'].item())\n self.assertEqual(loop_count, spin_data['count'].item())\n\n def test_trace_runtimes(self):\n name = 'test_trace_runtimes'\n report_path = name + '.report'\n trace_path = name + '.trace'\n num_node = 4\n 
num_rank = 16\n app_conf = geopmpy.io.BenchConf(name + '_app.config')\n self._tmp_files.append(app_conf.get_path())\n app_conf.append_region('sleep', 1.0)\n app_conf.append_region('dgemm', 1.0)\n app_conf.append_region('all2all', 1.0)\n agent_conf = geopmpy.io.AgentConf(name + '_agent.config', self._agent, self._options)\n self._tmp_files.append(agent_conf.get_path())\n launcher = geopm_test_launcher.TestLauncher(app_conf, agent_conf, report_path,\n trace_path, region_barrier=True)\n launcher.set_num_node(num_node)\n launcher.set_num_rank(num_rank)\n launcher.run(name)\n\n self._output = geopmpy.io.AppOutput(report_path, trace_path + '*')\n node_names = self._output.get_node_names()\n self.assertEqual(len(node_names), num_node)\n regions = self._output.get_region_names()\n for nn in node_names:\n trace = self._output.get_trace_data(node_name=nn)\n app_totals = self._output.get_app_total_data(node_name=nn)\n self.assertNear(trace.iloc[-1]['TIME'], app_totals['runtime'].item(), msg='Application runtime failure, node_name={}.'.format(nn))\n # Calculate runtime totals for each region in each trace, compare to report\n tt = trace.reset_index(level='index') # move 'index' field from multiindex to columns\n tt = tt.set_index(['REGION_HASH'], append=True) # add region_hash column to multiindex\n tt_reg = tt.groupby(level=['REGION_HASH'])\n for region_name in regions:\n region_data = self._output.get_report_data(node_name=nn, region=region_name)\n if (region_name not in ['unmarked-region', 'model-init', 'epoch'] and\n not region_name.startswith('MPI_') and\n region_data['sync_runtime'].item() != 0):\n region_hash = region_data['id'].item()\n trace_data = tt_reg.get_group(region_hash)\n start_idx = trace_data.iloc[0]['index']\n end_idx = trace_data.iloc[-1]['index'] + 1 # use time from sample after exiting region\n start_time = tt.loc[tt['index'] == start_idx]['TIME'].item()\n end_time = tt.loc[tt['index'] == end_idx]['TIME'].item()\n trace_elapsed_time = end_time - start_time\n msg = 'for region {rn} on node {nn}'.format(rn=region_name, nn=nn)\n self.assertNear(trace_elapsed_time, region_data['sync_runtime'].item(), msg=msg)\n #epoch\n region_data = self._output.get_report_data(node_name=nn, region='epoch')\n trace_elapsed_time = trace.iloc[-1]['TIME'] - trace['TIME'].loc[trace['EPOCH_COUNT'] == 0].iloc[0]\n msg = 'for epoch on node {nn}'.format(nn=nn)\n self.assertNear(trace_elapsed_time, region_data['runtime'].item(), msg=msg)\n\n @util.skip_unless_config_enable('bloat')\n def test_runtime_regulator(self):\n name = 'test_runtime_regulator'\n report_path = name + '.report'\n trace_path = name + '.trace'\n num_node = 1\n num_rank = 4\n loop_count = 20\n app_conf = geopmpy.io.BenchConf(name + '_app.config')\n self._tmp_files.append(app_conf.get_path())\n app_conf.set_loop_count(loop_count)\n sleep_big_o = 1.0\n spin_big_o = 0.5\n expected_region_runtime = {'spin': spin_big_o, 'sleep': sleep_big_o}\n app_conf.append_region('sleep', sleep_big_o)\n app_conf.append_region('spin', spin_big_o)\n agent_conf = geopmpy.io.AgentConf(name + '_agent.config', self._agent, self._options)\n self._tmp_files.append(agent_conf.get_path())\n launcher = geopm_test_launcher.TestLauncher(app_conf, agent_conf, report_path, trace_path, region_barrier=True)\n launcher.set_num_node(num_node)\n launcher.set_num_rank(num_rank)\n launcher.run(name)\n\n self._output = geopmpy.io.AppOutput(report_path, trace_path + '*')\n node_names = self._output.get_node_names()\n self.assertEqual(len(node_names), num_node)\n regions = 
self._output.get_region_names()\n for nn in node_names:\n app_totals = self._output.get_app_total_data(node_name=nn)\n trace = self._output.get_trace_data(node_name=nn)\n self.assertNear(trace.iloc[-1]['TIME'], app_totals['runtime'].item())\n tt = trace.set_index(['REGION_HASH'], append=True)\n tt = tt.groupby(level=['REGION_HASH'])\n for region_name in regions:\n region_data = self._output.get_report_data(node_name=nn, region=region_name)\n if region_name not in ['unmarked-region', 'model-init', 'epoch'] and not region_name.startswith('MPI_') and region_data['runtime'].item() != 0:\n trace_data = tt.get_group(region_data['id'].item())\n filtered_df = self.create_progress_df(trace_data)\n first_time = False\n epsilon = 0.001 if region_name != 'sleep' else 0.05\n for index, df in filtered_df.iterrows():\n if df['REGION_PROGRESS'] == 1:\n self.assertNear(df['REGION_RUNTIME'], expected_region_runtime[region_name], epsilon=epsilon)\n first_time = True\n if first_time is True and df['REGION_PROGRESS'] == 0:\n self.assertNear(df['REGION_RUNTIME'], expected_region_runtime[region_name], epsilon=epsilon)\n\n @util.skip_unless_run_long_tests()\n @util.skip_unless_config_enable('bloat')\n def test_region_runtimes(self):\n name = 'test_region_runtimes'\n report_path = name + '.report'\n trace_path = name + '.trace'\n num_node = 4\n num_rank = 16\n loop_count = 500\n app_conf = geopmpy.io.BenchConf(name + '_app.config')\n self._tmp_files.append(app_conf.get_path())\n app_conf.append_region('dgemm', 8.0)\n app_conf.set_loop_count(loop_count)\n agent_conf = geopmpy.io.AgentConf(name + '_agent.config', self._agent, self._options)\n self._tmp_files.append(agent_conf.get_path())\n launcher = geopm_test_launcher.TestLauncher(app_conf, agent_conf, report_path, trace_path, time_limit=900)\n launcher.set_num_node(num_node)\n launcher.set_num_rank(num_rank)\n launcher.run(name)\n\n self._output = geopmpy.io.AppOutput(report_path, trace_path + '*')\n node_names = self._output.get_node_names()\n self.assertEqual(len(node_names), num_node)\n\n # Calculate region times from traces\n region_times = collections.defaultdict(lambda: collections.defaultdict(dict))\n for nn in node_names:\n tt = self._output.get_trace_data(node_name=nn).set_index(['REGION_HASH'], append=True).groupby(level=['REGION_HASH'])\n\n for region_hash, data in tt:\n filtered_df = self.create_progress_df(data)\n filtered_df = filtered_df.diff()\n # Since I'm not separating out the progress 0's from 1's, when I do the diff I only care about the\n # case where 1 - 0 = 1 for the progress column.\n filtered_df = filtered_df.loc[filtered_df['REGION_PROGRESS'] == 1]\n\n if len(filtered_df) > 1:\n launcher.write_log(name, 'Region elapsed time stats from {} - {} :\\n{}'\\\n .format(nn, region_hash, filtered_df['TIME'].describe()))\n filtered_df['TIME'].describe()\n region_times[nn][region_hash] = filtered_df\n\n launcher.write_log(name, '{}'.format('-' * 80))\n\n # Loop through the reports to see if the region runtimes line up with what was calculated from the trace files above.\n regions = self._output.get_region_names()\n write_regions = True\n for nn in node_names:\n for region_name in regions:\n rr = self._output.get_report_data(node_name=nn, region=region_name)\n if (region_name != 'epoch' and\n rr['id'].item() != 0 and\n rr['count'].item() > 1):\n if write_regions:\n launcher.write_log(name, 'Region {} is {}.'.format(rr['id'].item(), region_name))\n runtime = rr['sync_runtime'].item()\n self.assertNear(runtime,\n 
region_times[nn][rr['id'].item()]['TIME'].sum())\n write_regions = False\n\n # Test to ensure every region detected in the trace is captured in the report.\n for nn in node_names:\n report_ids = []\n for region_name in regions:\n rr = self._output.get_report_data(node_name=nn, region=region_name)\n report_ids.append(rr['id'].item())\n for region_hash in region_times[nn].keys():\n self.assertTrue(region_hash in report_ids, msg='Report from {} missing region_hash {}'.format(nn, region_hash))\n\n def test_progress(self):\n name = 'test_progress'\n report_path = name + '.report'\n num_node = 1\n num_rank = 4\n delay = 3.0\n app_conf = geopmpy.io.BenchConf(name + '_app.config')\n self._tmp_files.append(app_conf.get_path())\n app_conf.append_region('sleep-progress', delay)\n agent_conf = geopmpy.io.AgentConf(name + '_agent.config', self._agent, self._options)\n self._tmp_files.append(agent_conf.get_path())\n launcher = geopm_test_launcher.TestLauncher(app_conf, agent_conf, report_path)\n launcher.set_num_node(num_node)\n launcher.set_num_rank(num_rank)\n launcher.run(name)\n self._output = geopmpy.io.AppOutput(report_path)\n node_names = self._output.get_node_names()\n self.assertEqual(len(node_names), num_node)\n for nn in node_names:\n sleep_data = self._output.get_report_data(node_name=nn, region='sleep')\n app_total = self._output.get_app_total_data(node_name=nn)\n self.assertNear(delay, sleep_data['runtime'].item())\n self.assertGreater(app_total['runtime'].item(), sleep_data['runtime'].item())\n self.assertEqual(1, sleep_data['count'].item())\n\n def test_count(self):\n name = 'test_count'\n report_path = name + '.report'\n trace_path = name + '.trace'\n num_node = 1\n num_rank = 4\n delay = 0.01\n loop_count = 100\n app_conf = geopmpy.io.BenchConf(name + '_app.config')\n self._tmp_files.append(app_conf.get_path())\n app_conf.set_loop_count(loop_count)\n app_conf.append_region('spin', delay)\n agent_conf = geopmpy.io.AgentConf(name + '_agent.config', self._agent, self._options)\n self._tmp_files.append(agent_conf.get_path())\n launcher = geopm_test_launcher.TestLauncher(app_conf, agent_conf, report_path, trace_path)\n launcher.set_num_node(num_node)\n launcher.set_num_rank(num_rank)\n launcher.run(name)\n self._output = geopmpy.io.AppOutput(report_path, trace_path + '*')\n node_names = self._output.get_node_names()\n self.assertEqual(len(node_names), num_node)\n for nn in node_names:\n trace_data = self._output.get_trace_data(node_name=nn)\n spin_data = self._output.get_report_data(node_name=nn, region='spin')\n epoch_data = self._output.get_report_data(node_name=nn, region='epoch')\n self.assertNear(delay * loop_count, spin_data['runtime'].item())\n self.assertEqual(loop_count, spin_data['count'].item())\n self.assertEqual(loop_count, epoch_data['count'].item())\n self.assertEqual(loop_count, trace_data['EPOCH_COUNT'][-1])\n\n @util.skip_unless_run_long_tests()\n def test_scaling(self):\n \"\"\"\n This test will start at ${num_node} nodes and ranks. It will then calls check_run() to\n ensure that commands can be executed successfully on all of the allocated compute nodes.\n Afterwards it will run the specified app config on each node and verify the reports. 
When\n complete it will double num_node and run the steps again.\n\n WARNING: This test can take a long time to run depending on the number of starting nodes and\n the size of the allocation.\n \"\"\"\n name = 'test_scaling'\n report_path = name + '.report'\n num_node = 2\n loop_count = 100\n\n app_conf = geopmpy.io.BenchConf(name + '_app.config')\n self._tmp_files.append(app_conf.get_path())\n app_conf.append_region('dgemm', 1.0)\n app_conf.append_region('all2all', 1.0)\n app_conf.set_loop_count(loop_count)\n agent_conf = geopmpy.io.AgentConf(name + '_agent.config', self._agent, self._options)\n self._tmp_files.append(agent_conf.get_path())\n launcher = geopm_test_launcher.TestLauncher(app_conf, agent_conf, report_path, time_limit=900)\n\n check_successful = True\n while check_successful:\n launcher.set_num_node(num_node)\n launcher.set_num_rank(num_node)\n try:\n launcher.check_run(name)\n except subprocess.CalledProcessError as e:\n # If we exceed the available nodes in the allocation ALPS/SLURM give a rc of 1\n # All other rc's are real errors\n if e.returncode != 1:\n raise e\n check_successful = False\n if check_successful:\n launcher.write_log(name, 'About to run on {} nodes.'.format(num_node))\n launcher.run(name)\n self._output = geopmpy.io.AppOutput(report_path)\n node_names = self._output.get_node_names()\n self.assertEqual(len(node_names), num_node)\n for nn in node_names:\n dgemm_data = self._output.get_report_data(node_name=nn, region='dgemm')\n all2all_data = self._output.get_report_data(node_name=nn, region='all2all')\n self.assertEqual(loop_count, dgemm_data['count'].item())\n self.assertEqual(loop_count, all2all_data['count'].item())\n self.assertGreater(dgemm_data['runtime'].item(), 0.0)\n self.assertGreater(all2all_data['runtime'].item(), 0.0)\n num_node *= 2\n self._output.remove_files()\n\n @util.skip_unless_run_long_tests()\n def test_power_consumption(self):\n name = 'test_power_consumption'\n report_path = name + '.report'\n trace_path = name + '.trace'\n num_node = 4\n num_rank = 16\n loop_count = 500\n app_conf = geopmpy.io.BenchConf(name + '_app.config')\n self._tmp_files.append(app_conf.get_path())\n app_conf.append_region('dgemm', 8.0)\n app_conf.set_loop_count(loop_count)\n\n fam, mod = geopm_test_launcher.get_platform()\n if fam == 6 and mod == 87:\n # budget for KNL\n self._options['power_budget'] = 130\n else:\n self._options['power_budget'] = 200\n gov_agent_conf_path = name + '_gov_agent.config'\n self._tmp_files.append(gov_agent_conf_path)\n gov_agent_conf = geopmpy.io.AgentConf(name + '_agent.config', self._agent, self._options)\n launcher = geopm_test_launcher.TestLauncher(app_conf, gov_agent_conf, report_path,\n trace_path, time_limit=900)\n launcher.set_num_node(num_node)\n launcher.set_num_rank(num_rank)\n launcher.write_log(name, 'Power cap = {}W'.format(self._options['power_budget']))\n launcher.run(name)\n\n self._output = geopmpy.io.AppOutput(report_path, trace_path + '*')\n node_names = self._output.get_node_names()\n self.assertEqual(num_node, len(node_names))\n all_power_data = {}\n # Total power consumed will be Socket(s) + DRAM\n for nn in node_names:\n tt = self._output.get_trace_data(node_name=nn)\n\n first_epoch_index = tt.loc[tt['EPOCH_COUNT'] == 0][:1].index[0]\n epoch_dropped_data = tt[first_epoch_index:] # Drop all startup data\n\n power_data = epoch_dropped_data.filter(regex='ENERGY')\n power_data['TIME'] = epoch_dropped_data['TIME']\n power_data = power_data.diff().dropna()\n power_data.rename(columns={'TIME': 'ELAPSED_TIME'}, 
inplace=True)\n power_data = power_data.loc[(power_data != 0).all(axis=1)] # Will drop any row that is all 0's\n\n pkg_energy_cols = [s for s in power_data.keys() if 'ENERGY_PACKAGE' in s]\n dram_energy_cols = [s for s in power_data.keys() if 'ENERGY_DRAM' in s]\n power_data['SOCKET_POWER'] = power_data[pkg_energy_cols].sum(axis=1) / power_data['ELAPSED_TIME']\n power_data['DRAM_POWER'] = power_data[dram_energy_cols].sum(axis=1) / power_data['ELAPSED_TIME']\n power_data['COMBINED_POWER'] = power_data['SOCKET_POWER'] + power_data['DRAM_POWER']\n\n pandas.set_option('display.width', 100)\n launcher.write_log(name, 'Power stats from {} :\\n{}'.format(nn, power_data.describe()))\n\n all_power_data[nn] = power_data\n\n for node_name, power_data in all_power_data.items():\n # Allow for overages of 2% at the 75th percentile.\n self.assertGreater(self._options['power_budget'] * 1.02, power_data['SOCKET_POWER'].quantile(.75))\n\n # TODO Checks on the maximum power computed during the run?\n # TODO Checks to see how much power was left on the table?\n\n @util.skip_unless_run_long_tests()\n @util.skip_unless_batch()\n def test_power_balancer(self):\n name = 'test_power_balancer'\n num_node = 4\n num_rank = 16\n loop_count = 500\n # Require that the balancer moves the maximum dgemm runtime at\n # least 1/4 the distance to the mean dgemm runtime under the\n # governor.\n margin_factor = 0.25\n app_conf = geopmpy.io.BenchConf(name + '_app.config')\n self._tmp_files.append(app_conf.get_path())\n app_conf.append_region('dgemm-imbalance', 8.0)\n app_conf.append_region('all2all', 0.05)\n app_conf.set_loop_count(loop_count)\n\n # Update app config with imbalance\n alloc_nodes = geopm_test_launcher.TestLauncher.get_alloc_nodes()\n for nn in range(len(alloc_nodes) // 2):\n app_conf.append_imbalance(alloc_nodes[nn], 0.5)\n\n fam, mod = geopm_test_launcher.get_platform()\n if fam == 6 and mod == 87:\n # budget for KNL\n power_budget = 130\n else:\n power_budget = 200\n self._options = {'power_budget': power_budget}\n gov_agent_conf_path = name + '_gov_agent.config'\n bal_agent_conf_path = name + '_bal_agent.config'\n self._tmp_files.append(gov_agent_conf_path)\n self._tmp_files.append(bal_agent_conf_path)\n\n agent_list = ['power_governor', 'power_balancer']\n path_dict = {'power_governor': gov_agent_conf_path, 'power_balancer': bal_agent_conf_path}\n agent_runtime = dict()\n for agent in agent_list:\n agent_conf = geopmpy.io.AgentConf(path_dict[agent], agent, self._options)\n run_name = '{}_{}'.format(name, agent)\n report_path = '{}.report'.format(run_name)\n trace_path = '{}.trace'.format(run_name)\n launcher = geopm_test_launcher.TestLauncher(app_conf, agent_conf, report_path,\n trace_path, time_limit=2700)\n launcher.set_num_node(num_node)\n launcher.set_num_rank(num_rank)\n launcher.write_log(run_name, 'Power cap = {}W'.format(power_budget))\n launcher.run(run_name)\n\n self._output = geopmpy.io.AppOutput(report_path, trace_path + '*')\n node_names = self._output.get_node_names()\n self.assertEqual(num_node, len(node_names))\n\n power_limits = []\n # Total power consumed will be Socket(s) + DRAM\n for nn in node_names:\n tt = self._output.get_trace_data(node_name=nn)\n\n first_epoch_index = tt.loc[tt['EPOCH_COUNT'] == 0][:1].index[0]\n epoch_dropped_data = tt[first_epoch_index:] # Drop all startup data\n\n power_data = epoch_dropped_data.filter(regex='ENERGY')\n power_data['TIME'] = epoch_dropped_data['TIME']\n power_data = power_data.diff().dropna()\n power_data.rename(columns={'TIME': 'ELAPSED_TIME'}, 
inplace=True)\n power_data = power_data.loc[(power_data != 0).all(axis=1)] # Will drop any row that is all 0's\n\n pkg_energy_cols = [s for s in power_data.keys() if 'ENERGY_PACKAGE' in s]\n dram_energy_cols = [s for s in power_data.keys() if 'ENERGY_DRAM' in s]\n power_data['SOCKET_POWER'] = power_data[pkg_energy_cols].sum(axis=1) / power_data['ELAPSED_TIME']\n power_data['DRAM_POWER'] = power_data[dram_energy_cols].sum(axis=1) / power_data['ELAPSED_TIME']\n power_data['COMBINED_POWER'] = power_data['SOCKET_POWER'] + power_data['DRAM_POWER']\n\n pandas.set_option('display.width', 100)\n launcher.write_log(name, 'Power stats from {} {} :\\n{}'.format(agent, nn, power_data.describe()))\n\n # Get final power limit set on the node\n if agent == 'power_balancer':\n power_limits.append(epoch_dropped_data['POWER_LIMIT'][-1])\n\n if agent == 'power_balancer':\n avg_power_limit = sum(power_limits) / len(power_limits)\n self.assertTrue(avg_power_limit <= power_budget)\n\n min_runtime = float('nan')\n max_runtime = float('nan')\n node_names = self._output.get_node_names()\n runtime_list = []\n for node_name in node_names:\n epoch_data = self._output.get_report_data(node_name=node_name, region='dgemm')\n runtime_list.append(epoch_data['runtime'].item())\n if agent == 'power_governor':\n mean_runtime = sum(runtime_list) / len(runtime_list)\n max_runtime = max(runtime_list)\n margin = margin_factor * (max_runtime - mean_runtime)\n\n agent_runtime[agent] = max(runtime_list)\n\n self.assertGreater(agent_runtime['power_governor'] - margin,\n agent_runtime['power_balancer'],\n \"governor runtime: {}, balancer runtime: {}, margin: {}\".format(\n agent_runtime['power_governor'], agent_runtime['power_balancer'], margin))\n\n def test_progress_exit(self):\n \"\"\"\n Check that when we always see progress exit before the next entry.\n Make sure that progress only decreases when a new region is entered.\n \"\"\"\n name = 'test_progress_exit'\n report_path = name + '.report'\n trace_path = name + '.trace'\n num_node = 1\n num_rank = 16\n loop_count = 100\n big_o = 0.1\n app_conf = geopmpy.io.BenchConf(name + '_app.config')\n self._tmp_files.append(app_conf.get_path())\n app_conf.set_loop_count(loop_count)\n app_conf.append_region('dgemm-progress', big_o)\n app_conf.append_region('spin-progress', big_o)\n agent_conf = geopmpy.io.AgentConf(name + '_agent.config', self._agent, self._options)\n self._tmp_files.append(agent_conf.get_path())\n launcher = geopm_test_launcher.TestLauncher(app_conf, agent_conf, report_path, trace_path, region_barrier=True)\n launcher.set_num_node(num_node)\n launcher.set_num_rank(num_rank)\n launcher.run(name)\n\n self._output = geopmpy.io.AppOutput(report_path, trace_path + '*')\n node_names = self._output.get_node_names()\n self.assertEqual(num_node, len(node_names))\n\n for nn in node_names:\n tt = self._output.get_trace_data(node_name=nn)\n tt = tt.set_index(['REGION_HASH'], append=True)\n tt = tt.groupby(level=['REGION_HASH'])\n for region_hash, data in tt:\n tmp = data['REGION_PROGRESS'].diff()\n #@todo legacy branch?\n # Look for changes in progress that are more negative\n # than can be expected due to extrapolation error.\n if region_hash == 8300189175:\n negative_progress = tmp.loc[(tmp > -1) & (tmp < -0.1)]\n launcher.write_log(name, '{}'.format(negative_progress))\n self.assertEqual(0, len(negative_progress))\n\n @util.skip_unless_run_long_tests()\n @util.skip_unless_optimized()\n def test_sample_rate(self):\n \"\"\"\n Check that sample rate is regular and fast.\n \"\"\"\n 
name = 'test_sample_rate'\n report_path = name + '.report'\n trace_path = name + '.trace'\n num_node = 1\n num_rank = 16\n loop_count = 10\n big_o = 10.0\n region = 'dgemm-progress'\n max_mean = 0.01 # 10 millisecond max sample period\n max_nstd = 0.1 # 10% normalized standard deviation (std / mean)\n app_conf = geopmpy.io.BenchConf(name + '_app.config')\n self._tmp_files.append(app_conf.get_path())\n app_conf.set_loop_count(loop_count)\n app_conf.append_region(region, big_o)\n agent_conf = geopmpy.io.AgentConf(name + '_agent.config', self._agent, self._options)\n self._tmp_files.append(agent_conf.get_path())\n launcher = geopm_test_launcher.TestLauncher(app_conf, agent_conf, report_path, trace_path)\n launcher.set_num_node(num_node)\n launcher.set_num_rank(num_rank)\n launcher.run(name)\n self._output = geopmpy.io.AppOutput(report_path, trace_path + '*')\n node_names = self._output.get_node_names()\n self.assertEqual(num_node, len(node_names))\n\n for nn in node_names:\n tt = self._output.get_trace_data(node_name=nn)\n delta_t = tt['TIME'].diff()\n delta_t = delta_t.loc[delta_t != 0]\n self.assertGreater(max_mean, delta_t.mean())\n # WARNING : The following line may mask issues in the sampling rate. To do a fine grained analysis, comment\n # out the next line and do NOT run on the BSP. This will require modifications to the launcher or manual testing.\n size_orig = len(delta_t)\n delta_t = delta_t[(delta_t - delta_t.mean()) < 3*delta_t.std()] # Only keep samples within 3 stds of the mean\n self.assertGreater(0.06, 1 - (float(len(delta_t)) / size_orig))\n self.assertGreater(max_nstd, delta_t.std() / delta_t.mean())\n\n def test_network_times(self):\n name = 'test_network_times'\n report_path = name + '.report'\n trace_path = name + '.trace'\n num_node = 4\n num_rank = 16\n app_conf = geopmpy.io.BenchConf(name + '_app.config')\n self._tmp_files.append(app_conf.get_path())\n app_conf.append_region('sleep', 1.0)\n app_conf.append_region('dgemm', 1.0)\n app_conf.append_region('all2all', 1.0)\n agent_conf = geopmpy.io.AgentConf(name + '_agent.config', self._agent, self._options)\n self._tmp_files.append(agent_conf.get_path())\n launcher = geopm_test_launcher.TestLauncher(app_conf, agent_conf, report_path, trace_path)\n launcher.set_num_node(num_node)\n launcher.set_num_rank(num_rank)\n launcher.run(name)\n\n self._output = geopmpy.io.AppOutput(report_path, trace_path + '*')\n node_names = self._output.get_node_names()\n self.assertEqual(len(node_names), num_node)\n for nn in node_names:\n all2all_data = self._output.get_report_data(node_name=nn, region='all2all')\n sleep_data = self._output.get_report_data(node_name=nn, region='sleep')\n dgemm_data = self._output.get_report_data(node_name=nn, region='dgemm')\n barrier_data = self._output.get_report_data(node_name=nn, region='MPI_Barrier')\n unmarked_data = self._output.get_report_data(node_name=nn, region='unmarked-region')\n epoch_data = self._output.get_report_data(node_name=nn, region='epoch')\n app_total = self._output.get_app_total_data(node_name=nn)\n self.assertEqual(0, unmarked_data['count'].item())\n # Since MPI time is is counted if any rank on a node is in\n # an MPI call, but region time is counted only when all\n # ranks on a node are in a region, we must use the\n # unmarked-region time as our error term when comparing\n # MPI time and all2all time.\n mpi_epsilon = max(unmarked_data['runtime'].item() / all2all_data['network_time'].item(), 0.05)\n self.assertNear(all2all_data['network_time'].item(), all2all_data['runtime'].item(), 
mpi_epsilon)\n self.assertNear(all2all_data['network_time'].item() + barrier_data['network_time'].item(),\n epoch_data['network_time'].item())\n # TODO: inconsistent; can we just use _ everywhere?\n self.assertNear(all2all_data['network_time'].item() + barrier_data['network_time'].item(),\n app_total['network-time'].item())\n self.assertEqual(0, unmarked_data['network_time'].item())\n self.assertEqual(0, sleep_data['network_time'].item())\n self.assertEqual(0, dgemm_data['network_time'].item())\n\n def test_ignore_runtime(self):\n name = 'test_ignore_runtime'\n report_path = name + '.report'\n trace_path = name + '.trace'\n num_node = 4\n num_rank = 16\n app_conf = geopmpy.io.BenchConf(name + '_app.config')\n self._tmp_files.append(app_conf.get_path())\n app_conf.append_region('ignore', 1.0)\n app_conf.append_region('dgemm', 1.0)\n app_conf.append_region('all2all', 1.0)\n agent_conf = geopmpy.io.AgentConf(name + '_agent.config', self._agent, self._options)\n self._tmp_files.append(agent_conf.get_path())\n launcher = geopm_test_launcher.TestLauncher(app_conf, agent_conf, report_path, trace_path)\n launcher.set_num_node(num_node)\n launcher.set_num_rank(num_rank)\n launcher.run(name)\n\n self._output = geopmpy.io.AppOutput(report_path, trace_path + '*')\n node_names = self._output.get_node_names()\n self.assertEqual(len(node_names), num_node)\n for nn in node_names:\n ignore_data = self._output.get_report_data(node_name=nn, region='ignore')\n app_data = self._output.get_app_total_data(node_name=nn)\n self.assertNear(ignore_data['runtime'].item(),\n app_data['ignore-runtime'].item(), 0.00005)\n\n @util.skip_unless_config_enable('ompt')\n def test_unmarked_ompt(self):\n name = 'test_unmarked_ompt'\n report_path = name + '.report'\n num_node = 4\n num_rank = 16\n app_conf = geopmpy.io.BenchConf(name + '_app.config')\n self._tmp_files.append(app_conf.get_path())\n app_conf.append_region('stream-unmarked', 1.0)\n app_conf.append_region('dgemm-unmarked', 1.0)\n app_conf.append_region('all2all-unmarked', 1.0)\n agent_conf = geopmpy.io.AgentConf(name + '_agent.config', self._agent, self._options)\n self._tmp_files.append(agent_conf.get_path())\n launcher = geopm_test_launcher.TestLauncher(app_conf, agent_conf, report_path)\n launcher.set_num_node(num_node)\n launcher.set_num_rank(num_rank)\n launcher.run(name)\n\n self._output = geopmpy.io.AppOutput(report_path)\n node_names = self._output.get_node_names()\n self.assertEqual(len(node_names), num_node)\n stream_id = None\n region_names = self._output.get_region_names()\n stream_name = [key for key in region_names if key.lower().find('stream') != -1][0]\n for nn in node_names:\n stream_data = self._output.get_report_data(node_name=nn, region=stream_name)\n found = False\n for name in region_names:\n if stream_name in name: # account for numbers at end of OMPT region names\n found = True\n self.assertTrue(found)\n self.assertEqual(1, stream_data['count'].item())\n if stream_id:\n self.assertEqual(stream_id, stream_data['id'].item())\n else:\n stream_id = stream_data['id'].item()\n ompt_regions = [key for key in region_names if key.startswith('[OMPT]')]\n self.assertLessEqual(2, len(ompt_regions))\n self.assertTrue(('MPI_Alltoall' in region_names))\n gemm_region = [key for key in region_names if key.lower().find('gemm') != -1]\n self.assertLessEqual(1, len(gemm_region))\n\n def _test_agent_frequency_map(self, name, use_env=False):\n min_freq = geopm_test_launcher.geopmread(\"CPUINFO::FREQ_MIN board 0\")\n max_freq = 
geopm_test_launcher.geopmread(\"CPUINFO::FREQ_MAX board 0\")\n sticker_freq = geopm_test_launcher.geopmread(\"CPUINFO::FREQ_STICKER board 0\")\n freq_step = geopm_test_launcher.geopmread(\"CPUINFO::FREQ_STEP board 0\")\n self._agent = \"frequency_map\"\n report_path = name + '.report'\n trace_path = name + '.trace'\n num_node = 1\n num_rank = 4\n loop_count = 5\n dgemm_bigo = 15.0\n stream_bigo = 1.0\n dgemm_bigo_jlse = 35.647\n dgemm_bigo_quartz = 29.12\n stream_bigo_jlse = 1.6225\n stream_bigo_quartz = 1.7941\n hostname = socket.gethostname()\n if hostname.endswith('.alcf.anl.gov'):\n dgemm_bigo = dgemm_bigo_jlse\n stream_bigo = stream_bigo_jlse\n elif hostname.startswith('mcfly'):\n dgemm_bigo = 42.0\n stream_bigo = 1.75\n elif hostname.startswith('quartz'):\n dgemm_bigo = dgemm_bigo_quartz\n stream_bigo = stream_bigo_quartz\n\n app_conf = geopmpy.io.BenchConf(name + '_app.config')\n self._tmp_files.append(app_conf.get_path())\n app_conf.set_loop_count(loop_count)\n app_conf.append_region('dgemm', dgemm_bigo)\n app_conf.append_region('stream', stream_bigo)\n app_conf.append_region('all2all', 1.0)\n app_conf.write()\n freq_map = {}\n freq_map['dgemm'] = sticker_freq\n freq_map['stream'] = sticker_freq - 2 * freq_step\n freq_map['all2all'] = min_freq\n self._options = create_frequency_map_policy(min_freq, max_freq, freq_map, use_env)\n agent_conf = geopmpy.io.AgentConf(name + '_agent.config', self._agent, self._options)\n self._tmp_files.append(agent_conf.get_path())\n launcher = geopm_test_launcher.TestLauncher(app_conf, agent_conf, report_path,\n trace_path, region_barrier=True, time_limit=900)\n launcher.set_num_node(num_node)\n launcher.set_num_rank(num_rank)\n launcher.run(name)\n\n self._output = geopmpy.io.AppOutput(report_path, trace_path + '*')\n node_names = self._output.get_node_names()\n self.assertEqual(len(node_names), num_node)\n regions = self._output.get_region_names()\n for nn in node_names:\n for region_name in regions:\n region_data = self._output.get_report_data(node_name=nn, region=region_name)\n if (region_name in ['dgemm', 'stream', 'all2all']):\n #todo verify trace frequencies\n #todo verify agent report augment frequecies\n msg = region_name + \" frequency should be near assigned map frequency\"\n self.assertNear(region_data['frequency'].item(), freq_map[region_name] / sticker_freq * 100, msg=msg)\n\n def test_agent_frequency_map_env(self):\n \"\"\"\n Test of the FrequencyMapAgent, setting a map through GEOPM_FREQUENCY_MAP.\n \"\"\"\n self._test_agent_frequency_map('test_agent_frequency_map_env', use_env=True)\n\n def test_agent_frequency_map_policy(self):\n \"\"\"\n Test of the FrequencyMapAgent, setting a map through the policy.\n \"\"\"\n self._test_agent_frequency_map('test_agent_frequency_map_policy', use_env=False)\n\n def test_agent_energy_efficient_single_region(self):\n \"\"\"\n Test of the EnergyEfficientAgent against single region loop.\n \"\"\"\n name = 'test_energy_efficient_single_region'\n min_freq = geopm_test_launcher.geopmread(\"CPUINFO::FREQ_MIN board 0\")\n sticker_freq = geopm_test_launcher.geopmread(\"CPUINFO::FREQ_STICKER board 0\")\n freq_step = geopm_test_launcher.geopmread(\"CPUINFO::FREQ_STEP board 0\")\n self._agent = \"energy_efficient\"\n report_path = name + '.report'\n trace_path = name + '.trace'\n num_node = 1\n num_rank = 4\n loop_count = 100\n app_conf = geopmpy.io.BenchConf(name + '_app.config')\n self._tmp_files.append(app_conf.get_path())\n app_conf.set_loop_count(loop_count)\n app_conf.append_region('spin', 0.1)\n 
self._options = {'frequency_min': min_freq,\n 'frequency_max': sticker_freq}\n agent_conf = geopmpy.io.AgentConf(name + '_agent.config', self._agent, self._options)\n self._tmp_files.append(agent_conf.get_path())\n launcher = geopm_test_launcher.TestLauncher(app_conf, agent_conf, report_path, trace_path)\n launcher.set_num_node(num_node)\n launcher.set_num_rank(num_rank)\n launcher.run(name)\n self._output = geopmpy.io.AppOutput(report_path, trace_path + '*')\n node_names = self._output.get_node_names()\n self.assertEqual(len(node_names), num_node)\n regions = self._output.get_region_names()\n for nn in node_names:\n for region_name in regions:\n report = geopmpy.io.RawReport(report_path)\n if (region_name in ['spin']):\n region = report.raw_region(nn, region_name)\n msg = region_name + \" frequency should be minimum frequency as specified by policy\"\n self.assertEqual(region['requested-online-frequency'], min_freq, msg=msg) # freq should reduce\n\n\n @util.skip_unless_run_long_tests()\n @util.skip_unless_cpufreq()\n @util.skip_unless_batch()\n def test_agent_energy_efficient(self):\n \"\"\"\n Test of the EnergyEfficientAgent.\n \"\"\"\n name = 'test_energy_efficient_sticker'\n min_freq = geopm_test_launcher.geopmread(\"CPUINFO::FREQ_MIN board 0\")\n max_freq = geopm_test_launcher.geopmread(\"CPUINFO::FREQ_MAX board 0\")\n sticker_freq = geopm_test_launcher.geopmread(\"CPUINFO::FREQ_STICKER board 0\")\n freq_step = geopm_test_launcher.geopmread(\"CPUINFO::FREQ_STEP board 0\")\n self._agent = \"energy_efficient\"\n num_node = 1\n num_rank = 4\n loop_count = 200\n dgemm_bigo = 15.0\n stream_bigo = 1.0\n dgemm_bigo_jlse = 35.647\n dgemm_bigo_quartz = 29.12\n stream_bigo_jlse = 1.6225\n stream_bigo_quartz = 1.7941\n hostname = socket.gethostname()\n if hostname.endswith('.alcf.anl.gov'):\n dgemm_bigo = dgemm_bigo_jlse\n stream_bigo = stream_bigo_jlse\n elif hostname.startswith('mcfly'):\n dgemm_bigo = 42.0\n stream_bigo = 1.75\n elif hostname.startswith('quartz'):\n dgemm_bigo = dgemm_bigo_quartz\n stream_bigo = stream_bigo_quartz\n\n run = ['_sticker', '_nan_nan']\n for rr in run:\n report_path = name + rr + '.report'\n trace_path = name + rr + '.trace'\n app_conf = geopmpy.io.BenchConf(name + '_app.config')\n self._tmp_files.append(app_conf.get_path())\n app_conf.set_loop_count(loop_count)\n app_conf.append_region('dgemm', dgemm_bigo)\n app_conf.append_region('stream', stream_bigo)\n app_conf.write()\n if rr == '_sticker':\n self._options = {'frequency_min': sticker_freq,\n 'frequency_max': sticker_freq}\n freq = sticker_freq\n else:\n self._options = {'frequency_min': min_freq,\n 'frequency_max': sticker_freq}\n agent_conf = geopmpy.io.AgentConf(name + '_agent.config', self._agent, self._options)\n self._tmp_files.append(agent_conf.get_path())\n launcher = geopm_test_launcher.TestLauncher(app_conf, agent_conf, report_path,\n trace_path, region_barrier=True, time_limit=900)\n launcher.set_num_node(num_node)\n launcher.set_num_rank(num_rank)\n launcher.run(name + rr)\n\n # compare the app_total runtime and energy and assert within bounds\n report_path = name + run[0] + '.report'\n trace_path = name + run[0] + '.trace'\n sticker_out = geopmpy.io.AppOutput(report_path, trace_path + '*')\n report_path = name + run[1] + '.report'\n trace_path = name + run[1] + '.trace'\n nan_out = geopmpy.io.AppOutput(report_path, trace_path + '*')\n for nn in nan_out.get_node_names():\n sticker_app_total = sticker_out.get_app_total_data(node_name=nn)\n nan_app_total = nan_out.get_app_total_data(node_name=nn)\n 
runtime_savings_epoch = (sticker_app_total['runtime'].item() - nan_app_total['runtime'].item()) / sticker_app_total['runtime'].item()\n energy_savings_epoch = (sticker_app_total['energy-package'].item() - nan_app_total['energy-package'].item()) / sticker_app_total['energy-package'].item()\n self.assertLess(-0.1, runtime_savings_epoch) # want -10% or better\n self.assertLess(0.0, energy_savings_epoch)\n\n\nclass TestIntegrationGeopmio(unittest.TestCase):\n ''' Tests of geopmread and geopmwrite.'''\n def setUp(self):\n self.skip_warning_string = 'Incompatible CPU'\n\n def check_output(self, args, expected):\n try:\n proc = subprocess.Popen([self.exec_name] + args,\n stdout=subprocess.PIPE, stderr=subprocess.STDOUT)\n for exp in expected:\n line = proc.stdout.readline()\n while self.skip_warning_string.encode() in line:\n line = proc.stdout.readline()\n self.assertIn(exp.encode(), line)\n for line in proc.stdout:\n if self.skip_warning_string.encode() not in line:\n self.assertNotIn(b'Error', line)\n except subprocess.CalledProcessError as ex:\n sys.stderr.write('{}\\n'.format(ex.output))\n\n def check_output_range(self, args, min_exp, max_exp):\n try:\n proc = subprocess.Popen([self.exec_name] + args,\n stdout=subprocess.PIPE, stderr=subprocess.STDOUT)\n for line in proc.stdout:\n if self.skip_warning_string.encode() in line:\n continue\n if line.startswith(b'0x'):\n value = int(line)\n else:\n value = float(line)\n self.assertLessEqual(min_exp, value, msg=\"Value read for {} smaller than {}: {}.\".format(args, min_exp, value))\n self.assertGreaterEqual(max_exp, value, msg=\"Value read for {} larger than {}: {}.\".format(args, max_exp, value))\n except subprocess.CalledProcessError as ex:\n sys.stderr.write('{}\\n'.format(ex.output))\n\n def check_no_error(self, args):\n try:\n proc = subprocess.Popen([self.exec_name] + args,\n stdout=subprocess.PIPE, stderr=subprocess.STDOUT)\n for line in proc.stdout:\n if self.skip_warning_string.encode() not in line:\n self.assertNotIn(b'Error', line)\n except subprocess.CalledProcessError as ex:\n sys.stderr.write('{}\\n'.format(ex.output))\n\n def test_geopmread_command_line(self):\n '''\n Check that geopmread commandline arguments work.\n '''\n self.exec_name = \"geopmread\"\n\n # no args\n self.check_no_error([])\n\n # domain flag\n self.check_output(['--domain'], ['board', 'package', 'core', 'cpu',\n 'board_memory', 'package_memory',\n 'board_nic', 'package_nic',\n 'board_accelerator', 'package_accelerator'])\n self.check_output(['--domain', 'TIME'], ['cpu'])\n\n # read signal\n self.check_no_error(['TIME', 'board', '0'])\n\n # info\n self.check_no_error(['--info'])\n self.check_output(['--info', 'TIME'], ['Time in seconds'])\n\n # errors\n read_err = 'domain type and domain index are required'\n self.check_output(['TIME'], [read_err])\n self.check_output(['TIME', 'board'], [read_err])\n self.check_output(['TIME', 'board', 'bad'], ['invalid domain index'])\n self.check_output(['FREQUENCY', 'package', '111'], ['cannot read signal'])\n self.check_output(['ENERGY_PACKAGE', 'cpu', '0'], ['cannot read signal'])\n self.check_output(['INVALID', 'board', '0'], ['cannot read signal'])\n self.check_output(['--domain', 'INVALID'], ['unable to determine signal type'])\n self.check_output(['--domain', '--info'], ['info about domain not implemented'])\n\n @util.skip_unless_batch()\n def test_geopmread_all_signal_agg(self):\n '''\n Check that all reported signals can be read for board, aggregating if necessary.\n '''\n self.exec_name = \"geopmread\"\n all_signals 
= []\n try:\n proc = subprocess.Popen([self.exec_name],\n stdout=subprocess.PIPE, stderr=subprocess.STDOUT)\n for line in proc.stdout:\n if self.skip_warning_string.encode() not in line:\n all_signals.append(line.strip())\n except subprocess.CalledProcessError as ex:\n sys.stderr.write('{}\\n'.format(ex.output))\n for sig in all_signals:\n self.check_no_error([sig.decode(), 'board', '0'])\n\n @util.skip_unless_batch()\n def test_geopmread_signal_value(self):\n '''\n Check that some specific signals give a sane value.\n '''\n self.exec_name = \"geopmread\"\n signal_range = {\n \"POWER_PACKAGE\": (20, 400),\n \"FREQUENCY\": (1.0e8, 5.0e9),\n \"TIME\": (0, 10), # time in sec to start geopmread\n \"TEMPERATURE_CORE\": (0, 100)\n }\n\n for signal, val_range in signal_range.items():\n try:\n self.check_no_error([signal, \"board\", \"0\"])\n except:\n raise\n pass # skip missing signals\n else:\n self.check_output_range([signal, \"board\", \"0\"], *val_range)\n\n def test_geopmread_custom_msr(self):\n '''\n Check that MSRIOGroup picks up additional MSRs in path.\n '''\n self.exec_name = \"geopmread\"\n path = os.path.join(\n os.path.dirname(\n os.path.dirname(\n os.path.realpath(__file__))),\n 'examples/custom_msr/')\n custom_env = os.environ.copy()\n custom_env['GEOPM_PLUGIN_PATH'] = path\n all_signals = []\n try:\n proc = subprocess.Popen([self.exec_name], env=custom_env,\n stdout=subprocess.PIPE, stderr=subprocess.STDOUT)\n for line in proc.stdout:\n if self.skip_warning_string.encode() not in line:\n all_signals.append(line.strip())\n except subprocess.CalledProcessError as ex:\n sys.stderr.write('{}\\n'.format(ex.output))\n self.assertIn(b'MSR::CORE_PERF_LIMIT_REASONS#', all_signals)\n\n def test_geopmwrite_command_line(self):\n '''\n Check that geopmwrite commandline arguments work.\n '''\n self.exec_name = \"geopmwrite\"\n\n # no args\n self.check_no_error([])\n\n # domain flag\n self.check_output(['--domain'], ['board', 'package', 'core', 'cpu',\n 'board_memory', 'package_memory',\n 'board_nic', 'package_nic',\n 'board_accelerator', 'package_accelerator'])\n self.check_no_error(['--domain', 'FREQUENCY'])\n\n # info\n self.check_no_error(['--info'])\n self.check_output(['--info', 'FREQUENCY'], ['processor frequency'])\n\n # errors\n write_err = 'domain type, domain index, and value are required'\n self.check_output(['FREQUENCY'], [write_err])\n self.check_output(['FREQUENCY', 'board'], [write_err])\n self.check_output(['FREQUENCY', 'board', '0'], [write_err])\n self.check_output(['FREQUENCY', 'board', 'bad', '0'], ['invalid domain index'])\n self.check_output(['FREQUENCY', 'board', '0', 'bad'], ['invalid write value'])\n self.check_output(['FREQUENCY', 'package', '111', '0'], ['cannot write control'])\n self.check_output(['FREQUENCY', 'board_nic', '0', '0'], ['cannot write control'])\n self.check_output(['INVALID', 'board', '0', '0'], ['cannot write control'])\n self.check_output(['--domain', 'INVALID'], ['unable to determine control type'])\n self.check_output(['--domain', '--info'], ['info about domain not implemented'])\n\n @util.skip_unless_batch()\n def test_geopmwrite_set_freq(self):\n '''\n Check that geopmwrite can be used to set frequency.\n '''\n def read_stdout_line(stdout):\n line = stdout.readline()\n while self.skip_warning_string.encode() in line:\n line = stdout.readline()\n return line.strip()\n\n def read_current_freq(domain, signal='FREQUENCY'):\n read_proc = subprocess.Popen(['geopmread', signal, domain, '0'],\n stdout=subprocess.PIPE, stderr=subprocess.STDOUT)\n freq = 
read_stdout_line(read_proc.stdout)\n freq = float(freq)\n return freq\n\n def read_min_max_freq():\n read_proc = subprocess.Popen(['geopmread', 'CPUINFO::FREQ_MIN', 'board', '0'],\n stdout=subprocess.PIPE, stderr=subprocess.STDOUT)\n min_freq = read_stdout_line(read_proc.stdout)\n min_freq = float(int(float(min_freq)/1e8)*1e8) # convert to multiple of 1e8\n read_proc = subprocess.Popen(['geopmread', 'CPUINFO::FREQ_MAX', 'board', '0'],\n stdout=subprocess.PIPE, stderr=subprocess.STDOUT)\n max_freq = read_stdout_line(read_proc.stdout)\n max_freq = float(int(float(max_freq)/1e8)*1e8)\n return min_freq, max_freq\n\n self.exec_name = \"geopmwrite\"\n\n read_proc = subprocess.Popen(['geopmread', '--domain', 'FREQUENCY'],\n stdout=subprocess.PIPE, stderr=subprocess.STDOUT)\n read_domain = read_stdout_line(read_proc.stdout).decode()\n write_proc = subprocess.Popen([self.exec_name, '--domain', 'FREQUENCY'],\n stdout=subprocess.PIPE, stderr=subprocess.STDOUT)\n write_domain = read_stdout_line(write_proc.stdout).decode()\n min_freq, max_freq = read_min_max_freq()\n\n old_freq = read_current_freq(write_domain, 'MSR::PERF_CTL:FREQ')\n self.assertLess(old_freq, max_freq * 2)\n self.assertGreater(old_freq, min_freq - 1e8)\n\n # set to min and check\n self.check_no_error(['FREQUENCY', write_domain, '0', str(min_freq)])\n result = read_current_freq(read_domain)\n self.assertEqual(min_freq, result)\n # set to max and check\n self.check_no_error(['FREQUENCY', write_domain, '0', str(max_freq)])\n result = read_current_freq(read_domain)\n self.assertEqual(max_freq, result)\n\n self.check_no_error(['FREQUENCY', write_domain, '0', str(old_freq)])\n\n\nclass TestIntegrationGeopmagent(unittest.TestCase):\n ''' Tests of geopmagent.'''\n def setUp(self):\n self.exec_name = 'geopmagent'\n self.skip_warning_string = 'Incompatible CPU frequency driver/governor'\n\n def check_output(self, args, expected):\n try:\n proc = subprocess.Popen([self.exec_name] + args,\n stdout=subprocess.PIPE, stderr=subprocess.STDOUT)\n for exp in expected:\n line = proc.stdout.readline()\n while self.skip_warning_string.encode() in line or line == b'\\n':\n line = proc.stdout.readline()\n self.assertIn(exp.encode(), line)\n for line in proc.stdout:\n if self.skip_warning_string.encode() not in line:\n self.assertNotIn(b'Error', line)\n except subprocess.CalledProcessError as ex:\n sys.stderr.write('{}\\n'.format(ex.output))\n\n def check_json_output(self, args, expected):\n try:\n proc = subprocess.Popen([self.exec_name] + args,\n stdout=subprocess.PIPE, stderr=subprocess.STDOUT)\n except subprocess.CalledProcessError as ex:\n sys.stderr.write('{}\\n'.format(ex.output))\n line = proc.stdout.readline()\n while self.skip_warning_string.encode() in line or line == b'\\n':\n line = proc.stdout.readline()\n try:\n out_json = json.loads(line.decode())\n except ValueError:\n self.fail('Could not convert json string: {}\\n'.format(line))\n self.assertEqual(expected, out_json)\n for line in proc.stdout:\n if self.skip_warning_string.encode() not in line:\n self.assertNotIn(b'Error', line)\n\n def check_no_error(self, args):\n try:\n proc = subprocess.Popen([self.exec_name] + args,\n stdout=subprocess.PIPE, stderr=subprocess.STDOUT)\n for line in proc.stdout:\n if self.skip_warning_string.encode() not in line:\n self.assertNotIn(b'Error', line)\n except subprocess.CalledProcessError as ex:\n sys.stderr.write('{}\\n'.format(ex.output))\n\n def test_geopmagent_command_line(self):\n '''\n Check that geopmagent commandline arguments work.\n '''\n # no 
args\n agent_names = ['monitor', 'power_balancer', 'power_governor',\n 'energy_efficient', 'frequency_map']\n self.check_output([], agent_names)\n\n # help message\n self.check_output(['--help'], ['Usage'])\n\n # version\n self.check_no_error(['--version'])\n\n # agent policy and sample names\n for agent in agent_names:\n self.check_output(['--agent', agent],\n ['Policy', 'Sample'])\n\n # policy file\n self.check_json_output(['--agent', 'monitor', '--policy', 'None'],\n {})\n self.check_json_output(['--agent', 'power_governor', '--policy', '150'],\n {'POWER_PACKAGE_LIMIT_TOTAL': 150})\n # default value policy\n self.check_json_output(['--agent', 'power_governor', '--policy', 'NAN'],\n {'POWER_PACKAGE_LIMIT_TOTAL': 'NAN'})\n self.check_json_output(['--agent', 'power_governor', '--policy', 'nan'],\n {'POWER_PACKAGE_LIMIT_TOTAL': 'NAN'})\n self.check_json_output(['--agent', 'energy_efficient', '--policy', 'nan,nan'],\n {'FREQ_MIN': 'NAN', 'FREQ_MAX': 'NAN'})\n self.check_json_output(['--agent', 'energy_efficient', '--policy', '1.2e9,nan'],\n {'FREQ_MIN': 1.2e9, 'FREQ_MAX': 'NAN'})\n self.check_json_output(['--agent', 'energy_efficient', '--policy', 'nan,1.3e9'],\n {'FREQ_MIN': 'NAN', 'FREQ_MAX': 1.3e9})\n # unspecified policy values are accepted\n self.check_json_output(['--agent', 'power_balancer', '--policy', '150'],\n {'POWER_PACKAGE_LIMIT_TOTAL': 150})\n\n # errors\n self.check_output(['--agent', 'power_governor', '--policy', 'None'],\n ['not a valid floating-point number', 'Invalid argument'])\n self.check_output(['--agent', 'monitor', '--policy', '300'],\n ['agent takes no parameters', 'Invalid argument'])\n self.check_output(['--agent', 'energy_efficient', '--policy', '2.0e9,5.0e9,4.5e9,6.7,4.2'],\n ['Number of policies', 'Invalid argument'])\n\n\nif __name__ == '__main__':\n unittest.main()\n"
] | [
[
"pandas.set_option",
"pandas.concat",
"pandas.DataFrame"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"0.19",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
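The geopm integration-test code captured in the record above repeatedly launches a command-line tool with subprocess.Popen, filters a known warning line out of its combined stdout/stderr, and then asserts that no error text appears. A minimal standalone sketch of that pattern, using only the standard library and a hypothetical executable name (geopmread may not exist on the reader's machine), might look like this:

import subprocess
import sys

SKIP_WARNING = 'Incompatible CPU frequency driver/governor'

def run_and_collect(exec_name, args=()):
    """Run a CLI tool and return its output lines, minus known warning lines."""
    lines = []
    try:
        proc = subprocess.Popen([exec_name, *args],
                                stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
        for line in proc.stdout:
            if SKIP_WARNING.encode() not in line:
                lines.append(line.strip())
    except subprocess.CalledProcessError as ex:
        sys.stderr.write('{}\n'.format(ex.output))
    return lines

# Hypothetical usage: assert no error text shows up in the tool's output.
if __name__ == '__main__':
    for line in run_and_collect('geopmread'):
        assert b'Error' not in line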
lilleswing/Reinvent-1 | [
"ac4e3e6fa6379c6f4af883478dfd1b3407933ada",
"ac4e3e6fa6379c6f4af883478dfd1b3407933ada",
"ac4e3e6fa6379c6f4af883478dfd1b3407933ada",
"ac4e3e6fa6379c6f4af883478dfd1b3407933ada"
] | [
"running_modes/utils/general.py",
"running_modes/transfer_learning/link_invent_actions/collect_stats.py",
"running_modes/transfer_learning/link_invent_transfer_learning_runner.py",
"running_modes/curriculum_learning/logging/remote_curriculum_logger.py"
] | [
"import time\n\nimport numpy as np\nimport torch\n\n\ndef to_tensor(tensor):\n if isinstance(tensor, np.ndarray):\n tensor = torch.from_numpy(tensor)\n if torch.cuda.is_available():\n return torch.autograd.Variable(tensor).cuda()\n return torch.autograd.Variable(tensor)\n\n\ndef set_default_device_cuda():\n \"\"\"Sets the default device (cpu or cuda) used for all tensors.\"\"\"\n if torch.cuda.is_available() == False:\n tensor = torch.FloatTensor\n torch.set_default_tensor_type(tensor)\n return False\n else: # device_name == \"cuda\":\n tensor = torch.cuda.FloatTensor # pylint: disable=E1101\n torch.set_default_tensor_type(tensor)\n return True\n\n\ndef estimate_run_time(start_time, n_steps, step):\n time_elapsed = int(time.time() - start_time)\n time_left = (time_elapsed * ((n_steps - step) / (step + 1)))\n summary = {\"elapsed\": time_elapsed, \"left\": time_left}\n return summary",
"import random\nfrom reinvent_chemistry import TransformationTokens\nfrom reinvent_chemistry.library_design import BondMaker, AttachmentPoints\nfrom reinvent_chemistry.conversions import Conversions\nfrom typing import List, Optional\n\nimport numpy as np\nimport scipy.stats as sps\nfrom reinvent_models.model_factory.generative_model_base import GenerativeModelBase\nfrom reinvent_models.link_invent.dto import SampledSequencesDTO\n\nfrom running_modes.transfer_learning.dto.collected_stats_dto import CollectedStatsDTO\nfrom running_modes.transfer_learning.dto.sampled_stats_dto import SampledStatsDTO\nfrom running_modes.transfer_learning.link_invent_actions.base_action import BaseAction\nfrom running_modes.transfer_learning.logging.base_transfer_learning_logger import BaseTransferLearningLogger\n\n\nclass CollectStats(BaseAction):\n def __init__(self, model: GenerativeModelBase, training_data: List[List[str]],\n validation_data: Optional[List[List[str]]], logger: BaseTransferLearningLogger, sample_size,\n initialize_data_loader_func):\n\n BaseAction.__init__(self, logger=logger)\n\n self._model = model\n self._training_data = training_data\n self._validation_data = validation_data\n self._sample_size = sample_size\n self._get_data_loader = initialize_data_loader_func\n\n self._bond_maker = BondMaker()\n self._attachment_points = AttachmentPoints()\n self._conversions = Conversions()\n self._tokens = TransformationTokens()\n\n def run(self) -> CollectedStatsDTO:\n\n self._logger.log_message(\"Collecting stats\")\n\n # collect training stats\n training_data_loader = self._get_data_loader(self._get_subset(self._training_data), batch_size=128,\n shuffle=False)\n training_nll_list, training_sampled_stats = self._calc_stats(training_data_loader)\n\n if self._validation_data is not None:\n validation_data_loader = self._get_data_loader(self._get_subset(self._validation_data), batch_size=128,\n shuffle=False)\n validation_nll_list, validation_sampled_stats = self._calc_stats(validation_data_loader)\n dist = [training_sampled_stats.nll_input_sampled_target, validation_sampled_stats.nll_input_sampled_target,\n training_nll_list, validation_nll_list]\n else:\n validation_nll_list = None\n validation_sampled_stats = None\n dist = [training_sampled_stats.nll_input_sampled_target, training_nll_list]\n\n stats = CollectedStatsDTO(jsd_binned=self._jsd(dist, binned=True), jsd_un_binned=self._jsd(dist, binned=False),\n nll=training_nll_list, training_stats=training_sampled_stats,\n validation_nll=validation_nll_list, validation_stats=validation_sampled_stats)\n return stats\n\n def _get_subset(self, data: List):\n subset = list(random.sample(data, self._sample_size))\n return subset\n\n def _calc_stats(self, data_loader):\n sampled_sequence_list = []\n nll_list = []\n for warhead_batch, linker_batch in data_loader:\n sampled_sequence_list += self._model.sample(*warhead_batch)\n nll_list += list(self._model.likelihood(*warhead_batch, *linker_batch).data.cpu().numpy())\n sample_stats = self._get_sampled_stats(sampled_sequence_list)\n return nll_list, sample_stats\n\n def _jsd(self, dists, binned=False):\n min_size = min(len(dist) for dist in dists)\n dists = [dist[:min_size] for dist in dists]\n if binned:\n dists = [self._bin_dist(dist) for dist in dists]\n num_dists = len(dists)\n avg_dist = np.sum(dists, axis=0) / num_dists\n return sum((sps.entropy(dist, avg_dist) for dist in dists)) / num_dists\n\n @staticmethod\n def _bin_dist(dist, bins=1000, dist_range=(0, 100)):\n bins = np.histogram(dist, bins=bins, 
range=dist_range, density=False)[0]\n bins[bins == 0] = 1\n return bins / bins.sum()\n\n def _get_sampled_stats(self, sampled_sequence_list: List[SampledSequencesDTO]) -> SampledStatsDTO:\n nll_list = []\n molecule_smiles_list = []\n molecule_parts_smiles_list = []\n for sample in sampled_sequence_list:\n\n nll_list.append(sample.nll)\n\n labeled_linker = self._attachment_points.add_attachment_point_numbers(sample.output, canonicalize=False)\n molecule = self._bond_maker.join_scaffolds_and_decorations(labeled_linker, sample.input)\n molecule_smiles = self._conversions.mol_to_smiles(molecule) if molecule else None\n molecule_is_valid = True if molecule_smiles else False\n molecule_parts_smiles = sample.input + self._tokens.ATTACHMENT_SEPARATOR_TOKEN + sample.output\n\n if molecule_is_valid:\n molecule_smiles_list.append(molecule_smiles)\n molecule_parts_smiles_list.append(molecule_parts_smiles)\n\n sample_stats = SampledStatsDTO(nll_input_sampled_target=nll_list,\n molecule_smiles=molecule_smiles_list,\n molecule_parts_smiles=molecule_parts_smiles_list,\n valid_fraction=len(molecule_smiles_list) / len(nll_list) * 100)\n return sample_stats\n\n",
"import glob\nimport itertools as it\nimport os\n\nfrom torch.utils.data import DataLoader\nfrom reinvent_chemistry.file_reader import FileReader\nfrom reinvent_models.link_invent.dataset.paired_dataset import PairedDataset\nfrom reinvent_models.model_factory.generative_model_base import GenerativeModelBase\n\nfrom running_modes.configurations.transfer_learning.link_invent_transfer_learning_configuration import \\\n LinkInventTransferLearningConfiguration\nfrom running_modes.constructors.base_running_mode import BaseRunningMode\nfrom running_modes.transfer_learning.link_invent_actions.collect_stats import CollectStats\nfrom running_modes.transfer_learning.link_invent_actions.train_epoch import TrainEpoch\nfrom running_modes.transfer_learning.logging.local_link_invent_transfer_learning_logger import \\\n LocalLinkInventTransferLearningLogger\n\n\nclass LinkInventTransferLearningRunner(BaseRunningMode):\n def __init__(self, model: GenerativeModelBase, configuration: LinkInventTransferLearningConfiguration, optimizer,\n learning_rate_scheduler, logger: LocalLinkInventTransferLearningLogger):\n self._model = model\n self._config = configuration\n self._optimizer = optimizer\n self._logger = logger\n self._lr_scheduler = learning_rate_scheduler\n\n self._reader = FileReader([], self._logger)\n self._training_data_sets = self._load_data_set(self._config.input_smiles_path)\n self._validation_data_set = self._load_data_set(self._config.validation_smiles_path) if \\\n self._config.validation_smiles_path else None\n\n self._trained_model_path = os.path.join(self._config.output_path, 'trained_models')\n\n def run(self):\n self._set_up_output_folder()\n for epoch in self._get_epoch_range():\n\n self._logger.log_message(f'Working on epoch {epoch}')\n\n training_data = next(self._training_data_sets)\n validation_data = next(self._validation_data_set) if self._validation_data_set else None\n self._train_epoch(training_data)\n\n self._logging_stats(training_data=training_data, validation_data=validation_data, epoch=epoch,\n learning_rate=self._lr_scheduler.optimizer.param_groups[0][\"lr\"])\n self._save_model_checkpoint(epoch=epoch)\n\n self._lr_scheduler.step()\n\n terminate = self._check_termination_criteria(epoch, self._lr_scheduler.optimizer.param_groups[0][\"lr\"])\n\n if terminate:\n self._model.save_to_file(os.path.join(self._config.output_path, self._config.model_file_name))\n break\n\n def _set_up_output_folder(self):\n os.makedirs(self._config.output_path, exist_ok=True)\n if self._config.save_model_frequency > 0:\n os.makedirs(self._trained_model_path, exist_ok=True)\n\n def _train_epoch(self, training_data):\n data_loader = self._initialize_data_loader(training_data, self._config.batch_size, drop_last=True)\n train_epoch_action = TrainEpoch(model=self._model, configuration=self._config, logger=self._logger,\n optimizer=self._optimizer, training_data_data_loader=data_loader,\n lr_scheduler=self._lr_scheduler)\n train_epoch_action.run()\n\n def _collect_stats(self, training_data, validation_data):\n stats_collector = CollectStats(model=self._model, training_data=training_data, validation_data=validation_data,\n logger=self._logger, sample_size=self._config.sample_size,\n initialize_data_loader_func=self._initialize_data_loader)\n stats = stats_collector.run()\n return stats\n\n def _check_termination_criteria(self, epoch, new_lr):\n terminate_flag = False\n self._lr_scheduler.step()\n if new_lr < self._config.learning_rate.min:\n self._logger.log_message(\"Reached LR minimum. 
Saving and terminating.\")\n terminate_flag = True\n elif epoch == self._config.num_epochs:\n self._logger.log_message(f\"Reached maximum number of epochs ({epoch}). Saving and terminating.\")\n terminate_flag = True\n return terminate_flag\n\n def _logging_stats(self, training_data, validation_data, epoch: int, learning_rate: float):\n if self._config.collect_stats_frequency > 0 and epoch % self._config.collect_stats_frequency == 0:\n collected_stats = self._collect_stats(training_data, validation_data)\n self._logger.log_time_step(epoch=epoch, learning_rate=learning_rate, collected_stats=collected_stats,\n model=self._model)\n\n def _save_model_checkpoint(self, epoch):\n if self._config.save_model_frequency > 0 and epoch % self._config.save_model_frequency == 0:\n self._logger.log_message('Save model checkpoint')\n self._model.save_to_file(os.path.join(self._trained_model_path, f'model_{epoch:03d}.ckpt'))\n\n def _load_data_set(self, path_to_data_set: str):\n\n if os.path.isdir(path_to_data_set):\n file_paths = sorted(glob.glob(f\"{path_to_data_set}/*.smi\"))\n elif os.path.isfile(path_to_data_set):\n file_paths = [path_to_data_set]\n else:\n raise ValueError('path_to_data_set needs to be the path to a file or a folder')\n\n for path in it.cycle(file_paths): # stores the path instead of the set\n dataset = list(self._reader.read_library_design_data_file(path, num_fields=2))\n\n if len(dataset) == 0:\n raise IOError(f\"No valid entries are present in the supplied file: {path}\")\n\n yield dataset\n\n def _get_epoch_range(self) -> range:\n last_epoch = self._config.starting_epoch + self._config.num_epochs - 1\n epoch_range = range(self._config.starting_epoch, last_epoch + 1)\n return epoch_range\n\n def _initialize_data_loader(self, data_set, batch_size, shuffle: bool = True, drop_last: bool = False):\n data_set = PairedDataset(input_target_smi_list=data_set, vocabulary=self._model.get_vocabulary())\n data_loader = DataLoader(data_set, batch_size=batch_size, shuffle=shuffle,\n collate_fn=PairedDataset.collate_fn, drop_last=drop_last)\n return data_loader\n",
"import os\n\nimport numpy as np\nimport requests\nimport torch\n\nimport running_modes.utils.configuration as ull\nimport running_modes.utils.general\nimport reinvent_chemistry.logging as ul_rl\nfrom running_modes.configurations.general_configuration_envelope import GeneralConfigurationEnvelope\nfrom running_modes.curriculum_learning.logging import BaseCurriculumLogger\nfrom reinvent_scoring.scoring.diversity_filters.reinvent_core.base_diversity_filter import BaseDiversityFilter\nfrom reinvent_scoring.scoring.score_summary import FinalSummary\nfrom reinvent_scoring.scoring.enums.scoring_function_component_enum import ScoringFunctionComponentNameEnum\nfrom running_modes.configurations.logging import get_remote_logging_auth_token\n\n\nclass RemoteCurriculumLogger(BaseCurriculumLogger):\n def __init__(self, configuration: GeneralConfigurationEnvelope):\n super().__init__(configuration)\n self._rows = 2\n self._columns = 5\n self._sample_size = self._rows * self._columns\n self._sf_component_enum = ScoringFunctionComponentNameEnum()\n self._is_dev = ull._is_development_environment()\n\n def log_message(self, message: str):\n self._logger.info(message)\n\n def timestep_report(self, start_time, n_steps, step, smiles,\n mean_score: np.array, score_summary: FinalSummary, score,\n agent_likelihood: torch.tensor, prior_likelihood: torch.tensor,\n augmented_likelihood: torch.tensor, diversity_filter: BaseDiversityFilter):\n score_components = self._score_summary_breakdown(score_summary, mean_score, diversity_filter)\n learning_curves = self._learning_curve_profile(agent_likelihood, prior_likelihood, augmented_likelihood)\n structures_table = self._visualize_structures(smiles, score, score_summary)\n smiles_report = self._create_sample_report(smiles, score, score_summary)\n\n time_estimation = running_modes.utils.general.estimate_run_time(start_time, n_steps, step)\n data = self._assemble_timestep_report(step, score_components, structures_table, learning_curves,\n time_estimation, ul_rl.fraction_valid_smiles(smiles), smiles_report)\n self._notify_server(data, self._log_config.recipient)\n\n def save_final_state(self, agent, scaffold_filter):\n agent.save(os.path.join(self._log_config.result_folder, 'Agent.ckpt'))\n self.save_diversity_memory(scaffold_filter)\n\n def _notify_server(self, data, to_address):\n \"\"\"This is called every time we are posting data to server\"\"\"\n try:\n self._logger.warning(f\"posting to {to_address}\")\n headers = {\n 'Accept': 'application/json', 'Content-Type': 'application/json',\n 'Authorization': get_remote_logging_auth_token()\n }\n response = requests.post(to_address, json=data, headers=headers)\n\n if self._is_dev:\n \"\"\"logs out the response content only when running a test instance\"\"\"\n if response.status_code == requests.codes.ok:\n self._logger.info(f\"SUCCESS: {response.status_code}\")\n self._logger.info(response.content)\n else:\n self._logger.info(f\"PROBLEM: {response.status_code}\")\n self._logger.exception(data, exc_info=False)\n except Exception as t_ex:\n self._logger.exception(\"Exception occurred\", exc_info=True)\n self._logger.exception(f\"Attempted posting the following data:\")\n self._logger.exception(data, exc_info=False)\n\n def _get_matching_substructure_from_config(self, score_summary: FinalSummary):\n smarts_pattern = \"\"\n for summary_component in score_summary.scaffold_log:\n if summary_component.parameters.component_type == self._sf_component_enum.MATCHING_SUBSTRUCTURE:\n smarts = 
summary_component.parameters.specific_parameters.get(self._specific_parameters_enum.SMILES, [])\n if len(smarts) > 0:\n smarts_pattern = smarts[0]\n return smarts_pattern\n\n def _visualize_structures(self, smiles, score, score_summary: FinalSummary):\n score, smiles = ul_rl.sort_smiles_by_score(score, smiles)\n smiles = ul_rl.padding_with_invalid_smiles(smiles, self._sample_size)\n list_of_mols, legend = ul_rl.check_for_invalid_mols_and_create_legend(smiles, score, self._sample_size)\n smarts_pattern = self._get_matching_substructure_from_config(score_summary)\n pattern = ul_rl.find_matching_pattern_in_smiles(list_of_mols=list_of_mols, smarts_pattern=smarts_pattern)\n mol_in_base64_string = ul_rl.mol_to_png_string(list_of_mols, molsPerRow=self._columns, subImgSize=(300, 300),\n legend=legend, matches=pattern)\n return mol_in_base64_string\n\n def _create_sample_report(self, smiles, score, score_summary: FinalSummary):\n score, smiles = ul_rl.sort_smiles_by_score(score, smiles)\n smiles = ul_rl.padding_with_invalid_smiles(smiles, self._sample_size)\n _, legend = ul_rl.check_for_invalid_mols_and_create_legend(smiles, score, self._sample_size)\n smarts_pattern = self._get_matching_substructure_from_config(score_summary)\n\n smiles_legend_pairs = [{\"smiles\": smiles[indx], \"legend\": legend[indx]} for indx in range(self._sample_size)]\n\n report = {\n \"smarts_pattern\": smarts_pattern,\n \"smiles_legend_pairs\": smiles_legend_pairs\n }\n return report\n\n def _learning_curve_profile(self, agent_likelihood, prior_likelihood, augmented_likelihood):\n learning_curves = {\n \"prior\": float(np.float(prior_likelihood.detach().mean().cpu())),\n \"augmented\": float(np.float(augmented_likelihood.detach().mean().cpu())),\n \"agent\": float(np.float(agent_likelihood.detach().mean().cpu()))\n }\n return learning_curves\n\n def _score_summary_breakdown(self, score_summary: FinalSummary, mean_score: np.array,\n diversity_filter: BaseDiversityFilter):\n score_components = {}\n for i, log in enumerate(score_summary.profile):\n score_components[f\"{score_summary.profile[i].component_type}:{score_summary.profile[i].name}\"] = \\\n float(np.mean(score_summary.profile[i].score))\n score_components[\"total_score:total_score\"] = float(mean_score)\n score_components[\"collected smiles in memory\"] = diversity_filter.number_of_smiles_in_memory()\n return score_components\n\n def _assemble_timestep_report(self, step, score_components, structures_table, learning_curves, time_estimation,\n fraction_valid_smiles, smiles_report):\n actual_step = step + 1\n timestep_report = {\"step\": actual_step,\n \"components\": score_components,\n # \"structures\": structures_table,\n \"learning\": learning_curves,\n \"time_estimation\": time_estimation,\n \"fraction_valid_smiles\": fraction_valid_smiles,\n \"smiles_report\": smiles_report\n }\n return timestep_report\n"
] | [
[
"torch.set_default_tensor_type",
"torch.from_numpy",
"torch.cuda.is_available",
"torch.autograd.Variable"
],
[
"scipy.stats.entropy",
"numpy.histogram",
"numpy.sum"
],
[
"torch.utils.data.DataLoader"
],
[
"numpy.mean"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"1.6",
"0.14",
"1.10",
"0.15",
"1.4",
"0.16",
"1.9",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"0.12",
"1.0",
"0.17",
"1.3",
"1.8"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
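The CollectStats action in the Reinvent record above compares NLL distributions with a Jensen-Shannon-style divergence: the distributions are truncated to a common length, optionally binned into normalized histograms, and the mean KL divergence to the average distribution is taken. A rough standalone sketch of that computation, assuming plain NumPy arrays as input rather than the project's DTO objects, could be:

import numpy as np
import scipy.stats as sps

def bin_dist(dist, bins=1000, dist_range=(0, 100)):
    # Histogram the raw values and normalize to a probability vector;
    # empty bins get one count to avoid log(0) inside the entropy call.
    counts = np.histogram(dist, bins=bins, range=dist_range, density=False)[0]
    counts[counts == 0] = 1
    return counts / counts.sum()

def jsd(dists, binned=False):
    # Truncate all distributions to the same length before comparing.
    min_size = min(len(d) for d in dists)
    dists = [np.asarray(d[:min_size], dtype=float) for d in dists]
    if binned:
        dists = [bin_dist(d) for d in dists]
    avg = np.sum(dists, axis=0) / len(dists)
    # scipy.stats.entropy(p, q) is the KL divergence D(p || q).
    return sum(sps.entropy(d, avg) for d in dists) / len(dists)

# Hypothetical usage with two fake NLL samples:
a = np.random.uniform(0, 100, size=500)
b = np.random.uniform(0, 100, size=600)
print(jsd([a, b], binned=True))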
Shreyashwaghe/monk_v1 | [
"62f34a52f242772186ffff7e56764e958fbcd920",
"62f34a52f242772186ffff7e56764e958fbcd920",
"62f34a52f242772186ffff7e56764e958fbcd920",
"62f34a52f242772186ffff7e56764e958fbcd920",
"62f34a52f242772186ffff7e56764e958fbcd920",
"62f34a52f242772186ffff7e56764e958fbcd920",
"62f34a52f242772186ffff7e56764e958fbcd920"
] | [
"monk/system_unit_tests/pytorch/test_block_resnet_v2.py",
"monk/system_unit_tests/pytorch/test_layer_average_pooling1d.py",
"monk/system_unit_tests/gluon/test_layer_max_pooling1d.py",
"monk/system_unit_tests/pytorch/test_activation_tanh.py",
"monk/system_unit_tests/gluon/test_layer_convolution2d.py",
"monk/system_unit_tests/pytorch/test_layer_transposed_convolution1d.py",
"monk/system_unit_tests/keras/test_layer_concatenate.py"
] | [
"import os\nimport sys\nsys.path.append(\"../../../monk/\");\nimport psutil\n\nfrom pytorch_prototype import prototype\nfrom compare_prototype import compare\nfrom common import print_start\nfrom common import print_status\n\nimport torch\nimport numpy as np\nfrom pytorch.losses.return_loss import load_loss\n\n\ndef test_block_resnet_v2(system_dict):\n forward = True;\n\n test = \"test_block_resnet_v2\";\n system_dict[\"total_tests\"] += 1;\n print_start(test, system_dict[\"total_tests\"])\n if(forward):\n try:\n gtf = prototype(verbose=0);\n gtf.Prototype(\"sample-project-1\", \"sample-experiment-1\");\n\n\n network = [];\n network.append(gtf.resnet_v2_block(output_channels=32, stride=1, downsample=True));\n network.append(gtf.resnet_v2_block(output_channels=32, stride=1, downsample=False));\n gtf.Compile_Network(network, data_shape=(1, 64, 64), use_gpu=False);\n\n x = torch.randn(1, 1, 64, 64);\n y = gtf.system_dict[\"local\"][\"model\"](x); \n\n system_dict[\"successful_tests\"] += 1;\n print_status(\"Pass\");\n\n except Exception as e:\n system_dict[\"failed_tests_exceptions\"].append(e);\n system_dict[\"failed_tests_lists\"].append(test);\n forward = False;\n print_status(\"Fail\");\n else:\n system_dict[\"skipped_tests_lists\"].append(test);\n print_status(\"Skipped\");\n\n return system_dict\n",
"import os\nimport sys\nsys.path.append(\"../../../monk/\");\nimport psutil\n\nfrom pytorch_prototype import prototype\nfrom compare_prototype import compare\nfrom common import print_start\nfrom common import print_status\n\nimport torch\nimport numpy as np\nfrom pytorch.losses.return_loss import load_loss\n\n\ndef test_layer_average_pooling1d(system_dict):\n forward = True;\n\n test = \"test_layer_average_pooling1d\";\n system_dict[\"total_tests\"] += 1;\n print_start(test, system_dict[\"total_tests\"])\n if(forward):\n try:\n gtf = prototype(verbose=0);\n gtf.Prototype(\"sample-project-1\", \"sample-experiment-1\");\n\n\n network = [];\n network.append(gtf.average_pooling1d());\n gtf.Compile_Network(network, data_shape=(3, 128), use_gpu=False);\n\n x = torch.randn(1, 3, 128);\n y = gtf.system_dict[\"local\"][\"model\"](x); \n\n system_dict[\"successful_tests\"] += 1;\n print_status(\"Pass\");\n\n except Exception as e:\n system_dict[\"failed_tests_exceptions\"].append(e);\n system_dict[\"failed_tests_lists\"].append(test);\n forward = False;\n print_status(\"Fail\");\n else:\n system_dict[\"skipped_tests_lists\"].append(test);\n print_status(\"Skipped\");\n\n return system_dict\n",
"import os\nimport sys\nsys.path.append(\"../../../monk/\");\nimport psutil\n\nfrom gluon_prototype import prototype\nfrom compare_prototype import compare\nfrom common import print_start\nfrom common import print_status\n\nimport mxnet as mx\nimport numpy as np\nfrom gluon.losses.return_loss import load_loss\n\n\ndef test_layer_max_pooling1d(system_dict):\n forward = True;\n\n test = \"test_layer_max_pooling1d\";\n system_dict[\"total_tests\"] += 1;\n print_start(test, system_dict[\"total_tests\"])\n if(forward):\n try:\n gtf = prototype(verbose=0);\n gtf.Prototype(\"sample-project-1\", \"sample-experiment-1\");\n\n\n network = [];\n network.append(gtf.max_pooling1d());\n gtf.Compile_Network(network, use_gpu=False);\n\n x = np.random.rand(1, 64, 4);\n x = mx.nd.array(x);\n y = gtf.system_dict[\"local\"][\"model\"].forward(x); \n\n system_dict[\"successful_tests\"] += 1;\n print_status(\"Pass\");\n\n except Exception as e:\n system_dict[\"failed_tests_exceptions\"].append(e);\n system_dict[\"failed_tests_lists\"].append(test);\n forward = False;\n print_status(\"Fail\");\n else:\n system_dict[\"skipped_tests_lists\"].append(test);\n print_status(\"Skipped\");\n\n return system_dict\n",
"import os\nimport sys\nsys.path.append(\"../../../monk/\");\nimport psutil\n\nfrom pytorch_prototype import prototype\nfrom compare_prototype import compare\nfrom common import print_start\nfrom common import print_status\n\nimport torch\nimport numpy as np\nfrom pytorch.losses.return_loss import load_loss\n\n\ndef test_activation_tanh(system_dict):\n forward = True;\n\n test = \"test_activation_tanh\";\n system_dict[\"total_tests\"] += 1;\n print_start(test, system_dict[\"total_tests\"])\n if(forward):\n try:\n gtf = prototype(verbose=0);\n gtf.Prototype(\"sample-project-1\", \"sample-experiment-1\");\n\n\n network = [];\n network.append(gtf.tanh());\n gtf.Compile_Network(network, data_shape=(3, 64, 64), use_gpu=False);\n\n x = torch.randn(1, 3, 64, 64);\n y = gtf.system_dict[\"local\"][\"model\"](x); \n\n system_dict[\"successful_tests\"] += 1;\n print_status(\"Pass\");\n\n except Exception as e:\n system_dict[\"failed_tests_exceptions\"].append(e);\n system_dict[\"failed_tests_lists\"].append(test);\n forward = False;\n print_status(\"Fail\");\n else:\n system_dict[\"skipped_tests_lists\"].append(test);\n print_status(\"Skipped\");\n\n return system_dict\n",
"import os\nimport sys\nsys.path.append(\"../../../monk/\");\nimport psutil\n\nfrom gluon_prototype import prototype\nfrom compare_prototype import compare\nfrom common import print_start\nfrom common import print_status\n\nimport mxnet as mx\nimport numpy as np\nfrom gluon.losses.return_loss import load_loss\n\n\ndef test_layer_convolution2d(system_dict):\n forward = True;\n\n test = \"test_layer_convolution2d\";\n system_dict[\"total_tests\"] += 1;\n print_start(test, system_dict[\"total_tests\"])\n if(forward):\n try:\n gtf = prototype(verbose=0);\n gtf.Prototype(\"sample-project-1\", \"sample-experiment-1\");\n\n\n network = [];\n network.append(gtf.convolution2d(output_channels=3, kernel_size=3));\n gtf.Compile_Network(network, use_gpu=False);\n\n x = np.random.rand(1, 1, 64, 64);\n x = mx.nd.array(x);\n y = gtf.system_dict[\"local\"][\"model\"].forward(x); \n\n system_dict[\"successful_tests\"] += 1;\n print_status(\"Pass\");\n\n except Exception as e:\n system_dict[\"failed_tests_exceptions\"].append(e);\n system_dict[\"failed_tests_lists\"].append(test);\n forward = False;\n print_status(\"Fail\");\n else:\n system_dict[\"skipped_tests_lists\"].append(test);\n print_status(\"Skipped\");\n\n return system_dict\n",
"import os\nimport sys\nsys.path.append(\"../../../monk/\");\nimport psutil\n\nfrom pytorch_prototype import prototype\nfrom compare_prototype import compare\nfrom common import print_start\nfrom common import print_status\n\nimport torch\nimport numpy as np\nfrom pytorch.losses.return_loss import load_loss\n\n\ndef test_layer_transposed_convolution1d(system_dict):\n forward = True;\n\n test = \"test_layer_transposed_convolution1d\";\n system_dict[\"total_tests\"] += 1;\n print_start(test, system_dict[\"total_tests\"])\n if(forward):\n try:\n gtf = prototype(verbose=0);\n gtf.Prototype(\"sample-project-1\", \"sample-experiment-1\");\n\n\n network = [];\n network.append(gtf.transposed_convolution1d(output_channels=3, kernel_size=3));\n gtf.Compile_Network(network, data_shape=(3, 128), use_gpu=False);\n\n x = torch.randn(1, 3, 128);\n y = gtf.system_dict[\"local\"][\"model\"](x); \n\n system_dict[\"successful_tests\"] += 1;\n print_status(\"Pass\");\n\n except Exception as e:\n system_dict[\"failed_tests_exceptions\"].append(e);\n system_dict[\"failed_tests_lists\"].append(test);\n forward = False;\n print_status(\"Fail\");\n else:\n system_dict[\"skipped_tests_lists\"].append(test);\n print_status(\"Skipped\");\n\n return system_dict\n",
"import os\nimport sys\nsys.path.append(\"../../../monk/\");\nimport psutil\n\nfrom keras_prototype import prototype\nfrom compare_prototype import compare\nfrom common import print_start\nfrom common import print_status\n\nimport tensorflow as tf\nimport numpy as np\n\n\ndef test_layer_concatenate(system_dict):\n forward = True;\n\n test = \"test_layer_concatenate\";\n system_dict[\"total_tests\"] += 1;\n print_start(test, system_dict[\"total_tests\"])\n if(forward):\n try:\n gtf = prototype(verbose=0);\n gtf.Prototype(\"sample-project-1\", \"sample-experiment-1\");\n\n\n network = [];\n network.append(gtf.convolution(output_channels=16));\n network.append(gtf.batch_normalization());\n network.append(gtf.relu());\n network.append(gtf.convolution(output_channels=16));\n network.append(gtf.batch_normalization());\n network.append(gtf.relu());\n network.append(gtf.max_pooling());\n\n\n subnetwork = [];\n branch1 = [];\n branch1.append(gtf.convolution(output_channels=16));\n branch1.append(gtf.batch_normalization());\n branch1.append(gtf.convolution(output_channels=16));\n branch1.append(gtf.batch_normalization());\n\n branch2 = [];\n branch2.append(gtf.convolution(output_channels=16));\n branch2.append(gtf.batch_normalization());\n\n branch3 = [];\n branch3.append(gtf.identity())\n\n subnetwork.append(branch1);\n subnetwork.append(branch2);\n subnetwork.append(branch3);\n subnetwork.append(gtf.concatenate());\n\n\n network.append(subnetwork);\n gtf.Compile_Network(network, data_shape=(3, 32, 32), use_gpu=False);\n\n x = tf.placeholder(tf.float32, shape=(1, 32, 32, 3))\n y = gtf.system_dict[\"local\"][\"model\"](x); \n\n system_dict[\"successful_tests\"] += 1;\n print_status(\"Pass\");\n\n except Exception as e:\n system_dict[\"failed_tests_exceptions\"].append(e);\n system_dict[\"failed_tests_lists\"].append(test);\n forward = False;\n print_status(\"Fail\");\n else:\n system_dict[\"skipped_tests_lists\"].append(test);\n print_status(\"Skipped\");\n\n return system_dict\n"
] | [
[
"torch.randn"
],
[
"torch.randn"
],
[
"numpy.random.rand"
],
[
"torch.randn"
],
[
"numpy.random.rand"
],
[
"torch.randn"
],
[
"tensorflow.placeholder"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"1.12",
"1.4",
"1.13",
"1.5",
"1.7",
"0.12",
"1.0",
"1.2"
]
}
] |
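Each of the monk_v1 unit tests above follows the same forward-pass smoke-test pattern: build a small network, compile it for a fixed input shape, push one random tensor through it, and record pass/fail in a shared system_dict. Stripped of the framework-specific prototype object, the core of that pattern reduces to the hypothetical PyTorch-only sketch below (the stand-in model is illustrative, not part of the original tests):

import torch
import torch.nn as nn

def forward_smoke_test(model, data_shape=(3, 64, 64)):
    """Return True if one random batch flows through the model without raising."""
    try:
        x = torch.randn(1, *data_shape)
        y = model(x)
        return y is not None
    except Exception:
        return False

# Hypothetical usage with a tiny stand-in network:
model = nn.Sequential(nn.Conv2d(3, 8, kernel_size=3, padding=1), nn.Tanh())
print(forward_smoke_test(model))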
teristam/spiketoolk | [
"0ae7adabce46cf620c3627ee0093d890996ef355"
] | [
"spiketoolkit/preprocessing/center.py"
] | [
"from spikeextractors import RecordingExtractor\nfrom .transform import TransformRecording\nimport numpy as np\n\n\nclass CenterRecording(TransformRecording):\n preprocessor_name = 'Center'\n\n def __init__(self, recording, mode, seconds, n_snippets):\n if not isinstance(recording, RecordingExtractor):\n raise ValueError(\"'recording' must be a RecordingExtractor\")\n self._scalar = 1\n self._mode = mode\n self._seconds = seconds\n self._n_snippets = n_snippets\n assert self._mode in ['mean', 'median'], \"'mode' can be 'mean' or 'median'\"\n\n # use n_snippets of equal duration equally distributed on the recording\n n_snippets = int(n_snippets)\n assert n_snippets > 0, \"'n_snippets' must be positive\"\n snip_len = seconds / n_snippets * recording.get_sampling_frequency()\n\n if seconds * recording.get_sampling_frequency() >= recording.get_num_frames():\n traces = recording.get_traces()\n else:\n # skip initial and final part\n snip_start = np.linspace(snip_len // 2, recording.get_num_frames()-int(1.5*snip_len), n_snippets)\n traces_snippets = recording.get_snippets(reference_frames=snip_start, snippet_len=snip_len)\n traces_snippets = traces_snippets.swapaxes(0, 1)\n traces = traces_snippets.reshape((traces_snippets.shape[0],\n traces_snippets.shape[1] * traces_snippets.shape[2]))\n if self._mode == 'mean':\n self._offset = -np.mean(traces, axis=1)\n else:\n self._offset = -np.median(traces, axis=1)\n dtype = str(recording.get_dtype())\n if 'uint' in dtype:\n if 'numpy' in dtype:\n dtype = str(dtype).replace(\"<class '\", \"\").replace(\"'>\", \"\")\n # drop 'numpy'\n dtype = dtype.split('.')[1]\n dtype = dtype[1:]\n TransformRecording.__init__(self, recording, scalar=self._scalar, offset=self._offset, dtype=dtype)\n self._kwargs = {'recording': recording.make_serialized_dict(), 'mode': mode, 'seconds': seconds,\n 'n_snippets': n_snippets}\n\n\ndef center(recording, mode='median', seconds=10., n_snippets=10):\n '''\n Removes the offset of the traces channel by channel.\n\n Parameters\n ----------\n recording: RecordingExtractor\n The recording extractor to be transformed\n mode: str\n 'median' (default) or 'mean'\n seconds: float\n Number of seconds used to compute center\n n_snippets: int\n Number of snippets in which the total 'seconds' are divided spanning the recording duration\n\n Returns\n -------\n center: CenterRecording\n The output recording extractor object\n '''\n return CenterRecording(recording=recording, mode=mode, seconds=seconds, n_snippets=n_snippets)\n"
] | [
[
"numpy.median",
"numpy.mean"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
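The spiketoolkit CenterRecording above removes a per-channel DC offset by estimating a mean or median over a handful of snippets and storing its negative as the offset of a TransformRecording. The numerical core, independent of the extractor classes, reduces to the following NumPy sketch (assuming traces are laid out as channels x samples):

import numpy as np

def center_traces(traces, mode='median'):
    """Subtract a per-channel offset (mean or median) from a (channels, samples) array."""
    assert mode in ('mean', 'median'), "'mode' can be 'mean' or 'median'"
    if mode == 'mean':
        offset = -np.mean(traces, axis=1)
    else:
        offset = -np.median(traces, axis=1)
    # Broadcasting the (channels,) offset across samples recenters every channel.
    return traces + offset[:, np.newaxis]

# Hypothetical usage: two channels with different baselines end up with ~zero median.
traces = np.vstack([np.random.randn(1000) + 50, np.random.randn(1000) - 20])
print(np.median(center_traces(traces), axis=1))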
DoubleE1/Keras-GAN | [
"775eb82b18cb146203295f19c937d4290de2953f",
"775eb82b18cb146203295f19c937d4290de2953f"
] | [
"dcgan/mnist/InceptionScore.py",
"dcgan/cifar10/dcgan_cifar10.py"
] | [
"# calculate inception score for cifar-10 in Keras\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom math import floor\nfrom numpy import ones, expand_dims, log, mean, std, exp\nfrom numpy.random import shuffle\nfrom keras.applications.inception_v3 import InceptionV3, preprocess_input\nfrom keras.datasets import cifar10\nfrom skimage.transform import resize\nfrom numpy import asarray\nfrom PIL import Image\nimport os.path\nfrom os import path\nfrom IPython.display import clear_output\n\n# scale an array of images to a new size\ndef scale_images(images, new_shape):\n images_list = list()\n for image in images:\n # resize with nearest neighbor interpolation\n new_image = resize(image, new_shape, 0)\n # store\n images_list.append(new_image)\n return asarray(images_list)\n\ndef crop_center(img):\n #hardcoded for now\n left = 143\n top = 58\n right = 513\n bottom = 427\n # Crop the center of the image\n return np.asarray(img.crop((left, top, right, bottom)))\n\n# assumes images have any shape and pixels in [0,255]\ndef calculate_inception_score(images, n_split=10, eps=1E-16):\n # load inception v3 model\n model = InceptionV3()\n # enumerate splits of images/predictions\n scores = list()\n n_part = floor(images.shape[0] / n_split)\n for i in range(n_split):\n # retrieve images\n ix_start, ix_end = i * n_part, (i+1) * n_part\n subset = images[ix_start:ix_end]\n # convert from uint8 to float32\n print(i, ix_end, ix_start, n_part)\n subset = subset.astype('float32')\n # scale images to the required size\n subset = scale_images(subset, (299,299,1))\n # pre-process images, scale to [-1,1]\n subset = preprocess_input(subset)\n # predict p(y|x)\n p_yx = model.predict(subset)\n # calculate p(y)\n p_y = expand_dims(p_yx.mean(axis=0), 0)\n # calculate KL divergence using log probabilities\n kl_d = p_yx * (log(p_yx + eps) - log(p_y + eps))\n # sum over classes\n sum_kl_d = kl_d.sum(axis=1)\n # average over images\n avg_kl_d = mean(sum_kl_d)\n # undo the log\n is_score = exp(avg_kl_d)\n # store\n scores.append(is_score)\n # print(i)\n # average across images\n is_avg, is_std = mean(scores), std(scores)\n return is_avg, is_std\n\nimage_path = \"Keras-GAN/dcgan/mnist/single_mnist_images\"\n\nif path.exists(image_path):\n images = []\n head_tail = path.split(image_path)\n for i in range(2):\n head_tail = head_tail[0]\n head_tail = path.split(head_tail)\n\n if ~image_path.endswith('/'):\n image_path = image_path + '/'\n print(image_path)\n\n for i in range(5000):\n if path.exists(image_path + str(f\"{i}.png\")):\n new_image_path = image_path + str(f\"{i}.png\")\n print(\"Loaded image: \", str(f\"{i}.png\"))\n img = Image.open(new_image_path)\n img = crop_center(img)\n\n # append the image into a list\n images.append(img)\n\n clear_output()\n\n # convert the list into array\n images = np.asarray(images)\n print(images.shape)\n\n # calculates the average and standard deviation inception scores\n is_avg, is_std = calculate_inception_score(images)\n print(f\"The inception score for {head_tail[1]}\")\n print('average inception score:', is_avg, 'standard deviation inception scores:', is_std)\nelse:\n print(\"Image path not found\")",
"from __future__ import print_function, division\nfrom keras.initializers import RandomNormal\nfrom keras.datasets import cifar10\nfrom keras.layers import Input, Dense, Reshape, Flatten, Dropout\nfrom keras.layers import BatchNormalization, Activation, ZeroPadding2D\nfrom keras.layers.advanced_activations import LeakyReLU\nfrom keras.layers.convolutional import UpSampling2D, Conv2D, Conv2DTranspose\nfrom keras.models import Sequential, Model\nfrom keras.optimizers import Adam\n\nimport matplotlib.pyplot as plt\n\nimport sys\n\nimport numpy as np\n\nclass DCGAN():\n def __init__(self):\n # Input shape\n self.img_rows = 32\n self.img_cols = 32\n self.channels = 3\n self.img_shape = (self.img_rows, self.img_cols, self.channels)\n self.latent_dim = 100\n\n optimizer = Adam(0.0002, 0.5)\n\n # Build and compile the discriminator\n self.discriminator = self.build_discriminator()\n self.discriminator.compile(loss='binary_crossentropy',\n optimizer=optimizer,\n metrics=['accuracy'])\n\n # Build the generator\n self.generator = self.build_generator()\n\n # The generator takes noise as input and generates imgs\n z = Input(shape=(self.latent_dim,))\n img = self.generator(z)\n\n # For the combined model we will only train the generator\n self.discriminator.trainable = False\n\n # The discriminator takes generated images as input and determines validity\n valid = self.discriminator(img)\n\n # The combined model (stacked generator and discriminator)\n # Trains the generator to fool the discriminator\n self.combined = Model(z, valid)\n self.combined.compile(loss='binary_crossentropy', optimizer=optimizer)\n\n def build_generator(self):\n\n model = Sequential()\n\n model.add(Dense(128 * 8 * 8, activation=\"relu\", input_dim=self.latent_dim))\n model.add(Reshape((8, 8, 128)))\n model.add(UpSampling2D())\n\n model.add(Conv2D(128, kernel_size=3, padding=\"same\"))\n model.add(BatchNormalization(momentum=0.8))\n model.add(Activation(\"relu\"))\n model.add(UpSampling2D())\n\n model.add(Conv2D(64, kernel_size=3, padding=\"same\"))\n model.add(BatchNormalization(momentum=0.8))\n model.add(Activation(\"relu\"))\n\n model.add(Conv2D(self.channels, kernel_size=3, padding=\"same\"))\n model.add(Activation(\"tanh\"))\n\n model.summary()\n\n noise = Input(shape=(self.latent_dim,))\n img = model(noise)\n\n return Model(noise, img)\n\n def build_discriminator(self):\n\n model = Sequential()\n\n model.add(Conv2D(32, kernel_size=3, strides=2, input_shape=self.img_shape, padding=\"same\"))\n model.add(LeakyReLU(alpha=0.2))\n model.add(Dropout(0.25))\n\n model.add(Conv2D(64, kernel_size=3, strides=2, padding=\"same\"))\n model.add(ZeroPadding2D(padding=((0,1),(0,1))))\n model.add(BatchNormalization(momentum=0.8))\n model.add(LeakyReLU(alpha=0.2))\n model.add(Dropout(0.25))\n\n model.add(Conv2D(128, kernel_size=3, strides=2, padding=\"same\"))\n model.add(BatchNormalization(momentum=0.8))\n model.add(LeakyReLU(alpha=0.2))\n model.add(Dropout(0.25))\n\n model.add(Conv2D(256, kernel_size=3, strides=1, padding=\"same\"))\n model.add(BatchNormalization(momentum=0.8))\n model.add(LeakyReLU(alpha=0.2))\n model.add(Dropout(0.25))\n\n model.add(Flatten())\n model.add(Dense(1, activation='sigmoid'))\n\n model.summary()\n\n img = Input(shape=self.img_shape)\n validity = model(img)\n\n return Model(img, validity)\n\n def train(self, epochs, batch_size=128, save_interval=50):\n\n # Load the dataset\n (X_train, y_train), (_, _) = cifar10.load_data()\n\n # Extract dogs and cats\n X_cats = X_train[(y_train == 3).flatten()]\n X_dogs = 
X_train[(y_train == 5).flatten()]\n X_train = np.vstack((X_cats, X_dogs))\n\n # Configure input rescale fomr [0,255] to [-1, 1]\n X_train = X_train / 127.5 - 1.\n y_train = y_train.reshape(-1, 1)\n\n # Adversarial ground truths\n valid = np.ones((batch_size, 1))\n fake = np.zeros((batch_size, 1))\n\n for epoch in range(epochs):\n\n # ---------------------\n # Train Discriminator\n # ---------------------\n\n # Select a random half of images\n idx = np.random.randint(0, X_train.shape[0], batch_size)\n imgs = X_train[idx]\n\n # Sample noise and generate a batch of new images\n noise = np.random.normal(0, 1, (batch_size, self.latent_dim))\n gen_imgs = self.generator.predict(noise)\n\n # Train the discriminator (real classified as ones and generated as zeros)\n d_loss_real = self.discriminator.train_on_batch(imgs, valid)\n d_loss_fake = self.discriminator.train_on_batch(gen_imgs, fake)\n d_loss = 0.5 * np.add(d_loss_real, d_loss_fake)\n\n # ---------------------\n # Train Generator\n # ---------------------\n\n # Train the generator (wants discriminator to mistake images as real)\n g_loss = self.combined.train_on_batch(noise, valid)\n\n # Plot the progress\n print (\"%d [D loss: %f, acc.: %.2f%%] [G loss: %f]\" % (epoch, d_loss[0], 100*d_loss[1], g_loss))\n\n # If at save interval => save generated image samples\n if epoch % save_interval == 0:\n self.save_imgs(epoch)\n\n def single_sample_images(self, epoch):\n noise = np.random.normal(0, 1, (1, self.latent_dim))\n gen_imgs = self.generator.predict(noise)\n\n # Rescale images 0 - 1\n gen_imgs = 0.5 * gen_imgs + 0.5\n\n fig, axs = plt.subplots()\n axs.imshow(gen_imgs[0, :,:])\n axs.axis('off')\n fig.savefig(\"Keras-GAN/dcgan/cifar10/single_cifar10_images/%d.png\" % epoch)\n plt.close()\n\n def save_imgs(self, epoch):\n r, c = 5, 5\n noise = np.random.normal(0, 1, (r * c, self.latent_dim))\n gen_imgs = self.generator.predict(noise)\n\n # Rescale images 0 - 1\n gen_imgs = 0.5 * gen_imgs + 0.5\n\n fig, axs = plt.subplots(r, c)\n cnt = 0\n for i in range(r):\n for j in range(c):\n axs[i,j].imshow(gen_imgs[cnt, :,:,:])\n axs[i,j].axis('off')\n cnt += 1\n fig.savefig(\"Keras-GAN/dcgan/cifar10/cifar10_images/%d.png\" % epoch)\n plt.close()\n\nif __name__ == '__main__':\n dcgan = DCGAN()\n dcgan.train(epochs=5000, batch_size=64, save_interval=10)"
] | [
[
"numpy.log",
"numpy.asarray",
"numpy.std",
"numpy.mean",
"numpy.exp"
],
[
"matplotlib.pyplot.subplots",
"numpy.ones",
"numpy.random.normal",
"matplotlib.pyplot.close",
"numpy.add",
"numpy.zeros",
"numpy.vstack",
"numpy.random.randint"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
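The InceptionScore.py record above computes the score from a matrix of predicted class probabilities p(y|x): the marginal p(y) is the column mean over a split, the per-image KL divergence against it is averaged, the result is exponentiated, and the mean and standard deviation are taken over splits. Given a precomputed probability matrix (so no InceptionV3 inference is needed here), a hedged NumPy-only sketch of that calculation is:

import numpy as np

def inception_score(p_yx, n_split=10, eps=1e-16):
    """p_yx: (n_images, n_classes) class probabilities from a classifier."""
    scores = []
    n_part = p_yx.shape[0] // n_split
    for i in range(n_split):
        subset = p_yx[i * n_part:(i + 1) * n_part]
        p_y = np.expand_dims(subset.mean(axis=0), 0)
        # KL(p(y|x) || p(y)) computed with log probabilities, summed over classes.
        kl_d = subset * (np.log(subset + eps) - np.log(p_y + eps))
        scores.append(np.exp(np.mean(kl_d.sum(axis=1))))
    return np.mean(scores), np.std(scores)

# Hypothetical usage with random softmax-like rows:
probs = np.random.dirichlet(np.ones(10), size=1000)
print(inception_score(probs))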
kaylode/Custom-Template | [
"b2f11bfacf2b03b793476a19781f9046fab6fd82",
"b2f11bfacf2b03b793476a19781f9046fab6fd82",
"b2f11bfacf2b03b793476a19781f9046fab6fd82",
"b2f11bfacf2b03b793476a19781f9046fab6fd82"
] | [
"theseus/utilities/cuda.py",
"theseus/classification/metrics/projection.py",
"configs/semantic/infer.py",
"theseus/base/trainer/supervised_trainer.py"
] | [
"\"\"\" CUDA / AMP utils\nHacked together by / Copyright 2020 Ross Wightman\n\"\"\"\nimport torch\nfrom typing import Any\nfrom theseus.utilities.loggers.observer import LoggerObserver\n\nLOGGER = LoggerObserver.getLogger('main')\n\ndef get_devices_info(device_names=\"0\"):\n\n if device_names.startswith('cuda'):\n device_names = device_names.split('cuda:')[1]\n elif device_names.startswith('cpu'):\n return \"CPU\"\n\n devices_info = \"\"\n for i, device_id in enumerate(device_names.split(',')):\n p = torch.cuda.get_device_properties(i)\n devices_info += f\"CUDA:{device_id} ({p.name}, {p.total_memory / 1024 ** 2}MB)\\n\" # bytes to MB\n return devices_info\n\ndef get_device(name='cpu') -> torch.device:\n if name.startswith('cuda'):\n if not torch.cuda.is_available():\n LOGGER.text(\"CUDA is not available. Using CPU...\", level=LoggerObserver.WARN)\n name = 'cpu'\n return torch.device(name)\n\ndef move_to(obj: Any, device: torch.device):\n \"\"\"Credit: https://discuss.pytorch.org/t/pytorch-tensor-to-device-for-a-list-of-dict/66283\n Arguments:\n obj {dict, list} -- Object to be moved to device\n device {torch.device} -- Device that object will be moved to\n Raises:\n TypeError: object is of type that is not implemented to process\n Returns:\n type(obj) -- same object but moved to specified device\n \"\"\"\n if torch.is_tensor(obj) or isinstance(obj, torch.nn.Module):\n return obj.to(device)\n if isinstance(obj, dict):\n res = {k: move_to(v, device) for k, v in obj.items()}\n return res\n if isinstance(obj, list):\n return [move_to(v, device) for v in obj]\n if isinstance(obj, tuple):\n return tuple(move_to(list(obj), device))\n \n return obj\n\ndef detach(obj: Any):\n \"\"\"Credit: https://discuss.pytorch.org/t/pytorch-tensor-to-device-for-a-list-of-dict/66283\n Arguments:\n obj {dict, list} -- Object to be moved to cpu\n Raises:\n TypeError: Invalid type for detach\n Returns:\n type(obj) -- same object but moved to cpu\n \"\"\"\n if torch.is_tensor(obj):\n return obj.detach()\n if isinstance(obj, dict):\n res = {k: detach(v) for k, v in obj.items()}\n return res\n if isinstance(obj, list):\n return [detach(v) for v in obj]\n if isinstance(obj, tuple):\n return tuple(detach(list(obj)))\n raise TypeError(\"Invalid type for detach\")",
"import os\nimport torch\nfrom typing import Any, Dict\nfrom theseus.base.metrics.metric_template import Metric\nimport cv2\nimport numpy as np\nimport hashlib\nfrom theseus.utilities.visualization.visualizer import Visualizer\nfrom theseus.utilities.loggers import LoggerObserver\n\n# To fix tensorflow bug on Google Colab\nimport tensorflow as tf\nimport tensorboard as tb\ntf.io.gfile = tb.compat.tensorflow_stub.io.gfile\n\nclass EmbeddingProjection(Metric):\n \"\"\"\n Visualize embedding project for classification\n \"\"\"\n def __init__(self, save_dir='.temp', has_labels=False, **kwargs):\n super().__init__(**kwargs)\n self.has_labels = has_labels\n self.save_dir = save_dir\n self.visualizer = Visualizer()\n self.logger = LoggerObserver.getLogger('main')\n self.reset()\n\n os.makedirs(self.save_dir, exist_ok=True)\n\n def update(self, outputs: Dict[str, Any], batch: Dict[str, Any]):\n \"\"\"\n Perform calculation based on prediction and targets\n \"\"\"\n features = outputs[\"features\"].detach().cpu().numpy() \n predictions = torch.argmax(outputs['outputs'].detach().cpu(), dim=1).numpy().tolist()\n inputs = batch[\"inputs\"] \n targets = batch[\"targets\"].numpy().tolist()\n img_names = batch['img_names']\n\n for i, _ in enumerate(features):\n filename = hashlib.sha256(img_names[i].encode('utf-8')).hexdigest()\n pred_img = self.visualizer.denormalize(inputs[i])\n pred_img = cv2.resize(pred_img, dsize=(64,64), interpolation=cv2.INTER_CUBIC)\n\n embedding_path = self.save_dir + r\"/\" + filename + '_feat.npy' \n image_path = self.save_dir + r\"/\" + filename + '_img.npy'\n np.save(image_path, pred_img)\n np.save(embedding_path, features[i])\n\n self.embeddings.append(embedding_path)\n self.imgs.append(image_path)\n self.predictions.append(predictions[i])\n\n if self.has_labels:\n self.labels.append(targets[i])\n \n def reset(self):\n self.embeddings = []\n self.imgs = []\n self.predictions = []\n if self.has_labels:\n self.labels = []\n else:\n self.labels = None\n\n def value(self):\n \n all_embeddings = [np.load(embedding_path) for embedding_path in self.embeddings]\n all_images = [np.load(image_path) for image_path in self.imgs]\n all_images = [a.transpose(2,0,1) for a in all_images] # (HWC) -> (CHW)\n\n ## Stack into tensors\n all_embeddings = torch.from_numpy(np.stack(all_embeddings, axis=0))\n all_images = torch.from_numpy(np.stack(all_images, axis=0))\n\n ## Metadata, in column style\n if self.has_labels:\n metadata = [a for a in zip(self.labels, self.predictions)]\n metadata_header = ['ground truth', 'prediction']\n else:\n metadata = self.predictions\n metadata_header = ['prediction']\n\n ## Log to tensorboard\n self.logger.log([{\n 'tag': f\"Validation/projection\",\n 'value': all_embeddings,\n 'type': LoggerObserver.EMBED,\n 'kwargs': {\n 'step': 0,\n 'label_img': all_images, \n 'metadata': metadata,\n 'metadata_header': metadata_header\n }\n }])\n\n return {'projection': \"Embedding projection generated\"}",
"import matplotlib as mpl\nmpl.use(\"Agg\")\nfrom theseus.opt import Opts\n\nimport os\nimport cv2\nimport torch\nfrom theseus.opt import Config\nfrom theseus.semantic.models import MODEL_REGISTRY\nfrom theseus.semantic.augmentations import TRANSFORM_REGISTRY\nfrom theseus.semantic.datasets import DATASET_REGISTRY, DATALOADER_REGISTRY\n\nfrom theseus.utilities.loggers import LoggerObserver\nfrom theseus.base.pipeline import BaseTestPipeline\nfrom theseus.utilities.visualization.visualizer import Visualizer\n\nclass TestPipeline(BaseTestPipeline):\n def __init__(\n self,\n opt: Config\n ):\n\n super(TestPipeline, self).__init__()\n self.opt = opt\n\n def init_globals(self):\n super().init_globals()\n\n def init_registry(self):\n self.model_registry = MODEL_REGISTRY\n self.dataset_registry = DATASET_REGISTRY\n self.dataloader_registry = DATALOADER_REGISTRY\n self.transform_registry = TRANSFORM_REGISTRY\n self.logger.text(\n \"Overidding registry in pipeline...\", LoggerObserver.INFO\n )\n\n @torch.no_grad()\n def inference(self):\n self.init_pipeline()\n self.logger.text(\"Inferencing...\", level=LoggerObserver.INFO)\n\n visualizer = Visualizer()\n\n saved_mask_dir = os.path.join(self.savedir, 'masks')\n saved_overlay_dir = os.path.join(self.savedir, 'overlays')\n\n os.makedirs(saved_mask_dir, exist_ok=True)\n os.makedirs(saved_overlay_dir, exist_ok=True)\n\n for idx, batch in enumerate(self.dataloader):\n inputs = batch['inputs']\n img_names = batch['img_names']\n ori_sizes = batch['ori_sizes']\n\n outputs = self.model.get_prediction(batch, self.device)\n preds = outputs['masks']\n\n for (inpt, pred, filename, ori_size) in zip(inputs, preds, img_names, ori_sizes):\n decode_pred = visualizer.decode_segmap(pred)[:,:,::-1]\n resized_decode_mask = cv2.resize(decode_pred, tuple(ori_size))\n\n # Save mask\n savepath = os.path.join(saved_mask_dir, filename)\n cv2.imwrite(savepath, resized_decode_mask)\n\n # Save overlay\n raw_image = visualizer.denormalize(inpt)\n ori_image = cv2.resize(raw_image, tuple(ori_size))\n overlay = ori_image * 0.7 + resized_decode_mask * 0.3\n savepath = os.path.join(saved_overlay_dir, filename)\n cv2.imwrite(savepath, overlay)\n\n self.logger.text(f\"Save image at {savepath}\", level=LoggerObserver.INFO)\n \n\nif __name__ == '__main__':\n opts = Opts().parse_args()\n val_pipeline = TestPipeline(opts)\n val_pipeline.inference()",
"import torch\n\nfrom torch.cuda import amp\nfrom tqdm import tqdm\nfrom .base_trainer import BaseTrainer\nfrom theseus.utilities.loggers.observer import LoggerObserver\nLOGGER = LoggerObserver.getLogger(\"main\")\n\nclass SupervisedTrainer(BaseTrainer):\n \"\"\"Trainer for supervised tasks\n \n model : `torch.nn.Module`\n Wrapper model with loss \n trainloader : `torch.utils.DataLoader`\n DataLoader for training\n valloader : `torch.utils.DataLoader`\n DataLoader for validation\n metrics: `List[Metric]`\n list of metrics for evaluation\n optimizer: `torch.optim.Optimizer`\n optimizer for parameters update\n scheduler: `torch.optim.lr_scheduler.Scheduler`\n learning rate schedulers\n\n \"\"\"\n def __init__(\n self, \n model, \n trainloader, \n valloader,\n metrics,\n optimizer,\n scheduler,\n **kwargs):\n\n super().__init__(**kwargs)\n\n self.model = model\n self.metrics = metrics \n self.optimizer = optimizer\n self.scheduler = scheduler\n self.trainloader = trainloader\n self.valloader = valloader\n self.use_cuda = next(self.model.parameters()).is_cuda\n\n if self.scheduler:\n self.step_per_epoch = self.scheduler.step_per_epoch\n\n # Flags for shutting down training or validation stages\n self.shutdown_training = False\n self.shutdown_validation = False\n\n\n def training_epoch(self):\n \"\"\"\n Perform training one epoch\n \"\"\"\n self.model.train()\n self.callbacks.run('on_train_epoch_start')\n self.optimizer.zero_grad()\n\n last_batch = 0\n for _, batch in enumerate(self.trainloader):\n last_batch = batch\n\n # Check if shutdown flag has been turned on\n if self.shutdown_training or self.shutdown_all:\n break\n\n self.callbacks.run('on_train_batch_start', {\n 'batch': batch,\n 'iters': self.iters,\n 'num_iterations': self.num_iterations\n })\n\n # Gradient scaler\n with amp.autocast(enabled=self.use_amp):\n outputs = self.model.training_step(batch)\n loss = outputs['loss']\n loss_dict = outputs['loss_dict']\n\n # Backward loss\n self.scaler(loss, self.optimizer)\n \n # Optmizer step\n self.scaler.step(self.optimizer, clip_grad=self.clip_grad, parameters=self.model.parameters())\n if self.scheduler and not self.step_per_epoch:\n self.scheduler.step()\n self.optimizer.zero_grad()\n\n if self.use_cuda:\n torch.cuda.synchronize()\n\n # Calculate current iteration\n self.iters = self.iters + 1\n\n # Get learning rate\n lrl = [x['lr'] for x in self.optimizer.param_groups]\n lr = sum(lrl) / len(lrl)\n\n self.callbacks.run('on_train_batch_end', {\n 'loss_dict': loss_dict,\n 'iters': self.iters,\n 'num_iterations': self.num_iterations,\n 'lr': lr\n })\n\n\n if self.scheduler and self.step_per_epoch:\n self.scheduler.step()\n\n self.callbacks.run('on_train_epoch_end', {\n 'last_batch': last_batch,\n 'iters': self.iters\n })\n \n\n @torch.no_grad() \n def evaluate_epoch(self):\n \"\"\"\n Perform validation one epoch\n \"\"\"\n self.model.eval()\n\n self.callbacks.run('on_val_epoch_start')\n last_batch = 0\n for batch in tqdm(self.valloader):\n last_batch = batch\n # Check if shutdown flag has been turned on\n if self.shutdown_validation or self.shutdown_all:\n break\n\n self.callbacks.run('on_val_batch_start', {\n 'batch': batch,\n 'iters': self.iters,\n 'num_iterations': self.num_iterations\n })\n\n # Gradient scaler\n with amp.autocast(enabled=self.use_amp):\n outputs = self.model.evaluate_step(batch, self.metrics)\n loss_dict = outputs['loss_dict']\n\n self.callbacks.run('on_val_batch_end', {\n 'loss_dict': loss_dict,\n 'iters': self.iters,\n 'num_iterations': self.num_iterations,\n })\n \n 
metric_dict = {}\n for metric in self.metrics:\n metric_dict.update(metric.value())\n metric.reset() \n\n self.callbacks.run(\"on_val_epoch_end\", {\n 'metric_dict': metric_dict,\n 'iters': self.iters,\n 'num_iterations': self.num_iterations,\n 'last_batch': last_batch,\n 'last_outputs': outputs['model_outputs']\n })\n"
] | [
[
"torch.device",
"torch.cuda.get_device_properties",
"torch.is_tensor",
"torch.cuda.is_available"
],
[
"numpy.load",
"numpy.stack",
"numpy.save"
],
[
"matplotlib.use",
"torch.no_grad"
],
[
"torch.cuda.synchronize",
"torch.no_grad",
"torch.cuda.amp.autocast"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
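The theseus cuda.py utility above moves arbitrarily nested containers of tensors onto a device by recursing over dicts, lists, and tuples. A compact sketch of that idea, assuming only PyTorch and ignoring the project's logging layer, might read:

from typing import Any
import torch

def move_to(obj: Any, device: torch.device) -> Any:
    """Recursively move tensors (and nn.Modules) inside dicts/lists/tuples to `device`."""
    if torch.is_tensor(obj) or isinstance(obj, torch.nn.Module):
        return obj.to(device)
    if isinstance(obj, dict):
        return {k: move_to(v, device) for k, v in obj.items()}
    if isinstance(obj, (list, tuple)):
        moved = [move_to(v, device) for v in obj]
        return type(obj)(moved)
    # Non-tensor leaves (ints, strings, None, ...) are returned untouched.
    return obj

# Hypothetical usage: a batch dict with mixed content keeps its structure.
batch = {'inputs': torch.randn(2, 3), 'img_names': ['a.png', 'b.png']}
print(move_to(batch, torch.device('cpu')))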
wangx1996/CenterPillarNet | [
"4be3d53265b8ecb1f9572612fa87f7acd8c57669",
"4be3d53265b8ecb1f9572612fa87f7acd8c57669"
] | [
"src/config/train_config.py",
"src/evaluatefile.py"
] | [
"\"\"\"\n# -*- coding: utf-8 -*-\n-----------------------------------------------------------------------------------\n# Author: Nguyen Mau Dung\n# DoC: 2020.08.17\n# email: [email protected]\n-----------------------------------------------------------------------------------\n# Description: The configurations of the project will be defined here\n\"\"\"\n\nimport os\nimport argparse\n\nimport torch\nfrom easydict import EasyDict as edict\nimport kitti_config as cnf\n\ndef parse_train_configs():\n parser = argparse.ArgumentParser(description='The Implementation using PyTorch')\n parser.add_argument('--seed', type=int, default=2020,\n help='re-produce the results with seed random')\n parser.add_argument('--saved_fn', type=str, default='fpn_resnet_18', metavar='FN',\n help='The name using for saving logs, models,...')\n\n parser.add_argument('--root-dir', type=str, default='../', metavar='PATH',\n help='The ROOT working directory')\n ####################################################################\n ############## Model configs ########################\n ####################################################################\n parser.add_argument('--arch', type=str, default='fpn_resnet_18', metavar='ARCH',\n help='The name of the model architecture')\n parser.add_argument('--pretrained_path', type=str, default=None, metavar='PATH',\n help='the path of the pretrained checkpoint')\n\n ####################################################################\n ############## Dataloader and Running configs #######\n ####################################################################\n parser.add_argument('--hflip_prob', type=float, default=0.5,\n help='The probability of horizontal flip')\n parser.add_argument('--no-val', action='store_true',\n help='If true, dont evaluate the model on the val set')\n parser.add_argument('--num_samples', type=int, default=None,\n help='Take a subset of the dataset to run and debug')\n parser.add_argument('--num_workers', type=int, default=4,\n help='Number of threads for loading data')\n parser.add_argument('--batch_size', type=int, default=16,\n help='mini-batch size (default: 16), this is the total'\n 'batch size of all GPUs on the current node when using'\n 'Data Parallel or Distributed Data Parallel')\n parser.add_argument('--print_freq', type=int, default=50, metavar='N',\n help='print frequency (default: 50)')\n parser.add_argument('--tensorboard_freq', type=int, default=50, metavar='N',\n help='frequency of saving tensorboard (default: 50)')\n parser.add_argument('--checkpoint_freq', type=int, default=2, metavar='N',\n help='frequency of saving checkpoints (default: 5)')\n ####################################################################\n ############## Training strategy ####################\n ####################################################################\n\n parser.add_argument('--start_epoch', type=int, default=1, metavar='N',\n help='the starting epoch')\n parser.add_argument('--num_epochs', type=int, default=300, metavar='N',\n help='number of total epochs to run')\n parser.add_argument('--lr_type', type=str, default='cosin',\n help='the type of learning rate scheduler (cosin or multi_step or one_cycle)')\n parser.add_argument('--lr', type=float, default=0.003, metavar='LR',\n help='initial learning rate')\n parser.add_argument('--minimum_lr', type=float, default=1e-7, metavar='MIN_LR',\n help='minimum learning rate during training')\n parser.add_argument('--momentum', type=float, default=0.949, metavar='M',\n help='momentum')\n 
parser.add_argument('-wd', '--weight_decay', type=float, default=0., metavar='WD',\n help='weight decay (default: 0.)')\n parser.add_argument('--optimizer_type', type=str, default='adam', metavar='OPTIMIZER',\n help='the type of optimizer, it can be sgd or adam')\n parser.add_argument('--steps', nargs='*', default=[150, 180],\n help='number of burn in step')\n\n ####################################################################\n ############## Loss weight ##########################\n ####################################################################\n\n ####################################################################\n ############## Distributed Data Parallel ############\n ####################################################################\n parser.add_argument('--world-size', default=-1, type=int, metavar='N',\n help='number of nodes for distributed training')\n parser.add_argument('--rank', default=-1, type=int, metavar='N',\n help='node rank for distributed training')\n parser.add_argument('--dist-url', default='tcp://127.0.0.1:29500', type=str,\n help='url used to set up distributed training')\n parser.add_argument('--dist-backend', default='nccl', type=str,\n help='distributed backend')\n parser.add_argument('--gpu_idx', default=0, type=int,\n help='GPU index to use.')\n parser.add_argument('--no_cuda', action='store_true',\n help='If true, cuda is not used.')\n parser.add_argument('--multiprocessing-distributed', action='store_true',\n help='Use multi-processing distributed training to launch '\n 'N processes per node, which has N GPUs. This is the '\n 'fastest way to use PyTorch for either single node or '\n 'multi node data parallel training')\n ####################################################################\n ############## Evaluation configurations ###################\n ####################################################################\n parser.add_argument('--evaluate', action='store_true',\n help='only evaluate the model, not training')\n parser.add_argument('--resume_path', type=str, default=None, metavar='PATH',\n help='the path of the resumed checkpoint')\n parser.add_argument('--K', type=int, default=50,\n help='the number of top K')\n\n configs = edict(vars(parser.parse_args()))\n\n ####################################################################\n ############## Hardware configurations #############################\n ####################################################################\n configs.device = torch.device('cpu' if configs.no_cuda else 'cuda')\n configs.ngpus_per_node = torch.cuda.device_count()\n\n configs.pin_memory = True\n configs.input_size = (cnf.BEV_WIDTH, cnf.BEV_HEIGHT)\n configs.down_ratio = 2\n configs.hm_size = (cnf.BEV_WIDTH/configs.down_ratio, cnf.BEV_HEIGHT/configs.down_ratio)\n configs.max_objects = 50\n\n configs.imagenet_pretrained = True\n configs.head_conv = 256\n configs.num_classes = 1\n configs.num_center_offset = 2\n configs.num_z = 1\n configs.num_dim = 3\n configs.num_direction = 2 # sin, cos 8 for bin cos sin\n configs.voxel_size = [0.16, 0.16, 4]\n configs.point_cloud_range =[0, -34.56, -2.73, 69.12, 34.56, 1.27]\n configs.max_number_of_points_per_voxel = 100\n\n\n configs.heads = {\n 'hm_cen': configs.num_classes,\n 'cen_offset': configs.num_center_offset,\n 'direction': configs.num_direction,\n 'z_coor': configs.num_z,\n 'dim': configs.num_dim\n }\n\n configs.num_input_features = 4\n\n ####################################################################\n ############## Dataset, logs, Checkpoints dir 
######################\n ####################################################################\n configs.dataset_dir = '/media/wx/File/data/kittidata'\n configs.checkpoints_dir = os.path.join(configs.root_dir, 'checkpoints', configs.saved_fn)\n configs.logs_dir = os.path.join(configs.root_dir, 'logs', configs.saved_fn)\n\n if not os.path.isdir(configs.checkpoints_dir):\n os.makedirs(configs.checkpoints_dir)\n if not os.path.isdir(configs.logs_dir):\n os.makedirs(configs.logs_dir)\n\n return configs\n",
"\"\"\"\n# -*- coding: utf-8 -*-\n-----------------------------------------------------------------------------------\n# Author: Nguyen Mau Dung\n# DoC: 2020.08.17\n# email: [email protected]\n-----------------------------------------------------------------------------------\n# Description: This script for training\n# Modified: Wang Xu\n# email: [email protected]\n\"\"\"\n\n\nimport argparse\nimport os\nimport time\nimport numpy as np\nimport sys\nsys.path.remove('/opt/ros/kinetic/lib/python2.7/dist-packages')\n\nimport warnings\n\nwarnings.filterwarnings(\"ignore\", category=UserWarning)\nfrom config import kitti_config as cnf\nimport torch\nimport torch.utils.data.distributed\nfrom tqdm import tqdm\nfrom easydict import EasyDict as edict\nimport cv2\nsys.path.append('./')\n\nfrom data_process.kitti_dataloader import create_val_dataloader\nfrom models.model_utils import create_model\nfrom utils.misc import AverageMeter, ProgressMeter\nfrom utils.misc import make_folder, time_synchronized\nfrom utils.evaluation_utils import decode, post_processingv2, \\\n get_batch_statistics_rotated_bbox, ap_per_class, \\\n load_classes, convert_det_to_real_values_v2\nfrom utils.visualization_utils import project_to_image, compute_box_3d, draw_box_3d\nfrom data_process.transformation import lidar_to_camera_box\nfrom spconv.utils import VoxelGeneratorV2\nfrom utils.torch_utils import _sigmoid\nfrom data_process.kitti_data_utils import Calibration\nimport mayavi.mlab\nimport config.kitti_config as cnf\nfrom utils.evaluation_utils import decode, post_processing, draw_predictions, convert_det_to_real_values\n\n\ndef inverse_rigid_trans(Tr):\n ''' Inverse a rigid body transform matrix (3x4 as [R|t])\n [R'|-R't; 0|1]\n '''\n inv_Tr = np.zeros_like(Tr) # 3x4\n inv_Tr[0:3,0:3] = np.transpose(Tr[0:3,0:3])\n inv_Tr[0:3,3] = np.dot(-np.transpose(Tr[0:3,0:3]), Tr[0:3,3])\n return inv_Tr\n\nV2C= np.array([7.533745000000e-03, -9.999714000000e-01, -6.166020000000e-04,\n -4.069766000000e-03, 1.480249000000e-02, 7.280733000000e-04,\n -9.998902000000e-01, -7.631618000000e-02, 9.998621000000e-01,\n 7.523790000000e-03, 1.480755000000e-02, -2.717806000000e-01])\nV2C = np.reshape(V2C, [3, 4])\n\nC2V = inverse_rigid_trans(V2C)\n\nR0 = np.array([9.999239000000e-01, 9.837760000000e-03, -7.445048000000e-03, -9.869795000000e-03,\n 9.999421000000e-01, -4.278459000000e-03, 7.402527000000e-03, 4.351614000000e-03,\n 9.999631000000e-01])\nR0 = np.reshape(R0, [3, 3])\n\n\ndef cart2hom(pts_3d):\n ''' Input: nx3 points in Cartesian\n Oupput: nx4 points in Homogeneous by pending 1\n '''\n n = pts_3d.shape[0]\n pts_3d_hom = np.hstack((pts_3d, np.ones((n, 1))))\n return pts_3d_hom\n\ndef project_ref_to_velo(pts_3d_ref):\n pts_3d_ref = cart2hom(pts_3d_ref) # nx4\n return np.dot(pts_3d_ref, np.transpose(C2V))\n\n\ndef project_rect_to_ref(pts_3d_rect):\n ''' Input and Output are nx3 points '''\n return np.transpose(np.dot(np.linalg.inv(R0), np.transpose(pts_3d_rect)))\n\n\ndef project_rect_to_velo(pts_3d_rect):\n ''' Input: nx3 points in rect camera coord.\n Output: nx3 points in velodyne coord.\n '''\n pts_3d_ref = project_rect_to_ref(pts_3d_rect)\n return project_ref_to_velo(pts_3d_ref)\n\ndef rotz(t):\n ''' Rotation about the y-axis. '''\n c = np.cos(t)\n s = np.sin(t)\n return np.array([[c, -s, 0],\n [s, c, 0],\n [0, 0, 1]])\n\n\ndef roty(t):\n ''' Rotation about the y-axis. 
'''\n c = np.cos(t)\n s = np.sin(t)\n return np.array([[c, 0, s],\n [0, 1, 0],\n [-s, 0, c]])\n\ndef draw_gt_boxes3d(gt_boxes3d, score,fig, color=(1,1,1), line_width=1, draw_text=True, text_scale=(1,1,1), color_list=None, ):\n ''' Draw 3D bounding boxes\n Args:\n gt_boxes3d: numpy array (n,8,3) for XYZs of the box corners\n fig: mayavi figure handler\n color: RGB value tuple in range (0,1), box line color\n line_width: box line width\n draw_text: boolean, if true, write box indices beside boxes\n text_scale: three number tuple\n color_list: a list of RGB tuple, if not None, overwrite color.\n Returns:\n fig: updated fig\n '''\n num = len(gt_boxes3d)\n for n in range(num):\n b = gt_boxes3d[n]\n if color_list is not None:\n color = color_list[n]\n #if draw_text: mayavi.mlab.text3d(b[4,0], b[4,1], b[4,2], 'car'+\"{:.2f}\".format(float(score)), scale=text_scale, color=(1,1,1), figure=fig)\n for k in range(0,4):\n #http://docs.enthought.com/mayavi/mayavi/auto/mlab_helper_functions.html\n i,j=k,(k+1)%4\n mayavi.mlab.plot3d([b[i,0], b[j,0]], [b[i,1], b[j,1]], [b[i,2], b[j,2]], color=color, tube_radius=None, line_width=line_width, figure=fig)\n\n i,j=k+4,(k+1)%4 + 4\n mayavi.mlab.plot3d([b[i,0], b[j,0]], [b[i,1], b[j,1]], [b[i,2], b[j,2]], color=color, tube_radius=None, line_width=line_width, figure=fig)\n\n i,j=k,k+4\n mayavi.mlab.plot3d([b[i,0], b[j,0]], [b[i,1], b[j,1]], [b[i,2], b[j,2]], color=color, tube_radius=None, line_width=line_width, figure=fig)\n #mlab.show(1)\n #mlab.view(azimuth=180, elevation=70, focalpoint=[ 12.0909996 , -1.04700089, -2.03249991], distance=62.0, figure=fig)\n return fig\n\n\ndef show3dlidar(pointpaht, detections,V2C, R0, P2):\n pointcloud = np.fromfile(pointpaht, dtype=np.float32).reshape(-1, 4)\n x = pointcloud[:, 0] # x position of point\n xmin = np.amin(x, axis=0)\n xmax = np.amax(x, axis=0 )\n y = pointcloud[:, 1] # y position of point\n ymin = np.amin(y, axis=0)\n ymax = np.amax(y, axis=0)\n z = pointcloud[:, 2] # z position of point\n zmin = np.amin(z, axis=0)\n zmax = np.amax(z, axis=0)\n d = np.sqrt(x ** 2 + y ** 2) # Map Distance from sensor\n vals = 'height'\n if vals == \"height\":\n col = z\n else:\n col = d\n fig = mayavi.mlab.figure(bgcolor=(0, 0, 0), size=(640, 500))\n mayavi.mlab.points3d(x, y, z,\n col, # Values used for Color\n mode=\"point\",\n # 灰度图的伪彩映射\n colormap='Blues', # 'bone', 'copper', 'gnuplot'\n # color=(0, 1, 0), # Used a fixed (r,g,b) instead\n figure=fig,\n )\n # 绘制原点\n mayavi.mlab.points3d(0, 0, 0, color=(1, 1, 1), mode=\"sphere\",scale_factor=0.2)\n\n print(detections.shape)\n\n detections[:, 1:8] = lidar_to_camera_box(detections[:, 1:8], V2C, R0, P2)\n\n for i in range(detections.shape[0]):\n\n h = float(detections[i][4])\n w = float(detections[i][5])\n l = float(detections[i][6])\n\n x = float(detections[i][1])\n y = float(detections[i][2])\n z = float(detections[i][3])\n x_corners = [l / 2, l / 2, -l / 2, -l / 2, l / 2, l / 2, -l / 2, -l / 2] ;\n y_corners = [0, 0, 0, 0, -h, -h, -h, -h] ;\n z_corners = [w / 2, -w / 2, -w / 2, w / 2, w / 2, -w / 2, -w / 2, w / 2];\n #print(x_corners)\n #print(detections[i])\n R = roty(float(detections[i][7]))\n corners_3d = np.dot(R, np.vstack([x_corners, y_corners, z_corners]))\n # print corners_3d.shape\n #corners_3d = np.zeros((3,8))\n corners_3d[0, :] = corners_3d[0, :] + x;\n corners_3d[1, :] = corners_3d[1, :] + y;\n corners_3d[2, :] = corners_3d[2, :] + z;\n corners_3d = np.transpose(corners_3d)\n box3d_pts_3d_velo = project_rect_to_velo(corners_3d)\n #x1, y1, z1 = box3d_pts_3d_velo[0, 
:]\n #x2, y2, z2 = box3d_pts_3d_velo[1, :]\n if detections[i][0] == 1.0:\n draw_gt_boxes3d([box3d_pts_3d_velo],1,color=(1,0,0), fig=fig)\n else:\n draw_gt_boxes3d([box3d_pts_3d_velo], 1, color=(0, 1, 0), fig=fig)\n\n # 绘制坐标\n '''axes = np.array(\n [[20.0, 0.0, 0.0, 0.0], [0.0, 20.0, 0.0, 0.0], [0.0, 0.0, 20.0, 0.0]],\n dtype=np.float64,\n )\n #x轴\n mayavi.mlab.plot3d(\n [0, axes[0, 0]],\n [0, axes[0, 1]],\n [0, axes[0, 2]],\n color=(1, 0, 0),\n tube_radius=None,\n figure=fig,\n )\n #y轴\n mayavi.mlab.plot3d(\n [0, axes[1, 0]],\n [0, axes[1, 1]],\n [0, axes[1, 2]],\n color=(0, 1, 0),\n tube_radius=None,\n figure=fig,\n )\n #z轴\n mayavi.mlab.plot3d(\n [0, axes[2, 0]],\n [0, axes[2, 1]],\n [0, axes[2, 2]],\n color=(0, 0, 1),\n tube_radius=None,\n figure=fig,\n )'''\n mayavi.mlab.show()\n\ndef evaluate_mAP(val_loader, model, configs, logger):\n batch_time = AverageMeter('Time', ':6.3f')\n data_time = AverageMeter('Data', ':6.3f')\n\n progress = ProgressMeter(len(val_loader), [batch_time, data_time],\n prefix=\"Evaluation phase...\")\n labels = []\n sample_metrics = [] # List of tuples (TP, confs, pred)\n # switch to evaluate mode\n model.eval()\n\n class_id = {0:'Car', 1:'Pedestrian', 2:'Cyclist'}\n\n with torch.no_grad():\n start_time = time.time()\n for batch_idx, batch_data in enumerate(tqdm(val_loader)):\n metadatas, targets= batch_data\n\n batch_size = len(metadatas['img_path'])\n\n voxelinput = metadatas['voxels']\n coorinput = metadatas['coors']\n numinput = metadatas['num_points']\n\n dtype = torch.float32\n voxelinputr = torch.tensor(\n voxelinput, dtype=torch.float32, device=configs.device).to(dtype)\n\n coorinputr = torch.tensor(\n coorinput, dtype=torch.int32, device=configs.device)\n\n numinputr = torch.tensor(\n numinput, dtype=torch.int32, device=configs.device)\n t1 = time_synchronized()\n outputs = model(voxelinputr, coorinputr, numinputr)\n outputs = outputs._asdict()\n\n outputs['hm_cen'] = _sigmoid(outputs['hm_cen'])\n outputs['cen_offset'] = _sigmoid(outputs['cen_offset'])\n # detections size (batch_size, K, 10)\n img_path = metadatas['img_path'][0]\n #print(img_path)\n calib = Calibration(img_path.replace(\".png\", \".txt\").replace(\"image_2\", \"calib\"))\n\n detections = decode(outputs['hm_cen'], outputs['cen_offset'], outputs['direction'], outputs['z_coor'],\n outputs['dim'], K=configs.K)\n detections = detections.cpu().numpy().astype(np.float32)\n detections = post_processing(detections, configs.num_classes, configs.down_ratio, configs.peak_thresh)\n\n for i in range(configs.batch_size):\n detections[i] = convert_det_to_real_values(detections[i])\n img_path = metadatas['img_path'][i]\n #rint(img_path)\n datap = str.split(img_path,'/')\n filename = str.split(datap[7],'.')\n file_write_obj = open('../result/' + filename[0] + '.txt', 'w')\n lidar_path = '/' + datap[1] + '/' + datap[2] + '/' + datap[3] + '/' + \\\n datap[4] + '/' + datap[5] + '/' + 'velodyne' + '/' + filename[0] + '.bin'\n #print(lidar_path)\n #show3dlidar(lidar_path, detections[i], calib.V2C, calib.R0, calib.P2)\n dets = detections[i]\n if len(dets) >0 :\n dets[:, 1:] = lidar_to_camera_box(dets[:, 1:], calib.V2C, calib.R0, calib.P2)\n for box_idx, label in enumerate(dets):\n location, dim, ry = label[1:4], label[4:7], label[7]\n if ry < -np.pi:\n ry = 2*np.pi + ry\n if ry > np.pi:\n ry = -2*np.pi + ry\n corners_3d = compute_box_3d(dim, location, ry)\n corners_2d = project_to_image(corners_3d, calib.P2)\n minxy = np.min(corners_2d, axis=0)\n maxxy = np.max(corners_2d, axis=0)\n bbox = 
np.concatenate([minxy, maxxy], axis=0)\n if bbox[0] < 0 or bbox[2]<0:\n continue\n if bbox[1] > 1272 or bbox[3] > 375:\n continue\n oblist = ['Car',' ','0.0', ' ', '0', ' ', '-10', ' ','%.2f'%bbox[0], ' ', \\\n '%.2f' %bbox[1], ' ','%.2f'%bbox[2], ' ','%.2f'%bbox[3], ' ','%.2f'%dim[0], ' ','%.2f'%dim[1], ' ','%.2f'%dim[2], ' ', \\\n '%.2f' %location[0], ' ','%.2f'%location[1], ' ','%.2f'%location[2], ' ', '%.2f'%ry, '\\n']\n file_write_obj.writelines(oblist)\n file_write_obj.close()\n\n '''for sample_i in range(len(detections)):\n # print(output.shape)\n num = targets['count'][sample_i]\n # print(targets['batch'][sample_i][:num].shape)\n target = targets['batch'][sample_i][:num]\n #print(target[:, 8].tolist())\n labels += target[:, 8].tolist()\n\n\n sample_metrics += get_batch_statistics_rotated_bbox(detections, targets, iou_threshold=configs.iou_thresh)\n\n t2 = time_synchronized()\n\n # measure elapsed time\n # torch.cuda.synchronize()\n batch_time.update(time.time() - start_time)\n\n # Log message\n if logger is not None:\n if ((batch_idx + 1) % configs.print_freq) == 0:\n logger.info(progress.get_message(batch_idx))\n\n start_time = time.time()\n\n # Concatenate sample statistics\n true_positives, pred_scores, pred_labels = [np.concatenate(x, 0) for x in list(zip(*sample_metrics))]\n precision, recall, AP, f1, ap_class = ap_per_class(true_positives, pred_scores, pred_labels, labels)'''\n\n #return precision, recall, AP, f1, ap_class\n\n\ndef parse_eval_configs():\n parser = argparse.ArgumentParser(description='Testing config for the Implementation')\n parser.add_argument('--classnames-infor-path', type=str, default='/media/wx/File/kittidatabase/classes_names_pillar.txt',\n metavar='PATH', help='The class names of objects in the task')\n parser.add_argument('--saved_fn', type=str, default='fpn_resnet_18', metavar='FN',\n help='The name using for saving logs, models,...')\n parser.add_argument('-a', '--arch', type=str, default='fpn_resnet_18', metavar='ARCH',\n help='The name of the model architecture')\n parser.add_argument('--pretrained_path', type=str,\n default='../checkpoints/fpn_resnet_18/fpn_resnet_18_epoch_300.pth', metavar='PATH',\n help='the path of the pretrained checkpoint')\n parser.add_argument('--K', type=int, default=50,\n help='the number of top K')\n parser.add_argument('--no_cuda', action='store_true',\n help='If true, cuda is not used.')\n parser.add_argument('--gpu_idx', default=0, type=int,\n help='GPU index to use.')\n parser.add_argument('--num_samples', type=int, default=None,\n help='Take a subset of the dataset to run and debug')\n parser.add_argument('--num_workers', type=int, default=1,\n help='Number of threads for loading data')\n parser.add_argument('--batch_size', type=int, default=4,\n help='mini-batch size (default: 4)')\n parser.add_argument('--peak_thresh', type=float, default=0.3)\n parser.add_argument('--save_test_output', action='store_true',\n help='If true, the output image of the testing phase will be saved')\n parser.add_argument('--output_format', type=str, default='image', metavar='PATH',\n help='the type of the test output (support image or video)')\n parser.add_argument('--output_video_fn', type=str, default='out_fpn_resnet_18', metavar='PATH',\n help='the video filename if the output format is video')\n parser.add_argument('--output-width', type=int, default=608,\n help='the width of showing output, the height maybe vary')\n\n parser.add_argument('--conf_thresh', type=float, default=0.5,\n help='for evaluation - the threshold for class 
conf')\n parser.add_argument('--nms_thresh', type=float, default=0.5,\n help='for evaluation - the threshold for nms')\n parser.add_argument('--iou_thresh', type=float, default=0.5,\n help='for evaluation - the threshold for IoU')\n\n configs = edict(vars(parser.parse_args()))\n configs.pin_memory = True\n configs.distributed = False # For testing on 1 GPU only\n\n configs.input_size = (432, 432)\n configs.hm_size = (216, 216)\n configs.down_ratio = 2\n configs.max_objects = 50\n\n configs.imagenet_pretrained = False\n configs.head_conv = 256\n configs.num_classes = 1\n configs.num_center_offset = 2\n configs.num_z = 1\n configs.num_dim = 3\n configs.num_direction = 2 # sin, cos\n configs.voxel_size = [0.16, 0.16, 4]\n configs.point_cloud_range = [0, -34.56, -2.73, 69.12, 34.56, 1.27]\n configs.max_number_of_points_per_voxel = 100\n\n configs.heads = {\n 'hm_cen': configs.num_classes,\n 'cen_offset': configs.num_center_offset,\n 'direction': configs.num_direction,\n 'z_coor': configs.num_z,\n 'dim': configs.num_dim\n }\n configs.num_input_features = 4\n\n ####################################################################\n ##############Dataset, Checkpoints, and results dir configs#########\n ####################################################################\n configs.root_dir = '../'\n configs.dataset_dir = '/media/wx/File/kittidatabase'\n\n if configs.save_test_output:\n configs.results_dir = os.path.join(configs.root_dir, 'results', configs.saved_fn)\n make_folder(configs.results_dir)\n\n return configs\n\n\n\nif __name__ == '__main__':\n configs = parse_eval_configs()\n configs.distributed = False # For evaluation\n class_names = load_classes(configs.classnames_infor_path)\n print(configs.iou_thresh)\n\n voxel_generator = VoxelGeneratorV2(\n voxel_size=list(configs.voxel_size),\n point_cloud_range = list(configs.point_cloud_range),\n max_num_points= configs.max_number_of_points_per_voxel,\n max_voxels=20000\n )\n\n model = create_model(configs, voxel_generator)\n print('\\n\\n' + '-*=' * 30 + '\\n\\n')\n assert os.path.isfile(configs.pretrained_path), \"No file at {}\".format(configs.pretrained_path)\n model.load_state_dict(torch.load(configs.pretrained_path, map_location='cpu'))\n print('Loaded weights from {}\\n'.format(configs.pretrained_path))\n\n configs.device = torch.device('cpu' if configs.no_cuda else 'cuda:{}'.format(configs.gpu_idx))\n model = model.to(device=configs.device)\n\n out_cap = None\n\n model.eval()\n\n print('Create the validation dataloader')\n val_dataloader = create_val_dataloader(configs, voxel_generator)\n\n print(\"\\nStart computing mAP...\\n\")\n evaluate_mAP(val_dataloader, model, configs, None)\n '''print(\"\\nDone computing mAP...\\n\")\n for idx, cls in enumerate(ap_class):\n print(\"\\t>>>\\t Class {} ({}): precision = {:.4f}, recall = {:.4f}, AP = {:.4f}, f1: {:.4f}\".format(cls, \\\n class_names[cls][:3], precision[idx], recall[idx], AP[idx], f1[idx]))\n\n print(\"\\nmAP: {}\\n\".format(AP.mean()))'''\n"
] | [
[
"torch.device",
"torch.cuda.device_count"
],
[
"numpy.amax",
"numpy.sqrt",
"torch.load",
"numpy.concatenate",
"numpy.max",
"numpy.zeros_like",
"torch.no_grad",
"numpy.reshape",
"numpy.sin",
"torch.tensor",
"numpy.min",
"numpy.amin",
"numpy.linalg.inv",
"numpy.transpose",
"numpy.array",
"numpy.fromfile",
"numpy.cos",
"numpy.ones",
"numpy.vstack"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
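The evaluatefile.py code in the row above computes 3D box corners inline inside show3dlidar (x_corners/y_corners/z_corners rotated by roty and translated). Below is a standalone sketch of that corner computation for a KITTI-style box in camera coordinates; the function name box3d_corners is hypothetical and only numpy is used, no project helpers.

    # Sketch of the corner computation done inline in show3dlidar above:
    # 8 corners of a 3D box from size (h, w, l), center (x, y, z) and yaw ry.
    import numpy as np

    def box3d_corners(h, w, l, x, y, z, ry):
        R = np.array([[ np.cos(ry), 0, np.sin(ry)],
                      [ 0,          1, 0         ],
                      [-np.sin(ry), 0, np.cos(ry)]])   # rotation about camera y-axis
        x_c = [ l/2,  l/2, -l/2, -l/2,  l/2,  l/2, -l/2, -l/2]
        y_c = [ 0,    0,    0,    0,   -h,   -h,   -h,   -h  ]  # bottom face at y
        z_c = [ w/2, -w/2, -w/2,  w/2,  w/2, -w/2, -w/2,  w/2]
        corners = R @ np.vstack([x_c, y_c, z_c])       # rotate, then translate
        corners += np.array([[x], [y], [z]])
        return corners.T                               # shape (8, 3)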
alimuldal/scipy | [
"713cf7df7b759e2aaeef0f81eb632f48c9b4bae0"
] | [
"scipy/special/__init__.py"
] | [
"\"\"\"\n========================================\nSpecial functions (:mod:`scipy.special`)\n========================================\n\n.. module:: scipy.special\n\nNearly all of the functions below are universal functions and follow\nbroadcasting and automatic array-looping rules. Exceptions are noted.\n\nError handling\n==============\n\nErrors are handled by returning nans, or other appropriate values.\nSome of the special function routines will emit warnings when an error\noccurs. By default this is disabled. To enable such messages use\n``errprint(1)``, and to disable such messages use ``errprint(0)``.\n\nExample:\n\n >>> print scipy.special.bdtr(-1,10,0.3)\n >>> scipy.special.errprint(1)\n >>> print scipy.special.bdtr(-1,10,0.3)\n\n.. autosummary::\n :toctree: generated/\n\n errprint\n SpecialFunctionWarning -- Warning that can be issued with ``errprint(True)``\n\nAvailable functions\n===================\n\nAiry functions\n--------------\n\n.. autosummary::\n :toctree: generated/\n\n airy -- Airy functions and their derivatives.\n airye -- Exponentially scaled Airy functions\n ai_zeros -- [+]Zeros of Airy functions Ai(x) and Ai'(x)\n bi_zeros -- [+]Zeros of Airy functions Bi(x) and Bi'(x)\n itairy --\n \n\nElliptic Functions and Integrals\n--------------------------------\n\n.. autosummary::\n :toctree: generated/\n\n ellipj -- Jacobian elliptic functions\n ellipk -- Complete elliptic integral of the first kind.\n ellipkm1 -- ellipkm1(x) == ellipk(1 - x)\n ellipkinc -- Incomplete elliptic integral of the first kind.\n ellipe -- Complete elliptic integral of the second kind.\n ellipeinc -- Incomplete elliptic integral of the second kind.\n\nBessel Functions\n----------------\n\n.. autosummary::\n :toctree: generated/\n\n jv -- Bessel function of real-valued order and complex argument.\n jn -- Alias for jv\n jve -- Exponentially scaled Bessel function.\n yn -- Bessel function of second kind (integer order).\n yv -- Bessel function of the second kind (real-valued order).\n yve -- Exponentially scaled Bessel function of the second kind.\n kn -- Modified Bessel function of the second kind (integer order).\n kv -- Modified Bessel function of the second kind (real order).\n kve -- Exponentially scaled modified Bessel function of the second kind.\n iv -- Modified Bessel function.\n ive -- Exponentially scaled modified Bessel function.\n hankel1 -- Hankel function of the first kind.\n hankel1e -- Exponentially scaled Hankel function of the first kind.\n hankel2 -- Hankel function of the second kind.\n hankel2e -- Exponentially scaled Hankel function of the second kind.\n\nThe following is not an universal function:\n\n.. autosummary::\n :toctree: generated/\n\n lmbda -- [+]Sequence of lambda functions with arbitrary order v.\n\nZeros of Bessel Functions\n^^^^^^^^^^^^^^^^^^^^^^^^^\n\nThese are not universal functions:\n\n.. autosummary::\n :toctree: generated/\n\n jnjnp_zeros -- [+]Zeros of integer-order Bessel functions and derivatives sorted in order.\n jnyn_zeros -- [+]Zeros of integer-order Bessel functions and derivatives as separate arrays.\n jn_zeros -- [+]Zeros of Jn(x)\n jnp_zeros -- [+]Zeros of Jn'(x)\n yn_zeros -- [+]Zeros of Yn(x)\n ynp_zeros -- [+]Zeros of Yn'(x)\n y0_zeros -- [+]Complex zeros: Y0(z0)=0 and values of Y0'(z0)\n y1_zeros -- [+]Complex zeros: Y1(z1)=0 and values of Y1'(z1)\n y1p_zeros -- [+]Complex zeros of Y1'(z1')=0 and values of Y1(z1')\n\nFaster versions of common Bessel Functions\n^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n\n.. 
autosummary::\n :toctree: generated/\n\n j0 -- Bessel function of order 0.\n j1 -- Bessel function of order 1.\n y0 -- Bessel function of second kind of order 0.\n y1 -- Bessel function of second kind of order 1.\n i0 -- Modified Bessel function of order 0.\n i0e -- Exponentially scaled modified Bessel function of order 0.\n i1 -- Modified Bessel function of order 1.\n i1e -- Exponentially scaled modified Bessel function of order 1.\n k0 -- Modified Bessel function of the second kind of order 0.\n k0e -- Exponentially scaled modified Bessel function of the second kind of order 0.\n k1 -- Modified Bessel function of the second kind of order 1.\n k1e -- Exponentially scaled modified Bessel function of the second kind of order 1.\n\nIntegrals of Bessel Functions\n^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n\n.. autosummary::\n :toctree: generated/\n\n itj0y0 -- Basic integrals of j0 and y0 from 0 to x.\n it2j0y0 -- Integrals of (1-j0(t))/t from 0 to x and y0(t)/t from x to inf.\n iti0k0 -- Basic integrals of i0 and k0 from 0 to x.\n it2i0k0 -- Integrals of (i0(t)-1)/t from 0 to x and k0(t)/t from x to inf.\n besselpoly -- Integral of a Bessel function: Jv(2* a* x) * x[+]lambda from x=0 to 1.\n\nDerivatives of Bessel Functions\n^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n\n.. autosummary::\n :toctree: generated/\n\n jvp -- Nth derivative of Jv(v,z)\n yvp -- Nth derivative of Yv(v,z)\n kvp -- Nth derivative of Kv(v,z)\n ivp -- Nth derivative of Iv(v,z)\n h1vp -- Nth derivative of H1v(v,z)\n h2vp -- Nth derivative of H2v(v,z)\n\nSpherical Bessel Functions\n^^^^^^^^^^^^^^^^^^^^^^^^^^\n\nThese are not universal functions:\n\n.. autosummary::\n :toctree: generated/\n\n sph_jn -- [+]Sequence of spherical Bessel functions, jn(z)\n sph_yn -- [+]Sequence of spherical Bessel functions, yn(z)\n sph_jnyn -- [+]Sequence of spherical Bessel functions, jn(z) and yn(z)\n sph_in -- [+]Sequence of spherical Bessel functions, in(z)\n sph_kn -- [+]Sequence of spherical Bessel functions, kn(z)\n sph_inkn -- [+]Sequence of spherical Bessel functions, in(z) and kn(z)\n\nRiccati-Bessel Functions\n^^^^^^^^^^^^^^^^^^^^^^^^\n\nThese are not universal functions:\n\n.. autosummary::\n :toctree: generated/\n\n riccati_jn -- [+]Sequence of Ricatti-Bessel functions of first kind.\n riccati_yn -- [+]Sequence of Ricatti-Bessel functions of second kind.\n\nStruve Functions\n----------------\n\n.. autosummary::\n :toctree: generated/\n\n struve -- Struve function --- Hv(x)\n modstruve -- Modified Struve function --- Lv(x)\n itstruve0 -- Integral of H0(t) from 0 to x\n it2struve0 -- Integral of H0(t)/t from x to Inf.\n itmodstruve0 -- Integral of L0(t) from 0 to x.\n\n\nRaw Statistical Functions\n-------------------------\n\n.. seealso:: :mod:`scipy.stats`: Friendly versions of these functions.\n\n.. 
autosummary::\n :toctree: generated/\n\n bdtr -- Sum of terms 0 through k of the binomial pdf.\n bdtrc -- Sum of terms k+1 through n of the binomial pdf.\n bdtri -- Inverse of bdtr\n bdtrik --\n bdtrin --\n btdtr -- Integral from 0 to x of beta pdf.\n btdtri -- Quantiles of beta distribution\n btdtria --\n btdtrib --\n fdtr -- Integral from 0 to x of F pdf.\n fdtrc -- Integral from x to infinity under F pdf.\n fdtri -- Inverse of fdtrc\n fdtridfd -- \n gdtr -- Integral from 0 to x of gamma pdf.\n gdtrc -- Integral from x to infinity under gamma pdf.\n gdtria -- Inverse with respect to `a` of gdtr.\n gdtrib -- Inverse with respect to `b` of gdtr.\n gdtrix -- Inverse with respect to `x` of gdtr.\n nbdtr -- Sum of terms 0 through k of the negative binomial pdf.\n nbdtrc -- Sum of terms k+1 to infinity under negative binomial pdf.\n nbdtri -- Inverse of nbdtr\n nbdtrik --\n nbdtrin --\n ncfdtr -- CDF of non-central t distribution.\n ncfdtridfd -- Find degrees of freedom (denominator) of noncentral F distribution.\n ncfdtridfn -- Find degrees of freedom (numerator) of noncentral F distribution.\n ncfdtri -- Inverse CDF of noncentral F distribution.\n ncfdtrinc -- Find noncentrality parameter of noncentral F distribution.\n nctdtr -- CDF of noncentral t distribution.\n nctdtridf -- Find degrees of freedom of noncentral t distribution.\n nctdtrit -- Inverse CDF of noncentral t distribution.\n nctdtrinc -- Find noncentrality parameter of noncentral t distribution.\n nrdtrimn -- Find mean of normal distribution from cdf and std.\n nrdtrisd -- Find std of normal distribution from cdf and mean.\n pdtr -- Sum of terms 0 through k of the Poisson pdf.\n pdtrc -- Sum of terms k+1 to infinity of the Poisson pdf.\n pdtri -- Inverse of pdtr\n pdtrik --\n stdtr -- Integral from -infinity to t of the Student-t pdf.\n stdtridf --\n stdtrit --\n chdtr -- Integral from 0 to x of the Chi-square pdf.\n chdtrc -- Integral from x to infnity of Chi-square pdf.\n chdtri -- Inverse of chdtrc.\n chdtriv --\n ndtr -- Integral from -infinity to x of standard normal pdf\n log_ndtr -- Logarithm of integral from -infinity to x of standard normal pdf\n ndtri -- Inverse of ndtr (quantiles)\n chndtr --\n chndtridf --\n chndtrinc --\n chndtrix --\n smirnov -- Kolmogorov-Smirnov complementary CDF for one-sided test statistic (Dn+ or Dn-)\n smirnovi -- Inverse of smirnov.\n kolmogorov -- The complementary CDF of the (scaled) two-sided test statistic (Kn*) valid for large n.\n kolmogi -- Inverse of kolmogorov\n tklmbda -- Tukey-Lambda CDF\n logit --\n expit --\n boxcox -- Compute the Box-Cox transformation.\n boxcox1p -- Compute the Box-Cox transformation of 1 + x.\n inv_boxcox -- Compute the inverse of the Box-Cox tranformation.\n inv_boxcox1p -- Compute the inverse of the Box-Cox transformation of 1 + x.\n\n\nInformation Theory Functions\n----------------------------\n\n.. autosummary::\n :toctree: generated/\n\n entr -- entr(x) = -x*log(x)\n rel_entr -- rel_entr(x, y) = x*log(x/y)\n kl_div -- kl_div(x, y) = x*log(x/y) - x + y\n huber -- Huber loss function.\n pseudo_huber -- Pseudo-Huber loss function.\n\n\nGamma and Related Functions\n---------------------------\n\n.. 
autosummary::\n :toctree: generated/\n\n gamma -- Gamma function.\n gammaln -- Log transformation of the gamma function.\n gammasgn -- Sign of the gamma function.\n gammainc -- Incomplete gamma integral.\n gammaincinv -- Inverse of gammainc.\n gammaincc -- Complemented incomplete gamma integral.\n gammainccinv -- Inverse of gammaincc.\n beta -- Beta function.\n betaln -- Log of the absolute value of the beta function.\n betainc -- Incomplete beta integral.\n betaincinv -- Inverse of betainc.\n psi -- Logarithmic derivative of the gamma function.\n rgamma -- One divided by the gamma function.\n polygamma -- Nth derivative of psi function.\n multigammaln -- Log of the multivariate gamma.\n digamma -- Digamma function (derivative of the logarithm of gamma).\n poch -- The Pochhammer symbol (rising factorial).\n\n\nError Function and Fresnel Integrals\n------------------------------------\n\n.. autosummary::\n :toctree: generated/\n\n erf -- Error function.\n erfc -- Complemented error function (1- erf(x))\n erfcx -- Scaled complemented error function exp(x**2)*erfc(x)\n erfi -- Imaginary error function, -i erf(i x)\n erfinv -- Inverse of error function\n erfcinv -- Inverse of erfc\n wofz -- Fadeeva function.\n dawsn -- Dawson's integral.\n fresnel -- Fresnel sine and cosine integrals.\n fresnel_zeros -- Complex zeros of both Fresnel integrals\n modfresnelp -- Modified Fresnel integrals F_+(x) and K_+(x)\n modfresnelm -- Modified Fresnel integrals F_-(x) and K_-(x)\n\nThese are not universal functions:\n\n.. autosummary::\n :toctree: generated/\n\n erf_zeros -- [+]Complex zeros of erf(z)\n fresnelc_zeros -- [+]Complex zeros of Fresnel cosine integrals\n fresnels_zeros -- [+]Complex zeros of Fresnel sine integrals\n\nLegendre Functions\n------------------\n\n.. autosummary::\n :toctree: generated/\n\n lpmv -- Associated Legendre Function of arbitrary non-negative degree v.\n sph_harm -- Spherical Harmonics (complex-valued) Y^m_n(theta,phi)\n\nThese are not universal functions:\n\n.. autosummary::\n :toctree: generated/\n\n clpmn -- [+]Associated Legendre Function of the first kind for complex arguments.\n lpn -- [+]Legendre Functions (polynomials) of the first kind\n lqn -- [+]Legendre Functions of the second kind.\n lpmn -- [+]Associated Legendre Function of the first kind for real arguments.\n lqmn -- [+]Associated Legendre Function of the second kind.\n\nEllipsoidal Harmonics\n---------------------\n\n.. autosummary::\n :toctree: generated/\n\n ellip_harm -- Ellipsoidal harmonic E\n ellip_harm_2 -- Ellipsoidal harmonic F\n ellip_normal -- Ellipsoidal normalization constant\n\nOrthogonal polynomials\n----------------------\n\nThe following functions evaluate values of orthogonal polynomials:\n\n.. autosummary::\n :toctree: generated/\n\n assoc_laguerre\n eval_legendre\n eval_chebyt\n eval_chebyu\n eval_chebyc\n eval_chebys\n eval_jacobi\n eval_laguerre\n eval_genlaguerre\n eval_hermite\n eval_hermitenorm\n eval_gegenbauer\n eval_sh_legendre\n eval_sh_chebyt\n eval_sh_chebyu\n eval_sh_jacobi\n\nThe functions below, in turn, return the polynomial coefficients in\n:class:`~.orthopoly1d` objects, which function similarly as :ref:`numpy.poly1d`.\nThe :class:`~.orthopoly1d` class also has an attribute ``weights`` which returns\nthe roots, weights, and total weights for the appropriate form of Gaussian\nquadrature. 
These are returned in an ``n x 3`` array with roots in the first\ncolumn, weights in the second column, and total weights in the final column.\nNote that :class:`~.orthopoly1d` objects are converted to ``poly1d`` when doing\narithmetic, and lose information of the original orthogonal polynomial.\n\n.. autosummary::\n :toctree: generated/\n\n legendre -- [+]Legendre polynomial P_n(x) (lpn -- for function).\n chebyt -- [+]Chebyshev polynomial T_n(x)\n chebyu -- [+]Chebyshev polynomial U_n(x)\n chebyc -- [+]Chebyshev polynomial C_n(x)\n chebys -- [+]Chebyshev polynomial S_n(x)\n jacobi -- [+]Jacobi polynomial P^(alpha,beta)_n(x)\n laguerre -- [+]Laguerre polynomial, L_n(x)\n genlaguerre -- [+]Generalized (Associated) Laguerre polynomial, L^alpha_n(x)\n hermite -- [+]Hermite polynomial H_n(x)\n hermitenorm -- [+]Normalized Hermite polynomial, He_n(x)\n gegenbauer -- [+]Gegenbauer (Ultraspherical) polynomials, C^(alpha)_n(x)\n sh_legendre -- [+]shifted Legendre polynomial, P*_n(x)\n sh_chebyt -- [+]shifted Chebyshev polynomial, T*_n(x)\n sh_chebyu -- [+]shifted Chebyshev polynomial, U*_n(x)\n sh_jacobi -- [+]shifted Jacobi polynomial, J*_n(x) = G^(p,q)_n(x)\n\n.. warning::\n\n Computing values of high-order polynomials (around ``order > 20``) using\n polynomial coefficients is numerically unstable. To evaluate polynomial\n values, the ``eval_*`` functions should be used instead.\n\nRoots and weights for orthogonal polynomials\n\n.. autosummary::\n :toctree: generated/\n\n c_roots\n cg_roots\n h_roots\n he_roots\n j_roots\n js_roots\n l_roots\n la_roots\n p_roots\n ps_roots\n s_roots\n t_roots\n ts_roots\n u_roots\n us_roots\n\n\nHypergeometric Functions\n------------------------\n\n.. autosummary::\n :toctree: generated/\n\n hyp2f1 -- Gauss hypergeometric function (2F1)\n hyp1f1 -- Confluent hypergeometric function (1F1)\n hyperu -- Confluent hypergeometric function (U)\n hyp0f1 -- Confluent hypergeometric limit function (0F1)\n hyp2f0 -- Hypergeometric function (2F0)\n hyp1f2 -- Hypergeometric function (1F2)\n hyp3f0 -- Hypergeometric function (3F0)\n\n\nParabolic Cylinder Functions\n----------------------------\n\n.. autosummary::\n :toctree: generated/\n\n pbdv -- Parabolic cylinder function Dv(x) and derivative.\n pbvv -- Parabolic cylinder function Vv(x) and derivative.\n pbwa -- Parabolic cylinder function W(a,x) and derivative.\n\nThese are not universal functions:\n\n.. autosummary::\n :toctree: generated/\n\n pbdv_seq -- [+]Sequence of parabolic cylinder functions Dv(x)\n pbvv_seq -- [+]Sequence of parabolic cylinder functions Vv(x)\n pbdn_seq -- [+]Sequence of parabolic cylinder functions Dn(z), complex z\n\nMathieu and Related Functions\n-----------------------------\n\n.. autosummary::\n :toctree: generated/\n\n mathieu_a -- Characteristic values for even solution (ce_m)\n mathieu_b -- Characteristic values for odd solution (se_m)\n\nThese are not universal functions:\n\n.. autosummary::\n :toctree: generated/\n\n mathieu_even_coef -- [+]sequence of expansion coefficients for even solution\n mathieu_odd_coef -- [+]sequence of expansion coefficients for odd solution\n\nThe following return both function and first derivative:\n\n.. 
autosummary::\n :toctree: generated/\n\n mathieu_cem -- Even Mathieu function\n mathieu_sem -- Odd Mathieu function\n mathieu_modcem1 -- Even modified Mathieu function of the first kind\n mathieu_modcem2 -- Even modified Mathieu function of the second kind\n mathieu_modsem1 -- Odd modified Mathieu function of the first kind\n mathieu_modsem2 -- Odd modified Mathieu function of the second kind\n\nSpheroidal Wave Functions\n-------------------------\n\n.. autosummary::\n :toctree: generated/\n\n pro_ang1 -- Prolate spheroidal angular function of the first kind\n pro_rad1 -- Prolate spheroidal radial function of the first kind\n pro_rad2 -- Prolate spheroidal radial function of the second kind\n obl_ang1 -- Oblate spheroidal angular function of the first kind\n obl_rad1 -- Oblate spheroidal radial function of the first kind\n obl_rad2 -- Oblate spheroidal radial function of the second kind\n pro_cv -- Compute characteristic value for prolate functions\n obl_cv -- Compute characteristic value for oblate functions\n pro_cv_seq -- Compute sequence of prolate characteristic values\n obl_cv_seq -- Compute sequence of oblate characteristic values\n\nThe following functions require pre-computed characteristic value:\n\n.. autosummary::\n :toctree: generated/\n\n pro_ang1_cv -- Prolate spheroidal angular function of the first kind\n pro_rad1_cv -- Prolate spheroidal radial function of the first kind\n pro_rad2_cv -- Prolate spheroidal radial function of the second kind\n obl_ang1_cv -- Oblate spheroidal angular function of the first kind\n obl_rad1_cv -- Oblate spheroidal radial function of the first kind\n obl_rad2_cv -- Oblate spheroidal radial function of the second kind\n\nKelvin Functions\n----------------\n\n.. autosummary::\n :toctree: generated/\n\n kelvin -- All Kelvin functions (order 0) and derivatives.\n kelvin_zeros -- [+]Zeros of All Kelvin functions (order 0) and derivatives\n ber -- Kelvin function ber x\n bei -- Kelvin function bei x\n berp -- Derivative of Kelvin function ber x\n beip -- Derivative of Kelvin function bei x\n ker -- Kelvin function ker x\n kei -- Kelvin function kei x\n kerp -- Derivative of Kelvin function ker x\n keip -- Derivative of Kelvin function kei x\n\nThese are not universal functions:\n\n.. autosummary::\n :toctree: generated/\n\n ber_zeros -- [+]Zeros of Kelvin function bei x\n bei_zeros -- [+]Zeros of Kelvin function ber x\n berp_zeros -- [+]Zeros of derivative of Kelvin function ber x\n beip_zeros -- [+]Zeros of derivative of Kelvin function bei x\n ker_zeros -- [+]Zeros of Kelvin function kei x\n kei_zeros -- [+]Zeros of Kelvin function ker x\n kerp_zeros -- [+]Zeros of derivative of Kelvin function ker x\n keip_zeros -- [+]Zeros of derivative of Kelvin function kei x\n\nCombinatorics\n-------------\n\n.. autosummary::\n :toctree: generated/\n\n comb -- [+]Combinations of N things taken k at a time, \"N choose k\"\n perm -- [+]Permutations of N things taken k at a time, \"k-permutations of N\"\n\nOther Special Functions\n-----------------------\n\n.. autosummary::\n :toctree: generated/\n\n agm -- Arithmetic-Geometric Mean\n bernoulli -- Bernoulli numbers\n binom -- Binomial coefficient.\n diric -- Dirichlet function (periodic sinc)\n euler -- Euler numbers\n expn -- Exponential integral.\n exp1 -- Exponential integral of order 1 (for complex argument)\n expi -- Another exponential integral -- Ei(x)\n factorial -- The factorial function, n! = special.gamma(n+1)\n factorial2 -- Double factorial, (n!)!\n factorialk -- [+](...((n!)!)!...)! 
where there are k '!'\n shichi -- Hyperbolic sine and cosine integrals.\n sici -- Integral of the sinc and \"cosinc\" functions.\n spence -- Dilogarithm integral.\n lambertw -- Lambert W function\n zeta -- Riemann zeta function of two arguments.\n zetac -- Standard Riemann zeta function minus 1.\n\nConvenience Functions\n---------------------\n\n.. autosummary::\n :toctree: generated/\n\n cbrt -- Cube root.\n exp10 -- 10 raised to the x power.\n exp2 -- 2 raised to the x power.\n radian -- radian angle given degrees, minutes, and seconds.\n cosdg -- cosine of the angle given in degrees.\n sindg -- sine of the angle given in degrees.\n tandg -- tangent of the angle given in degrees.\n cotdg -- cotangent of the angle given in degrees.\n log1p -- log(1+x)\n expm1 -- exp(x)-1\n cosm1 -- cos(x)-1\n round -- round the argument to the nearest integer. If argument ends in 0.5 exactly, pick the nearest even integer.\n xlogy -- x*log(y)\n xlog1py -- x*log1p(y)\n exprel -- (exp(x)-1)/x\n sinc -- sin(x)/x\n\n.. [+] in the description indicates a function which is not a universal\n.. function and does not follow broadcasting and automatic\n.. array-looping rules.\n\n\"\"\"\n\nfrom __future__ import division, print_function, absolute_import\n\nfrom ._ufuncs import *\n\nfrom .basic import *\nfrom . import specfun\nfrom . import orthogonal\nfrom .orthogonal import *\nfrom .spfun_stats import multigammaln\nfrom ._ellip_harm import ellip_harm, ellip_harm_2, ellip_normal\nfrom .lambertw import lambertw\n\n\n__all__ = [s for s in dir() if not s.startswith('_')]\n\nfrom numpy.dual import register_func\nregister_func('i0',i0)\ndel register_func\n\nfrom numpy.testing import Tester\ntest = Tester().test\n"
] | [
[
"numpy.dual.register_func",
"numpy.testing.Tester"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
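The scipy/special/__init__.py row above is mostly a documentation index of ufuncs. For orientation, a few of the listed functions can be called directly as standard scipy.special ufuncs (values shown are approximate):

    # Quick usage of functions listed in the docstring above.
    from scipy.special import jv, gammaln, erf

    print(jv(0, 1.0))     # Bessel J0(1) ~ 0.7652
    print(gammaln(10.0))  # log(9!) ~ 12.8018
    print(erf(1.0))       # ~ 0.8427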
pradip026/passengerCOVIDscan | [
"1ebbe23beb91963679a97d8e9fe45354c47bbbff"
] | [
"passengerCOVIDscan/glove_detection/tensorflow_infer.py"
] | [
"# -*- coding:utf-8 -*-\nimport cv2\nimport time\nimport argparse\nimport os\nimport numpy as np\nfrom PIL import Image\n#from keras.models import model_from_json\nfrom .utils.anchor_generator import generate_anchors\nfrom .utils.anchor_decode import decode_bbox\nfrom .utils.nms import single_class_non_max_suppression\nfrom .load_model.tensorflow_loader import load_tf_model, tf_inference\n\nMODEL_PATH = os.path.join(os.path.dirname(os.path.abspath(__file__)), \"models/face_mask_detection.pb\")\nsess, graph = load_tf_model(MODEL_PATH)\n# anchor configuration\nfeature_map_sizes = [[33, 33], [17, 17], [9, 9], [5, 5], [3, 3]]\nanchor_sizes = [[0.04, 0.056], [0.08, 0.11], [0.16, 0.22], [0.32, 0.45], [0.64, 0.72]]\nanchor_ratios = [[1, 0.62, 0.42]] * 5\n\n# generate anchors\nanchors = generate_anchors(feature_map_sizes, anchor_sizes, anchor_ratios)\n\n# for inference , the batch size is 1, the model output shape is [1, N, 4],\n# so we expand dim for anchors to [1, anchor_num, 4]\nanchors_exp = np.expand_dims(anchors, axis=0)\n\nid2class = {0: 'glove', 1: 'Noglove'}\n\n\ndef inference(image,\n conf_thresh=0.5,\n iou_thresh=0.4,\n target_shape=(160, 160),\n draw_result=True,\n show_result=True\n ):\n '''\n Main function of detection inference\n :param image: 3D numpy array of image\n :param conf_thresh: the min threshold of classification probabity.\n :param iou_thresh: the IOU threshold of NMS\n :param target_shape: the model input size.\n :param draw_result: whether to daw bounding box to the image.\n :param show_result: whether to display the image.\n :return:\n '''\n # image = np.copy(image)\n output_info = []\n height, width, _ = image.shape\n image_resized = cv2.resize(image, target_shape)\n image_np = image_resized / 255.0 # 归一化到0~1\n image_exp = np.expand_dims(image_np, axis=0)\n y_bboxes_output, y_cls_output = tf_inference(sess, graph, image_exp)\n\n # remove the batch dimension, for batch is always 1 for inference.\n y_bboxes = decode_bbox(anchors_exp, y_bboxes_output)[0]\n y_cls = y_cls_output[0]\n # To speed up, do single class NMS, not multiple classes NMS.\n bbox_max_scores = np.max(y_cls, axis=1)\n bbox_max_score_classes = np.argmax(y_cls, axis=1)\n\n # keep_idx is the alive bounding box after nms.\n keep_idxs = single_class_non_max_suppression(y_bboxes,\n bbox_max_scores,\n conf_thresh=conf_thresh,\n iou_thresh=iou_thresh,\n )\n\n for idx in keep_idxs:\n conf = float(bbox_max_scores[idx])\n class_id = bbox_max_score_classes[idx]\n bbox = y_bboxes[idx]\n # clip the coordinate, avoid the value exceed the image boundary.\n xmin = max(0, int(bbox[0] * width))\n ymin = max(0, int(bbox[1] * height))\n xmax = min(int(bbox[2] * width), width)\n ymax = min(int(bbox[3] * height), height)\n\n if draw_result:\n if class_id == 0:\n color = (0, 255, 0)\n else:\n color = (255, 0, 0)\n cv2.rectangle(image, (xmin, ymin), (xmax, ymax), color, 2)\n cv2.putText(image, \"%s: %.2f\" % (id2class[class_id], conf), (xmin + 2, ymin - 2),\n cv2.FONT_HERSHEY_SIMPLEX, 0.8, color)\n output_info.append([class_id, conf, xmin, ymin, xmax, ymax])\n\n if show_result:\n Image.fromarray(image).show()\n return output_info\n\n\ndef run_on_video(video_path, output_video_name, conf_thresh):\n cap = cv2.VideoCapture(video_path)\n height = cap.get(cv2.CAP_PROP_FRAME_HEIGHT)\n width = cap.get(cv2.CAP_PROP_FRAME_WIDTH)\n fps = cap.get(cv2.CAP_PROP_FPS)\n fourcc = cv2.VideoWriter_fourcc(*'XVID')\n # writer = cv2.VideoWriter(output_video_name, fourcc, int(fps), (int(width), int(height)))\n total_frames = 
cap.get(cv2.CAP_PROP_FRAME_COUNT)\n if not cap.isOpened():\n raise ValueError(\"Video open failed.\")\n return\n status = True\n idx = 0\n while status:\n start_stamp = time.time()\n status, img_raw = cap.read()\n img_raw = cv2.cvtColor(img_raw, cv2.COLOR_BGR2RGB)\n read_frame_stamp = time.time()\n if (status):\n inference(img_raw,\n conf_thresh,\n iou_thresh=0.5,\n target_shape=(260, 260),\n draw_result=True,\n show_result=False)\n cv2.imshow('image', img_raw[:, :, ::-1])\n cv2.waitKey(1)\n inference_stamp = time.time()\n # writer.write(img_raw)\n write_frame_stamp = time.time()\n idx += 1\n print(\"%d of %d\" % (idx, total_frames))\n print(\"read_frame:%f, infer time:%f, write time:%f\" % (read_frame_stamp - start_stamp,\n inference_stamp - read_frame_stamp,\n write_frame_stamp - inference_stamp))\n # writer.release()\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description=\"Face Mask Detection\")\n parser.add_argument('--img-mode', type=int, default=1, help='set 1 to run on image, 0 to run on video.')\n parser.add_argument('--img-path', type=str, help='path to your image.')\n parser.add_argument('--video-path', type=str, default='0', help='path to your video, `0` means to use camera.')\n # parser.add_argument('--hdf5', type=str, help='keras hdf5 file')\n args = parser.parse_args()\n if args.img_mode:\n imgPath = args.img_path\n img = cv2.imread(imgPath)\n img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n inference(img, show_result=True, target_shape=(260, 260))\n else:\n video_path = args.video_path\n if args.video_path == '0':\n video_path = 0\n run_on_video(video_path, '', conf_thresh=0.5)\n"
] | [
[
"numpy.max",
"numpy.expand_dims",
"numpy.argmax"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
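The tensorflow_infer.py code above relies on a single-class non-max suppression step (single_class_non_max_suppression) after decoding anchor offsets. The following is an illustrative sketch of that idea, not the project's utils.nms implementation; simple_nms is a hypothetical name, and boxes/scores are assumed to be numpy arrays with boxes as [xmin, ymin, xmax, ymax].

    # Minimal single-class NMS sketch: keep the highest-scoring box, drop
    # boxes whose IoU with it exceeds the threshold, repeat.
    import numpy as np

    def simple_nms(boxes, scores, conf_thresh=0.5, iou_thresh=0.4):
        keep = []
        order = np.argsort(-scores)
        order = order[scores[order] >= conf_thresh]
        while order.size > 0:
            i = order[0]
            keep.append(i)
            # IoU of the top box against the remaining candidates
            xx1 = np.maximum(boxes[i, 0], boxes[order[1:], 0])
            yy1 = np.maximum(boxes[i, 1], boxes[order[1:], 1])
            xx2 = np.minimum(boxes[i, 2], boxes[order[1:], 2])
            yy2 = np.minimum(boxes[i, 3], boxes[order[1:], 3])
            inter = np.clip(xx2 - xx1, 0, None) * np.clip(yy2 - yy1, 0, None)
            area_i = (boxes[i, 2] - boxes[i, 0]) * (boxes[i, 3] - boxes[i, 1])
            area_r = (boxes[order[1:], 2] - boxes[order[1:], 0]) * \
                     (boxes[order[1:], 3] - boxes[order[1:], 1])
            iou = inter / (area_i + area_r - inter + 1e-9)
            order = order[1:][iou <= iou_thresh]
        return keep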
KokBob/InitProject | [
"63b7cefb9a130118db9ff5405c5dd87bbe34e9f3"
] | [
"data_postprocessing_10.py"
] | [
"# -*- coding: utf-8 -*-\r\n\"\"\"\r\n20181010\r\nciklaminima\r\n\"\"\"\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport os \r\nimport pandas as pd\r\nimport _dataPostprLib_ as lib\r\nimport seaborn as sns\r\nimport importlib \r\n#%%\r\nsns.set()\r\n#sns.set_context(\"poster\")\r\nsns.set_context(\"paper\")\r\n#sns.color_palette(\"Paired\")\r\nseq_col_brew = sns.color_palette('hls', 12)\r\nsns.set_palette(seq_col_brew)\r\n\r\nplt.close('all')\r\npath_glob = r'U:\\projects\\0005_Moventas_RCA\\40_measurement'\r\ntest_bench_name = ['Data_test_run_63526_PPH-5700', 'Data_test_run_63527_PPH-5700']\r\n#%%\r\npath_test_bench_i = path_glob + '\\\\' + test_bench_name[0]\r\npath_meas = os.listdir(path_test_bench_i)\r\n#%% \r\ni = 0\r\n\r\nlc_repos = []\r\nfor lc in path_meas:\r\n \r\n#load_collection = path_meas[0] \r\n load_collection = lc \r\n #load_collection = path_meas[-1] \r\n path_mea_i = path_test_bench_i + '\\\\' + load_collection \r\n meas_i = os.listdir(path_mea_i)\r\n \r\n data_repos = []\r\n for mf in meas_i:\r\n h_,r_,freq_,name_ = lib.catch_mea(mf)\r\n mea_file = path_mea_i + '\\\\' + mf \r\n data_i = pd.read_csv(mea_file,sep=';',header=3, skiprows = [4])\r\n t_i = lib.time_vector(freq_,data_i)\r\n mea_dict = {'data': data_i, \r\n 't': t_i,\r\n 'name': name_,\r\n 'load': load_collection}\r\n \r\n data_repos.append(mea_dict)\r\n# lib.plot_Torque_Temp_pls1(data_repos)\r\n# lib.plot_Torque_Temp_pls2(data_repos)\r\n lib.plot_Torque_Temp_pls(data_repos)\r\n lc_repos.append(data_repos)\r\n# data_repos_actual = data_repos[i]\r\n#%%\r\n# lib.plot_Torque_Temp_pls1(data_repos)\r\n# lib.plot_Torque_Temp_pls2(data_repos)\r\n# lib.plot_Torque_Temp_pls(data_repos)\r\n# i += 1"
] | [
[
"pandas.read_csv",
"matplotlib.pyplot.close"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |