repo_name (string, lengths 8–130) | hexsha (sequence) | file_path (sequence) | code (sequence) | apis (sequence) |
---|---|---|---|---|
cambridgeltl/cancer-hallmark-cnn | [
"a1aba55ba425aa0deac4f80c97572a146e4097bb"
] | [
"keras/ltlib/evaluation.py"
] | [
"import numpy as np # TODO remove dependency\n\nfrom collections import namedtuple\nfrom itertools import chain\n\nfrom sklearn import metrics as skmetrics\n\nfrom util import unique\n\nfrom logging import warn\n\nBinaryClassificationCounts = namedtuple('BinaryClassificationCounts',\n 'tp tn fp fn')\nBinaryClassificationMetrics = namedtuple('BinaryClassificationMetrics',\n 'tp tn fp fn acc prec rec fscore')\nPrCurvePoint = namedtuple('PrCurvePoint', 'prec rec fscore threshold')\n\ndef accuracy(gold, pred):\n if len(gold) != len(pred):\n raise ValueError('count mismatch')\n correct = sum(int(g == p) for g, p in zip(gold, pred))\n return 1.*correct/len(gold)\n\ndef tp_tn_fp_fn(gold, pred):\n \"\"\"Return (TP, FN, FP, FN) counts for gold and prediced values.\n\n Assumes that 0 is negative and all others positive.\n \"\"\"\n tp, tn, fp, fn = 0, 0, 0, 0\n for g, p in zip(gold, pred):\n if g == p:\n if g == 0:\n tn += 1\n else:\n tp += 1\n else:\n if g == 0:\n fp += 1\n else:\n fn += 1\n return BinaryClassificationCounts(tp, tn, fp, fn)\n\ndef precision_recall_fscore(tp, fp, fn):\n \"\"\"Return (precision, recall, f-score) for given counts.\"\"\"\n prec = 0.0 if tp + fp == 0 else 1.*tp / (tp + fp)\n rec = 0.0 if tp + fn == 0 else 1.*tp / (tp + fn)\n f = 0.0 if prec + rec == 0.0 else 2 * prec * rec / (prec + rec)\n return prec, rec, f\n\ndef evaluate_binary_classification(gold, pred, positive):\n \"\"\"Evaluate binary classification performance.\n\n Map labels in positive to 1 and others to 0.\n\n Return BinaryClassificationMetrics.\n \"\"\"\n if len(gold) != len(pred):\n raise ValueError('count mismatch')\n\n gold = _binarize(gold, positive)\n pred = _binarize(pred, positive)\n\n if not any(i for i in gold):\n warn('no positive gold labels for %s' % str(positive))\n\n acc = accuracy(gold, pred)\n tp, tn, fp, fn = tp_tn_fp_fn(gold, pred)\n prec, rec, f = precision_recall_fscore(tp, fp, fn)\n\n return BinaryClassificationMetrics(tp, tn, fp, fn, acc, prec, rec, f)\n\ndef _binarize(a, positive):\n \"\"\"Return values mapped to 1 or 0.\n\n Map values in positive to 1 and others to 0.\n \"\"\"\n return [1 if i in positive else 0 for i in a]\n\ndef average_precision_recall_fscore(results, micro=True):\n \"\"\"Return average precision, recall and f-score for list of\n BinaryClassificationMetrics.\n \"\"\"\n if micro:\n total = BinaryClassificationMetrics(*tuple(np.sum(results, axis=0)))\n return precision_recall_fscore(total.tp, total.fp, total.fn)\n else:\n avg = BinaryClassificationMetrics(*tuple(np.average(results, axis=0)))\n return avg.prec, avg.rec, avg.fscore\n\ndef _positive_label(labels):\n \"\"\"Return label representing the positive class or None if ambiguous.\"\"\"\n if set(labels) == set(['positive', 'negative']):\n return 'positive'\n elif set(labels) == set(['pos', 'neg']):\n return 'pos'\n else:\n return None # TODO other alternatives\n\ndef is_binary_labeling(labels):\n \"\"\"Return True iff given labels represent binary classification.\"\"\"\n return len(labels) == 2 and _positive_label(labels) is not None\n\ndef _binary_labels(dataitems):\n gold = dataitems.target_strs\n pred = dataitems.prediction_strs\n labels = unique(chain(gold, pred))\n return is_binary_labeling(labels)\n\ndef f1_score(prec, rec):\n from math import isnan\n if isnan(prec) or isnan(rec) or prec+rec == 0.0:\n return float('nan')\n else:\n return 2*prec*rec/(prec+rec)\n\ndef max_f_point(dataitems):\n \"\"\"Return PrCurvePoint with maximal f1 score.\"\"\"\n import logging\n from sklearn.metrics import 
precision_recall_curve\n y_true = np.argmax(dataitems.targets, axis=-1)\n prob_neg = dataitems.predictions[:,0] # 1st column\n prob_pos = dataitems.predictions[:,1] # 2nd column\n pos_score = prob_pos - prob_neg\n precs, recs, tholds = precision_recall_curve(y_true, pos_score)\n max_f, max_point = float('-inf'), PrCurvePoint(None, None, None, None)\n for p, r, t in zip(precs, recs, tholds):\n f = f1_score(p, r)\n if f > max_f:\n max_f, max_point = f, PrCurvePoint(p, r, f, t)\n return max_point\n\ndef evaluate_binary_labeling(dataitems):\n gold = dataitems.target_strs\n pred = dataitems.prediction_strs\n labels = unique(chain(gold, pred))\n pos = _positive_label(labels)\n res = {}\n res['acc'] = accuracy(gold, pred)\n bcm = evaluate_binary_classification(gold, pred, pos)\n res.update(bcm._asdict())\n res['auc'] = skmetrics.roc_auc_score(dataitems.targets,\n dataitems.predictions)\n res['ap'] = skmetrics.average_precision_score(dataitems.targets,\n dataitems.predictions)\n maxfp = max_f_point(dataitems)\n res.update({ 'maxf-{}'.format(k): v for k, v in maxfp._asdict().items() })\n return res\n\ndef summarize_classification(results):\n return (\n 'acc: {acc:.2%} auc: {auc:.2%} ap: {ap:.2%} ' +\n 'f: {fscore:.2%} (p:{prec:.1%} r:{rec:.1%} ' +\n 'tp:{tp} fp:{fp} fn:{fn}) ' +\n 'maxf: {maxf-fscore:.2%} (p:{maxf-prec:.1%} r:{maxf-rec:.1%} ' +\n 'th:{maxf-threshold:.2})'\n ).format(**results)\n\ndef evaluate_classification(dataitems):\n if _binary_labels(dataitems):\n return evaluate_binary_labeling(dataitems)\n else:\n raise NotImplementedError()\n"
] | [
[
"numpy.sum",
"numpy.argmax",
"sklearn.metrics.roc_auc_score",
"sklearn.metrics.precision_recall_curve",
"sklearn.metrics.average_precision_score",
"numpy.average"
]
] |
Jallet/keras-jl-ac-mean | [
"2bbc1596192fb8c3aefc4a8126482a5283574a59"
] | [
"keras/utils/np_utils.py"
] | [
"from __future__ import absolute_import\nimport numpy as np\nimport scipy as sp\nfrom six.moves import range\nfrom six.moves import zip\n\n\ndef to_categorical(y, nb_classes=None):\n '''Convert class vector (integers from 0 to nb_classes)\n to binary class matrix, for use with categorical_crossentropy.\n '''\n if not nb_classes:\n nb_classes = np.max(y)+1\n Y = np.zeros((len(y), nb_classes))\n for i in range(len(y)):\n Y[i, y[i]] = 1.\n return Y\n\n\ndef normalize(a, axis=-1, order=2):\n l2 = np.atleast_1d(np.linalg.norm(a, order, axis))\n l2[l2 == 0] = 1\n return a / np.expand_dims(l2, axis)\n\n\ndef binary_logloss(p, y):\n epsilon = 1e-15\n p = sp.maximum(epsilon, p)\n p = sp.minimum(1-epsilon, p)\n res = sum(y * sp.log(p) + sp.subtract(1, y) * sp.log(sp.subtract(1, p)))\n res *= -1.0/len(y)\n return res\n\n\ndef multiclass_logloss(P, Y):\n npreds = [P[i][Y[i]-1] for i in range(len(Y))]\n score = -(1. / len(Y)) * np.sum(np.log(npreds))\n return score\n\n\ndef accuracy(p, y):\n return np.mean([a == b for a, b in zip(p, y)])\n\n\ndef probas_to_classes(y_pred):\n if len(y_pred.shape) > 1 and y_pred.shape[1] > 1:\n return categorical_probas_to_classes(y_pred)\n return np.array([1 if p > 0.5 else 0 for p in y_pred])\n\n\ndef categorical_probas_to_classes(p):\n return np.argmax(p, axis=1)\n\n\ndef convert_kernel(kernel, dim_ordering='th'):\n '''Converts a kernel matrix (numpy array)\n from Theano format to TensorFlow format\n (or reciprocally, since the transformation\n is its own inverse).\n '''\n new_kernel = np.copy(kernel)\n if dim_ordering == 'th':\n w = kernel.shape[2]\n h = kernel.shape[3]\n for i in range(w):\n for j in range(h):\n new_kernel[:, :, i, j] = kernel[:, :, w - i - 1, h - j - 1]\n elif dim_ordering == 'tf':\n w = kernel.shape[0]\n h = kernel.shape[1]\n for i in range(w):\n for j in range(h):\n new_kernel[i, j, :, :] = kernel[w - i - 1, h - j - 1, :, :]\n else:\n raise Exception('Invalid dim_ordering: ' + str(dim_ordering))\n return new_kernel\n"
] | [
[
"scipy.maximum",
"scipy.subtract",
"scipy.log",
"numpy.argmax",
"scipy.minimum",
"numpy.copy",
"numpy.expand_dims",
"numpy.max",
"numpy.log",
"numpy.array",
"numpy.linalg.norm"
]
] |
abraia/abraia-python | [
"e49e3869b2ee7e6b1bcb41e0cc1ae126ac39e202"
] | [
"abraia/hsi.py"
] | [
"import os\nimport wget\nimport tempfile\nimport numpy as np\nimport scipy.io as sio\nimport scipy.ndimage as nd\n\nfrom PIL import Image\nfrom sklearn.svm import SVC\nfrom sklearn.utils import resample\nfrom sklearn.decomposition import PCA\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import classification_report, confusion_matrix, accuracy_score\n\nfrom tensorflow import keras\nfrom keras.utils import np_utils\nfrom keras.models import Model, load_model\nfrom keras.layers import Input, Conv2D, Conv3D, Flatten, Dense, Reshape, Dropout\n\nfrom .plot import plot_image, plot_images, plot_train_history\n\ntempdir = tempfile.gettempdir()\n\n\ndef download(url):\n basename = os.path.basename(url)\n dest = os.path.join(tempdir, basename)\n if not os.path.exists(dest):\n wget.download(url, dest)\n return dest\n\n\ndef load_dataset(dataset):\n \"\"\"Load one of the available hyperspectral datasets (IP, PU, SA, KSC).\"\"\"\n if dataset == 'IP':\n data_hsi = sio.loadmat(download(\n 'http://www.ehu.eus/ccwintco/uploads/6/67/Indian_pines_corrected.mat'))['indian_pines_corrected']\n gt_hsi = sio.loadmat(download(\n 'http://www.ehu.eus/ccwintco/uploads/c/c4/Indian_pines_gt.mat'))['indian_pines_gt']\n class_names = ['', 'Alfalfa', 'Corn-notill', 'Corn-mintill', 'Corn', 'Grass-pasture',\n 'Grass-trees', 'Grass-pasture-mowed', 'Hay-windrowed', 'Oats', 'Soybean-notill',\n 'Soybean-mintill', 'Soybean-clean', 'Wheat', 'Woods', 'Buildings Grass Trees Drives',\n 'Stone Steel Towers']\n return data_hsi, gt_hsi, class_names\n if dataset == 'PU':\n data_hsi = sio.loadmat(download(\n 'http://www.ehu.eus/ccwintco/uploads/e/ee/PaviaU.mat'))['paviaU']\n gt_hsi = sio.loadmat(download(\n 'http://www.ehu.eus/ccwintco/uploads/5/50/PaviaU_gt.mat'))['paviaU_gt']\n class_names = ['', 'Asphalt', 'Meadows', 'Gravel', 'Trees', 'Painted metal sheets',\n 'Bare Soil', 'Bitumen', 'Self-Blocking Bricks', 'Shadows']\n return data_hsi, gt_hsi, class_names\n if dataset == 'SA':\n data_hsi = sio.loadmat(download(\n 'http://www.ehu.eus/ccwintco/uploads/a/a3/Salinas_corrected.mat'))['salinas_corrected']\n gt_hsi = sio.loadmat(download(\n 'http://www.ehu.eus/ccwintco/uploads/f/fa/Salinas_gt.mat'))['salinas_gt']\n class_names = ['', 'Brocoli_green_weeds_1', 'Brocoli_green_weeds_2', 'Fallow', 'Fallow_rough_plow',\n 'Fallow_smooth', 'Stubble', 'Celery', 'Grapes_untrained', 'Soil_vinyard_develop',\n 'Corn_senesced_green_weeds', 'Lettuce_romaine_4wk', 'Lettuce_romaine_5wk',\n 'Lettuce_romaine_6wk', 'Lettuce_romaine_7wk', 'Vinyard_untrained', 'Vinyard_vertical_trellis']\n return data_hsi, gt_hsi, class_names\n if dataset == 'KSC':\n data_hsi = sio.loadmat(download(\n 'http://www.ehu.es/ccwintco/uploads/2/26/KSC.mat'))['KSC']\n gt_hsi = sio.loadmat(download(\n 'http://www.ehu.es/ccwintco/uploads/a/a6/KSC_gt.mat'))['KSC_gt']\n return data_hsi, gt_hsi\n\n\ndef random(img, n_bands=6, indexes=False):\n \"\"\"Returns a list of random bands\"\"\"\n bands = []\n indexes = []\n for i in range(n_bands):\n q = np.random.randint(img.shape[2])\n indexes.append(q)\n bands.append(img[:, :, q])\n if indexes:\n return bands, indexes\n return bands\n\n\ndef rgb(img, bands=None):\n \"\"\"Returns the RGB image from the selected bands (R, G, B)\"\"\"\n from spectral import get_rgb\n return get_rgb(img, bands=bands)\n\n\ndef ndvi(img, red_band, nir_band):\n \"\"\"Returns the NDVI image from the specified read and nir bands\"\"\"\n from spectral import ndvi\n return ndvi(img, red_band, nir_band)\n\n\ndef resample(img, n_samples=32):\n 
\"\"\"Resamples the number of spectral bands (n_samples)\"\"\"\n h, w, d = img.shape\n X = img.reshape((h * w), d)\n r = resample(np.transpose(X), n_samples=n_samples)\n return np.transpose(r).reshape(h, w, n_samples)\n\n\ndef resize(img, size):\n \"\"\"Resize the image to the given size (w, h)\"\"\"\n return np.array(Image.fromarray(img).resize(size, resample=Image.LANCZOS))\n\n\ndef normalize(img):\n \"\"\"Normalize the image to the range [0, 1]\"\"\"\n min, max = np.amin(img), np.amax(img)\n return (img - min) / (max - min)\n\n\ndef saliency(img):\n \"\"\"Calculate saliency map of the image\"\"\"\n smaps = []\n for n in range(img.shape[2]):\n band = img[:, :, n]\n h, w = band.shape\n fft = np.fft.fft2(resize(band, (64, 64)))\n log_amplitude, phase = np.log(np.absolute(fft)), np.angle(fft)\n spectral_residual = log_amplitude - nd.uniform_filter(log_amplitude, size=3, mode='nearest')\n smap = np.absolute(np.fft.ifft2(np.exp(spectral_residual + 1.j * phase)))\n smap = nd.gaussian_filter(smap, sigma=3)\n smaps.append(normalize(resize(smap, (w, h))))\n return np.sum(np.dstack(smaps), axis=2)\n\n\ndef spectrum(img, point=None):\n \"\"\"Get the spectrum at a given point (x, y)\n\n When a point is not specified the spectrum of the most salient point is returned.\n \"\"\"\n if point is None:\n sal = saliency(img)\n idx = np.unravel_index(np.argmax(sal), sal.shape)\n point = (idx[1], idx[0])\n return img[point[1], point[0], :]\n\n\ndef split_train_test(X, y, train_ratio=0.7):\n \"\"\"Split data for training and test\"\"\"\n X_train, X_test, y_train, y_test = train_test_split(X, y, train_size=train_ratio, stratify=y)\n return X_train, X_test, y_train, y_test\n\n\ndef principal_components(img, n_components=3, spectrum=False):\n \"\"\"Calculate principal components of the image\"\"\"\n h, w, d = img.shape\n X = img.reshape((h * w), d)\n pca = PCA(n_components=n_components, whiten=True)\n bands = pca.fit_transform(X).reshape(h, w, n_components)\n if spectrum:\n bands, pca.components_\n return bands\n\n\ndef pad_with_zeros(X, margin=2):\n newX = np.zeros((X.shape[0] + 2 * margin, X.shape[1] + 2* margin, X.shape[2]))\n newX[margin:X.shape[0] + margin, margin:X.shape[1] + margin, :] = X\n return newX\n\n\ndef create_patch(data, height_index, width_index, patch_size):\n height_slice = slice(height_index, height_index + patch_size)\n width_slice = slice(width_index, width_index + patch_size)\n return data[height_slice, width_slice, :]\n\n\n# TODO: Convert create patches to generator with batch_size parameter\ndef create_patches(X, patch_size):\n patches = []\n width, height = X.shape[1], X.shape[0]\n X = pad_with_zeros(X, patch_size // 2)\n for i in range(height):\n for j in range(width):\n image_patch = create_patch(X, i, j, patch_size)\n patches.append(image_patch.reshape(image_patch.shape + (1,)).astype('float32'))\n return np.array(patches)\n\n\ndef create_image_cubes(X, y, patch_size):\n width, height = X.shape[1], X.shape[0]\n patchesData = create_patches(X, patch_size)\n labels = []\n for i in range(height):\n for j in range(width):\n labels.append(y[i, j])\n patchesLabels = np.array(labels)\n return patchesData, patchesLabels\n\n\ndef generate_training_data(X, y, patch_size, train_ratio=0.7):\n X, y = create_image_cubes(X, y, patch_size)\n X_train, X_test, y_train, y_test = split_train_test(X, y, train_ratio)\n X_train = X_train.reshape(-1, patch_size, patch_size, X.shape[-1], 1)\n X_test = X_test.reshape(-1, patch_size, patch_size, X.shape[-1], 1)\n return X_train, X_test, y_train, 
y_test\n\n\ndef create_hsn_model(input_shape, n_classes):\n input_layer = Input((*input_shape, 1))\n ## convolutional layers\n conv_layer1 = Conv3D(filters=8, kernel_size=(3, 3, 7), activation='relu')(input_layer)\n conv_layer2 = Conv3D(filters=16, kernel_size=(3, 3, 5), activation='relu')(conv_layer1)\n conv_layer3 = Conv3D(filters=32, kernel_size=(3, 3, 3), activation='relu')(conv_layer2)\n conv_layer3 = Reshape((conv_layer3.shape[1], conv_layer3.shape[2], conv_layer3.shape[3] * conv_layer3.shape[4]))(conv_layer3)\n conv_layer4 = Conv2D(filters=64, kernel_size=(3,3), activation='relu')(conv_layer3)\n flatten_layer = Flatten()(conv_layer4)\n ## fully connected layers\n dense_layer1 = Dense(units=256, activation='relu')(flatten_layer)\n dense_layer1 = Dropout(0.4)(dense_layer1)\n dense_layer2 = Dense(units=128, activation='relu')(dense_layer1)\n dense_layer2 = Dropout(0.4)(dense_layer2)\n output_layer = Dense(units=n_classes, activation='softmax')(dense_layer2)\n # define and compile the model with input layer and output layer\n model = Model(inputs=input_layer, outputs=output_layer)\n adam = keras.optimizers.Adam(learning_rate=0.001, decay=1e-06)\n model.compile(optimizer=adam, loss='categorical_crossentropy', metrics=['accuracy'])\n return model\n\n\ndef predict_hsn_model(model, X, patch_size):\n width, height = X.shape[1], X.shape[0]\n X_pred = create_patches(X, patch_size)\n y_pred = np.argmax(model.predict(X_pred), axis=1)\n return y_pred.reshape(height, width).astype(int)\n\n\nclass HyperspectralModel:\n def __init__(self, name, *args):\n self.name = name\n if self.name == 'svm':\n self.model = SVC(C=150, kernel='rbf')\n elif self.name == 'hsn':\n self.input_shape, self.n_classes = args\n self.model = create_hsn_model(self.input_shape, self.n_classes) # Hybrid Spectral Net\n\n def train(self, X, y, train_ratio=0.7, epochs=50):\n if self.name == 'svm':\n X_train, X_test, y_train, y_test = train_test_split(X.reshape(-1, X.shape[-1]), y, train_size=train_ratio, stratify=y)\n self.model.fit(X_train, y_train)\n return y_test, self.model.predict(X_test)\n elif self.name == 'hsn':\n X = principal_components(X, n_components=self.input_shape[2])\n X_train, X_test, y_train, y_test = generate_training_data(X, y, self.input_shape[0], train_ratio)\n self.history = self.model.fit(x=X_train, y=np_utils.to_categorical(y_train), batch_size=256, epochs=epochs)\n return y_test, np.argmax(self.model.predict(X_test), axis=1)\n\n def predict(self, X):\n if self.name == 'svm':\n return self.model.predict(X.reshape(-1, X.shape[2])).reshape(X.shape[0], X.shape[1])\n elif self.name == 'hsn':\n X = principal_components(X, n_components=self.input_shape[2])\n return predict_hsn_model(self.model, X, self.input_shape[0])\n \n def plot_history():\n if self.history:\n plot_train_history(self.history)\n \n def save(self, filename='model.h5'):\n self.model.save(filename)\n\n def load(self, filename='model.h5'):\n self.model = load_model(filename)\n\n\ndef create_model(name, *args):\n \"\"\"Create a new model: svm or hsn\"\"\"\n return HyperspectralModel(name, *args)\n"
] | [
[
"tensorflow.keras.optimizers.Adam",
"numpy.transpose",
"sklearn.svm.SVC",
"numpy.zeros",
"numpy.argmax",
"numpy.exp",
"numpy.amax",
"numpy.amin",
"scipy.ndimage.gaussian_filter",
"numpy.dstack",
"numpy.angle",
"numpy.absolute",
"numpy.array",
"numpy.random.randint",
"scipy.ndimage.uniform_filter",
"sklearn.model_selection.train_test_split",
"sklearn.decomposition.PCA"
]
] |
rcelebi/android-elfali | [
"314d9cd9b607460f8bfea80fc828b1521ca18443"
] | [
"jni-build/jni/include/tensorflow/python/training/server_lib_test.py"
] | [
"# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for tf.GrpcServer.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport time\n\nimport numpy as np\nimport tensorflow as tf\n\n\nclass GrpcServerTest(tf.test.TestCase):\n\n def testRunStep(self):\n server = tf.train.Server.create_local_server()\n\n with tf.Session(server.target) as sess:\n c = tf.constant([[2, 1]])\n d = tf.constant([[1], [2]])\n e = tf.matmul(c, d)\n self.assertAllEqual([[4]], sess.run(e))\n # TODO(mrry): Add `server.stop()` and `server.join()` when these work.\n\n def testMultipleSessions(self):\n server = tf.train.Server.create_local_server()\n\n c = tf.constant([[2, 1]])\n d = tf.constant([[1], [2]])\n e = tf.matmul(c, d)\n\n sess_1 = tf.Session(server.target)\n sess_2 = tf.Session(server.target)\n\n self.assertAllEqual([[4]], sess_1.run(e))\n self.assertAllEqual([[4]], sess_2.run(e))\n\n sess_1.close()\n sess_2.close()\n # TODO(mrry): Add `server.stop()` and `server.join()` when these work.\n\n # Verifies behavior of multiple variables with multiple sessions connecting to\n # the same server.\n def testSameVariablesNoClear(self):\n server = tf.train.Server.create_local_server()\n\n with tf.Session(server.target) as sess_1:\n v0 = tf.Variable([[2, 1]], name=\"v0\")\n v1 = tf.Variable([[1], [2]], name=\"v1\")\n v2 = tf.matmul(v0, v1)\n sess_1.run([v0.initializer, v1.initializer])\n self.assertAllEqual([[4]], sess_1.run(v2))\n\n with tf.Session(server.target) as sess_2:\n new_v0 = tf.get_default_graph().get_tensor_by_name(\"v0:0\")\n new_v1 = tf.get_default_graph().get_tensor_by_name(\"v1:0\")\n new_v2 = tf.matmul(new_v0, new_v1)\n self.assertAllEqual([[4]], sess_2.run(new_v2))\n\n # Verifies behavior of tf.Session.reset().\n def testSameVariablesClear(self):\n server = tf.train.Server.create_local_server()\n\n # Creates a graph with 2 variables.\n v0 = tf.Variable([[2, 1]], name=\"v0\")\n v1 = tf.Variable([[1], [2]], name=\"v1\")\n v2 = tf.matmul(v0, v1)\n\n # Verifies that both sessions connecting to the same target return\n # the same results.\n sess_1 = tf.Session(server.target)\n sess_2 = tf.Session(server.target)\n sess_1.run(tf.initialize_all_variables())\n self.assertAllEqual([[4]], sess_1.run(v2))\n self.assertAllEqual([[4]], sess_2.run(v2))\n\n # Resets target. sessions abort. Use sess_2 to verify.\n tf.Session.reset(server.target)\n with self.assertRaises(tf.errors.AbortedError):\n self.assertAllEqual([[4]], sess_2.run(v2))\n\n # Connects to the same target. 
Device memory for the variables would have\n # been released, so they will be unitialized.\n sess_2 = tf.Session(server.target)\n with self.assertRaises(tf.errors.FailedPreconditionError):\n sess_2.run(v2)\n # Reinitialzes the variables.\n sess_2.run(tf.initialize_all_variables())\n self.assertAllEqual([[4]], sess_2.run(v2))\n sess_2.close()\n\n # Verifies behavior of tf.Session.reset() with multiple containers using\n # default container names as defined by the target name.\n def testSameVariablesClearContainer(self):\n # Starts two servers with different names so they map to different\n # resource \"containers\".\n server0 = tf.train.Server({\"local0\": [\"localhost:0\"]}, protocol=\"grpc\",\n start=True)\n server1 = tf.train.Server({\"local1\": [\"localhost:0\"]}, protocol=\"grpc\",\n start=True)\n\n # Creates a graph with 2 variables.\n v0 = tf.Variable(1.0, name=\"v0\")\n v1 = tf.Variable(2.0, name=\"v0\")\n\n # Initializes the variables. Verifies that the values are correct.\n sess_0 = tf.Session(server0.target)\n sess_1 = tf.Session(server1.target)\n sess_0.run(v0.initializer)\n sess_1.run(v1.initializer)\n self.assertAllEqual(1.0, sess_0.run(v0))\n self.assertAllEqual(2.0, sess_1.run(v1))\n\n # Resets container \"local0\". Verifies that v0 is no longer initialized.\n tf.Session.reset(server0.target, [\"local0\"])\n sess = tf.Session(server0.target)\n with self.assertRaises(tf.errors.FailedPreconditionError):\n sess.run(v0)\n # Reinitializes v0 for the following test.\n sess.run(v0.initializer)\n\n # Verifies that v1 is still valid.\n self.assertAllEqual(2.0, sess_1.run(v1))\n\n # Resets container \"local1\". Verifies that v1 is no longer initialized.\n tf.Session.reset(server1.target, [\"local1\"])\n sess = tf.Session(server1.target)\n with self.assertRaises(tf.errors.FailedPreconditionError):\n sess.run(v1)\n # Verifies that v0 is still valid.\n sess = tf.Session(server0.target)\n self.assertAllEqual(1.0, sess.run(v0))\n\n # Verifies behavior of tf.Session.reset() with multiple containers using\n # tf.container.\n def testMultipleContainers(self):\n with tf.container(\"test0\"):\n v0 = tf.Variable(1.0, name=\"v0\")\n with tf.container(\"test1\"):\n v1 = tf.Variable(2.0, name=\"v0\")\n server = tf.train.Server.create_local_server()\n sess = tf.Session(server.target)\n sess.run(tf.initialize_all_variables())\n self.assertAllEqual(1.0, sess.run(v0))\n self.assertAllEqual(2.0, sess.run(v1))\n\n # Resets container. Session aborts.\n tf.Session.reset(server.target, [\"test0\"])\n with self.assertRaises(tf.errors.AbortedError):\n sess.run(v1)\n\n # Connects to the same target. Device memory for the v0 would have\n # been released, so it will be unitialized. 
But v1 should still\n # be valid.\n sess = tf.Session(server.target)\n with self.assertRaises(tf.errors.FailedPreconditionError):\n sess.run(v0)\n self.assertAllEqual(2.0, sess.run(v1))\n\n # Verifies various reset failures.\n def testResetFails(self):\n # Creates variable with container name.\n with tf.container(\"test0\"):\n v0 = tf.Variable(1.0, name=\"v0\")\n # Creates variable with default container.\n v1 = tf.Variable(2.0, name=\"v1\")\n # Verifies resetting the non-existent target returns error.\n with self.assertRaises(tf.errors.NotFoundError):\n tf.Session.reset(\"nonexistent\", [\"test0\"])\n\n # Verifies resetting with config.\n # Verifies that resetting target with no server times out.\n with self.assertRaises(tf.errors.DeadlineExceededError):\n tf.Session.reset(\"grpc://localhost:0\", [\"test0\"],\n config=tf.ConfigProto(operation_timeout_in_ms=5))\n\n # Verifies no containers are reset with non-existent container.\n server = tf.train.Server.create_local_server()\n sess = tf.Session(server.target)\n sess.run(tf.initialize_all_variables())\n self.assertAllEqual(1.0, sess.run(v0))\n self.assertAllEqual(2.0, sess.run(v1))\n # No container is reset, but the server is reset.\n tf.Session.reset(server.target, [\"test1\"])\n # Verifies that both variables are still valid.\n sess = tf.Session(server.target)\n self.assertAllEqual(1.0, sess.run(v0))\n self.assertAllEqual(2.0, sess.run(v1))\n\n def testLargeConstant(self):\n server = tf.train.Server.create_local_server()\n with tf.Session(server.target) as sess:\n const_val = np.empty([10000, 3000], dtype=np.float32)\n const_val.fill(0.5)\n c = tf.constant(const_val)\n shape_t = tf.shape(c)\n self.assertAllEqual([10000, 3000], sess.run(shape_t))\n\n def testLargeFetch(self):\n server = tf.train.Server.create_local_server()\n with tf.Session(server.target) as sess:\n c = tf.fill([10000, 3000], 0.5)\n expected_val = np.empty([10000, 3000], dtype=np.float32)\n expected_val.fill(0.5)\n self.assertAllEqual(expected_val, sess.run(c))\n\n def testLargeFeed(self):\n server = tf.train.Server.create_local_server()\n with tf.Session(server.target) as sess:\n feed_val = np.empty([10000, 3000], dtype=np.float32)\n feed_val.fill(0.5)\n p = tf.placeholder(tf.float32, shape=[10000, 3000])\n min_t = tf.reduce_min(p)\n max_t = tf.reduce_max(p)\n min_val, max_val = sess.run([min_t, max_t], feed_dict={p: feed_val})\n self.assertEqual(0.5, min_val)\n self.assertEqual(0.5, max_val)\n\n def testCloseCancelsBlockingOperation(self):\n server = tf.train.Server.create_local_server()\n sess = tf.Session(server.target)\n\n q = tf.FIFOQueue(10, [tf.float32])\n enqueue_op = q.enqueue(37.0)\n dequeue_t = q.dequeue()\n\n sess.run(enqueue_op)\n sess.run(dequeue_t)\n\n def blocking_dequeue():\n with self.assertRaises(tf.errors.CancelledError):\n sess.run(dequeue_t)\n\n blocking_thread = self.checkedThread(blocking_dequeue)\n blocking_thread.start()\n time.sleep(0.5)\n sess.close()\n blocking_thread.join()\n\n def testSetConfiguration(self):\n config = tf.ConfigProto(\n gpu_options=tf.GPUOptions(per_process_gpu_memory_fraction=0.1))\n\n # Configure a server using the default local server options.\n server = tf.train.Server.create_local_server(config=config, start=False)\n self.assertEqual(\n 0.1,\n server.server_def.default_session_config\n .gpu_options.per_process_gpu_memory_fraction)\n\n # Configure a server using an explicit ServerDefd with an\n # overridden config.\n cluster_def = tf.train.ClusterSpec(\n {\"localhost\": [\"localhost:0\"]}).as_cluster_def()\n server_def 
= tf.train.ServerDef(\n cluster=cluster_def, job_name=\"localhost\", task_index=0,\n protocol=\"grpc\")\n server = tf.train.Server(server_def, config=config, start=False)\n self.assertEqual(\n 0.1,\n server.server_def.default_session_config\n .gpu_options.per_process_gpu_memory_fraction)\n\n def testInvalidHostname(self):\n with self.assertRaisesRegexp(tf.errors.InvalidArgumentError, \"port\"):\n _ = tf.train.Server({\"local\": [\"localhost\"]},\n job_name=\"local\",\n task_index=0)\n\n def testInteractiveSession(self):\n server = tf.train.Server.create_local_server()\n # TODO(b/29900832): Remove this assertion when the bug is fixed.\n a = tf.constant(1.0)\n with self.assertRaisesRegexp(tf.errors.UnimplementedError, \"pruned\"):\n sess = tf.InteractiveSession(target=server.target)\n sess.run(a)\n\n # TODO(b/29900832): The following code fails (without the unimplemented\n # check in `tensorflow::MasterSession`):\n # a = tf.constant(1.0)\n # b = tf.constant(2.0)\n # self.assertEqual(1.0, sess.run(a))\n # self.assertEqual(2.0, sess.run(b))\n\n\nclass ServerDefTest(tf.test.TestCase):\n\n def testLocalServer(self):\n cluster_def = tf.train.ClusterSpec(\n {\"local\": [\"localhost:2222\"]}).as_cluster_def()\n server_def = tf.train.ServerDef(\n cluster=cluster_def, job_name=\"local\", task_index=0, protocol=\"grpc\")\n\n self.assertProtoEquals(\"\"\"\n cluster {\n job { name: 'local' tasks { key: 0 value: 'localhost:2222' } }\n }\n job_name: 'local' task_index: 0 protocol: 'grpc'\n \"\"\", server_def)\n\n # Verifies round trip from Proto->Spec->Proto is correct.\n cluster_spec = tf.train.ClusterSpec(cluster_def)\n self.assertProtoEquals(cluster_def, cluster_spec.as_cluster_def())\n\n def testTwoProcesses(self):\n cluster_def = tf.train.ClusterSpec(\n {\"local\": [\"localhost:2222\", \"localhost:2223\"]}).as_cluster_def()\n server_def = tf.train.ServerDef(\n cluster=cluster_def, job_name=\"local\", task_index=1, protocol=\"grpc\")\n\n self.assertProtoEquals(\"\"\"\n cluster {\n job { name: 'local' tasks { key: 0 value: 'localhost:2222' }\n tasks { key: 1 value: 'localhost:2223' } }\n }\n job_name: 'local' task_index: 1 protocol: 'grpc'\n \"\"\", server_def)\n\n # Verifies round trip from Proto->Spec->Proto is correct.\n cluster_spec = tf.train.ClusterSpec(cluster_def)\n self.assertProtoEquals(cluster_def, cluster_spec.as_cluster_def())\n\n def testTwoJobs(self):\n cluster_def = tf.train.ClusterSpec(\n {\"ps\": [\"ps0:2222\", \"ps1:2222\"],\n \"worker\": [\"worker0:2222\", \"worker1:2222\", \"worker2:2222\"]}\n ).as_cluster_def()\n server_def = tf.train.ServerDef(\n cluster=cluster_def, job_name=\"worker\", task_index=2, protocol=\"grpc\")\n\n self.assertProtoEquals(\"\"\"\n cluster {\n job { name: 'ps' tasks { key: 0 value: 'ps0:2222' }\n tasks { key: 1 value: 'ps1:2222' } }\n job { name: 'worker' tasks { key: 0 value: 'worker0:2222' }\n tasks { key: 1 value: 'worker1:2222' }\n tasks { key: 2 value: 'worker2:2222' } }\n }\n job_name: 'worker' task_index: 2 protocol: 'grpc'\n \"\"\", server_def)\n\n # Verifies round trip from Proto->Spec->Proto is correct.\n cluster_spec = tf.train.ClusterSpec(cluster_def)\n self.assertProtoEquals(cluster_def, cluster_spec.as_cluster_def())\n\n def testClusterSpec(self):\n cluster_spec = tf.train.ClusterSpec(\n {\"ps\": [\"ps0:2222\", \"ps1:2222\"],\n \"worker\": [\"worker0:2222\", \"worker1:2222\", \"worker2:2222\"]})\n\n expected_proto = \"\"\"\n job { name: 'ps' tasks { key: 0 value: 'ps0:2222' }\n tasks { key: 1 value: 'ps1:2222' } }\n job { name: 'worker' tasks 
{ key: 0 value: 'worker0:2222' }\n tasks { key: 1 value: 'worker1:2222' }\n tasks { key: 2 value: 'worker2:2222' } }\n \"\"\"\n\n self.assertProtoEquals(expected_proto, cluster_spec.as_cluster_def())\n self.assertProtoEquals(\n expected_proto, tf.train.ClusterSpec(cluster_spec).as_cluster_def())\n self.assertProtoEquals(\n expected_proto,\n tf.train.ClusterSpec(cluster_spec.as_cluster_def()).as_cluster_def())\n self.assertProtoEquals(\n expected_proto,\n tf.train.ClusterSpec(cluster_spec.as_dict()).as_cluster_def())\n\n\nif __name__ == \"__main__\":\n tf.test.main()\n"
] | [
[
"tensorflow.initialize_all_variables",
"tensorflow.train.Server.create_local_server",
"tensorflow.reduce_max",
"tensorflow.matmul",
"tensorflow.InteractiveSession",
"tensorflow.Variable",
"tensorflow.reduce_min",
"tensorflow.train.ServerDef",
"tensorflow.constant",
"tensorflow.test.main",
"tensorflow.train.Server",
"tensorflow.shape",
"tensorflow.Session",
"tensorflow.Session.reset",
"tensorflow.FIFOQueue",
"tensorflow.ConfigProto",
"tensorflow.placeholder",
"numpy.empty",
"tensorflow.fill",
"tensorflow.train.ClusterSpec",
"tensorflow.container",
"tensorflow.get_default_graph",
"tensorflow.GPUOptions"
]
] |
Huite/timml | [
"5eb52066be094326343fe26b46555253fef44dc9"
] | [
"timml/model.py"
] | [
"\"\"\"\nModel classes\n\n\"\"\"\n\nimport numpy as np\nimport sys\nimport inspect # Used for storing the input\nfrom .aquifer import Aquifer\nfrom .aquifer_parameters import param_maq, param_3d\nfrom .constant import ConstantStar\nfrom .util import PlotTim\nimport multiprocessing as mp\n\n__all__ = ['Model', 'ModelMaq', 'Model3D']\n\nclass Model(PlotTim):\n \"\"\"\n Model Class to create a model object consisting of an arbitrary\n sequence of aquifer layers and leaky layers.\n Use ModelMaq for regular sequence of aquifers and leaky layers.\n Use Model3D for multi-layer model of a single aquifer\n \n Parameters\n ----------\n kaq : array\n hydraulic conductivity of each aquifer from the top down\n z : array\n elevation tops and bottoms of all layers\n layers may have zero thickness\n c : array\n resistance between two consecutive aquifer layers\n if ltype[0]='a': length is number of aquifers - 1\n if ltype[0]='l': length is number of aquifers\n npor : array\n porosity of all layers from the top down\n ltype : array of characters\n array indicating for each layer whether it is\n 'a' aquifer layer\n 'l' leaky layer\n \n \"\"\"\n \n def __init__(self, kaq, c, z, npor, ltype, f2py=False):\n # All input variables are numpy arrays\n # That should be checked outside this function\n self.elementlist = []\n self.elementdict = {} # only elements that have a label\n self.aq = Aquifer(self, kaq, c, z, npor, ltype)\n self.modelname = 'ml' # Used for writing out input\n self.f2py = False\n if f2py:\n try:\n from .src import besselaesnew\n self.f2py = True\n except:\n print('FORTRAN extension not found while f2py=True')\n print('Using Numba instead')\n\n def initialize(self):\n # remove inhomogeneity elements (they are added again)\n self.elementlist = [e for e in self.elementlist if not e.inhomelement]\n self.aq.initialize()\n for e in self.elementlist:\n e.initialize()\n\n def add_element(self, e):\n self.elementlist.append(e)\n if e.label is not None: self.elementdict[e.label] = e\n\n def remove_element(self, e):\n \"\"\"Remove element `e` from model\n \"\"\"\n \n if e.label is not None: self.elementdict.pop(e.label)\n self.elementlist.remove(e)\n\n def storeinput(self, frame):\n self.inputargs, _, _, self.inputvalues = inspect.getargvalues(frame)\n\n def potential(self, x, y, aq=None):\n if aq is None: aq = self.aq.find_aquifer_data(x, y)\n pot = np.zeros(aq.naq)\n for e in aq.elementlist:\n pot += e.potential(x, y, aq)\n rv = np.sum(pot * aq.eigvec, 1)\n if aq.ltype[0] == 'l':\n # potential for head above leaky layer\n rv += aq.constantstar.potstar\n return rv\n\n def disvec(self, x, y, aq=None):\n \"\"\"Discharge vector at `x`, `y`\n \n Returns\n -------\n \n qxqy : array size (2, naq)\n first row is Qx in each aquifer layer, second row is Qy\n \"\"\"\n \n if aq is None: aq = self.aq.find_aquifer_data(x, y)\n rv = np.zeros((2, aq.naq))\n for e in aq.elementlist:\n rv += e.disvec(x, y, aq)\n rv = np.sum(rv[:, np.newaxis, :] * aq.eigvec, 2)\n return rv\n \n def qztop(self, x, y, aq=None):\n if aq is None: aq = self.aq.find_aquifer_data(x, y)\n rv = 0.0\n if aq.ltype[0] == 'a': # otherwise recharge cannot be added\n for e in aq.elementlist:\n rv += e.qztop(x, y)\n return rv\n\n def head(self, x, y, layers=None, aq=None):\n \"\"\"Head at `x`, `y`\n \n Returns\n -------\n \n h : array length `naq` or `len(layers)`\n head in all `layers` (if not `None`), \n or all layers of aquifer (otherwise)\n \"\"\"\n \n if aq is None: aq = self.aq.find_aquifer_data(x, y)\n rv = self.potential(x, y, aq) / aq.T\n if 
layers is None:\n return rv\n else:\n return rv[layers]\n\n def headgrid(self, xg, yg, layers=None, printrow=False):\n \"\"\"Grid of heads\n \n Parameters\n ----------\n xg : array\n x values of grid\n yg : array\n y values of grid\n layers : integer, list or array, optional\n layers for which grid is returned\n printrow : boolean, optional\n prints dot to screen for each row of grid if set to `True`\n \n Returns\n -------\n h : array size `nlayers, ny, nx`\n \n See also\n --------\n \n :func:`~timml.model.Model.headgrid2`\n\n \"\"\"\n \n nx, ny = len(xg), len(yg)\n if layers is None:\n Nlayers = self.aq.find_aquifer_data(xg[0], yg[0]).naq\n else:\n Nlayers = len(np.atleast_1d(layers))\n h = np.empty((Nlayers, ny, nx))\n for j in range(ny):\n if printrow:\n print('.', end='', flush=True)\n for i in range(nx):\n h[:, j, i] = self.head(xg[i], yg[j], layers)\n if printrow:\n print('', flush=True)\n return h\n\n def headgrid2(self, x1, x2, nx, y1, y2, ny, layers=None, printrow=False):\n \"\"\"Grid of heads\n \n Parameters\n ----------\n x1, x2, nx : \n x values are generated as linspace(x1, x2, nx)\n y1, y2, ny : \n y values are generated as linspace(y1, y2, ny)\n layers : integer, list or array, optional\n layers for which grid is returned\n printrow : boolean, optional\n prints dot to screen for each row of grid if set to `True`\n \n Returns\n -------\n h : array size `nlayers, ny, nx`\n \n See also\n --------\n \n :func:`~timml.model.Model.headgrid`\n \n \"\"\"\n \n xg, yg = np.linspace(x1, x2, nx), np.linspace(y1, y2, ny)\n return self.headgrid(xg, yg, layers=layers, printrow=printrow)\n\n def headalongline(self, x, y, layers=None):\n \"\"\"Head along line or curve\n \n Parameters\n ----------\n x : array\n x values of line\n y : array\n y values of line\n layers : integer, list or array, optional\n layers for which grid is returned\n \n Returns\n -------\n h : array size `nlayers, nx`\n\n \"\"\"\n \n xg, yg = np.atleast_1d(x), np.atleast_1d(y)\n if layers is None:\n Nlayers = self.aq.find_aquifer_data(xg[0], yg[0]).naq\n else:\n Nlayers = len(np.atleast_1d(layers))\n nx = len(xg)\n if len(yg) == 1:\n yg = yg * np.ones(nx)\n h = np.zeros((Nlayers, nx))\n for i in range(nx):\n h[:, i] = self.head(xg[i], yg[i], layers)\n return h\n \n def disvecalongline(self, x, y, layers=None):\n '''Returns Qx[Nlayers,len(x)], Qy[Nlayers,len(x)]\n Assumes same number of layers for each x and y\n layers may be None or list of layers for which head is computed'''\n xg, yg = np.atleast_1d(x), np.atleast_1d(y)\n if layers is None:\n nlayers = self.aq.find_aquifer_data(xg[0], yg[0]).naq\n else:\n nlayers = len(np.atleast_1d(layers))\n nx = len(xg)\n if len(yg) == 1:\n yg = yg * np.ones(nx)\n Qx = np.zeros((nlayers, nx))\n Qy = np.zeros((nlayers, nx))\n for i in range(nx):\n Qx[:, i], Qy[:, 1] = self.disvec(xg[i], yg[i], layers)\n return Qx, Qy\n \n# def disvec_direction(self, s, x1, y1, cdirection):\n# pass\n# \n# def discharge_across_line(self, x1, y1, x2, y2, layers=None):\n# if layers is None:\n# nlayers = self.aq.find_aquifer_data(x1, y1).naq\n# else:\n# nlayers = len(np.atleast_1d(layers))\n# z1 = x1 + y1 * 1j\n# z2 = x2 + y2 * 1j\n# normvec = (z2 - z1) / np.abs(z2 - z1) * np.exp(-np.pi * 1j / 2)\n# disvec = self.disvec(xg[i], yg[i], layers)\n \n def velocity(self, x, y, z):\n return self.velocomp(x, y, z)\n \n def velocomp(self, x, y, z, aq=None, layer_ltype=None):\n if aq is None: aq = self.aq.find_aquifer_data(x, y)\n assert z <= aq.z[0] and z >= aq.z[-1], \"z value not inside aquifer\"\n if layer_ltype 
is None:\n layer, ltype, dummy = aq.findlayer(z)\n else:\n layer, ltype = layer_ltype\n h = self.head(x, y, aq=aq)\n # qz between aquifer layers\n qzlayer = np.zeros(aq.naq + 1)\n qzlayer[1:-1] = (h[1:] - h[:-1]) / aq.c[1:]\n if aq.ltype[0] == 'l':\n qzlayer[0] = (h[0] - aq.hstar) / aq.c[0]\n if ltype == 'l':\n vz = qzlayer[layer] / aq.nporll[layer]\n vx = 0\n vy = 0\n else:\n qzbot = qzlayer[layer + 1]\n qztop = qzlayer[layer]\n if layer == 0:\n qztop += self.qztop(x, y) \n vz = (qzbot + (z - aq.zaqbot[layer]) / aq.Haq[layer] * \\\n (qztop - qzbot)) / aq.nporaq[layer] \n qx, qy = self.disvec(x, y, aq=aq)\n vx = qx[layer] / (aq.Haq[layer] * aq.nporaq[layer])\n vy = qy[layer] / (aq.Haq[layer] * aq.nporaq[layer])\n return np.array([vx, vy, vz])\n \n def solve(self, printmat=0, sendback=0, silent=False):\n '''Compute solution'''\n # Initialize elements\n self.initialize()\n # Compute number of equations\n self.neq = np.sum([e.nunknowns for e in self.elementlist])\n if self.neq == 0: return\n if silent is False:\n print('Number of elements, Number of equations:', len(\n self.elementlist), ',', self.neq)\n if self.neq == 0:\n if silent is False: print('No unknowns. Solution complete')\n return\n mat = np.empty((self.neq, self.neq))\n rhs = np.empty(self.neq)\n ieq = 0\n for e in self.elementlist:\n if e.nunknowns > 0:\n mat[ieq:ieq + e.nunknowns, :], rhs[ieq:ieq + e.nunknowns] = \\\n e.equation()\n ieq += e.nunknowns\n if silent is False:\n print('.', end='', flush=True)\n if printmat:\n return mat, rhs\n sol = np.linalg.solve(mat, rhs)\n icount = 0\n for e in self.elementlist:\n if e.nunknowns > 0:\n e.setparams(sol[icount:icount + e.nunknowns])\n icount += e.nunknowns\n if silent is False:\n print() # needed cause the dots are printed\n print('solution complete')\n elif (silent == 'dot') or (silent == '.'):\n print('.', end='', flush=True)\n if sendback:\n return sol\n return\n\n def solve_mp(self, nproc=4, printmat=0, sendback=0, silent=False):\n '''Compute solution, multiprocessing implementation.\n Note: estimated speedup approximately by factor of\n number of physical cores. Virtual cores do not improve\n calculation time.'''\n # Initialize elements\n self.initialize()\n # Compute number of equations\n self.neq = np.sum([e.nunknowns for e in self.elementlist])\n if self.neq == 0: return\n if silent is False:\n print('Number of elements, Number of equations:', len(\n self.elementlist), ',', self.neq)\n if self.neq == 0:\n if silent is False: print('No unknowns. Solution complete')\n return\n mat = np.empty((self.neq, self.neq))\n rhs = np.empty(self.neq)\n\n # start multiprocessing\n if nproc is None:\n nproc = mp.cpu_count() - 1 # make no. of processes equal to 1 less than no. of cores\n elif nproc > mp.cpu_count():\n print(\"Given 'nproc' larger than no. of cores on machine. 
Setting 'nproc' to {}.\".format(mp.cpu_count()))\n nproc = mp.cpu_count()\n\n pool = mp.Pool(processes=nproc)\n results = []\n for e in self.elementlist:\n if e.nunknowns > 0:\n results.append(pool.apply_async(e.equation))\n if silent is False:\n print('.', end='', flush=True)\n\n pool.close()\n pool.join()\n\n mat = np.empty((self.neq, self.neq))\n rhs = np.zeros(self.neq)\n\n ieq = 0\n\n for p in results:\n imat, irhs = p.get()\n mat[ieq:ieq + imat.shape[0], :] = imat\n rhs[ieq:ieq + irhs.shape[0]] = irhs\n ieq += imat.shape[0]\n\n # end multiprocessing\n\n if printmat:\n return mat, rhs\n sol = np.linalg.solve(mat, rhs)\n icount = 0\n for e in self.elementlist:\n if e.nunknowns > 0:\n e.setparams(sol[icount:icount + e.nunknowns])\n icount += e.nunknowns\n if silent is False:\n print() # needed cause the dots are printed\n print('solution complete')\n elif (silent == 'dot') or (silent == '.'):\n print('.', end='', flush=True)\n if sendback:\n return sol\n return\n \n def write(self):\n rv = self.modelname + ' = ' + self.name + '(\\n'\n for key in self.inputargs[1:]: # The first argument (self) is ignored\n if isinstance(self.inputvalues[key], np.ndarray):\n rv += key + ' = ' + np.array2string(self.inputvalues[key], \n separator=',') + ',\\n'\n elif isinstance(self.inputvalues[key],str): \n rv += key + \" = '\" + self.inputvalues[key] + \"',\\n\"\n else:\n rv += key + ' = ' + str(self.inputvalues[key]) + ',\\n'\n rv += ')\\n'\n return rv\n \n def writemodel(self, fname):\n self.initialize() # So that the model can be written without solving first\n f = open(fname, 'w')\n f.write('from timml import *\\n')\n f.write(self.write())\n for e in self.elementlist:\n f.write(e.write())\n f.close()\n \nclass ModelMaq(Model):\n \"\"\"\n Create a Model object by specifying a mult-aquifer sequence of\n aquifer-leakylayer-aquifer-leakylayer-aquifer etc\n \n Parameters\n ----------\n kaq : float, array or list\n Hydraulic conductivity of each aquifer from the top down.\n If float, hydraulic conductivity is the same in all aquifers.\n z : array or list\n Elevation of tops and bottoms of the aquifers from the top down.\n Leaky layers may have zero thickness.\n * if topboundary='conf': length is 2 * number of aquifers\n * if topboundary='semi': length is 2 * number of aquifers + 1 \n as top of leaky layer on top of systems needs to be specified\n c : float, array or list\n Resistance of leaky layers from the top down.\n * if float, resistance is the same for all leaky layers\n * if topboundary='conf': length is number of aquifers - 1\n * if topboundary='semi': length is number of aquifers\n npor : float, array or list\n Porosity of all aquifers and leaky layers from the top down.\n * if float, porosity is the same for all layers\n * if topboundary='conf': length is 2 * number of aquifers - 1\n * if topboundary='semi': length is 2 * number of aquifers\n topboundary : string, 'conf' or 'semi' (default is 'conf')\n Indicates whether the topboundary is confined ('conf') or\n semi-confined ('semi').\n hstar : float or None (default is None)\n Head value above semi-confining top, only read if topboundary='semi'.\n\n Examples\n --------\n >>> ml = ModelMaq(kaq=[10, 20], z=[20, 12, 10, 0], c=1000)\n \n \"\"\"\n \n def __init__(self, kaq=1, z=[1, 0], c=[], npor=0.3, topboundary='conf',\n hstar=None, f2py=False):\n self.storeinput(inspect.currentframe())\n kaq, c, npor, ltype = param_maq(kaq, z, c, npor, topboundary)\n Model.__init__(self, kaq, c, z, npor, ltype, f2py)\n self.name = 'ModelMaq'\n if self.aq.ltype[0] 
== 'l':\n ConstantStar(self, hstar, aq=self.aq)\n \nclass Model3D(Model):\n \"\"\"\n Model3D Class to create a multi-layer model object consisting of\n many aquifer layers. The resistance between the layers is computed\n from the vertical hydraulic conductivity of the layers.\n \n Parameters\n ----------\n kaq : float, array or list\n hydraulic conductivity of each layer from the top down\n if float, hydraulic conductivity is the same in all aquifers\n z : array or list\n elevation of top of system followed by bottoms of all layers\n from the top down\n bottom of layer is automatically equal to top of layer below it\n length is number of aquifer layers + 1\n kzoverkh : float\n vertical anisotropy ratio vertical k divided by horizontal k\n if float, value is the same for all layers\n length is number of layers\n npor : float, array or list\n porosity of all aquifer layers\n from the top down\n if float, porosity is the same for all layers\n if topboundary='conf': length is number of layers\n if topboundary='semi': length is number of layers + 1\n topboundary : string, 'conf' or 'semi' (default is 'conf')\n indicating whether the top is confined ('conf') or\n semi-confined ('semi')\n topres : float\n resistance of top semi-confining layer (read if topboundary='semi')\n topthick: float\n thickness of top semi-confining layer (read if topboundary='semi')\n hstar : float or None (default is None)\n head value above semi-confining top (read if topboundary='semi')\n\n Examples\n --------\n >>> ml = Model3D(kaq=10, z=np.arange(20, -1, -2), kzoverkh=0.1)\n \n \"\"\"\n \n def __init__(self, kaq=1, z=[1, 0], kzoverkh=1, npor=0.3,\n topboundary='conf', topres=0, topthick=0, hstar=0,\n f2py=False):\n '''Model3D\n for semi-confined aquifers, set top equal to 'semi' and provide\n topres: resistance of top\n tophick: thickness of top\n hstar: head above top'''\n self.storeinput(inspect.currentframe())\n kaq, c, npor, ltype = param_3d(kaq, z, kzoverkh, npor, topboundary,\n topres)\n if topboundary == 'semi':\n z = np.hstack((z[0] + topthick, z))\n Model.__init__(self, kaq, c, z, npor, ltype, f2py)\n self.name = 'Model3D'\n if self.aq.ltype[0] == 'l':\n ConstantStar(self, hstar, aq=self.aq)\n\n"
] | [
[
"numpy.sum",
"numpy.ones",
"numpy.linalg.solve",
"numpy.empty",
"numpy.zeros",
"numpy.array2string",
"numpy.atleast_1d",
"numpy.hstack",
"numpy.array",
"numpy.linspace"
]
] |
pearlfranz20/AL_Core | [
"6592079330c7ec3ca264b86f8414970ddab06c0e"
] | [
"apprentice/learners/when_learners/actor_critic.py"
] | [
"import torch\nimport torch.nn as nn\n\n\nclass ValueNet(nn.Module):\n \"\"\"\n The part of the actor critic network that computes the state value. Also,\n returns the hidden layer before state valuation, for use in action network.\n \"\"\"\n\n def __init__(self, n_inputs: int, n_hidden: int = None):\n \"\"\"\n Specify the number of inputs. Also, specify the number of nodes in each\n hidden layer. If no value is provided for the number of hidden, then\n it is set to half the number of inputs.\n \"\"\"\n super(ValueNet, self).__init__()\n\n if n_hidden is None:\n n_hidden = (n_inputs + 2) // 2\n\n self.n_hidden = n_hidden\n\n self.hidden = nn.Sequential(\n nn.Linear(n_inputs, n_hidden),\n nn.ReLU()\n )\n\n self.value = nn.Linear(n_hidden, 1)\n\n def forward(self, x):\n \"\"\"\n Returns the value of the state and the hidden layer values.\n \"\"\"\n x = self.hidden(x)\n return self.value(x), x\n\n\nclass ActionNet(nn.Module):\n \"\"\"\n The part of the actor critic network that computes the action value.\n \"\"\"\n\n def __init__(self, n_action_inputs: int, n_value_hidden: int,\n n_action_hidden: int = None):\n \"\"\"\n Takes as input the action features and the hidden values from the value\n net. Returns a value for the action.\n \"\"\"\n super(ActionNet, self).__init__()\n\n if n_action_hidden is None:\n n_action_hidden = (n_action_inputs + n_value_hidden + 2) // 2\n\n self.hidden = nn.Sequential(\n nn.Linear(n_action_inputs + n_value_hidden, n_action_hidden),\n nn.ReLU()\n )\n\n self.action_value = nn.Linear(n_action_hidden, 1)\n\n def forward(self, action_x, value_hidden):\n \"\"\"\n Returns the value of the state and the hidden layer values.\n \"\"\"\n x = self.hidden(torch.cat((action_x, value_hidden), 1))\n return self.action_value(x)\n"
] | [
[
"torch.nn.ReLU",
"torch.nn.Linear",
"torch.cat"
]
] |
hakanhp/chanel | [
"6825b60e86c46daabb18f40f1e45d3de2ff8e983"
] | [
"tensorflow_model_analysis/eval_saved_model/testutil.py"
] | [
"# Copyright 2018 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Utilities for writing tests.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\n\nfrom __future__ import print_function\n\nimport math\nimport tempfile\nimport tensorflow as tf\nfrom tensorflow_model_analysis.types_compat import Dict, Iterable, Union, Sequence, Tuple\n\nfrom tensorflow.core.example import example_pb2\n\n\nclass TensorflowModelAnalysisTest(tf.test.TestCase):\n \"\"\"Test class that extends tf.test.TestCase with extra functionality.\"\"\"\n\n def setUp(self):\n self.longMessage = True # pylint: disable=invalid-name\n\n def _getTempDir(self):\n return tempfile.mkdtemp()\n\n def _makeExample(self, **kwargs):\n \"\"\"Make a TensorFlow Example with the given fields.\n\n The arguments can be singleton values, or a list of values, e.g.\n _makeExample(age=3.0, fruits=['apples', 'pears', 'oranges']).\n Empty lists are not allowed, since we won't be able to deduce the type.\n\n Args:\n **kwargs: Each key=value pair defines a field in the example to be\n constructed. The name of the field will be key, and the value will be\n value. The type will be deduced from the type of the value.\n\n Returns:\n TensorFlow.Example with the corresponding fields set to the corresponding\n values.\n\n Raises:\n ValueError: One of the arguments was an empty list.\n TypeError: One of the elements (or one of the elements in a list) had an\n unsupported type.\n \"\"\"\n result = example_pb2.Example()\n for key, value in kwargs.items():\n if isinstance(value, float) or isinstance(value, int):\n result.features.feature[key].float_list.value[:] = [value]\n elif isinstance(value, str):\n result.features.feature[key].bytes_list.value[:] = [value]\n elif isinstance(value, list):\n if len(value) == 0: # pylint: disable=g-explicit-length-test\n raise ValueError('empty lists not allowed, but field %s was an empty '\n 'list' % key)\n if isinstance(value[0], float) or isinstance(value[0], int):\n result.features.feature[key].float_list.value[:] = value\n elif isinstance(value[0], str):\n result.features.feature[key].bytes_list.value[:] = value\n else:\n raise TypeError('field %s was a list, but the first element had '\n 'unknown type %s' % key, type(value[0]))\n else:\n raise TypeError('unrecognised type for field %s: type %s' %\n (key, type(value)))\n return result\n\n def assertHasKeyWithValueAlmostEqual(self,\n d,\n key,\n value,\n places = 5):\n self.assertIn(key, d)\n self.assertAlmostEqual(d[key], value, places=places, msg='key %s' % key)\n\n def assertDictElementsAlmostEqual(self,\n got_values_dict,\n expected_values_dict,\n places = 5):\n for key, expected_value in expected_values_dict.items():\n self.assertHasKeyWithValueAlmostEqual(got_values_dict, key,\n expected_value, places)\n\n def assertDictMatrixRowsAlmostEqual(\n self,\n got_values_dict,\n expected_values_dict,\n places = 5):\n \"\"\"Fails if got_values_dict does not match values in expected_values_dict.\n\n For each entry, 
expected_values_dict provides the row index and the values\n of that row to be compared to the bucketing result in got_values_dict. For\n example:\n got_values_dict={'key', [[1,2,3],[4,5,6],[7,8,9]]}\n you can check the first and last row of got_values_dict[key] by setting\n expected_values_dict={'key', [(0,[1,2,3]), (2,[7,8,9])]}\n\n Args:\n got_values_dict: The dict got, where each value represents a full\n bucketing result.\n expected_values_dict: The expected dict. It may contain a subset of keys\n in got_values_dict. The value is of type \"Iterable[Tuple[int,\n Iterable[scalar]]]\", where each Tuple contains the index of a row to be\n checked and the expected values of that row.\n places: The number of decimal places to compare.\n \"\"\"\n for key, expected_value in expected_values_dict.items():\n self.assertIn(key, got_values_dict)\n for (row, values) in expected_value:\n self.assertSequenceAlmostEqual(\n got_values_dict[key][row],\n values,\n places=places,\n msg_prefix='for key %s, row %d: ' % (key, row))\n\n def assertSequenceAlmostEqual(self,\n got_seq,\n expected_seq,\n places = 5,\n msg_prefix=''):\n got = list(got_seq)\n expected = list(expected_seq)\n self.assertEqual(\n len(got), len(expected), msg=msg_prefix + 'lengths do not match')\n for index, (a, b) in enumerate(zip(got, expected)):\n msg = msg_prefix + 'at index %d. sequences were: %s and %s' % (index, got,\n expected),\n if math.isnan(a) or math.isnan(b):\n self.assertEqual(math.isnan(a), math.isnan(b), msg=msg)\n else:\n self.assertAlmostEqual(a, b, msg=msg, places=places)\n"
] | [
[
"tensorflow.core.example.example_pb2.Example"
]
] |
MDoid10111/EMNLP2020 | [
"97e4da06abc72873a4830cfa53c035a27eb3975b"
] | [
"torch_utils.py"
] | [
"import numpy as np\nimport torch, os\nimport torch.nn.utils.rnn as rnn_utils\nfrom typing import Tuple\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom PIL import Image\nimport torchvision\nfrom torchvision import transforms\n\n\ndef flatten(x):\n '''\n flatten high dimensional tensor x into an array\n :param x: shape (B, D1, D2, ...)\n :return: 1 dimensional tensor\n '''\n dims = x.size()[1:] #remove the first dimension as it is batch dimension\n num_features = 1\n for s in dims: num_features *= s\n return x.contiguous().view(-1, num_features)\n\n\ndef gpu(tensor, gpu=False):\n\n if gpu: return tensor.cuda()\n else: return tensor\n\n\ndef cpu(tensor):\n if tensor.is_cuda: return tensor.cpu()\n else: return tensor\n\n\ndef minibatch(*tensors, **kwargs):\n\n batch_size = kwargs['batch_size']\n\n if len(tensors) == 1:\n tensor = tensors[0]\n for i in range(0, len(tensor), batch_size):\n yield tensor[i:i + batch_size]\n else:\n for i in range(0, len(tensors[0]), batch_size):\n yield tuple(x[i:i + batch_size] for x in tensors)\n\n\ndef shuffle(*arrays, **kwargs):\n \"\"\"This is not an inplace operation. Therefore, you can shuffle without worrying changing data.\"\"\"\n if len(set(len(x) for x in arrays)) != 1:\n raise ValueError('All inputs to shuffle must have '\n 'the same length.')\n\n shuffle_indices = np.arange(len(arrays[0]))\n np.random.shuffle(shuffle_indices) # fix this for reproducible\n\n if len(arrays) == 1:\n return arrays[0][shuffle_indices]\n else:\n return tuple(x[shuffle_indices] for x in arrays)\n\n\ndef assert_no_grad(variable):\n\n if variable.requires_grad:\n raise ValueError(\n \"nn criterions don't compute the gradient w.r.t. targets - please \"\n \"mark these variables as volatile or not requiring gradients\"\n )\n\n\ndef numpy2tensor(x, dtype):\n # torch.tensor(torch.from_numpy(var), dtype = torch.int, torch.long)\n return torch.tensor(torch.from_numpy(x), dtype = dtype)\n\n\ndef tensor2numpy(x):\n # return x.numpy()\n return cpu(x).numpy()\n\n\ndef set_seed(seed, cuda=False):\n\n torch.manual_seed(seed)\n if cuda: torch.cuda.manual_seed(seed)\n\n\ndef create_mask_tensor(query: torch.Tensor, doc: torch.Tensor, threshold: int = 0):\n \"\"\"\n Creating masking of two tensor. These two tensors are integer tensor\n Parameters\n\n ----------\n query: (B, L)\n doc: (B, R)\n threshold: when it is 0, means we ignore padding tokens. when it is 1, it means we ignore <unk> or oov words\n Returns\n -------\n\n \"\"\"\n assert query.size(0) == doc.size(0)\n assert len(query.size()) == 2 and len(doc.size()) == 2\n query_mask = query > threshold\n doc_mask = doc > threshold\n query_mask = query_mask.unsqueeze(2) # (B, L, 1)\n doc_mask = doc_mask.unsqueeze(2) # (B, R, 1)\n doc_mask = doc_mask.permute(0, 2, 1) # (B, 1, R)\n\n mask_tensor = torch.bmm(query_mask.float(), doc_mask.float()) # (B, L, R)\n return mask_tensor # , torch.sum(query_mask, dim = 1).squeeze(), torch.sum(doc_mask, dim = 1).squeeze()\n\n\ndef create_mask_tensor_image(left_indices: torch.Tensor, right_indices: torch.Tensor, threshold: int = 0):\n \"\"\"\n Creating masking of two tensor. These two tensors are integer tensor\n Parameters\n\n ----------\n left_indices: (B1, n1, M1)\n right_indices: (B, n, M2)\n threshold: when it is 0, means we ignore padding tokens. 
when it is 1, it means we ignore <unk> or oov words\n Returns\n -------\n\n \"\"\"\n B1, n1, M1 = left_indices.size()\n B, n, M2 = right_indices.size()\n assert n1 == 1\n left_mask = left_indices > 0\n right_mask = right_indices > 0\n left_mask = left_mask.view(B1, M1, 1)\n if B1 == 1: left_mask = left_mask.expand(B, M1, 1) # during testing\n right_mask = right_mask.view(B, n * M2, 1)\n ans = torch.bmm(left_mask.float(), right_mask.permute(0, 2, 1).float())\n ans = ans.view(B, M1, n, M2).permute(0, 2, 1, 3) # (B, n, M1, M2)\n return ans\n\n\ndef count_parameters(model: nn.Module):\n return sum(p.numel() for p in model.parameters() if p.requires_grad)\n\n\ndef get_sorted_index_and_reverse_index(base_array: np.ndarray):\n \"\"\"\n We use sorted_index = np.argsort(-base_array) to find the indices to short the array decreasingly.\n We also need to find the indices to restoring the original order of elements of base_array\n after apply sorted_index.\n This method is important because we need to input the tensor to GRU/LSTM with packed sequence.\n Parameters\n ----------\n base_array: (B, )\n\n Returns\n -------\n\n \"\"\"\n assert type(base_array) == np.ndarray\n batch_size = base_array.shape[0]\n assert base_array.shape == (batch_size,)\n new_indices = np.argsort(-base_array)\n old_indices = np.arange(batch_size)\n r = np.stack([new_indices, old_indices], axis = 1)\n r = r[np.argsort(r[:, 0])]\n restoring_indices = r[:, 1] # the retoring indices. This method is tested very carefully.\n return new_indices, restoring_indices\n\n\ndef packing_sequence(seq: torch.Tensor, seq_lens: np.ndarray, new_index) -> torch.Tensor:\n \"\"\"\n Prepare a packed sequence to input to an RNN. It is required that the length of sequences in `seq` must be sorted.\n After\n\n Parameters\n ----------\n seq: (B, L, D) where L is length of sequence\n seq_lens: (B, )\n new_index: (B, ) this index is used to make sequence lengths sorted\n old_index: (B, ) this index is used to restore the sequence lengths\n Returns\n -------\n\n \"\"\"\n return rnn_utils.pack_padded_sequence(seq[new_index], seq_lens[new_index], batch_first = True)\n\n\ndef torch_repeat_dim0(A: torch.tensor, n: int):\n \"\"\"\n Repeat tensor across a dimension\n Parameters\n ----------\n A\n axis\n\n Returns\n -------\n\n \"\"\"\n assert len(A.size()) == 3\n d1, d2, d3 = A.size()\n A = A.unsqueeze(0).transpose(0, 1).repeat(1, n, 1, 1).view(-1, d2, d3)\n assert A.size() == (n * d1, d2, d3)\n return A\n\n\ndef boolean_mask(target: torch.Tensor, mask: torch.Tensor):\n \"\"\"\n Mimick tf.boolean_mask\n Copied from https://discuss.pytorch.org/t/slicing-tensor-using-boolean-list/7354/3\n Parameters\n ----------\n target\n mask\n\n Returns\n -------\n\n \"\"\"\n x = mask == True\n # y=torch.arange(0,3)\n # x=torch.Tensor([True,False,True])==True\n # print(y[x])\n return target[x]\n\ndef torch_argsort(input, dim=None, descending=False):\n \"\"\"Returns the indices that sort a tensor along a given dimension in ascending\n order by value.\n This is the second value returned by :meth:`torch.sort`. 
See its documentation\n for the exact semantics of this method.\n Args:\n input (Tensor): the input tensor\n dim (int, optional): the dimension to sort along\n descending (bool, optional): controls the sorting order (ascending or descending)\n Example::\n >>> a = torch.randn(4, 4)\n >>> a\n tensor([[ 0.0785, 1.5267, -0.8521, 0.4065],\n [ 0.1598, 0.0788, -0.0745, -1.2700],\n [ 1.2208, 1.0722, -0.7064, 1.2564],\n [ 0.0669, -0.2318, -0.8229, -0.9280]])\n >>> torch.argsort(a, dim=1)\n tensor([[2, 0, 3, 1],\n [3, 2, 1, 0],\n [2, 1, 0, 3],\n [3, 2, 1, 0]])\n \"\"\"\n # copy from https://github.com/pytorch/pytorch/pull/9600/files\n if dim is None:\n return torch.sort(input, -1, descending)[1]\n return torch.sort(input, dim, descending)[1]\n\n\ndef _predict_process_ids(user_ids, item_ids, num_items, use_cuda):\n \"\"\"\n\n Parameters\n ----------\n user_ids\n item_ids\n num_items\n use_cuda\n\n Returns\n -------\n\n \"\"\"\n if item_ids is None:\n item_ids = np.arange(num_items, dtype=np.int64)\n\n if np.isscalar(user_ids):\n user_ids = np.array(user_ids, dtype=np.int64)\n\n user_ids = torch.from_numpy(user_ids.reshape(-1, 1).astype(np.int64))\n item_ids = torch.from_numpy(item_ids.reshape(-1, 1).astype(np.int64))\n if item_ids.size()[0] != user_ids.size(0):\n user_ids = user_ids.expand(item_ids.size())\n\n user_var = gpu(user_ids, use_cuda)\n item_var = gpu(item_ids, use_cuda)\n\n return user_var.squeeze(), item_var.squeeze()\n\n\ndef idf(total_docs: int, term_freq: int) -> float:\n \"\"\"compute inverse doc frequency. If a term appears at all docs, then, its value is low for discrimination.\n If a term does not show in any doc, then, we simply use set denominator to 1 => largest idf value \"\"\"\n assert term_freq <= total_docs, \"The number of documents that contain a term must be smaller than total_docs\"\n return np.log((1.0 + total_docs) / float(term_freq + 1.0)) + 1.0\n\n\ndef moving_average(input_tensor: torch.Tensor, window_size: int, dimension: int):\n \"\"\"\n\n Parameters\n ----------\n input_tensor: torch.Tensor of shape (B, L, D)\n window_size: sliding windows size\n dimension: dimension we want to apply sliding window\n\n Returns\n -------\n\n \"\"\"\n ret = torch.cumsum(input_tensor, dim = dimension)\n # print(\"Here:\", ret, ret.shape)\n ret[:, window_size:] = ret[:, window_size:] - ret[:, :-window_size]\n return ret[:, window_size - 1:] / window_size\n\n\ndef cosine_distance(a: torch.Tensor, b: torch.Tensor):\n \"\"\"\n Compute the cosine distance between two tensors. This implementation saves a lot of memory since\n memory complexity is O(B x L x R)\n Parameters\n ----------\n a: `torch.Tensor` shape (B, L, D)\n b: `torch.Tensor` shape (B, R, D)\n\n Returns\n -------\n\n \"\"\"\n assert len(a.size()) == len(b.size()) == 3\n A_square = (a * a).sum(dim = - 1) # B, L\n B_square = (b * b).sum(dim = -1) # B, R\n dot = torch.bmm(a, b.permute(0, 2, 1)) # B, L, R\n # added abs in case of negative, added 1e-10 to avoid nan gradient of sqrt\n return torch.sqrt(torch.abs(A_square.unsqueeze(-1) - 2 * dot + B_square.unsqueeze(1)) + 1e-10)\n\n\ndef l1_distance(a: torch.Tensor, b: torch.Tensor):\n \"\"\"\n Compute the l1 distance between two tensors. This implementation consumes a lot of memory since\n mem complexity is O(B x L x R x D) due to x - y. 
I tried many ways but this is the best thing I can do\n Parameters\n ----------\n a: `torch.Tensor` shape (B, L, D)\n b: `torch.Tensor` shape (B, R, D)\n\n Returns\n -------\n\n \"\"\"\n assert len(a.size()) == len(b.size()) == 3\n x = a.unsqueeze(2) # (B, L, 1, D)\n y = b.unsqueeze(1) # (B, 1, R, D)\n return torch.norm(x - y, p = 1, dim = -1)\n\n\ndef _get_doc_context_copacrr(doc: torch.Tensor, doc_mask: torch.Tensor, context_window_size: int) -> torch.Tensor:\n \"\"\"\n\n Parameters\n ----------\n doc: with shape (B, R, D)\n doc_mask: binary tensor that differentiate real tokens from padding tokens (B, R)\n\n Returns\n -------\n a tensor of shape (B, R, D) which indicates the context representation of each token in doc.\n We also reset padding tokens to zero since they have no context\n \"\"\"\n\n def moving_average(a: torch.Tensor, window_size: int, dimension: int):\n ret = torch.cumsum(a, dim = dimension)\n # print(\"Here:\", ret, ret.shape)\n ret[:, window_size:] = ret[:, window_size:] - ret[:, :-window_size]\n return ret[:, window_size - 1:] / window_size\n\n left = context_window_size // 2\n right = context_window_size - left - 1 # in case context windows is an even number then left=x//2, right=x-x//2\n y = F.pad(doc, (0, 0, left, right)) # (B, c/2 + R + c/2, D)\n document_context = moving_average(y, window_size = context_window_size, dimension = 1)\n document_context = document_context * doc_mask.unsqueeze(-1).float()\n return document_context\n\n\ndef init_weights(m):\n \"\"\"\n Copied from https://discuss.pytorch.org/t/how-are-layer-weights-and-biases-initialized-by-default/13073/3\n Examples:\n >>> w = nn.Linear(3, 4)\n >>> w.apply(init_weights)\n \"\"\"\n if type(m) == nn.Linear:\n nn.init.xavier_uniform_(m.weight)\n if hasattr(m.bias, \"data\"): m.bias.data.fill_(0)\n if isinstance(m, nn.Conv2d):\n torch.nn.init.xavier_uniform_(m.weight)\n if m.bias:\n torch.nn.init.xavier_uniform_(m.bias)\n\n\ndef auto_rnn(rnn_cell: nn.RNN, input_feats: torch.Tensor,\n lens: torch.Tensor, new_indices: torch.Tensor, restoring_indices: torch.Tensor, max_len: int):\n \"\"\"\n\n Parameters\n ----------\n rnn_cell : a rnn cell\n input_feats: `torch.Tensor` (B, L, D)\n lens: `torch.Tensor` (B, )\n new_indices: `torch.Tensor` (B, )\n restoring_indices: `torch.Tensor` (B, )\n max_len: int\n Returns\n -------\n\n \"\"\"\n return rnn_cell((input_feats, lens, new_indices, restoring_indices), max_len=max_len, return_h=False)[0]\n\n\ndef rnn_last_h(rnn_cell: nn.RNN, input_feats: torch.Tensor,\n lens: torch.Tensor, new_indices: torch.Tensor, restoring_indices: torch.Tensor, max_len: int):\n \"\"\"\n return the last hidden vectors of an RNN\n Parameters\n ----------\n rnn_cell : a rnn cell\n input_feats: `torch.Tensor` (B, L, D)\n lens: `torch.Tensor` (B, )\n new_indices: `torch.Tensor` (B, )\n restoring_indices: `torch.Tensor` (B, )\n max_len: int\n Returns\n -------\n\n \"\"\"\n return rnn_cell((input_feats, lens, new_indices, restoring_indices), max_len=max_len, return_h=True)[1]\n\n\ndef retrieve_elements_from_indices(tensor: torch.Tensor, indices: torch.Tensor):\n \"\"\"\n Copied from https://discuss.pytorch.org/t/pooling-using-idices-from-another-max-pooling/37209/4\n How does this work? 
(Checked\n Parameters\n ----------\n tensor: torch.Tensor shape B, C, L, R\n indices: torch.Tensor shape (B, C, L, R) the values are indices where the last two dimensions are flattened\n\n Returns\n -------\n\n \"\"\"\n flattened_tensor = tensor.flatten(start_dim=2)\n output = flattened_tensor.gather(dim=2, index=indices.flatten(start_dim=2)).view_as(indices)\n return output\n\n\ndata_transforms = transforms.Compose([\n transforms.Resize((224, 224)),\n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\n])\n\n\ndef load_images(infile):\n im = Image.open(infile).convert('RGB')\n return data_transforms(im)\n"
] | [
[
"torch.nn.init.xavier_uniform_",
"numpy.random.shuffle",
"torch.nn.utils.rnn.pack_padded_sequence",
"torch.cuda.manual_seed",
"torch.manual_seed",
"torch.nn.functional.pad",
"numpy.argsort",
"torch.norm",
"numpy.arange",
"torch.cumsum",
"torch.from_numpy",
"numpy.stack",
"numpy.isscalar",
"numpy.array",
"torch.sort"
]
] |
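The `torch_utils.py` entry above documents `get_sorted_index_and_reverse_index`, whose docstring stresses why both the sorting permutation and its inverse are needed before `pack_padded_sequence`. A minimal standalone sketch of that idea (independent of the repository's implementation; the inverse is built here with a second `argsort`, which is equivalent to the file's stack-and-resort construction, and the lengths are illustrative):

```python
import numpy as np

# Sort sequence lengths in decreasing order for pack_padded_sequence,
# then use the inverse permutation to restore the original batch order.
lengths = np.array([3, 7, 5, 2])

new_indices = np.argsort(-lengths)           # permutation that sorts lengths descending
restoring_indices = np.argsort(new_indices)  # inverse permutation

sorted_lengths = lengths[new_indices]        # array([7, 5, 3, 2])
assert (sorted_lengths[restoring_indices] == lengths).all()
```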
kjarczak/balticlsc_module | [
"d104c66fbfeb2147e8a40a0fa5170326843854c5"
] | [
"examples/face_recogniser/content/processing.py"
] | [
"import os\nfrom typing import List, Tuple, Dict\n\nimport face_recognition\n\nfrom matplotlib import pyplot, patches\n\nfrom PIL import Image\n\nimport numpy as np\n\nfrom balticlsc.access.ftp import upload_file, get_connection\nfrom balticlsc.configs.credential.ftp import FTPCredential\nfrom balticlsc.scheme.api import init_baltic_api\nfrom balticlsc.scheme.logger import logger\nfrom balticlsc.scheme.pin import Pin, MissingPin, PinAttribute, ValuesAttribute\nfrom balticlsc.scheme.processing import ProcessingInterface\nfrom balticlsc.scheme.utils import camel_to_snake, get_random_output_folder\n\nMODULE_VERSION = 'latest'\n\n\nclass Processing(ProcessingInterface):\n def process(self, msg_uid: str, input_pin: Pin, output_pin_name_to_value: Dict[str, Pin]) -> None:\n logger.info('module version = ' + MODULE_VERSION)\n logger.info('starting processing for input pin=\"' + str(input_pin) + '\"')\n input_access_credential = input_pin.getattr(PinAttribute.ACCESS_CREDENTIAL)\n input_folder = input_pin.getattr(PinAttribute.ACCESS_PATH)\n\n if input_access_credential is None:\n raise ValueError(f'missing access credential in the input pin={str(input_pin)}')\n\n if input_folder is None:\n raise ValueError(f'missing access path in the input pin={str(input_pin)}')\n\n input_ftp_credential = FTPCredential(**input_access_credential)\n # START # Establish the output access credential and folder # START #\n output_pin_name: str = 'Output'\n\n if output_pin_name not in output_pin_name_to_value:\n error_msg = 'missing pin with name=\"' + output_pin_name + '\" in output pins config'\n logger.error(error_msg)\n raise MissingPin([pin for pin in output_pin_name_to_value.values()], error_msg)\n\n output_pin = output_pin_name_to_value[output_pin_name]\n logger.info('loading output pin=' + str(output_pin))\n output_access_credential = output_pin.getattr(PinAttribute.ACCESS_CREDENTIAL)\n\n if output_access_credential is None:\n logger.info('output pin access credentials is None, using input access credentials')\n output_ftp_credential = input_ftp_credential\n else:\n output_access_credential = {camel_to_snake(key): value for key, value in output_access_credential.items()}\n\n if str(output_access_credential) == str(input_access_credential):\n logger.info('input and output access credential are the same')\n output_ftp_credential = input_ftp_credential\n else:\n output_ftp_credential = FTPCredential(**output_access_credential)\n\n output_access_path = output_pin.getattr(PinAttribute.ACCESS_PATH)\n\n if output_access_path is None:\n logger.info('access path is not provided in output config')\n logger.info('setting random generated string as output folder name')\n output_folder = get_random_output_folder(input_folder)\n else:\n output_access_path = {camel_to_snake(key): value for key, value in output_access_path.items()}\n\n if 'resource_path' not in output_access_path:\n logger.info('missing \"resource_path\" value in output access path')\n logger.info('setting random generated string as output folder name')\n output_folder = get_random_output_folder(input_folder)\n else:\n output_folder = output_access_path['resource_path']\n logger.info('setting output folder based on output pin config \"resource_path\"=' + output_folder)\n # STOP # Establish output credentials and folder # STOP #\n logger.info('connecting to input ftp server: ' + input_ftp_credential.host)\n input_ftp = get_connection(input_ftp_credential)\n\n if output_ftp_credential != input_ftp_credential:\n logger.info('connecting to output ftp server: ' + 
output_ftp_credential.host)\n output_ftp = get_connection(output_ftp_credential)\n else:\n logger.info('using the same connection as output ftp')\n output_ftp = input_ftp\n # START # process and send files # START #\n logger.info('changing ftp working directory to \"' + input_folder + '\"')\n input_ftp.cwd(input_folder)\n logger.info('working directory changed')\n logger.info('listing files in the working directory ...')\n filenames: List[str] = input_ftp.nlst()\n logger.info('handling ' + str(len(filenames)) + ' files')\n os.makedirs('tmp', exist_ok=True)\n\n for filename in filenames:\n if not filename.lower().endswith(('.png', '.jpg', '.jpeg', '.tiff', '.bmp', '.gif')):\n logger.warning('wrong format of the file \"' + filename + '\", omitting')\n continue\n\n logger.info('downloading file \"' + filename + '\"')\n filepath = 'tmp/' + filename\n # Save the image locally\n with open(filepath, 'wb') as file:\n input_ftp.retrbinary(\"RETR \" + filename, file.write)\n # Mark faces and save the image\n image = np.array(Image.open(filepath))\n im = Image.fromarray(image)\n im.save(filepath)\n height: int = image.shape[0]\n width: int = image.shape[1]\n dpi: int = 100\n faces_coords: List[Tuple[int]] = face_recognition.face_locations(image)\n figure = pyplot.figure(frameon=False, dpi=dpi)\n figure.set_size_inches(width / dpi, height / dpi)\n ax = pyplot.Axes(figure, [0., 0., 1., 1.])\n ax.set_axis_off()\n figure.add_axes(ax)\n ax.imshow(image)\n logger.info('adding ' + str(len(faces_coords)) + ' faces to image \"' + filename + '\"')\n fig = pyplot.gcf()\n fig.savefig(fname=filepath, dpi=dpi, bbox_inches='tight')\n\n for index in range(len(faces_coords)):\n x_start = faces_coords[index][3]\n y_start = faces_coords[index][0]\n x_width = (faces_coords[index][1] - faces_coords[index][3])\n y_height = (faces_coords[index][2] - faces_coords[index][0])\n rect = patches.Rectangle((x_start, y_start), x_width, y_height,\n edgecolor='r', facecolor=\"none\")\n ax.add_patch(rect)\n\n pyplot.savefig(fname=filepath, dpi=dpi, bbox_inches='tight')\n pyplot.close()\n # Send file to ftp\n with open(filepath, 'rb') as file:\n logger.info('uploading file \"' + filename + '\" into ' + output_folder)\n upload_file(filename, output_folder, output_ftp, file)\n file.close() # close file and FTP\n\n input_ftp.cwd(input_folder)\n # STOP # process and send files # STOP #\n input_ftp.quit()\n\n if output_ftp_credential != input_ftp_credential:\n output_ftp.quit()\n\n rest_client.send_output_token(\n base_msg_uid=msg_uid,\n values={\n ValuesAttribute.RESOURCE_PATH: output_folder\n },\n output_pin_name=output_pin.getattr(PinAttribute.NAME))\n rest_client.send_ack_token(\n msg_uids=[msg_uid],\n is_final=True,\n is_failed=False,\n )\n\n\napp, rest_client = init_baltic_api(Processing)\n"
] | [
[
"matplotlib.pyplot.Axes",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.gcf",
"matplotlib.pyplot.savefig",
"matplotlib.patches.Rectangle",
"matplotlib.pyplot.close"
]
] |
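The face-recogniser processing code above converts each box from `face_recognition.face_locations` into a matplotlib `Rectangle`. A small hedged sketch of that coordinate conversion, assuming the library's documented `(top, right, bottom, left)` box order (the helper name and sample values are illustrative, not from the repository):

```python
# face_recognition.face_locations returns boxes as (top, right, bottom, left);
# the plotting loop above turns each box into matplotlib's (x, y, width, height).
def to_xywh(box):
    top, right, bottom, left = box
    return left, top, right - left, bottom - top

# e.g. a box with top=10, right=50, bottom=40, left=20
assert to_xywh((10, 50, 40, 20)) == (20, 10, 30, 30)
```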
waterzxj/UNF | [
"5eda8e7c60116735f595f4b21b24547708b36cf5"
] | [
"UNF/training/metric.py"
] | [
"#coding:utf-8\n\nimport torch\n\nfrom learner_util import get_ner_BIO\n\n\nclass Metric(object):\n def __call__(self,\n predictions,\n gold_labels,\n mask=None):\n \"\"\"\n metric的抽象类\n\n :params predictions 预测结果的tensor\n :params gold_labels 实际结果的tensor\n :mask mask\n \"\"\"\n raise NotImplementedError\n\n def get_metric(self, reset=False):\n \"\"\"\n 返回metric的指标\n \"\"\"\n raise NotImplementedError\n\n def reset(self):\n \"\"\"\n 重置内部状态\n \"\"\"\n raise NotImplementedError\n\n @staticmethod\n def unwrap_to_tensors(*tensors):\n \"\"\"\n 把tensor安全的copy到cpu进行操作,避免gpu的oom\n \"\"\"\n return (x.detach().cpu() if isinstance(x, torch.Tensor) else x for x in tensors)\n\n @classmethod\n def from_option(cls, conf):\n return cls(**conf)\n\n\nclass F1Measure(Metric):\n def __init__(self, positive_label):\n \"\"\"\n 准确率、召回率、F值的评价指标\n \"\"\"\n super(F1Measure, self).__init__()\n self._positive_label = positive_label\n self._true_positives = 0.0\n self._true_negatives = 0.0\n self._false_positives = 0.0\n self._false_negatives = 0.0\n \n def __call__(self,\n predictions,\n gold_labels,\n mask=None):\n predictions, gold_labels, mask = self.unwrap_to_tensors(predictions, gold_labels, mask)\n num_classes = predictions.size(-1)\n if (gold_labels >= num_classes).any():\n raise Exception(\"A gold label passed to F1Measure contains an id >= {}, \"\n \"the number of classes.\".format(num_classes))\n if mask is None:\n mask = torch.ones_like(gold_labels)\n mask = mask.float()\n gold_labels = gold_labels.float()\n\n self.update(predictions, gold_labels, mask)\n\n def update(self, predictions, gold_labels, mask):\n positive_label_mask = gold_labels.eq(self._positive_label).float()\n negative_label_mask = 1.0 - positive_label_mask\n\n argmax_predictions = predictions.max(-1)[1].float().squeeze(-1)\n\n # True Negatives: correct non-positive predictions.\n correct_null_predictions = (argmax_predictions !=\n self._positive_label).float() * negative_label_mask\n self._true_negatives += (correct_null_predictions.float() * mask).sum()\n\n # True Positives: correct positively labeled predictions.\n correct_non_null_predictions = (argmax_predictions ==\n self._positive_label).float() * positive_label_mask\n self._true_positives += (correct_non_null_predictions * mask).sum()\n\n # False Negatives: incorrect negatively labeled predictions.\n incorrect_null_predictions = (argmax_predictions !=\n self._positive_label).float() * positive_label_mask\n self._false_negatives += (incorrect_null_predictions * mask).sum()\n\n # False Positives: incorrect positively labeled predictions\n incorrect_non_null_predictions = (argmax_predictions ==\n self._positive_label).float() * negative_label_mask\n self._false_positives += (incorrect_non_null_predictions * mask).sum()\n\n def get_metric(self, reset=False):\n \"\"\"\n 返回准确率、召回率、F值评价指标\n \"\"\"\n # print('TP',self._true_positives,'TN',self._true_negatives,'FP',self._false_positives,'FN',self._false_negatives)\n\n precision = float(self._true_positives) / float(self._true_positives + self._false_positives + 1e-13)\n recall = float(self._true_positives) / float(self._true_positives + self._false_negatives + 1e-13)\n f1_measure = 2. 
* ((precision * recall) / (precision + recall + 1e-13))\n if reset:\n self.reset()\n return {\"precision\":precision, \"recall\": recall, \"f1_measure\":f1_measure}\n\n def reset(self):\n self._true_positives = 0.0\n self._true_negatives = 0.0\n self._false_positives = 0.0\n self._false_negatives = 0.0\n\n\nclass NerF1Measure(Metric):\n def __init__(self, label_vocab):\n self.golden_num = 0.0\n self.predict_num = 0.0\n self.right_num = 0.0\n self.label_vocab = label_vocab\n\n def reset(self):\n \"\"\"\n 重置内部状态\n \"\"\"\n self.golden_num = 0.0\n self.predict_num = 0.0\n self.right_num = 0.0\n\n def get_metric(self, reset=False):\n \"\"\"\n 返回metric的指标\n \"\"\"\n if self.predict_num == 0.0:\n precision = -1\n else:\n precision = (self.right_num+0.0)/self.predict_num\n\n if self.golden_num == 0.0:\n recall = -1\n else:\n recall = (self.right_num+0.0)/self.golden_num\n\n if (precision == -1) or (recall == -1) or (precision+recall) <= 0.:\n f_measure = -1\n else:\n f_measure = 2*precision*recall/(precision+recall)\n\n if reset:\n self.reset()\n\n return {\"precision\":precision, \"recall\": recall, \"f1_measure\":f_measure}\n\n def update(self, gold_matrix, pred_matrix):\n right_ner = list(set(gold_matrix).intersection(set(pred_matrix)))\n self.golden_num += len(gold_matrix)\n self.predict_num += len(pred_matrix)\n self.right_num += len(right_ner)\n\n def __call__(self,\n predictions,\n gold_labels,\n mask=None):\n \"\"\"\n metric的抽象类\n\n :params predictions 预测结果的tensor\n :params gold_labels 实际结果的tensor\n :mask mask\n \"\"\"\n batch_size = gold_labels.size(0)\n seq_len = gold_labels.size(1)\n predictions, gold_labels, mask = self.unwrap_to_tensors(predictions, gold_labels,\n mask)\n\n predictions = predictions.tolist()\n gold_labels = gold_labels.tolist()\n mask = mask.tolist()\n\n for idx in range(batch_size):\n pred = [self.label_vocab[predictions[idx][idy]] for idy in range(seq_len) if mask[idx][idy] != 0]\n gold = [self.label_vocab[gold_labels[idx][idy]] for idy in range(seq_len) if mask[idx][idy] != 0]\n\n\n gold_matrix = get_ner_BIO(gold)\n pred_matrix = get_ner_BIO(pred)\n self.update(gold_matrix, pred_matrix)\n\n\n\n\n\n"
] | [
[
"torch.ones_like"
]
] |
sibo/pysimm_tacticity | [
"cfb20851b26b87b736dbb6a2f4c4e7b668d680d5"
] | [
"pysimm/apps/random_walk.py"
] | [
"# ******************************************************************************\n# pysimm.apps.random_walk module\n# ******************************************************************************\n#\n# psuedo random walk algorithm written using pysimm tools\n#\n# ******************************************************************************\n# License\n# ******************************************************************************\n# The MIT License (MIT)\n#\n# Copyright (c) 2016 Michael E. Fortunato, Coray M. Colina\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n\n\nfrom time import strftime\nfrom itertools import permutations, izip\n\nimport numpy as np\n\nfrom pysimm import system, lmps, forcefield, calc\nfrom pysimm import error_print\n\n\ndef find_last_backbone_vector(s, m):\n \"\"\"pysimm.apps.random_walk.find_last_backbone_vector\n\n Finds vector between backbone atoms in terminal monomer. 
Requires current system s, and reference monomer m.\n\n Args:\n s: :class:`~pysimm.system.System` object\n m: :class:`~pysimm.system.System` object\n Returns:\n list of vector components\n \"\"\"\n head_pos = [0, 0, 0]\n tail_pos = [0, 0, 0]\n for p in s.particles[-1*m.particles.count:]:\n if p.linker == 'head':\n head_pos = [p.x, p.y, p.z]\n elif p.linker == 'tail':\n tail_pos = [p.x, p.y, p.z]\n return [head_pos[0] - tail_pos[0], head_pos[1] - tail_pos[1], head_pos[2] - tail_pos[2]]\n\n\ndef copolymer(m, nmon, s_=None, **kwargs):\n \"\"\"pysimm.apps.random_walk.copolymer\n\n Builds copolymer using random walk methodology using pattern\n\n Args:\n m: list of reference monomer :class:`~pysimm.system.System`s\n nmon: total number of monomers to add to chain\n s_: :class:`~pysimm.system.System` in which to build polymer chain (None)\n settings: dictionary of simulation settings\n density: density at which to build polymer (0.3)\n forcefield: :class:`~pysimm.forcefield.Forcefield` object to acquire new force field parameters\n capped: True/False if monomers are capped\n unwrap: True to unwrap final system\n traj: True to build xyz trajectory of polymer growth (True)\n pattern: list of pattern for monomer repeat units, should match length of m ([1 for _ in range(len(m))])\n limit: during MD, limit atomic displacement by this max value (LAMMPS ONLY)\n sim: :class:`~pysimm.lmps.Simulation` object for relaxation between polymer growth\n Returns:\n new copolymer :class:`~pysimm.system.System`\n \"\"\"\n m = [x.copy() for x in m]\n\n settings = kwargs.get('settings', {})\n density = kwargs.get('density', 0.3)\n f = kwargs.get('forcefield')\n capped = kwargs.get('capped')\n unwrap = kwargs.get('unwrap')\n traj = kwargs.get('traj', True)\n pattern = kwargs.get('pattern', [1 for _ in range(len(m))])\n limit = kwargs.get('limit', 0.1)\n sim = kwargs.get('sim')\n\n for m_ in m:\n m_.add_particle_bonding()\n for p in m_.particles:\n if p.type.name.find('@') >= 0 and p.type.name.split('@')[0].find('H'):\n p.linker = 'head'\n elif p.type.name.find('@') >= 0 and p.type.name.split('@')[0].find('T'):\n p.linker = 'tail'\n m_.remove_linker_types()\n\n if s_ is None:\n s = system.replicate(m[0], 1, density=density/nmon)\n else:\n s = system.replicate(m[0], 1, s_=s_, density=density/nmon)\n print('%s: %s/%s monomers added' % (strftime('%H:%M:%S'), 1, nmon))\n\n for p in s.particles:\n if p.linker == 'head':\n last_head = p\n\n elif p.linker == 'tail':\n last_tail = p\n\n for m_ in m:\n if capped:\n m_.particles.remove(1)\n m_.remove_spare_bonding()\n m_.add_particle_bonding()\n\n s.add_particle_bonding()\n \n if traj:\n s.write_xyz('random_walk.xyz')\n\n temp_nmon = 1\n\n while True:\n\n m_ = m.pop(0)\n m.append(m_)\n p_ = pattern.pop(0)\n pattern.append(p_)\n\n if temp_nmon == 1 and p_ == 1:\n m_ = m.pop(0)\n m.append(m_)\n p_ = pattern.pop(0)\n pattern.append(p_)\n elif temp_nmon == 1:\n p_ -= 1\n\n for insert in range(p_):\n\n head = None\n tail = None\n\n backbone_vector = np.array([last_head.x - last_tail.x,\n last_head.y - last_tail.y,\n last_head.z - last_tail.z])\n\n ref_head = None\n ref_tail = None\n for p in m_.particles:\n if p.linker == 'head':\n ref_head = p\n elif p.linker == 'tail':\n ref_tail = p\n if ref_head and ref_tail:\n ref_backbone_vector = np.array([ref_head.x - ref_tail.x,\n ref_head.y - ref_tail.y,\n ref_head.z - ref_tail.z])\n rot_matrix = calc.find_rotation(ref_backbone_vector, backbone_vector)\n m_.rotate(around=ref_tail, rot_matrix=rot_matrix)\n translation_vector = [last_tail.x - 
ref_tail.x,\n last_tail.y - ref_tail.y,\n last_tail.z - ref_tail.z]\n for p in m_.particles:\n p.x = p.x + translation_vector[0] + 3*backbone_vector[0]\n p.y = p.y + translation_vector[1] + 3*backbone_vector[1]\n p.z = p.z + translation_vector[2] + 3*backbone_vector[2]\n else:\n print('reference molecule has no head or tail')\n\n n = m_.copy()\n\n if capped:\n s.particles.remove(s.particles.count)\n s.remove_spare_bonding()\n s.add_particle_bonding()\n\n s.add(n, change_dim=False)\n\n s.add_particle_bonding()\n\n head = last_head\n for p in s.particles[-1*n.particles.count:]:\n if p.linker == 'tail':\n tail = p\n\n s.make_new_bonds(head, tail, f)\n temp_nmon += 1\n print('%s: %s/%s monomers added' % (strftime('%H:%M:%S'), temp_nmon, nmon))\n\n if unwrap:\n s.unwrap()\n \n if sim is None:\n sim = lmps.Simulation(s, name='relax_%03d' % (temp_nmon), log='relax.log', **settings)\n sim.add_md(ensemble='nve', limit=limit, **settings)\n sim.add_min(**settings)\n if isinstance(sim, lmps.Simulation):\n sim.system = s\n sim.name = 'relax_%03d' % (temp_nmon)\n sim.run(np=settings.get('np'))\n\n if unwrap:\n s.unwrap()\n\n if unwrap:\n s.wrap()\n\n for p in s.particles[-1*n.particles.count:]:\n if p.linker == 'head':\n last_head = p\n elif p.linker == 'tail':\n last_tail = p\n\n if temp_nmon >= nmon:\n break\n \n if unwrap:\n if not s.unwrap():\n error_print('something went wrong')\n return s\n \n if traj:\n s.write_xyz('random_walk.xyz', append=True)\n \n if unwrap:\n s.wrap()\n \n for p in s.particles:\n if p not in s.molecules[p.molecule.tag].particles:\n s.molecules[p.molecule.tag].particles.add(p)\n\n s.write_lammps('polymer.lmps')\n s.unwrap()\n s.write_xyz('polymer.xyz')\n\n return s\n\n\ndef random_walk(m, nmon, s_=None, **kwargs):\n \"\"\"pysimm.apps.random_walk.random_walk\n\n Builds homopolymer using random walk methodology\n\n Args:\n m: reference monomer :class:`~pysimm.system.System`\n nmon: total number of monomers to add to chain\n s_: :class:`~pysimm.system.System` in which to build polymer chain (None)\n extra_bonds: EXPERMINTAL, True if making ladder backbone polymer\n settings: dictionary of simulation settings\n density: density at which to build polymer (0.3)\n forcefield: :class:`~pysimm.forcefield.Forcefield` object to acquire new force field parameters\n capped: True/False if monomers are capped\n unwrap: True to unwrap final system\n traj: True to build xyz trajectory of polymer growth (True)\n limit: during MD, limit atomic displacement by this max value (LAMMPS ONLY)\n sim: :class:`~pysimm.lmps.Simulation` object for relaxation between polymer growth\n Returns:\n new polymer :class:`~pysimm.system.System`\n \"\"\"\n m = m.copy()\n\n extra_bonds = kwargs.get('extra_bonds', False)\n\n settings = kwargs.get('settings', {})\n density = kwargs.get('density', 0.3)\n f = kwargs.get('forcefield')\n capped = kwargs.get('capped')\n unwrap = kwargs.get('unwrap')\n traj = kwargs.get('traj', True)\n limit = kwargs.get('limit', 0.1)\n sim = kwargs.get('sim')\n\n m.add_particle_bonding()\n\n for p in m.particles:\n if p.type.name.find('@') >= 0 and p.type.name.split('@')[0].find('H'):\n p.linker = 'head'\n elif p.type.name.find('@') >= 0 and p.type.name.split('@')[0].find('T'):\n p.linker = 'tail'\n\n m.remove_linker_types()\n\n if s_ is None:\n s = system.replicate(m, 1, density=density/nmon)\n else:\n s = system.replicate(m, 1, s_=s_, density=None)\n print('%s: %s/%s monomers added' % (strftime('%H:%M:%S'), 1, nmon))\n\n if traj:\n s.write_xyz('random_walk.xyz')\n\n if capped:\n 
m.particles.remove(1)\n m.remove_spare_bonding()\n m.add_particle_bonding()\n\n for insertion in range(nmon - 1):\n\n head = None\n tail = None\n\n backbone_vector = np.array(find_last_backbone_vector(s, m))\n\n for p, p_ in izip(s.particles[-1*m.particles.count:], m.particles):\n p_.x = p.x + 3*backbone_vector[0]\n p_.y = p.y + 3*backbone_vector[1]\n p_.z = p.z + 3*backbone_vector[2]\n\n n = m.copy()\n\n if capped:\n s.particles.remove(s.particles.count)\n s.remove_spare_bonding()\n s.add_particle_bonding()\n\n if extra_bonds:\n heads = []\n for p in s.particles[-1*n.particles.count:]:\n if p.linker == 'head':\n heads.append(p)\n else:\n for p in s.particles[-1*n.particles.count:]:\n if p.linker == 'head':\n head = p\n\n s.add(n, change_dim=False)\n\n s.add_particle_bonding()\n\n if extra_bonds:\n tails = []\n for p in s.particles[-1*n.particles.count:]:\n if p.linker == 'tail':\n tails.append(p)\n else:\n for p in s.particles[-1*n.particles.count:]:\n if p.linker == 'tail':\n tail = p\n\n for p in s.particles:\n if not p.bonded_to:\n print(p.tag)\n\n if head and tail:\n s.make_new_bonds(head, tail, f)\n print('%s: %s/%s monomers added' % (strftime('%H:%M:%S'), insertion+2, nmon))\n elif extra_bonds and len(heads) == len(tails):\n for h, t in izip(heads, tails):\n s.make_new_bonds(h, t, f)\n print('%s: %s/%s monomers added' % (strftime('%H:%M:%S'), insertion+2, nmon))\n else:\n print('cannot find head and tail')\n\n if sim is None:\n sim = lmps.Simulation(s, name='relax_%03d' % (insertion+2), log='relax.log', **settings)\n sim.add_md(ensemble='nve', limit=limit, **settings)\n sim.add_min(**settings)\n if isinstance(sim, lmps.Simulation):\n sim.system = s\n sim.name = 'relax_%03d' % (insertion+2)\n sim.run(np=settings.get('np'))\n\n if unwrap:\n if not s.unwrap():\n error_print('something went wrong')\n return s\n\n if traj:\n s.write_xyz('random_walk.xyz', append=True)\n\n if unwrap:\n s.wrap()\n \n for p in s.particles:\n if p not in s.molecules[p.molecule.tag].particles:\n s.molecules[p.molecule.tag].particles.add(p)\n\n s.write_lammps('polymer.lmps')\n s.unwrap()\n s.write_xyz('polymer.xyz')\n\n return s\n"
] | [
[
"numpy.array"
]
] |
llimeht/sasview | [
"d0c10746a2397c5021ed8bbc842ba99243a9b0ac"
] | [
"test/sascalculator/utest_sas_gen.py"
] | [
"\"\"\"\nUnit tests for the sas_gen\n\"\"\"\n\nimport os.path\nimport warnings\nwarnings.simplefilter(\"ignore\")\n\nimport unittest\nimport numpy as np\n\nfrom sas.sascalc.calculator import sas_gen\n\n\ndef find(filename):\n return os.path.join(os.path.dirname(__file__), 'data', filename)\n\n\nclass sas_gen_test(unittest.TestCase):\n\n def setUp(self):\n self.sldloader = sas_gen.SLDReader()\n self.pdbloader = sas_gen.PDBReader()\n self.omfloader = sas_gen.OMFReader()\n\n def test_sldreader(self):\n \"\"\"\n Test .sld file loaded\n \"\"\"\n f = self.sldloader.read(find(\"sld_file.sld\"))\n self.assertEqual(f.pos_x[0], -40.5)\n self.assertEqual(f.pos_y[0], -13.5)\n self.assertEqual(f.pos_z[0], -13.5)\n\n def test_pdbreader(self):\n \"\"\"\n Test .pdb file loaded\n \"\"\"\n f = self.pdbloader.read(find(\"c60.pdb\"))\n self.assertEqual(f.pos_x[0], -0.733)\n self.assertEqual(f.pos_y[0], -1.008)\n self.assertEqual(f.pos_z[0], 3.326)\n\n def test_omfreader(self):\n \"\"\"\n Test .omf file loaded\n \"\"\"\n f = self.omfloader.read(find(\"A_Raw_Example-1.omf\"))\n output = sas_gen.OMF2SLD()\n output.set_data(f)\n self.assertEqual(f.mx[0], 0)\n self.assertEqual(f.my[0], 0)\n self.assertEqual(f.mz[0], 0)\n self.assertEqual(output.pos_x[0], 0.0)\n self.assertEqual(output.pos_y[0], 0.0)\n self.assertEqual(output.pos_z[0], 0.0)\n\n def test_calculator(self):\n \"\"\"\n Test that the calculator calculates.\n \"\"\"\n f = self.omfloader.read(find(\"A_Raw_Example-1.omf\"))\n omf2sld = sas_gen.OMF2SLD()\n omf2sld.set_data(f)\n model = sas_gen.GenSAS()\n model.set_sld_data(omf2sld.output)\n x = np.linspace(0, 0.1, 11)[1:]\n model.runXY([x, x])\n\n\nif __name__ == '__main__':\n unittest.main()\n\n"
] | [
[
"numpy.linspace"
]
] |
ddasdkimo/Towards-Realtime-MOT | [
"cfe0e26331969450b6e2a645dfa5c14947514ba5"
] | [
"track.py"
] | [
"import os\r\nimport os.path as osp\r\nimport cv2\r\nimport logging\r\nimport argparse\r\nimport motmetrics as mm\r\n\r\nimport torch\r\nfrom tracker.multitracker import JDETracker\r\nfrom utils import visualization as vis\r\nfrom utils.log import logger\r\nfrom utils.timer import Timer\r\nfrom utils.evaluation import Evaluator\r\nfrom utils.parse_config import parse_model_cfg\r\nimport utils.datasets as datasets\r\nfrom utils.utils import *\r\n\r\n\r\ndef write_results(filename, results, data_type):\r\n if data_type == 'mot':\r\n save_format = '{frame},{id},{x1},{y1},{w},{h},1,-1,-1,-1\\n'\r\n elif data_type == 'kitti':\r\n save_format = '{frame} {id} pedestrian 0 0 -10 {x1} {y1} {x2} {y2} -10 -10 -10 -1000 -1000 -1000 -10\\n'\r\n else:\r\n raise ValueError(data_type)\r\n\r\n with open(filename, 'w') as f:\r\n for frame_id, tlwhs, track_ids in results:\r\n if data_type == 'kitti':\r\n frame_id -= 1\r\n for tlwh, track_id in zip(tlwhs, track_ids):\r\n if track_id < 0:\r\n continue\r\n x1, y1, w, h = tlwh\r\n x2, y2 = x1 + w, y1 + h\r\n line = save_format.format(frame=frame_id, id=track_id, x1=x1, y1=y1, x2=x2, y2=y2, w=w, h=h)\r\n f.write(line)\r\n logger.info('save results to {}'.format(filename))\r\ndef conversion_frame_init(opt,frame_rate):\r\n global tracker,timer,results,frame_id,objopt,resultscamera\r\n objopt = opt\r\n tracker = JDETracker(opt, frame_rate=frame_rate)\r\n timer = Timer()\r\n results = []\r\n resultscamera = [None] * 1000\r\n \r\n frame_id = 0\r\n\r\ndef conversion_frame(img, img0):\r\n global tracker,timer,resultscamera,frame_id,objopt\r\n # run tracking\r\n timer.tic()\r\n blob = torch.from_numpy(img).cuda().unsqueeze(0)\r\n online_targets = tracker.update(blob, img0)\r\n online_tlwhs = []\r\n online_ids = []\r\n for t in online_targets:\r\n tlwh = t.tlwh\r\n tid = t.track_id\r\n vertical = tlwh[2] / tlwh[3] > 1.6\r\n if tlwh[2] * tlwh[3] > objopt.min_box_area and not vertical:\r\n online_tlwhs.append(tlwh)\r\n online_ids.append(tid)\r\n timer.toc()\r\n count = frame_id%1000\r\n resultscamera[count] = (frame_id + 1, online_tlwhs, online_ids)\r\n online_im = vis.plot_tracking(img0, online_tlwhs, online_ids, frame_id=frame_id,\r\n fps=1. 
/ timer.average_time)\r\n frame_id += 1\r\n return online_im,resultscamera[count]\r\n\r\ndef eval_seq(opt, dataloader, data_type, result_filename, save_dir=None, show_image=True, frame_rate=30):\r\n '''\r\n Processes the video sequence given and provides the output of tracking result (write the results in video file)\r\n\r\n It uses JDE model for getting information about the online targets present.\r\n\r\n Parameters\r\n ----------\r\n opt : Namespace\r\n Contains information passed as commandline arguments.\r\n\r\n dataloader : LoadVideo\r\n Instance of LoadVideo class used for fetching the image sequence and associated data.\r\n\r\n data_type : String\r\n Type of dataset corresponding(similar) to the given video.\r\n\r\n result_filename : String\r\n The name(path) of the file for storing results.\r\n\r\n save_dir : String\r\n Path to the folder for storing the frames containing bounding box information (Result frames).\r\n\r\n show_image : bool\r\n Option for shhowing individial frames during run-time.\r\n\r\n frame_rate : int\r\n Frame-rate of the given video.\r\n\r\n Returns\r\n -------\r\n (Returns are not significant here)\r\n frame_id : int\r\n Sequence number of the last sequence\r\n '''\r\n\r\n if save_dir:\r\n mkdir_if_missing(save_dir)\r\n tracker = JDETracker(opt, frame_rate=frame_rate)\r\n timer = Timer()\r\n results = []\r\n frame_id = 0\r\n for path, img, img0 in dataloader:\r\n if frame_id % 20 == 0:\r\n logger.info('Processing frame {} ({:.2f} fps)'.format(frame_id, 1./max(1e-5, timer.average_time)))\r\n\r\n # run tracking\r\n timer.tic()\r\n blob = torch.from_numpy(img).cuda().unsqueeze(0)\r\n online_targets = tracker.update(blob, img0)\r\n online_tlwhs = []\r\n online_ids = []\r\n for t in online_targets:\r\n tlwh = t.tlwh\r\n tid = t.track_id\r\n vertical = tlwh[2] / tlwh[3] > 1.6\r\n if tlwh[2] * tlwh[3] > opt.min_box_area and not vertical:\r\n online_tlwhs.append(tlwh)\r\n online_ids.append(tid)\r\n timer.toc()\r\n # save results\r\n results.append((frame_id + 1, online_tlwhs, online_ids))\r\n if show_image or save_dir is not None:\r\n online_im = vis.plot_tracking(img0, online_tlwhs, online_ids, frame_id=frame_id,\r\n fps=1. 
/ timer.average_time)\r\n if show_image:\r\n cv2.imshow('online_im', online_im)\r\n if save_dir is not None:\r\n cv2.imwrite(os.path.join(save_dir, '{:05d}.jpg'.format(frame_id)), online_im)\r\n frame_id += 1\r\n # save results\r\n write_results(result_filename, results, data_type)\r\n return frame_id, timer.average_time, timer.calls\r\n\r\n\r\ndef main(opt, data_root='/data/MOT16/train', det_root=None, seqs=('MOT16-05',), exp_name='demo', \r\n save_images=False, save_videos=False, show_image=True):\r\n logger.setLevel(logging.INFO)\r\n result_root = os.path.join(data_root, '..', 'results', exp_name)\r\n mkdir_if_missing(result_root)\r\n data_type = 'mot'\r\n\r\n # Read config\r\n cfg_dict = parse_model_cfg(opt.cfg)\r\n opt.img_size = [int(cfg_dict[0]['width']), int(cfg_dict[0]['height'])]\r\n\r\n # run tracking\r\n accs = []\r\n n_frame = 0\r\n timer_avgs, timer_calls = [], []\r\n for seq in seqs:\r\n output_dir = os.path.join(data_root, '..','outputs', exp_name, seq) if save_images or save_videos else None\r\n\r\n logger.info('start seq: {}'.format(seq))\r\n dataloader = datasets.LoadImages(osp.join(data_root, seq, 'img1'), opt.img_size)\r\n result_filename = os.path.join(result_root, '{}.txt'.format(seq))\r\n meta_info = open(os.path.join(data_root, seq, 'seqinfo.ini')).read() \r\n frame_rate = int(meta_info[meta_info.find('frameRate')+10:meta_info.find('\\nseqLength')])\r\n nf, ta, tc = eval_seq(opt, dataloader, data_type, result_filename,\r\n save_dir=output_dir, show_image=show_image, frame_rate=frame_rate)\r\n n_frame += nf\r\n timer_avgs.append(ta)\r\n timer_calls.append(tc)\r\n\r\n # eval\r\n logger.info('Evaluate seq: {}'.format(seq))\r\n evaluator = Evaluator(data_root, seq, data_type)\r\n accs.append(evaluator.eval_file(result_filename))\r\n if save_videos:\r\n output_video_path = osp.join(output_dir, '{}.mp4'.format(seq))\r\n cmd_str = 'ffmpeg -f image2 -i {}/%05d.jpg -c:v copy {}'.format(output_dir, output_video_path)\r\n os.system(cmd_str)\r\n timer_avgs = np.asarray(timer_avgs)\r\n timer_calls = np.asarray(timer_calls)\r\n all_time = np.dot(timer_avgs, timer_calls)\r\n avg_time = all_time / np.sum(timer_calls)\r\n logger.info('Time elapsed: {:.2f} seconds, FPS: {:.2f}'.format(all_time, 1.0 / avg_time))\r\n\r\n # get summary\r\n metrics = mm.metrics.motchallenge_metrics\r\n mh = mm.metrics.create()\r\n summary = Evaluator.get_summary(accs, seqs, metrics)\r\n strsummary = mm.io.render_summary(\r\n summary,\r\n formatters=mh.formatters,\r\n namemap=mm.io.motchallenge_metric_names\r\n )\r\n print(strsummary)\r\n Evaluator.save_summary(summary, os.path.join(result_root, 'summary_{}.xlsx'.format(exp_name)))\r\n\r\n\r\n\r\nif __name__ == '__main__':\r\n parser = argparse.ArgumentParser(prog='track.py')\r\n parser.add_argument('--cfg', type=str, default='cfg/yolov3.cfg', help='cfg file path')\r\n parser.add_argument('--weights', type=str, default='weights/latest.pt', help='path to weights file')\r\n parser.add_argument('--iou-thres', type=float, default=0.5, help='iou threshold required to qualify as detected')\r\n parser.add_argument('--conf-thres', type=float, default=0.5, help='object confidence threshold')\r\n parser.add_argument('--nms-thres', type=float, default=0.4, help='iou threshold for non-maximum suppression')\r\n parser.add_argument('--min-box-area', type=float, default=200, help='filter out tiny boxes')\r\n parser.add_argument('--track-buffer', type=int, default=30, help='tracking buffer')\r\n parser.add_argument('--test-mot16', action='store_true', help='tracking 
buffer')\r\n parser.add_argument('--save-images', action='store_true', help='save tracking results (image)')\r\n parser.add_argument('--save-videos', action='store_true', help='save tracking results (video)')\r\n opt = parser.parse_args()\r\n print(opt, end='\\n\\n')\r\n \r\n if not opt.test_mot16:\r\n seqs_str = '''MOT17-02-SDP\r\n MOT17-04-SDP\r\n MOT17-05-SDP\r\n MOT17-09-SDP\r\n MOT17-10-SDP\r\n MOT17-11-SDP\r\n MOT17-13-SDP\r\n '''\r\n data_root = '/home/wangzd/datasets/MOT/MOT17/images/train'\r\n else:\r\n seqs_str = '''MOT16-01\r\n MOT16-03\r\n MOT16-06\r\n MOT16-07\r\n MOT16-08\r\n MOT16-12\r\n MOT16-14'''\r\n data_root = '/home/wangzd/datasets/MOT/MOT16/images/test'\r\n seqs = [seq.strip() for seq in seqs_str.split()]\r\n\r\n main(opt,\r\n data_root=data_root,\r\n seqs=seqs,\r\n exp_name=opt.weights.split('/')[-2],\r\n show_image=False,\r\n save_images=opt.save_images, \r\n save_videos=opt.save_videos)\r\n\r\n"
] | [
[
"torch.from_numpy"
]
] |
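`write_results` in the `track.py` entry above serializes each tracked box with the `'mot'` template shown in the source. A tiny illustration of the resulting line (the frame, id, and box values here are made up):

```python
# The 'mot' branch of write_results formats one CSV line per tracked box.
save_format = '{frame},{id},{x1},{y1},{w},{h},1,-1,-1,-1\n'
line = save_format.format(frame=1, id=7, x1=100.0, y1=50.0, w=30.0, h=60.0)
assert line == '1,7,100.0,50.0,30.0,60.0,1,-1,-1,-1\n'
```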
erikw/taiga_stats | [
"7e28ffff5169707e248be6a4ab6e31326fc2ca85"
] | [
"taiga_stats/helpers.py"
] | [
"import datetime as dt\nimport sys\n\nimport matplotlib\n\nimport taiga_stats.constants as c\n\nmatplotlib.use(\"TkAgg\") # Reference: https://stackoverflow.com/a/48374671/265508\n\n\nDOT_HEADER_FMT = \"\"\"digraph {:s} {{\n labelloc=\"t\";\n //labelfontsize=\"40\"\n label=\"{:s}\";\n //size=\"7.5,10\"\n ratio=\"compress\"\n //orientation=landscape\n\"\"\"\n\n\ndef get_tag_str(tag):\n return \"\" if tag == c.TAG_MATCH_ALL else tag\n\n\ndef get_stories_with_tag(project, tag):\n uss = project.list_user_stories()\n ret_uss = None\n if tag == c.TAG_MATCH_ALL:\n ret_uss = uss\n else:\n ret_uss = []\n for us in uss:\n if us.tags and tag in us.tags:\n ret_uss.append(us)\n\n if ret_uss is None or len(ret_uss) == 0:\n print(\n \"Warning: no userstories matching '{:s}' was found.\".format(tag),\n file=sys.stderr,\n )\n sys.exit(1)\n return ret_uss\n\n\ndef get_us_stauts_id_from_name(project, name):\n statuses = project.list_user_story_statuses()\n for status in statuses:\n if status.name == name:\n return status.id\n return None\n\n\ndef get_us_status_name_from_id(project, status_id):\n statuses = project.list_user_story_statuses()\n for status in statuses:\n if status.id == status_id:\n return status.name\n return None\n\n\ndef remove_closed_stories(_project, uss):\n ret_uss = []\n for us in uss:\n if not us.is_closed:\n ret_uss.append(us)\n return ret_uss\n\n\ndef get_statuses_sorted_by_order(project):\n statuses = project.list_user_story_statuses()\n return sorted(statuses, key=lambda status: status.order)\n\n\ndef get_statuses_sorted_by_id(project):\n statuses = project.list_user_story_statuses()\n return sorted(statuses, key=lambda status: status.id)\n\n\ndef get_status_id_sorted(project):\n return [status.id for status in get_statuses_sorted_by_order(project)]\n\n\ndef get_status_and_names_sorted(project):\n status_ids = get_status_id_sorted(project)[::-1]\n status_names = []\n for status_id in status_ids:\n status_names.append(get_us_status_name_from_id(project, status_id))\n\n return status_ids, status_names\n\n\ndef get_dot_header(name, title):\n return DOT_HEADER_FMT.format(name, title)\n\n\ndef get_dot_footer():\n return \"}\"\n\n\ndef read_daily_cfd(path, tag):\n data_file = c.CFD_DATA_FILE_FMT.format(get_tag_str(tag))\n data_path = \"{:s}/{:s}\".format(path, data_file)\n data = []\n try:\n with open(data_path, \"r\", encoding=\"utf-8\") as fdata:\n row = 0\n for line in fdata:\n line = line.rstrip()\n parts = line.split(\"\\t\")\n if row == 0:\n data = [[] for _ in range(len(parts) + 1)]\n else:\n for col in range(len(parts)):\n value = parts[col]\n if col == 0: # First col is dates\n value = dt.datetime.strptime(value, \"%Y-%m-%d\")\n elif col == 1: # Second col is annotations\n pass\n else:\n value = int(value)\n data[col].append(value)\n\n row += 1\n except IOError as e:\n print(\n \"Could not read {:s}, error: {:s}\".format(data_path, str(e)),\n file=sys.stderr,\n )\n sys.exit(2)\n\n return data\n\n\nclass assert_args:\n \"\"\"\n Assert that the given arguments exists.\n \"\"\"\n\n def __init__(self, *args):\n self.needed_args = args\n\n def __call__(self, func):\n dec = self\n\n def wrapper(args):\n for arg in dec.needed_args:\n if arg not in args or args[arg] is None:\n print(\"Required argument ''{:s}' was not supplied on commandline or set in config file.\".format(arg))\n return 1\n func(args)\n\n return wrapper\n"
] | [
[
"matplotlib.use"
]
] |
mayankj/xView2-Solution | [
"804aa15a3d9f28c7c1d73e50ce0ed0c359a0493e"
] | [
"xview/models/unetv2.py"
] | [
"from functools import partial\r\nfrom typing import List, Union, Callable\r\n\r\nimport torch\r\nfrom pytorch_toolbelt.modules import ABN, ACT_RELU, ACT_SWISH\r\nfrom pytorch_toolbelt.modules import encoders as E\r\nfrom pytorch_toolbelt.modules.decoders import DecoderModule\r\nfrom pytorch_toolbelt.modules.encoders import EncoderModule\r\nfrom torch import nn\r\nfrom torch.nn import functional as F\r\n\r\nfrom .common import disaster_type_classifier, damage_types_classifier\r\nfrom ..dataset import OUTPUT_MASK_KEY, DISASTER_TYPE_KEY, DISASTER_TYPES, DAMAGE_TYPE_KEY, DAMAGE_TYPES\r\n\r\n__all__ = [\"UnetV2SegmentationModel\"]\r\n\r\n\r\nclass ConvBottleneck(nn.Module):\r\n def __init__(self, in_channels, out_channels):\r\n super().__init__()\r\n self.seq = nn.Sequential(nn.Conv2d(in_channels, out_channels, 3, padding=1), nn.ReLU(inplace=True))\r\n\r\n def forward(self, dec, enc):\r\n x = torch.cat([dec, enc], dim=1)\r\n return self.seq(x)\r\n\r\n\r\nclass UnetDecoderBlock(nn.Module):\r\n def __init__(self, in_channels, middle_channels, out_channels):\r\n super().__init__()\r\n self.layer = nn.Sequential(\r\n nn.Upsample(scale_factor=2), nn.Conv2d(in_channels, out_channels, 3, padding=1), nn.ReLU(inplace=True)\r\n )\r\n\r\n def forward(self, x):\r\n return self.layer(x)\r\n\r\n\r\nclass UNetDecoderV2(DecoderModule):\r\n def __init__(\r\n self,\r\n feature_maps: List[int],\r\n decoder_features: List[int],\r\n mask_channels: int,\r\n last_upsample_filters=None,\r\n dropout=0.0,\r\n abn_block=ABN,\r\n ):\r\n super().__init__()\r\n\r\n if not isinstance(decoder_features, list):\r\n decoder_features = [decoder_features * (2 ** i) for i in range(len(feature_maps))]\r\n\r\n if last_upsample_filters is None:\r\n last_upsample_filters = decoder_features[0]\r\n\r\n self.encoder_features = feature_maps\r\n self.decoder_features = decoder_features\r\n self.decoder_stages = nn.ModuleList([self.get_decoder(idx) for idx in range(0, len(self.decoder_features))])\r\n\r\n self.bottlenecks = nn.ModuleList(\r\n [\r\n ConvBottleneck(self.encoder_features[-i - 2] + f, f)\r\n for i, f in enumerate(reversed(self.decoder_features[:]))\r\n ]\r\n )\r\n\r\n self.output_filters = decoder_features\r\n\r\n self.last_upsample = UnetDecoderBlock(decoder_features[0], last_upsample_filters, last_upsample_filters)\r\n\r\n self.final = nn.Conv2d(last_upsample_filters, mask_channels, kernel_size=1)\r\n\r\n def get_decoder(self, layer):\r\n in_channels = (\r\n self.encoder_features[layer + 1]\r\n if layer + 1 == len(self.decoder_features)\r\n else self.decoder_features[layer + 1]\r\n )\r\n return UnetDecoderBlock(in_channels, self.decoder_features[layer], self.decoder_features[max(layer, 0)])\r\n\r\n def forward(self, feature_maps):\r\n\r\n last_dec_out = feature_maps[-1]\r\n\r\n x = last_dec_out\r\n for idx, bottleneck in enumerate(self.bottlenecks):\r\n rev_idx = -(idx + 1)\r\n decoder = self.decoder_stages[rev_idx]\r\n x = decoder(x)\r\n x = bottleneck(x, feature_maps[rev_idx - 1])\r\n\r\n x = self.last_upsample(x)\r\n\r\n f = self.final(x)\r\n\r\n return f\r\n\r\n\r\nclass UnetV2SegmentationModel(nn.Module):\r\n def __init__(\r\n self,\r\n encoder: EncoderModule,\r\n num_classes: int,\r\n disaster_type_classes: int,\r\n damage_type_classes: int,\r\n unet_channels: List[int],\r\n dropout=0.25,\r\n abn_block: Union[ABN, Callable[[int], nn.Module]] = ABN,\r\n full_size_mask=True,\r\n ):\r\n super().__init__()\r\n self.encoder = encoder\r\n\r\n feature_maps = [2 * fm for fm in encoder.output_filters]\r\n\r\n self.decoder = 
UNetDecoderV2(\r\n feature_maps=feature_maps,\r\n decoder_features=unet_channels,\r\n mask_channels=num_classes,\r\n dropout=dropout,\r\n abn_block=abn_block,\r\n )\r\n\r\n self.full_size_mask = full_size_mask\r\n if disaster_type_classes is not None:\r\n self.disaster_type_classifier = disaster_type_classifier(\r\n feature_maps[-1], disaster_type_classes, dropout=dropout\r\n )\r\n else:\r\n self.disaster_type_classifier = None\r\n\r\n if damage_type_classes is not None:\r\n self.damage_types_classifier = damage_types_classifier(\r\n feature_maps[-1], damage_type_classes, dropout=dropout\r\n )\r\n else:\r\n self.damage_types_classifier = None\r\n\r\n def forward(self, x):\r\n batch_size = x.size(0)\r\n pre, post = x[:, 0:3, ...], x[:, 3:6, ...]\r\n\r\n if self.training:\r\n x = torch.cat([pre, post], dim=0)\r\n features = self.encoder(x)\r\n features = [torch.cat([f[0:batch_size], f[batch_size : batch_size * 2]], dim=1) for f in features]\r\n else:\r\n pre_features, post_features = self.encoder(pre), self.encoder(post)\r\n features = [torch.cat([pre, post], dim=1) for pre, post in zip(pre_features, post_features)]\r\n\r\n # Decode mask\r\n mask = self.decoder(features)\r\n\r\n if self.full_size_mask:\r\n mask = F.interpolate(mask, size=x.size()[2:], mode=\"bilinear\", align_corners=False)\r\n\r\n output = {OUTPUT_MASK_KEY: mask}\r\n\r\n if self.disaster_type_classifier is not None:\r\n disaster_type = self.disaster_type_classifier(features[-1])\r\n output[DISASTER_TYPE_KEY] = disaster_type\r\n\r\n if self.damage_types_classifier is not None:\r\n damage_types = self.damage_types_classifier(features[-1])\r\n output[DAMAGE_TYPE_KEY] = damage_types\r\n\r\n return output\r\n\r\n\r\ndef efficientb3_unet_v2(input_channels=6, num_classes=5, dropout=0.0, pretrained=True, classifiers=True):\r\n encoder = E.EfficientNetB3Encoder(pretrained=pretrained,\r\n layers=[0, 1, 2, 4, 6],\r\n abn_params={\"activation\": ACT_RELU})\r\n return UnetV2SegmentationModel(\r\n encoder,\r\n num_classes=num_classes,\r\n disaster_type_classes=len(DISASTER_TYPES) if classifiers else None,\r\n damage_type_classes=len(DAMAGE_TYPES) if classifiers else None,\r\n unet_channels=[64, 128, 256, 256],\r\n dropout=dropout,\r\n abn_block=partial(ABN, activation=ACT_RELU),\r\n )\r\n\r\n\r\ndef densenet121_unet_v2(input_channels=6, num_classes=5, dropout=0.0, pretrained=True, classifiers=True):\r\n encoder = E.DenseNet121Encoder(pretrained=pretrained, layers=[0, 1, 2, 3, 4])\r\n return UnetV2SegmentationModel(\r\n encoder,\r\n num_classes=num_classes,\r\n disaster_type_classes=len(DISASTER_TYPES) if classifiers else None,\r\n damage_type_classes=len(DAMAGE_TYPES) if classifiers else None,\r\n unet_channels=[64, 128, 256, 256],\r\n dropout=dropout,\r\n abn_block=partial(ABN, activation=ACT_RELU),\r\n )\r\n\r\n\r\ndef densenet169_unet_v2(input_channels=6, num_classes=5, dropout=0.0, pretrained=True, classifiers=True):\r\n encoder = E.DenseNet169Encoder(pretrained=pretrained, layers=[0, 1, 2, 3, 4])\r\n return UnetV2SegmentationModel(\r\n encoder,\r\n num_classes=num_classes,\r\n disaster_type_classes=len(DISASTER_TYPES) if classifiers else None,\r\n damage_type_classes=len(DAMAGE_TYPES) if classifiers else None,\r\n unet_channels=[128, 128, 256, 256],\r\n dropout=dropout,\r\n abn_block=partial(ABN, activation=ACT_RELU),\r\n )\r\n\r\ndef resnet18_unet_v2(input_channels=6, num_classes=5, dropout=0.0, pretrained=True, classifiers=True):\r\n encoder = E.Resnet18Encoder(pretrained=pretrained, layers=[0, 1, 2, 3, 4])\r\n return 
UnetV2SegmentationModel(\r\n encoder,\r\n num_classes=num_classes,\r\n disaster_type_classes=len(DISASTER_TYPES) if classifiers else None,\r\n damage_type_classes=len(DAMAGE_TYPES) if classifiers else None,\r\n unet_channels=[64, 128, 256, 256],\r\n dropout=dropout,\r\n abn_block=partial(ABN, activation=ACT_RELU),\r\n )\r\ndef resnet34_unet_v2(input_channels=6, num_classes=5, dropout=0.0, pretrained=True, classifiers=True):\r\n encoder = E.Resnet34Encoder(pretrained=pretrained, layers=[0, 1, 2, 3, 4])\r\n return UnetV2SegmentationModel(\r\n encoder,\r\n num_classes=num_classes,\r\n disaster_type_classes=len(DISASTER_TYPES) if classifiers else None,\r\n damage_type_classes=len(DAMAGE_TYPES) if classifiers else None,\r\n unet_channels=[64, 128, 256, 256],\r\n dropout=dropout,\r\n abn_block=partial(ABN, activation=ACT_RELU),\r\n )\r\n\r\n\r\ndef resnet50_unet_v2(input_channels=6, num_classes=5, dropout=0.0, pretrained=True, classifiers=True):\r\n encoder = E.Resnet50Encoder(pretrained=pretrained, layers=[0, 1, 2, 3, 4])\r\n return UnetV2SegmentationModel(\r\n encoder,\r\n num_classes=num_classes,\r\n disaster_type_classes=len(DISASTER_TYPES) if classifiers else None,\r\n damage_type_classes=len(DAMAGE_TYPES) if classifiers else None,\r\n unet_channels=[96, 128, 256, 256],\r\n dropout=dropout,\r\n abn_block=partial(ABN, activation=ACT_RELU),\r\n )\r\n\r\n\r\ndef resnet101_unet_v2(input_channels=6, num_classes=5, dropout=0.0, pretrained=True, classifiers=True):\r\n encoder = E.Resnet101Encoder(pretrained=pretrained, layers=[0, 1, 2, 3, 4])\r\n return UnetV2SegmentationModel(\r\n encoder,\r\n num_classes=num_classes,\r\n disaster_type_classes=len(DISASTER_TYPES) if classifiers else None,\r\n damage_type_classes=len(DAMAGE_TYPES) if classifiers else None,\r\n unet_channels=[64, 128, 256, 384],\r\n dropout=dropout,\r\n abn_block=partial(ABN, activation=ACT_RELU),\r\n )\r\n\r\n\r\ndef seresnext50_unet_v2(input_channels=6, num_classes=5, dropout=0.0, pretrained=True, classifiers=True):\r\n encoder = E.SEResNeXt50Encoder(pretrained=pretrained, layers=[0, 1, 2, 3, 4])\r\n return UnetV2SegmentationModel(\r\n encoder,\r\n num_classes=num_classes,\r\n disaster_type_classes=len(DISASTER_TYPES) if classifiers else None,\r\n damage_type_classes=len(DAMAGE_TYPES) if classifiers else None,\r\n unet_channels=[64, 128, 256, 256],\r\n dropout=dropout,\r\n abn_block=partial(ABN, activation=ACT_RELU),\r\n )\r\n\r\n\r\ndef seresnext101_unet_v2(input_channels=6, num_classes=5, dropout=0.0, pretrained=True, classifiers=True):\r\n encoder = E.SEResNeXt101Encoder(pretrained=pretrained, layers=[0, 1, 2, 3, 4])\r\n return UnetV2SegmentationModel(\r\n encoder,\r\n num_classes=num_classes,\r\n disaster_type_classes=len(DISASTER_TYPES) if classifiers else None,\r\n damage_type_classes=len(DAMAGE_TYPES) if classifiers else None,\r\n unet_channels=[128, 128, 256, 384],\r\n dropout=dropout,\r\n abn_block=partial(ABN, activation=ACT_RELU),\r\n )\r\n"
] | [
[
"torch.nn.ReLU",
"torch.nn.Upsample",
"torch.cat",
"torch.nn.Conv2d"
]
] |
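The entry above (the UNet-V2 damage-segmentation models) stacks a pre-disaster and a post-disaster image into one 6-channel tensor, runs both crops through a single shared encoder, and concatenates the per-level features channel-wise before decoding. A minimal sketch of that pre/post Siamese pattern follows; the `ToyEncoder` is an assumption standing in for the project's pretrained backbones, and the mask decoder and classifier heads are omitted.

```python
# Sketch (not the repository's implementation) of the shared-encoder
# pre/post fusion used in UnetV2SegmentationModel.forward().
import torch
import torch.nn as nn

class ToyEncoder(nn.Module):
    """Stand-in encoder producing two feature levels."""
    def __init__(self):
        super().__init__()
        self.stage1 = nn.Sequential(nn.Conv2d(3, 16, 3, stride=2, padding=1), nn.ReLU())
        self.stage2 = nn.Sequential(nn.Conv2d(16, 32, 3, stride=2, padding=1), nn.ReLU())

    def forward(self, x):
        f1 = self.stage1(x)
        f2 = self.stage2(f1)
        return [f1, f2]

encoder = ToyEncoder()
x = torch.randn(2, 6, 64, 64)                 # stacked pre/post images
pre, post = x[:, 0:3, ...], x[:, 3:6, ...]

# Training-style path: encode both crops in one batch, then split and
# concatenate channel-wise, exactly as in the forward() above.
batch_size = x.size(0)
feats = encoder(torch.cat([pre, post], dim=0))
fused = [torch.cat([f[0:batch_size], f[batch_size:batch_size * 2]], dim=1) for f in feats]
print([f.shape for f in fused])               # channel dim doubles at every level
```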
adelmuursepp/ML-React-App-Template | [
"d0afed66b8dd037464edc39b1be7709b6207e834"
] | [
"example/iris-data-classifier/ML-React-App-Template/service/model_generator.py"
] | [
"# Import libraries\nimport numpy as np\nprint('imported numpy')\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.externals import joblib\nimport pandas as pd\n\n\n\n\n\n#Otsustuspuud\nfrom sklearn.tree import DecisionTreeClassifier\n\nprint('imported all')\n\ndata_table = pd.read_csv('postags_lemmas_levels_data.csv')\ndata_table = data_table.drop(['Unnamed: 0','tekstikood', 'filename'], 1)\n\nprint('read data')\n\n# data_table.groupby(\"keeletase\").A.plot(kind='kde')\n#data_table.groupby(\"keeletase\").A.hist(alpha=0.4)|\n\nfrom sklearn.preprocessing import LabelEncoder\nlabelencoder_0 = LabelEncoder() #independent variable encoder\ndata_table.iloc[:,17] = labelencoder_0.fit_transform(data_table.iloc[:,17])\n\n#Transforming values into percentages of total and splitting into target and features\nfeatures = data_table.loc[:, \"A\":\"Z\"]\ntarget_var = data_table.loc[:, \"keeletase\"]\n\nprint('split to test and train')\n# X_train, X_test, y_train, y_test =\\\n# train_test_split(features.loc[:,'A':\"Z\"], target_var, test_size = 0.5, random_state=1111)\n\n\n\n# Get the dataset\n# dataset = datasets.load_iris()\n\n# Split the dataset into features and labels\nX = features\ny = target_var\n\n# Split the dataset into training (80%) and testing (20%) data\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 0, shuffle = True)\n\n# Build the classifier and make prediction\nclassifier = DecisionTreeClassifier()\nclassifier.fit(X_train, y_train)\nprint('fit trainging data')\nprediction = classifier.predict(X_test)\n\n# Print the confusion matrix\n\n\n# Save the model to disk\njoblib.dump(classifier, 'classifier.joblib')\n\n\n\n\n\n"
] | [
[
"pandas.read_csv",
"sklearn.tree.DecisionTreeClassifier",
"sklearn.preprocessing.LabelEncoder",
"sklearn.externals.joblib.dump",
"sklearn.model_selection.train_test_split"
]
] |
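The model_generator.py entry above imports `sklearn.externals.joblib`, which was removed in scikit-learn 0.23+; the standalone `joblib` package is the usual replacement. A self-contained sketch of the same train/evaluate/persist flow follows, using the built-in iris data as a stand-in for the CSV (which is not available here).

```python
# Hedged sketch of the same split -> fit -> predict -> dump pipeline,
# updated for current scikit-learn (joblib imported directly).
import joblib
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import accuracy_score

X, y = load_iris(return_X_y=True)
X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.2, random_state=0, shuffle=True)

classifier = DecisionTreeClassifier()
classifier.fit(X_train, y_train)
print("test accuracy:", accuracy_score(y_test, classifier.predict(X_test)))

# Persist the fitted model next to the service code, as in the original script.
joblib.dump(classifier, "classifier.joblib")
```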
Garsiet/MchLE | [
"4afca0328a5710f16fa08f22b38431a6e84e6910"
] | [
"lab-10-2-mnist_nn.py"
] | [
"# Lab 10 MNIST and NN\nimport tensorflow as tf\nimport random\n# import matplotlib.pyplot as plt\n\nfrom tensorflow.examples.tutorials.mnist import input_data\n\ntf.set_random_seed(777) # reproducibility\n\nmnist = input_data.read_data_sets(\"MNIST_data/\", one_hot=True)\n# Check out https://www.tensorflow.org/get_started/mnist/beginners for\n# more information about the mnist dataset\n\n# parameters\nlearning_rate = 0.001\ntraining_epochs = 15\nbatch_size = 100\n\n# input place holders\nX = tf.placeholder(tf.float32, [None, 784])\nY = tf.placeholder(tf.float32, [None, 10])\n\n# weights & bias for nn layers\nW1 = tf.Variable(tf.random_normal([784, 256]))\nb1 = tf.Variable(tf.random_normal([256]))\nL1 = tf.nn.relu(tf.matmul(X, W1) + b1)\n\nW2 = tf.Variable(tf.random_normal([256, 256]))\nb2 = tf.Variable(tf.random_normal([256]))\nL2 = tf.nn.relu(tf.matmul(L1, W2) + b2)\n\nW3 = tf.Variable(tf.random_normal([256, 10]))\nb3 = tf.Variable(tf.random_normal([10]))\nhypothesis = tf.matmul(L2, W3) + b3\n\n# define cost/loss & optimizer\ncost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(\n logits=hypothesis, labels=Y))\noptimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)\n\n# initialize\nsess = tf.Session()\nsess.run(tf.global_variables_initializer())\n\n# train my model\nfor epoch in range(training_epochs):\n avg_cost = 0\n total_batch = int(mnist.train.num_examples / batch_size)\n\n for i in range(total_batch):\n batch_xs, batch_ys = mnist.train.next_batch(batch_size)\n feed_dict = {X: batch_xs, Y: batch_ys}\n c, _ = sess.run([cost, optimizer], feed_dict=feed_dict)\n avg_cost += c / total_batch\n\n print('Epoch:', '%04d' % (epoch + 1), 'cost =', '{:.9f}'.format(avg_cost))\n\nprint('Learning Finished!')\n\n# Test model and check accuracy\ncorrect_prediction = tf.equal(tf.argmax(hypothesis, 1), tf.argmax(Y, 1))\naccuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\nprint('Accuracy:', sess.run(accuracy, feed_dict={\n X: mnist.test.images, Y: mnist.test.labels}))\n\n# Get one and predict\nr = random.randint(0, mnist.test.num_examples - 1)\nprint(\"Label: \", sess.run(tf.argmax(mnist.test.labels[r:r + 1], 1)))\nprint(\"Prediction: \", sess.run(\n tf.argmax(hypothesis, 1), feed_dict={X: mnist.test.images[r:r + 1]}))\n\n# plt.imshow(mnist.test.images[r:r + 1].\n# reshape(28, 28), cmap='Greys', interpolation='nearest')\n# plt.show()\n\n'''\nEpoch: 0001 cost = 141.207671860\nEpoch: 0002 cost = 38.788445864\nEpoch: 0003 cost = 23.977515479\nEpoch: 0004 cost = 16.315132428\nEpoch: 0005 cost = 11.702554882\nEpoch: 0006 cost = 8.573139748\nEpoch: 0007 cost = 6.370995680\nEpoch: 0008 cost = 4.537178684\nEpoch: 0009 cost = 3.216900532\nEpoch: 0010 cost = 2.329708954\nEpoch: 0011 cost = 1.715552875\nEpoch: 0012 cost = 1.189857912\nEpoch: 0013 cost = 0.820965160\nEpoch: 0014 cost = 0.624131458\nEpoch: 0015 cost = 0.454633765\nLearning Finished!\nAccuracy: 0.9455\n'''\n"
] | [
[
"tensorflow.placeholder",
"tensorflow.nn.softmax_cross_entropy_with_logits",
"tensorflow.global_variables_initializer",
"tensorflow.train.AdamOptimizer",
"tensorflow.examples.tutorials.mnist.input_data.read_data_sets",
"tensorflow.matmul",
"tensorflow.cast",
"tensorflow.set_random_seed",
"tensorflow.Session",
"tensorflow.argmax",
"tensorflow.random_normal"
]
] |
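The lab-10-2-mnist_nn.py entry above uses the TF1 graph API and the removed `tensorflow.examples.tutorials` MNIST loader. As a hedged, modern equivalent (an assumption, not the original code), the same 784-256-256-10 ReLU network with Adam at learning rate 0.001, batch size 100, and softmax cross-entropy on logits can be written with tf.keras:

```python
# TF2/Keras sketch of the same fully connected MNIST classifier.
import tensorflow as tf

(x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()
x_train = x_train.reshape(-1, 784).astype("float32") / 255.0
x_test = x_test.reshape(-1, 784).astype("float32") / 255.0

model = tf.keras.Sequential([
    tf.keras.Input(shape=(784,)),
    tf.keras.layers.Dense(256, activation="relu"),
    tf.keras.layers.Dense(256, activation="relu"),
    tf.keras.layers.Dense(10),                      # logits, as in the original
])
model.compile(
    optimizer=tf.keras.optimizers.Adam(learning_rate=0.001),
    loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
    metrics=["accuracy"],
)
model.fit(x_train, y_train, epochs=15, batch_size=100, verbose=2)
print("test accuracy:", model.evaluate(x_test, y_test, verbose=0)[1])
```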
bell-one/pifuhd | [
"3221d266a042ad58de702e65e588ada5426b08f6"
] | [
"apps/recon.py"
] | [
"# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.\n\nimport sys\nimport os\n\nsys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))\nROOT_PATH = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n\nimport time\nimport json \nimport numpy as np\nimport cv2\nimport random\nimport torch\nimport torch.nn as nn\nfrom tqdm import tqdm\nfrom torch.utils.data import DataLoader\nimport matplotlib.pyplot as plt\nimport matplotlib.cm as cm\nimport matplotlib\nfrom numpy.linalg import inv\n\nfrom lib.options import BaseOptions\nfrom lib.mesh_util import save_obj_mesh_with_color, reconstruction\nfrom lib.data import EvalWPoseDataset, EvalDataset\nfrom lib.model import HGPIFuNetwNML, HGPIFuMRNet\nfrom lib.geometry import index\n\nfrom PIL import Image\n\nparser = BaseOptions()\n\ndef gen_mesh(res, net, cuda, data, save_path, thresh=0.5, use_octree=True, components=False):\n image_tensor_global = data['img_512'].to(device=cuda)\n image_tensor = data['img'].to(device=cuda)\n calib_tensor = data['calib'].to(device=cuda)\n\n net.filter_global(image_tensor_global)\n net.filter_local(image_tensor[:,None])\n\n try:\n if net.netG.netF is not None:\n image_tensor_global = torch.cat([image_tensor_global, net.netG.nmlF], 0)\n if net.netG.netB is not None:\n image_tensor_global = torch.cat([image_tensor_global, net.netG.nmlB], 0)\n except:\n pass\n \n b_min = data['b_min']\n b_max = data['b_max']\n try:\n save_img_path = save_path[:-4] + '.png'\n save_img_list = []\n for v in range(image_tensor_global.shape[0]):\n save_img = (np.transpose(image_tensor_global[v].detach().cpu().numpy(), (1, 2, 0)) * 0.5 + 0.5)[:, :, ::-1] * 255.0\n save_img_list.append(save_img)\n save_img = np.concatenate(save_img_list, axis=1)\n cv2.imwrite(save_img_path, save_img)\n\n verts, faces, _, _ = reconstruction(\n net, cuda, calib_tensor, res, b_min, b_max, thresh, use_octree=use_octree, num_samples=50000)\n verts_tensor = torch.from_numpy(verts.T).unsqueeze(0).to(device=cuda).float()\n # if 'calib_world' in data:\n # calib_world = data['calib_world'].numpy()[0]\n # verts = np.matmul(np.concatenate([verts, np.ones_like(verts[:,:1])],1), inv(calib_world).T)[:,:3]\n\n color = np.zeros(verts.shape)\n interval = 50000\n for i in range(len(color) // interval + 1):\n left = i * interval\n if i == len(color) // interval:\n right = -1\n else:\n right = (i + 1) * interval\n net.calc_normal(verts_tensor[:, None, :, left:right], calib_tensor[:,None], calib_tensor)\n nml = net.nmls.detach().cpu().numpy()[0] * 0.5 + 0.5\n color[left:right] = nml.T\n\n save_obj_mesh_with_color(save_path, verts, faces, color)\n except Exception as e:\n print(e)\n\n\ndef gen_mesh_imgColor(res, net, cuda, data, save_path, thresh=0.5, use_octree=True, components=False):\n image_tensor_global = data['img_512'].to(device=cuda)\n image_tensor = data['img'].to(device=cuda)\n calib_tensor = data['calib'].to(device=cuda)\n\n net.filter_global(image_tensor_global)\n net.filter_local(image_tensor[:,None])\n\n try:\n if net.netG.netF is not None:\n image_tensor_global = torch.cat([image_tensor_global, net.netG.nmlF], 0)\n if net.netG.netB is not None:\n image_tensor_global = torch.cat([image_tensor_global, net.netG.nmlB], 0)\n except:\n pass\n\n b_min = data['b_min']\n b_max = data['b_max']\n try:\n save_img_path = save_path[:-4] + '.png'\n save_img_list = []\n for v in range(image_tensor_global.shape[0]):\n save_img = (np.transpose(image_tensor_global[v].detach().cpu().numpy(), (1, 2, 0)) * 0.5 + 0.5)[:, :, ::-1] * 
255.0\n save_img_list.append(save_img)\n save_img = np.concatenate(save_img_list, axis=1)\n cv2.imwrite(save_img_path, save_img)\n\n verts, faces, _, _ = reconstruction(\n net, cuda, calib_tensor, res, b_min, b_max, thresh, use_octree=use_octree, num_samples=100000)\n verts_tensor = torch.from_numpy(verts.T).unsqueeze(0).to(device=cuda).float()\n\n # if this returns error, projection must be defined somewhere else\n xyz_tensor = net.projection(verts_tensor, calib_tensor[:1])\n uv = xyz_tensor[:, :2, :]\n color = index(image_tensor[:1], uv).detach().cpu().numpy()[0].T\n color = color * 0.5 + 0.5\n\n if 'calib_world' in data:\n calib_world = data['calib_world'].numpy()[0]\n verts = np.matmul(np.concatenate([verts, np.ones_like(verts[:,:1])],1), inv(calib_world).T)[:,:3]\n\n save_obj_mesh_with_color(save_path, verts, faces, color)\n\n except Exception as e:\n print(e)\n\n\ndef recon(opt, use_rect=False):\n # load checkpoints\n state_dict_path = None\n if opt.load_netMR_checkpoint_path is not None:\n state_dict_path = opt.load_netMR_checkpoint_path\n elif opt.resume_epoch < 0:\n state_dict_path = '%s/%s_train_latest' % (opt.checkpoints_path, opt.name)\n opt.resume_epoch = 0\n else:\n state_dict_path = '%s/%s_train_epoch_%d' % (opt.checkpoints_path, opt.name, opt.resume_epoch)\n \n start_id = opt.start_id\n end_id = opt.end_id\n\n cuda = torch.device('cuda:%d' % opt.gpu_id if torch.cuda.is_available() else 'cpu')\n\n state_dict = None\n if state_dict_path is not None and os.path.exists(state_dict_path):\n print('Resuming from ', state_dict_path)\n state_dict = torch.load(state_dict_path, map_location=cuda) \n print('Warning: opt is overwritten.')\n dataroot = opt.dataroot\n resolution = opt.resolution\n results_path = opt.results_path\n loadSize = opt.loadSize\n \n opt = state_dict['opt']\n opt.dataroot = dataroot\n opt.resolution = resolution\n opt.results_path = results_path\n opt.loadSize = loadSize\n else:\n raise Exception('failed loading state dict!', state_dict_path)\n \n # parser.print_options(opt)\n\n if use_rect:\n test_dataset = EvalDataset(opt)\n else:\n test_dataset = EvalWPoseDataset(opt)\n\n print('test data size: ', len(test_dataset))\n projection_mode = test_dataset.projection_mode\n\n opt_netG = state_dict['opt_netG']\n netG = HGPIFuNetwNML(opt_netG, projection_mode).to(device=cuda)\n netMR = HGPIFuMRNet(opt, netG, projection_mode).to(device=cuda)\n\n def set_eval():\n netG.eval()\n\n # load checkpoints\n netMR.load_state_dict(state_dict['model_state_dict'])\n\n os.makedirs(opt.checkpoints_path, exist_ok=True)\n os.makedirs(opt.results_path, exist_ok=True)\n os.makedirs('%s/%s/recon' % (opt.results_path, opt.name), exist_ok=True)\n\n if start_id < 0:\n start_id = 0\n if end_id < 0:\n end_id = len(test_dataset)\n\n ## test\n with torch.no_grad():\n set_eval()\n\n print('generate mesh (test) ...')\n for i in tqdm(range(start_id, end_id)):\n if i >= len(test_dataset):\n break\n \n # for multi-person processing, set it to False\n if True:\n test_data = test_dataset[i]\n\n save_path = '%s/%s/recon/result_%s_%d.obj' % (opt.results_path, opt.name, test_data['name'], opt.resolution)\n\n print(save_path)\n gen_mesh_imgColor(opt.resolution, netMR, cuda, test_data, save_path, components=opt.use_compose)\n else:\n for j in range(test_dataset.get_n_person(i)):\n test_dataset.person_id = j\n test_data = test_dataset[i]\n save_path = '%s/%s/recon/result_%s_%d.obj' % (opt.results_path, opt.name, test_data['name'], j)\n gen_mesh_imgColor(opt.resolution, netMR, cuda, test_data, save_path, 
components=opt.use_compose)\n\ndef reconWrapper(args=None, use_rect=False):\n opt = parser.parse(args)\n recon(opt, use_rect)\n\nif __name__ == '__main__':\n reconWrapper()\n \n"
] | [
[
"torch.load",
"numpy.zeros",
"numpy.linalg.inv",
"torch.no_grad",
"numpy.ones_like",
"torch.cuda.is_available",
"torch.from_numpy",
"numpy.concatenate",
"torch.cat"
]
] |
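In the recon.py entry above, gen_mesh() colors the reconstructed mesh by computing per-vertex normals in fixed-size slices (`interval = 50000`) so GPU memory stays bounded; note that its last chunk uses `right = -1`, which leaves the final vertex uncolored. A small standalone sketch of that chunking pattern, with a clamped upper bound instead, is below (the per-chunk function is a placeholder, not the PIFuHD normal network).

```python
# Generic chunked per-vertex processing, as in gen_mesh()/gen_mesh_imgColor().
import numpy as np

def process_in_chunks(verts, fn, interval=50_000):
    """Apply `fn` (vertex block -> attribute block) over `verts` in slices."""
    out = np.zeros(verts.shape, dtype=verts.dtype)
    n = len(verts)
    for left in range(0, n, interval):
        right = min(left + interval, n)     # includes the final vertex
        out[left:right] = fn(verts[left:right])
    return out

verts = np.random.rand(120_001, 3).astype(np.float32)
colors = process_in_chunks(verts, lambda v: 0.5 * v + 0.5)   # placeholder "normal" map
assert colors.shape == verts.shape
```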
rahulgovind/pysph | [
"3d493e6f2c5284ea9c0f0d008e4eb9a0870da0d9"
] | [
"pysph/sph/rigid_body.py"
] | [
"# -*- coding: utf-8 -*-\n\"\"\"Rigid body related equations.\n\"\"\"\nfrom pysph.base.reduce_array import parallel_reduce_array\nfrom pysph.sph.equation import Equation\nfrom pysph.sph.integrator_step import IntegratorStep\nimport numpy as np\nimport numpy\nfrom math import sqrt\n\n\ndef skew(vec):\n import sympy as S\n x, y, z = vec[0], vec[1], vec[2]\n return S.Matrix([[0, -z, y], [z, 0, -x], [-y, x, 0]])\n\n\ndef get_alpha_dot():\n \"\"\"Use sympy to perform most of the math and use the resulting formulae\n to calculate:\n\n inv(I) (\\tau - w x (I w))\n \"\"\"\n import sympy as S\n ixx, iyy, izz, ixy, ixz, iyz = S.symbols(\"ixx, iyy, izz, ixy, ixz, iyz\")\n tx, ty, tz = S.symbols(\"tx, ty, tz\")\n wx, wy, wz = S.symbols('wx, wy, wz')\n tau = S.Matrix([tx, ty, tz])\n I = S.Matrix([[ixx, ixy, ixz], [ixy, iyy, iyz], [ixz, iyz, izz]])\n w = S.Matrix([wx, wy, wz])\n Iinv = I.inv()\n Iinv.simplify()\n # inv(I) (\\tau - w x (Iw))\n res = Iinv*(tau - w.cross(I*w))\n res.simplify()\n # Now do some awesome sympy magic.\n syms, result = S.cse(res, symbols=S.numbered_symbols('tmp'))\n for lhs, rhs in syms:\n print(\"%s = %s\" % (lhs, rhs))\n for i in range(3):\n print(\"omega_dot[%d] =\" % i, result[0][i])\n\n\ndef get_torque():\n \"\"\"Use sympy to perform some simple math.\n R x F\n C_m x F\n w x r\n \"\"\"\n import sympy as S\n x, y, z, fx, fy, fz = S.symbols(\"x, y, z, fx, fy, fz\")\n R = S.Matrix([x, y, z])\n F = S.Matrix([fx, fy, fz])\n print(\"Torque:\", R.cross(F))\n cx, cy, cz = S.symbols('cx, cy, cz')\n d = S.Matrix([cx, cy, cz])\n print(\"c_m x f = \", d.cross(F))\n wx, wy, wz = S.symbols('wx, wy, wz')\n rx, ry, rz = S.symbols('rx, ry, rz')\n w = S.Matrix([wx, wy, wz])\n r = S.Matrix([rx, ry, rz])\n print(\"w x r = %s\" % w.cross(r))\n\n\n# This is defined to silence editor warnings for the use of declare.\ndef declare(*args): pass\n\n\nclass RigidBodyMoments(Equation):\n def reduce(self, dst, t, dt):\n # FIXME: this will be slow in opencl\n nbody = declare('int')\n i = declare('int')\n base_mi = declare('int')\n base = declare('int')\n nbody = dst.num_body[0]\n if dst.gpu:\n dst.gpu.pull('omega', 'x', 'y', 'z', 'fx', 'fy', 'fz')\n\n d_mi = declare('object')\n m = declare('object')\n x = declare('object')\n y = declare('object')\n z = declare('object')\n fx = declare('object')\n fy = declare('object')\n fz = declare('object')\n d_mi = dst.mi\n cond = declare('object')\n for i in range(nbody):\n cond = dst.body_id == i\n base = i*16\n m = dst.m[cond]\n x = dst.x[cond]\n y = dst.y[cond]\n z = dst.z[cond]\n # Find the total_mass, center of mass and second moments.\n d_mi[base + 0] = numpy.sum(m)\n d_mi[base + 1] = numpy.sum(m*x)\n d_mi[base + 2] = numpy.sum(m*y)\n d_mi[base + 3] = numpy.sum(m*z)\n # Only do the lower triangle of values moments of inertia.\n d_mi[base + 4] = numpy.sum(m*(y*y + z*z))\n d_mi[base + 5] = numpy.sum(m*(x*x + z*z))\n d_mi[base + 6] = numpy.sum(m*(x*x + y*y))\n\n d_mi[base + 7] = -numpy.sum(m*x*y)\n d_mi[base + 8] = -numpy.sum(m*x*z)\n d_mi[base + 9] = -numpy.sum(m*y*z)\n\n # the total force and torque\n fx = dst.fx[cond]\n fy = dst.fy[cond]\n fz = dst.fz[cond]\n d_mi[base + 10] = numpy.sum(fx)\n d_mi[base + 11] = numpy.sum(fy)\n d_mi[base + 12] = numpy.sum(fz)\n\n # Calculate the torque and reduce it.\n d_mi[base + 13] = numpy.sum(y*fz - z*fy)\n d_mi[base + 14] = numpy.sum(z*fx - x*fz)\n d_mi[base + 15] = numpy.sum(x*fy - y*fx)\n\n # Reduce the temporary mi values in parallel across processors.\n d_mi[:] = parallel_reduce_array(dst.mi)\n\n # Set the reduced 
values.\n for i in range(nbody):\n base_mi = i*16\n base = i*3\n m = d_mi[base_mi + 0]\n dst.total_mass[i] = m\n cx = d_mi[base_mi + 1]/m\n cy = d_mi[base_mi + 2]/m\n cz = d_mi[base_mi + 3]/m\n dst.cm[base + 0] = cx\n dst.cm[base + 1] = cy\n dst.cm[base + 2] = cz\n\n # The actual moment of inertia about center of mass from parallel\n # axes theorem.\n ixx = d_mi[base_mi + 4] - (cy*cy + cz*cz)*m\n iyy = d_mi[base_mi + 5] - (cx*cx + cz*cz)*m\n izz = d_mi[base_mi + 6] - (cx*cx + cy*cy)*m\n ixy = d_mi[base_mi + 7] + cx*cy*m\n ixz = d_mi[base_mi + 8] + cx*cz*m\n iyz = d_mi[base_mi + 9] + cy*cz*m\n\n d_mi[base_mi + 0] = ixx\n d_mi[base_mi + 1] = ixy\n d_mi[base_mi + 2] = ixz\n d_mi[base_mi + 3] = ixy\n d_mi[base_mi + 4] = iyy\n d_mi[base_mi + 5] = iyz\n d_mi[base_mi + 6] = ixz\n d_mi[base_mi + 7] = iyz\n d_mi[base_mi + 8] = izz\n\n fx = d_mi[base_mi + 10]\n fy = d_mi[base_mi + 11]\n fz = d_mi[base_mi + 12]\n dst.force[base + 0] = fx\n dst.force[base + 1] = fy\n dst.force[base + 2] = fz\n\n # Acceleration of CM.\n dst.ac[base + 0] = fx/m\n dst.ac[base + 1] = fy/m\n dst.ac[base + 2] = fz/m\n\n # Find torque about the Center of Mass and not origin.\n tx = d_mi[base_mi + 13]\n ty = d_mi[base_mi + 14]\n tz = d_mi[base_mi + 15]\n tx -= cy*fz - cz*fy\n ty -= -cx*fz + cz*fx\n tz -= cx*fy - cy*fx\n dst.torque[base + 0] = tx\n dst.torque[base + 1] = ty\n dst.torque[base + 2] = tz\n\n wx = dst.omega[base + 0]\n wy = dst.omega[base + 1]\n wz = dst.omega[base + 2]\n # Find omega_dot from: omega_dot = inv(I) (\\tau - w x (Iw))\n # This was done using the sympy code above.\n tmp0 = iyz**2\n tmp1 = ixy**2\n tmp2 = ixz**2\n tmp3 = ixx*iyy\n tmp4 = ixy*ixz\n tmp5 = 1./(ixx*tmp0 + iyy*tmp2 - 2*iyz*tmp4 + izz*tmp1 - izz*tmp3)\n tmp6 = ixy*izz - ixz*iyz\n tmp7 = ixz*wx + iyz*wy + izz*wz\n tmp8 = ixx*wx + ixy*wy + ixz*wz\n tmp9 = tmp7*wx - tmp8*wz + ty\n tmp10 = ixy*iyz - ixz*iyy\n tmp11 = ixy*wx + iyy*wy + iyz*wz\n tmp12 = -tmp11*wx + tmp8*wy + tz\n tmp13 = tmp11*wz - tmp7*wy + tx\n tmp14 = ixx*iyz - tmp4\n dst.omega_dot[base + 0] = tmp5*(-tmp10*tmp12 -\n tmp13*(iyy*izz - tmp0) + tmp6*tmp9)\n dst.omega_dot[base + 1] = tmp5*(tmp12*tmp14 +\n tmp13*tmp6 - tmp9*(ixx*izz - tmp2))\n dst.omega_dot[base + 2] = tmp5*(-tmp10*tmp13 -\n tmp12*(-tmp1 + tmp3) + tmp14*tmp9)\n if dst.gpu:\n dst.gpu.push(\n 'total_mass', 'mi', 'cm', 'force', 'ac', 'torque',\n 'omega_dot'\n )\n\n\nclass RigidBodyMotion(Equation):\n def initialize(self, d_idx, d_x, d_y, d_z, d_u, d_v, d_w,\n d_cm, d_vc, d_ac, d_omega, d_body_id):\n base = declare('int')\n base = d_body_id[d_idx]*3\n wx = d_omega[base + 0]\n wy = d_omega[base + 1]\n wz = d_omega[base + 2]\n rx = d_x[d_idx] - d_cm[base + 0]\n ry = d_y[d_idx] - d_cm[base + 1]\n rz = d_z[d_idx] - d_cm[base + 2]\n\n d_u[d_idx] = d_vc[base + 0] + wy*rz - wz*ry\n d_v[d_idx] = d_vc[base + 1] + wz*rx - wx*rz\n d_w[d_idx] = d_vc[base + 2] + wx*ry - wy*rx\n\n\nclass BodyForce(Equation):\n def __init__(self, dest, sources, gx=0.0, gy=0.0, gz=0.0):\n self.gx = gx\n self.gy = gy\n self.gz = gz\n super(BodyForce, self).__init__(dest, sources)\n\n def initialize(self, d_idx, d_m, d_fx, d_fy, d_fz, d_num_body, d_mi):\n d_fx[d_idx] = d_m[d_idx]*self.gx\n d_fy[d_idx] = d_m[d_idx]*self.gy\n d_fz[d_idx] = d_m[d_idx]*self.gz\n\n\nclass SummationDensityBoundary(Equation):\n r\"\"\"Equation to find the density of the\n fluid particle due to any boundary or a rigid body\n\n :math:`\\rho_a = \\sum_b {\\rho}_fluid V_b W_{ab}`\n\n \"\"\"\n def __init__(self, dest, sources, fluid_rho=1000.0):\n self.fluid_rho = fluid_rho\n 
super(SummationDensityBoundary, self).__init__(dest, sources)\n\n def loop(self, d_idx, d_rho, s_idx, s_m, s_V, WIJ):\n d_rho[d_idx] += self.fluid_rho * s_V[s_idx] * WIJ\n\n\nclass NumberDensity(Equation):\n def initialize(self, d_idx, d_V):\n d_V[d_idx] = 0.0\n\n def loop(self, d_idx, d_V, WIJ):\n d_V[d_idx] += WIJ\n\n\nclass SummationDensityRigidBody(Equation):\n def __init__(self, dest, sources, rho0):\n self.rho0 = rho0\n super(SummationDensityRigidBody, self).__init__(dest, sources)\n\n def initialize(self, d_idx, d_rho):\n d_rho[d_idx] = 0.0\n\n def loop(self, d_idx, d_rho, s_idx, s_V, WIJ):\n d_rho[d_idx] += self.rho0/s_V[s_idx]*WIJ\n\n\nclass ViscosityRigidBody(Equation):\n\n \"\"\"The viscous acceleration on the fluid/solid due to a boundary.\n Implemented from Akinci et al. http://dx.doi.org/10.1145/2185520.2185558\n\n Use this with the fluid as a destination and body as source.\n \"\"\"\n\n def __init__(self, dest, sources, rho0, nu):\n self.nu = nu\n self.rho0 = rho0\n super(ViscosityRigidBody, self).__init__(dest, sources)\n\n def loop(self, d_idx, d_m, d_au, d_av, d_aw, d_rho,\n s_idx, s_V, s_fx, s_fy, s_fz,\n EPS, VIJ, XIJ, R2IJ, DWIJ):\n phi_b = self.rho0/(s_V[s_idx]*d_rho[d_idx])\n vijdotxij = min(VIJ[0]*XIJ[0] + VIJ[1]*XIJ[1] + VIJ[2]*XIJ[2], 0.0)\n\n fac = self.nu*phi_b*vijdotxij/(R2IJ + EPS)\n ax = fac*DWIJ[0]\n ay = fac*DWIJ[1]\n az = fac*DWIJ[2]\n d_au[d_idx] += ax\n d_av[d_idx] += ay\n d_aw[d_idx] += az\n s_fx[s_idx] += -d_m[d_idx]*ax\n s_fy[s_idx] += -d_m[d_idx]*ay\n s_fz[s_idx] += -d_m[d_idx]*az\n\n\nclass PressureRigidBody(Equation):\n\n \"\"\"The pressure acceleration on the fluid/solid due to a boundary.\n Implemented from Akinci et al. http://dx.doi.org/10.1145/2185520.2185558\n\n Use this with the fluid as a destination and body as source.\n \"\"\"\n\n def __init__(self, dest, sources, rho0):\n self.rho0 = rho0\n super(PressureRigidBody, self).__init__(dest, sources)\n\n def loop(self, d_idx, d_m, d_rho, d_au, d_av, d_aw, d_p,\n s_idx, s_V, s_fx, s_fy, s_fz, DWIJ):\n rho1 = 1.0/d_rho[d_idx]\n fac = -d_p[d_idx]*rho1*rho1*self.rho0/s_V[s_idx]\n ax = fac*DWIJ[0]\n ay = fac*DWIJ[1]\n az = fac*DWIJ[2]\n d_au[d_idx] += ax\n d_av[d_idx] += ay\n d_aw[d_idx] += az\n s_fx[s_idx] += -d_m[d_idx]*ax\n s_fy[s_idx] += -d_m[d_idx]*ay\n s_fz[s_idx] += -d_m[d_idx]*az\n\n\nclass AkinciRigidFluidCoupling(Equation):\n \"\"\"Force between a solid sphere and a SPH fluid particle. 
This is\n implemented using Akinci's[1] force and additional force from solid\n bodies pressure which is implemented by Liu[2]\n\n [1]'Versatile Rigid-Fluid Coupling for Incompressible SPH'\n\n URL: https://graphics.ethz.ch/~sobarbar/papers/Sol12/Sol12.pdf\n\n [2]A 3D Simulation of a Moving Solid in Viscous Free-Surface Flows by\n Coupling SPH and DEM\n\n https://doi.org/10.1155/2017/3174904\n\n\n Note: Here forces for both the phases are added at once.\n Please make sure that this force is applied only once\n for both the particle properties.\n\n \"\"\"\n def __init__(self, dest, sources, fluid_rho=1000):\n super(AkinciRigidFluidCoupling, self).__init__(dest, sources)\n self.fluid_rho = fluid_rho\n\n def loop(self, d_idx, d_m, d_rho, d_au, d_av, d_aw, d_p,\n s_idx, s_V, s_fx, s_fy, s_fz, DWIJ, s_m, s_p, s_rho):\n\n psi = s_V[s_idx] * self.fluid_rho\n\n _t1 = 2 * d_p[d_idx] / (d_rho[d_idx]**2)\n\n d_au[d_idx] += -psi * _t1 * DWIJ[0]\n d_av[d_idx] += -psi * _t1 * DWIJ[1]\n d_aw[d_idx] += -psi * _t1 * DWIJ[2]\n\n s_fx[s_idx] += d_m[d_idx] * psi * _t1 * DWIJ[0]\n s_fy[s_idx] += d_m[d_idx] * psi * _t1 * DWIJ[1]\n s_fz[s_idx] += d_m[d_idx] * psi * _t1 * DWIJ[2]\n\n\nclass LiuFluidForce(Equation):\n \"\"\"Force between a solid sphere and a SPH fluid particle. This is\n implemented using Akinci's[1] force and additional force from solid\n bodies pressure which is implemented by Liu[2]\n\n [1]'Versatile Rigid-Fluid Coupling for Incompressible SPH'\n\n URL: https://graphics.ethz.ch/~sobarbar/papers/Sol12/Sol12.pdf\n\n [2]A 3D Simulation of a Moving Solid in Viscous Free-Surface Flows by\n Coupling SPH and DEM\n\n https://doi.org/10.1155/2017/3174904\n\n\n Note: Here forces for both the phases are added at once.\n Please make sure that this force is applied only once\n for both the particle properties.\n\n \"\"\"\n def __init__(self, dest, sources):\n super(LiuFluidForce, self).__init__(dest, sources)\n\n def loop(self, d_idx, d_m, d_rho, d_au, d_av, d_aw, d_p,\n s_idx, s_V, s_fx, s_fy, s_fz, DWIJ, s_m, s_p, s_rho):\n _t1 = s_p[s_idx] / (s_rho[s_idx]**2) + d_p[d_idx] / (d_rho[d_idx]**2)\n\n d_au[d_idx] += -s_m[s_idx] * _t1 * DWIJ[0]\n d_av[d_idx] += -s_m[s_idx] * _t1 * DWIJ[1]\n d_aw[d_idx] += -s_m[s_idx] * _t1 * DWIJ[2]\n\n s_fx[s_idx] += d_m[d_idx] * s_m[s_idx] * _t1 * DWIJ[0]\n s_fy[s_idx] += d_m[d_idx] * s_m[s_idx] * _t1 * DWIJ[1]\n s_fz[s_idx] += d_m[d_idx] * s_m[s_idx] * _t1 * DWIJ[2]\n\n\nclass RigidBodyForceGPUGems(Equation):\n \"\"\"This is inspired from\n http://http.developer.nvidia.com/GPUGems3/gpugems3_ch29.html\n and\n BK Mishra's article on DEM\n http://dx.doi.org/10.1016/S0301-7516(03)00032-2\n A review of computer simulation of tumbling mills by the discrete element\n method: Part I - contact mechanics\n \"\"\"\n def __init__(self, dest, sources, k=1.0, d=1.0, eta=1.0, kt=1.0):\n \"\"\"Note that d is a factor multiplied with the \"h\" of the particle.\n \"\"\"\n self.k = k\n self.d = d\n self.eta = eta\n self.kt = kt\n super(RigidBodyForceGPUGems, self).__init__(dest, sources)\n\n def loop(self, d_idx, d_fx, d_fy, d_fz, d_h, d_total_mass, XIJ,\n RIJ, R2IJ, VIJ):\n vijdotrij = VIJ[0]*XIJ[0] + VIJ[1]*XIJ[1] + VIJ[2]*XIJ[2]\n if RIJ > 1e-9:\n vijdotrij_r2ij = vijdotrij/R2IJ\n nij_x = XIJ[0]/RIJ\n nij_y = XIJ[1]/RIJ\n nij_z = XIJ[2]/RIJ\n else:\n vijdotrij_r2ij = 0.0\n nij_x = 0.0\n nij_y = 0.0\n nij_z = 0.0\n vijt_x = VIJ[0] - vijdotrij_r2ij*XIJ[0]\n vijt_y = VIJ[1] - vijdotrij_r2ij*XIJ[1]\n vijt_z = VIJ[2] - vijdotrij_r2ij*XIJ[2]\n\n d = self.d*d_h[d_idx]\n fac = 
self.k*d_total_mass[0]/d*max(d - RIJ, 0.0)\n\n d_fx[d_idx] += fac*nij_x - self.eta*VIJ[0] - self.kt*vijt_x\n d_fy[d_idx] += fac*nij_y - self.eta*VIJ[1] - self.kt*vijt_y\n d_fz[d_idx] += fac*nij_z - self.eta*VIJ[2] - self.kt*vijt_z\n\n\nclass RigidBodyCollision(Equation):\n \"\"\"Force between two spheres is implemented using DEM contact force law.\n\n Refer https://doi.org/10.1016/j.powtec.2011.09.019 for more\n information.\n\n Open-source MFIX-DEM software for gas–solids flows:\n Part I—Verification studies .\n\n \"\"\"\n def __init__(self, dest, sources, kn=1e3, mu=0.5, en=0.8):\n \"\"\"Initialise the required coefficients for force calculation.\n\n\n Keyword arguments:\n kn -- Normal spring stiffness (default 1e3)\n mu -- friction coefficient (default 0.5)\n en -- coefficient of restitution (0.8)\n\n Given these coefficients, tangential spring stiffness, normal and\n tangential damping coefficient are calculated by default.\n\n \"\"\"\n self.kn = kn\n self.kt = 2. / 7. * kn\n m_eff = np.pi * 0.5**2 * 1e-6 * 2120\n self.gamma_n = -(2 * np.sqrt(kn * m_eff) * np.log(en)) / (\n np.sqrt(np.pi**2 + np.log(en)**2))\n self.gamma_t = 0.5 * self.gamma_n\n self.mu = mu\n super(RigidBodyCollision, self).__init__(dest, sources)\n\n def loop(self, d_idx, d_fx, d_fy, d_fz, d_h, d_total_mass, d_rad_s,\n d_tang_disp_x, d_tang_disp_y, d_tang_disp_z, d_tang_velocity_x,\n d_tang_velocity_y, d_tang_velocity_z, s_idx, s_rad_s, XIJ, RIJ,\n R2IJ, VIJ):\n overlap = 0\n if RIJ > 1e-9:\n overlap = d_rad_s[d_idx] + s_rad_s[s_idx] - RIJ\n\n if overlap > 0:\n # normal vector passing from particle i to j\n nij_x = -XIJ[0] / RIJ\n nij_y = -XIJ[1] / RIJ\n nij_z = -XIJ[2] / RIJ\n\n # overlap speed: a scalar\n vijdotnij = VIJ[0] * nij_x + VIJ[1] * nij_y + VIJ[2] * nij_z\n\n # normal velocity\n vijn_x = vijdotnij * nij_x\n vijn_y = vijdotnij * nij_y\n vijn_z = vijdotnij * nij_z\n\n # normal force with conservative and dissipation part\n fn_x = -self.kn * overlap * nij_x - self.gamma_n * vijn_x\n fn_y = -self.kn * overlap * nij_y - self.gamma_n * vijn_y\n fn_z = -self.kn * overlap * nij_z - self.gamma_n * vijn_z\n\n # ----------------------Tangential force---------------------- #\n\n # tangential velocity\n d_tang_velocity_x[d_idx] = VIJ[0] - vijn_x\n d_tang_velocity_y[d_idx] = VIJ[1] - vijn_y\n d_tang_velocity_z[d_idx] = VIJ[2] - vijn_z\n\n dtvx = d_tang_velocity_x[d_idx]\n dtvy = d_tang_velocity_y[d_idx]\n dtvz = d_tang_velocity_z[d_idx]\n _tang = sqrt(dtvx*dtvx + dtvy*dtvy + dtvz*dtvz)\n\n # tangential unit vector\n tij_x = 0\n tij_y = 0\n tij_z = 0\n if _tang > 0:\n tij_x = d_tang_velocity_x[d_idx] / _tang\n tij_y = d_tang_velocity_y[d_idx] / _tang\n tij_z = d_tang_velocity_z[d_idx] / _tang\n\n # damping force or dissipation\n ft_x_d = -self.gamma_t * d_tang_velocity_x[d_idx]\n ft_y_d = -self.gamma_t * d_tang_velocity_y[d_idx]\n ft_z_d = -self.gamma_t * d_tang_velocity_z[d_idx]\n\n # tangential spring force\n ft_x_s = -self.kt * d_tang_disp_x[d_idx]\n ft_y_s = -self.kt * d_tang_disp_y[d_idx]\n ft_z_s = -self.kt * d_tang_disp_z[d_idx]\n\n ft_x = ft_x_d + ft_x_s\n ft_y = ft_y_d + ft_y_s\n ft_z = ft_z_d + ft_z_s\n\n # coulomb law\n ftij = sqrt((ft_x**2) + (ft_y**2) + (ft_z**2))\n fnij = sqrt((fn_x**2) + (fn_y**2) + (fn_z**2))\n\n _fnij = self.mu * fnij\n\n if _fnij < ftij:\n ft_x = -_fnij * tij_x\n ft_y = -_fnij * tij_y\n ft_z = -_fnij * tij_z\n\n d_fx[d_idx] += fn_x + ft_x\n d_fy[d_idx] += fn_y + ft_y\n d_fz[d_idx] += fn_z + ft_z\n else:\n d_tang_velocity_x[d_idx] = 0\n d_tang_velocity_y[d_idx] = 0\n 
d_tang_velocity_z[d_idx] = 0\n\n d_tang_disp_x[d_idx] = 0\n d_tang_disp_y[d_idx] = 0\n d_tang_disp_z[d_idx] = 0\n\n\nclass RigidBodyWallCollision(Equation):\n \"\"\"Force between sphere and a wall is implemented using\n DEM contact force law.\n\n Refer https://doi.org/10.1016/j.powtec.2011.09.019 for more\n information.\n\n Open-source MFIX-DEM software for gas–solids flows:\n Part I—Verification studies .\n\n \"\"\"\n def __init__(self, dest, sources, kn=1e3, mu=0.5, en=0.8):\n \"\"\"Initialise the required coefficients for force calculation.\n\n\n Keyword arguments:\n kn -- Normal spring stiffness (default 1e3)\n mu -- friction coefficient (default 0.5)\n en -- coefficient of restitution (0.8)\n\n Given these coefficients, tangential spring stiffness, normal and\n tangential damping coefficient are calculated by default.\n\n \"\"\"\n self.kn = kn\n self.kt = 2. / 7. * kn\n m_eff = np.pi * 0.5**2 * 1e-6 * 2120\n self.gamma_n = -(2 * np.sqrt(kn * m_eff) * np.log(en)) / (\n np.sqrt(np.pi**2 + np.log(en)**2))\n print(self.gamma_n)\n self.gamma_t = 0.5 * self.gamma_n\n self.mu = mu\n super(RigidBodyWallCollision, self).__init__(dest, sources)\n\n def loop(self, d_idx, d_fx, d_fy, d_fz, d_h, d_total_mass, d_rad_s,\n d_tang_disp_x, d_tang_disp_y, d_tang_disp_z, d_tang_velocity_x,\n d_tang_velocity_y, d_tang_velocity_z, s_idx, XIJ, RIJ,\n R2IJ, VIJ, s_nx, s_ny, s_nz):\n # check overlap amount\n overlap = d_rad_s[d_idx] - (XIJ[0] * s_nx[s_idx] + XIJ[1] *\n s_ny[s_idx] + XIJ[2] * s_nz[s_idx])\n\n if overlap > 0:\n # basic variables: normal vector\n nij_x = -s_nx[s_idx]\n nij_y = -s_ny[s_idx]\n nij_z = -s_nz[s_idx]\n\n # overlap speed: a scalar\n vijdotnij = VIJ[0] * nij_x + VIJ[1] * nij_y + VIJ[2] * nij_z\n\n # normal velocity\n vijn_x = vijdotnij * nij_x\n vijn_y = vijdotnij * nij_y\n vijn_z = vijdotnij * nij_z\n\n # normal force with conservative and dissipation part\n fn_x = -self.kn * overlap * nij_x - self.gamma_n * vijn_x\n fn_y = -self.kn * overlap * nij_y - self.gamma_n * vijn_y\n fn_z = -self.kn * overlap * nij_z - self.gamma_n * vijn_z\n\n # ----------------------Tangential force---------------------- #\n\n # tangential velocity\n d_tang_velocity_x[d_idx] = VIJ[0] - vijn_x\n d_tang_velocity_y[d_idx] = VIJ[1] - vijn_y\n d_tang_velocity_z[d_idx] = VIJ[2] - vijn_z\n\n _tang = (\n (d_tang_velocity_x[d_idx]**2) + (d_tang_velocity_y[d_idx]**2) +\n (d_tang_velocity_z[d_idx]**2))**(1. / 2.)\n\n # tangential unit vector\n tij_x = 0\n tij_y = 0\n tij_z = 0\n if _tang > 0:\n tij_x = d_tang_velocity_x[d_idx] / _tang\n tij_y = d_tang_velocity_y[d_idx] / _tang\n tij_z = d_tang_velocity_z[d_idx] / _tang\n\n # damping force or dissipation\n ft_x_d = -self.gamma_t * d_tang_velocity_x[d_idx]\n ft_y_d = -self.gamma_t * d_tang_velocity_y[d_idx]\n ft_z_d = -self.gamma_t * d_tang_velocity_z[d_idx]\n\n # tangential spring force\n ft_x_s = -self.kt * d_tang_disp_x[d_idx]\n ft_y_s = -self.kt * d_tang_disp_y[d_idx]\n ft_z_s = -self.kt * d_tang_disp_z[d_idx]\n\n ft_x = ft_x_d + ft_x_s\n ft_y = ft_y_d + ft_y_s\n ft_z = ft_z_d + ft_z_s\n\n # coulomb law\n ftij = ((ft_x**2) + (ft_y**2) + (ft_z**2))**(1. / 2.)\n fnij = ((fn_x**2) + (fn_y**2) + (fn_z**2))**(1. 
/ 2.)\n\n _fnij = self.mu * fnij\n\n if _fnij < ftij:\n ft_x = -_fnij * tij_x\n ft_y = -_fnij * tij_y\n ft_z = -_fnij * tij_z\n\n d_fx[d_idx] += fn_x + ft_x\n d_fy[d_idx] += fn_y + ft_y\n d_fz[d_idx] += fn_z + ft_z\n # print(d_fz[d_idx])\n else:\n d_tang_velocity_x[d_idx] = 0\n d_tang_velocity_y[d_idx] = 0\n d_tang_velocity_z[d_idx] = 0\n\n d_tang_disp_x[d_idx] = 0\n d_tang_disp_y[d_idx] = 0\n d_tang_disp_z[d_idx] = 0\n\n\nclass EulerStepRigidBody(IntegratorStep):\n \"\"\"Fast but inaccurate integrator. Use this for testing\"\"\"\n def initialize(self):\n pass\n\n def stage1(self, d_idx, d_u, d_v, d_w, d_x, d_y, d_z,\n d_omega, d_omega_dot, d_vc, d_ac, d_num_body,\n dt=0.0):\n _i = declare('int')\n _j = declare('int')\n base = declare('int')\n if d_idx == 0:\n for _i in range(d_num_body[0]):\n base = 3*_i\n for _j in range(3):\n d_vc[base + _j] += d_ac[base + _j]*dt\n d_omega[base + _j] += d_omega_dot[base + _j]*dt\n\n d_x[d_idx] += dt*d_u[d_idx]\n d_y[d_idx] += dt*d_v[d_idx]\n d_z[d_idx] += dt*d_w[d_idx]\n\n\nclass RK2StepRigidBody(IntegratorStep):\n def initialize(self, d_idx, d_x, d_y, d_z, d_x0, d_y0, d_z0,\n d_omega, d_omega0, d_vc, d_vc0, d_num_body):\n _i = declare('int')\n _j = declare('int')\n base = declare('int')\n if d_idx == 0:\n for _i in range(d_num_body[0]):\n base = 3*_i\n for _j in range(3):\n d_vc0[base + _j] = d_vc[base + _j]\n d_omega0[base + _j] = d_omega[base + _j]\n\n d_x0[d_idx] = d_x[d_idx]\n d_y0[d_idx] = d_y[d_idx]\n d_z0[d_idx] = d_z[d_idx]\n\n def stage1(self, d_idx, d_u, d_v, d_w, d_x, d_y, d_z, d_x0, d_y0, d_z0,\n d_omega, d_omega_dot, d_vc, d_ac, d_omega0, d_vc0, d_num_body,\n dt=0.0):\n dtb2 = 0.5*dt\n _i = declare('int')\n j = declare('int')\n base = declare('int')\n if d_idx == 0:\n for _i in range(d_num_body[0]):\n base = 3*_i\n for j in range(3):\n d_vc[base + j] = d_vc0[base + j] + d_ac[base + j]*dtb2\n d_omega[base + j] = (d_omega0[base + j] +\n d_omega_dot[base + j]*dtb2)\n\n d_x[d_idx] = d_x0[d_idx] + dtb2*d_u[d_idx]\n d_y[d_idx] = d_y0[d_idx] + dtb2*d_v[d_idx]\n d_z[d_idx] = d_z0[d_idx] + dtb2*d_w[d_idx]\n\n def stage2(self, d_idx, d_u, d_v, d_w, d_x, d_y, d_z, d_x0, d_y0, d_z0,\n d_omega, d_omega_dot, d_vc, d_ac, d_omega0, d_vc0, d_num_body,\n dt=0.0):\n _i = declare('int')\n j = declare('int')\n base = declare('int')\n if d_idx == 0:\n for _i in range(d_num_body[0]):\n base = 3*_i\n for j in range(3):\n d_vc[base + j] = d_vc0[base + j] + d_ac[base + j]*dt\n d_omega[base + j] = (d_omega0[base + j] +\n d_omega_dot[base + j]*dt)\n\n d_x[d_idx] = d_x0[d_idx] + dt*d_u[d_idx]\n d_y[d_idx] = d_y0[d_idx] + dt*d_v[d_idx]\n d_z[d_idx] = d_z0[d_idx] + dt*d_w[d_idx]\n"
] | [
[
"numpy.sqrt",
"numpy.sum",
"numpy.log"
]
] |
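The rigid_body.py entry above unrolls, via sympy-generated expressions, the Euler equation for angular acceleration, omega_dot = inv(I) (tau - omega x (I omega)), after shifting the inertia tensor to the center of mass with the parallel-axis theorem. A short numpy cross-check of that formula (not part of PySPH, with arbitrary example values) is:

```python
# Direct evaluation of omega_dot = inv(I) @ (tau - omega x (I @ omega)).
import numpy as np

def omega_dot(I, omega, tau):
    """Angular acceleration for a symmetric 3x3 inertia tensor I,
    angular velocity omega, and torque tau about the center of mass."""
    return np.linalg.solve(I, tau - np.cross(omega, I @ omega))

I = np.array([[2.0, 0.1, 0.0],
              [0.1, 3.0, 0.2],
              [0.0, 0.2, 4.0]])
omega = np.array([0.5, -0.2, 0.1])
tau = np.array([0.0, 1.0, 0.0])
print(omega_dot(I, omega, tau))
```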
yoxu515/aot-benchmark | [
"99f74f051c91ac221e44f3edab3534ae4dd233f7"
] | [
"tools/train.py"
] | [
"import importlib\nimport random\nimport sys\n\nsys.setrecursionlimit(10000)\nsys.path.append('.')\nsys.path.append('..')\n\nimport torch.multiprocessing as mp\n\nfrom networks.managers.trainer import Trainer\n\n\ndef main_worker(gpu, cfg, enable_amp=True):\n # Initiate a training manager\n trainer = Trainer(rank=gpu, cfg=cfg, enable_amp=enable_amp)\n # Start Training\n trainer.sequential_training()\n\n\ndef main():\n import argparse\n parser = argparse.ArgumentParser(description=\"Train VOS\")\n parser.add_argument('--exp_name', type=str, default='')\n parser.add_argument('--stage', type=str, default='pre')\n parser.add_argument('--model', type=str, default='aott')\n\n parser.add_argument('--start_gpu', type=int, default=0)\n parser.add_argument('--gpu_num', type=int, default=-1)\n parser.add_argument('--batch_size', type=int, default=-1)\n parser.add_argument('--dist_url', type=str, default='')\n parser.add_argument('--amp', action='store_true')\n parser.set_defaults(amp=False)\n\n parser.add_argument('--pretrained_path', type=str, default='')\n\n parser.add_argument('--datasets', nargs='+', type=str, default=[])\n parser.add_argument('--lr', type=float, default=-1.)\n parser.add_argument('--total_step', type=int, default=-1.)\n parser.add_argument('--start_step', type=int, default=-1.)\n\n args = parser.parse_args()\n\n engine_config = importlib.import_module('configs.' + args.stage)\n\n cfg = engine_config.EngineConfig(args.exp_name, args.model)\n\n if len(args.datasets) > 0:\n cfg.DATASETS = args.datasets\n\n cfg.DIST_START_GPU = args.start_gpu\n if args.gpu_num > 0:\n cfg.TRAIN_GPUS = args.gpu_num\n if args.batch_size > 0:\n cfg.TRAIN_BATCH_SIZE = args.batch_size\n\n if args.pretrained_path != '':\n cfg.PRETRAIN_MODEL = args.pretrained_path\n\n if args.lr > 0:\n cfg.TRAIN_LR = args.lr\n\n if args.total_step > 0:\n cfg.TRAIN_TOTAL_STEPS = args.total_step\n\n if args.start_step > 0:\n cfg.TRAIN_START_STEP = args.start_step\n\n if args.dist_url == '':\n cfg.DIST_URL = 'tcp://127.0.0.1:123' + str(random.randint(0, 9)) + str(\n random.randint(0, 9))\n else:\n cfg.DIST_URL = args.dist_url\n # Use torch.multiprocessing.spawn to launch distributed processes\n mp.spawn(main_worker, nprocs=cfg.TRAIN_GPUS, args=(cfg, args.amp))\n\n\nif __name__ == '__main__':\n main()\n"
] | [
[
"torch.multiprocessing.spawn"
]
] |
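The train.py entry above launches one process per GPU with `torch.multiprocessing.spawn`, passing the config and AMP flag as extra arguments while the rank is injected automatically as the worker's first argument. A minimal sketch of that launch convention follows, with a toy worker and a dict standing in for the project's Trainer and EngineConfig.

```python
# Sketch of the mp.spawn launch pattern: fn(rank, *args) per process.
import torch.multiprocessing as mp

def main_worker(rank, cfg, enable_amp):
    # The real worker builds Trainer(rank=gpu, cfg=cfg, enable_amp=...) and
    # calls sequential_training(); here we only show the calling convention.
    print(f"worker rank={rank}, amp={enable_amp}, gpus={cfg['TRAIN_GPUS']}")

if __name__ == "__main__":
    cfg = {"TRAIN_GPUS": 2}                 # stand-in for EngineConfig
    mp.spawn(main_worker, nprocs=cfg["TRAIN_GPUS"], args=(cfg, False))
```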
ghislainp/mishchenko_brf | [
"de7fe70730b53f17fb7e7aa9a45f08bf7d97abd1"
] | [
"tests/test_mishchenko_refllib.py"
] | [
"#!/usr/bin/env python\n\n\"\"\"Tests for `mishchenko_brf` package.\"\"\"\n\nimport numpy as np\n\nfrom mishchenko_brf.lib.refl import brf\n\n\ndef test_brf():\n \"\"\"Sample pytest test function with the pytest fixture as an argument.\"\"\"\n # from bs4 import BeautifulSoup\n # assert 'GitHub' in BeautifulSoup(response.content).title.string\n\n ssalb, _, legendre = setup()\n\n _, spherical_albedo, albedo, _, r = brf(ssalb, len(legendre), legendre)\n\n exptected_spherical_albedo, expected_albedo, expected_r1 = results()\n\n np.testing.assert_allclose(albedo, expected_albedo, atol=1e-6, rtol=0)\n\n r1 = np.concatenate([r[1, i, : i + 1] for i in range(r.shape[1])])\n np.testing.assert_allclose(r1, expected_r1, atol=1e-5, rtol=0)\n\n\ndef setup():\n\n ssalb = 0.85404045e00\n # 642\n Legendre_coef = [\n 0.1000000e01,\n 0.2512562e01,\n 0.3759305e01,\n 0.4408389e01,\n 0.5536463e01,\n 0.6260982e01,\n 0.7525636e01,\n 0.8312788e01,\n 0.9542491e01,\n 0.1040885e02,\n 0.1151645e02,\n 0.1244280e02,\n 0.1343854e02,\n 0.1442002e02,\n 0.1533074e02,\n 0.1628946e02,\n 0.1717182e02,\n 0.1807816e02,\n 0.1898665e02,\n 0.1978204e02,\n 0.2073036e02,\n 0.2142049e02,\n 0.2241713e02,\n 0.2301598e02,\n 0.2401247e02,\n 0.2456069e02,\n 0.2552589e02,\n 0.2607232e02,\n 0.2695832e02,\n 0.2752722e02,\n 0.2831653e02,\n 0.2892665e02,\n 0.2962000e02,\n 0.3025463e02,\n 0.3086891e02,\n 0.3150598e02,\n 0.3207453e02,\n 0.3268248e02,\n 0.3323146e02,\n 0.3378450e02,\n 0.3433640e02,\n 0.3482245e02,\n 0.3538333e02,\n 0.3580127e02,\n 0.3636525e02,\n 0.3672771e02,\n 0.3728034e02,\n 0.3760455e02,\n 0.3812729e02,\n 0.3843143e02,\n 0.3890899e02,\n 0.3920687e02,\n 0.3962926e02,\n 0.3992750e02,\n 0.4029233e02,\n 0.4059085e02,\n 0.4090206e02,\n 0.4119521e02,\n 0.4146075e02,\n 0.4174035e02,\n 0.4196960e02,\n 0.4222747e02,\n 0.4242858e02,\n 0.4265858e02,\n 0.4283710e02,\n 0.4303618e02,\n 0.4319451e02,\n 0.4336267e02,\n 0.4350045e02,\n 0.4364008e02,\n 0.4375514e02,\n 0.4386992e02,\n 0.4395939e02,\n 0.4405318e02,\n 0.4411450e02,\n 0.4419058e02,\n 0.4422209e02,\n 0.4428264e02,\n 0.4428387e02,\n 0.4432995e02,\n 0.4430155e02,\n 0.4433324e02,\n 0.4427669e02,\n 0.4429349e02,\n 0.4421068e02,\n 0.4421187e02,\n 0.4410481e02,\n 0.4408970e02,\n 0.4396023e02,\n 0.4392847e02,\n 0.4377812e02,\n 0.4372965e02,\n 0.4355963e02,\n 0.4349478e02,\n 0.4330600e02,\n 0.4322534e02,\n 0.4301853e02,\n 0.4292279e02,\n 0.4269857e02,\n 0.4258853e02,\n 0.4234756e02,\n 0.4222396e02,\n 0.4196694e02,\n 0.4183048e02,\n 0.4155822e02,\n 0.4140946e02,\n 0.4112286e02,\n 0.4096230e02,\n 0.4066235e02,\n 0.4049042e02,\n 0.4017813e02,\n 0.3999523e02,\n 0.3967166e02,\n 0.3947818e02,\n 0.3914435e02,\n 0.3894071e02,\n 0.3859761e02,\n 0.3838426e02,\n 0.3803282e02,\n 0.3781024e02,\n 0.3745135e02,\n 0.3722008e02,\n 0.3685457e02,\n 0.3661513e02,\n 0.3624382e02,\n 0.3599677e02,\n 0.3562045e02,\n 0.3536629e02,\n 0.3498576e02,\n 0.3472502e02,\n 0.3434105e02,\n 0.3407421e02,\n 0.3368756e02,\n 0.3341511e02,\n 0.3302651e02,\n 0.3274894e02,\n 0.3235911e02,\n 0.3207689e02,\n 0.3168649e02,\n 0.3140011e02,\n 0.3100977e02,\n 0.3071973e02,\n 0.3033004e02,\n 0.3003681e02,\n 0.2964833e02,\n 0.2935240e02,\n 0.2896567e02,\n 0.2866749e02,\n 0.2828303e02,\n 0.2798304e02,\n 0.2760134e02,\n 0.2729996e02,\n 0.2692148e02,\n 0.2661913e02,\n 0.2624432e02,\n 0.2594138e02,\n 0.2557065e02,\n 0.2526752e02,\n 0.2490123e02,\n 0.2459831e02,\n 0.2423680e02,\n 0.2393445e02,\n 0.2357803e02,\n 0.2327663e02,\n 0.2292556e02,\n 0.2262546e02,\n 0.2228000e02,\n 0.2198153e02,\n 0.2164193e02,\n 0.2134540e02,\n 0.2101185e02,\n 
0.2071756e02,\n 0.2039027e02,\n 0.2009849e02,\n 0.1977763e02,\n 0.1948862e02,\n 0.1917433e02,\n 0.1888834e02,\n 0.1858075e02,\n 0.1829802e02,\n 0.1799722e02,\n 0.1771798e02,\n 0.1742405e02,\n 0.1714851e02,\n 0.1686151e02,\n 0.1658986e02,\n 0.1630983e02,\n 0.1604226e02,\n 0.1576923e02,\n 0.1550590e02,\n 0.1523988e02,\n 0.1498093e02,\n 0.1472193e02,\n 0.1446749e02,\n 0.1421550e02,\n 0.1396569e02,\n 0.1372068e02,\n 0.1347561e02,\n 0.1323754e02,\n 0.1299730e02,\n 0.1276613e02,\n 0.1253080e02,\n 0.1230645e02,\n 0.1207611e02,\n 0.1185852e02,\n 0.1163322e02,\n 0.1142231e02,\n 0.1120210e02,\n 0.1099778e02,\n 0.1078270e02,\n 0.1058487e02,\n 0.1037494e02,\n 0.1018351e02,\n 0.9978738e01,\n 0.9793600e01,\n 0.9593997e01,\n 0.9415044e01,\n 0.9220600e01,\n 0.9047715e01,\n 0.8858418e01,\n 0.8691482e01,\n 0.8507312e01,\n 0.8346198e01,\n 0.8167129e01,\n 0.8011710e01,\n 0.7837708e01,\n 0.7687854e01,\n 0.7518876e01,\n 0.7374456e01,\n 0.7210453e01,\n 0.7071336e01,\n 0.6912254e01,\n 0.6778307e01,\n 0.6624084e01,\n 0.6495174e01,\n 0.6345745e01,\n 0.6221738e01,\n 0.6077033e01,\n 0.5957794e01,\n 0.5817740e01,\n 0.5703134e01,\n 0.5567654e01,\n 0.5457548e01,\n 0.5326563e01,\n 0.5220821e01,\n 0.5094248e01,\n 0.4992739e01,\n 0.4870492e01,\n 0.4773085e01,\n 0.4655075e01,\n 0.4561642e01,\n 0.4447778e01,\n 0.4358190e01,\n 0.4248381e01,\n 0.4162514e01,\n 0.4056666e01,\n 0.3974395e01,\n 0.3872413e01,\n 0.3793618e01,\n 0.3695406e01,\n 0.3619966e01,\n 0.3525429e01,\n 0.3453228e01,\n 0.3362271e01,\n 0.3293193e01,\n 0.3205718e01,\n 0.3139651e01,\n 0.3055564e01,\n 0.2992397e01,\n 0.2911601e01,\n 0.2851228e01,\n 0.2773628e01,\n 0.2715944e01,\n 0.2641446e01,\n 0.2586348e01,\n 0.2514857e01,\n 0.2462248e01,\n 0.2393671e01,\n 0.2343453e01,\n 0.2277698e01,\n 0.2229778e01,\n 0.2166754e01,\n 0.2121041e01,\n 0.2060659e01,\n 0.2017065e01,\n 0.1959237e01,\n 0.1917674e01,\n 0.1862314e01,\n 0.1822700e01,\n 0.1769722e01,\n 0.1731977e01,\n 0.1681298e01,\n 0.1645344e01,\n 0.1596882e01,\n 0.1562643e01,\n 0.1516319e01,\n 0.1483723e01,\n 0.1439458e01,\n 0.1408435e01,\n 0.1366152e01,\n 0.1336633e01,\n 0.1296260e01,\n 0.1268180e01,\n 0.1229642e01,\n 0.1202937e01,\n 0.1166165e01,\n 0.1140775e01,\n 0.1105699e01,\n 0.1081566e01,\n 0.1048119e01,\n 0.1025186e01,\n 0.9933033e00,\n 0.9715168e00,\n 0.9411347e00,\n 0.9204422e00,\n 0.8914999e00,\n 0.8718511e00,\n 0.8442892e00,\n 0.8256361e00,\n 0.7993970e00,\n 0.7816934e00,\n 0.7567216e00,\n 0.7399231e00,\n 0.7161648e00,\n 0.7002287e00,\n 0.6776319e00,\n 0.6625175e00,\n 0.6410319e00,\n 0.6267001e00,\n 0.6062772e00,\n 0.5926905e00,\n 0.5732835e00,\n 0.5604061e00,\n 0.5419698e00,\n 0.5297674e00,\n 0.5122584e00,\n 0.5006981e00,\n 0.4840745e00,\n 0.4731249e00,\n 0.4573463e00,\n 0.4469774e00,\n 0.4320051e00,\n 0.4221882e00,\n 0.4079849e00,\n 0.3986924e00,\n 0.3852225e00,\n 0.3764283e00,\n 0.3636572e00,\n 0.3553362e00,\n 0.3432310e00,\n 0.3353594e00,\n 0.3238883e00,\n 0.3164434e00,\n 0.3055761e00,\n 0.2985361e00,\n 0.2882435e00,\n 0.2815877e00,\n 0.2718419e00,\n 0.2655505e00,\n 0.2563248e00,\n 0.2503791e00,\n 0.2416479e00,\n 0.2360299e00,\n 0.2277687e00,\n 0.2224615e00,\n 0.2146470e00,\n 0.2096341e00,\n 0.2022440e00,\n 0.1975101e00,\n 0.1905229e00,\n 0.1860533e00,\n 0.1794487e00,\n 0.1752294e00,\n 0.1689879e00,\n 0.1650056e00,\n 0.1591086e00,\n 0.1553506e00,\n 0.1497804e00,\n 0.1462348e00,\n 0.1409744e00,\n 0.1376297e00,\n 0.1326632e00,\n 0.1295086e00,\n 0.1248204e00,\n 0.1218456e00,\n 0.1174212e00,\n 0.1146165e00,\n 0.1104419e00,\n 0.1077980e00,\n 0.1038600e00,\n 0.1013680e00,\n 0.9765404e-01,\n 0.9530568e-01,\n 
0.9180371e-01,\n 0.8959107e-01,\n 0.8628968e-01,\n 0.8420526e-01,\n 0.8109362e-01,\n 0.7913032e-01,\n 0.7619812e-01,\n 0.7434919e-01,\n 0.7158666e-01,\n 0.6984571e-01,\n 0.6724356e-01,\n 0.6560456e-01,\n 0.6315396e-01,\n 0.6161117e-01,\n 0.5930377e-01,\n 0.5785177e-01,\n 0.5567961e-01,\n 0.5431328e-01,\n 0.5226885e-01,\n 0.5098332e-01,\n 0.4905947e-01,\n 0.4785016e-01,\n 0.4604013e-01,\n 0.4490269e-01,\n 0.4320006e-01,\n 0.4213037e-01,\n 0.4052908e-01,\n 0.3952327e-01,\n 0.3801755e-01,\n 0.3707194e-01,\n 0.3565637e-01,\n 0.3476748e-01,\n 0.3343689e-01,\n 0.3260145e-01,\n 0.3135097e-01,\n 0.3056588e-01,\n 0.2939089e-01,\n 0.2865322e-01,\n 0.2754936e-01,\n 0.2685635e-01,\n 0.2581950e-01,\n 0.2516853e-01,\n 0.2419479e-01,\n 0.2358340e-01,\n 0.2266910e-01,\n 0.2209496e-01,\n 0.2123660e-01,\n 0.2069752e-01,\n 0.1989183e-01,\n 0.1938574e-01,\n 0.1862960e-01,\n 0.1815454e-01,\n 0.1744504e-01,\n 0.1699918e-01,\n 0.1633353e-01,\n 0.1591513e-01,\n 0.1529073e-01,\n 0.1489815e-01,\n 0.1431255e-01,\n 0.1394424e-01,\n 0.1339511e-01,\n 0.1304962e-01,\n 0.1253477e-01,\n 0.1221073e-01,\n 0.1172810e-01,\n 0.1142421e-01,\n 0.1097185e-01,\n 0.1068691e-01,\n 0.1026299e-01,\n 0.9995839e-02,\n 0.9598632e-02,\n 0.9348197e-02,\n 0.8976075e-02,\n 0.8741341e-02,\n 0.8392774e-02,\n 0.8172784e-02,\n 0.7846332e-02,\n 0.7640186e-02,\n 0.7334491e-02,\n 0.7141344e-02,\n 0.6855129e-02,\n 0.6674184e-02,\n 0.6406246e-02,\n 0.6236754e-02,\n 0.5985964e-02,\n 0.5827218e-02,\n 0.5592512e-02,\n 0.5443850e-02,\n 0.5224227e-02,\n 0.5085025e-02,\n 0.4879544e-02,\n 0.4749216e-02,\n 0.4556995e-02,\n 0.4434989e-02,\n 0.4255197e-02,\n 0.4140997e-02,\n 0.3972854e-02,\n 0.3865974e-02,\n 0.3708747e-02,\n 0.3608730e-02,\n 0.3461733e-02,\n 0.3368151e-02,\n 0.3230736e-02,\n 0.3143185e-02,\n 0.3014746e-02,\n 0.2932847e-02,\n 0.2812813e-02,\n 0.2736210e-02,\n 0.2624045e-02,\n 0.2552404e-02,\n 0.2447605e-02,\n 0.2380611e-02,\n 0.2282708e-02,\n 0.2220067e-02,\n 0.2128618e-02,\n 0.2070056e-02,\n 0.1984648e-02,\n 0.1929906e-02,\n 0.1850152e-02,\n 0.1798987e-02,\n 0.1724522e-02,\n 0.1676708e-02,\n 0.1607190e-02,\n 0.1562511e-02,\n 0.1497620e-02,\n 0.1455875e-02,\n 0.1395308e-02,\n 0.1356308e-02,\n 0.1299785e-02,\n 0.1263353e-02,\n 0.1210611e-02,\n 0.1176583e-02,\n 0.1127375e-02,\n 0.1095598e-02,\n 0.1049696e-02,\n 0.1020025e-02,\n 0.9772121e-03,\n 0.9495118e-03,\n 0.9095846e-03,\n 0.8837259e-03,\n 0.8464937e-03,\n 0.8223557e-03,\n 0.7876397e-03,\n 0.7651098e-03,\n 0.7327438e-03,\n 0.7117181e-03,\n 0.6815480e-03,\n 0.6619300e-03,\n 0.6338120e-03,\n 0.6155113e-03,\n 0.5893102e-03,\n 0.5722408e-03,\n 0.5478283e-03,\n 0.5319081e-03,\n 0.5091634e-03,\n 0.4943152e-03,\n 0.4731258e-03,\n 0.4592787e-03,\n 0.4395410e-03,\n 0.4266303e-03,\n 0.4082493e-03,\n 0.3962155e-03,\n 0.3791019e-03,\n 0.3678880e-03,\n 0.3519565e-03,\n 0.3415071e-03,\n 0.3266762e-03,\n 0.3169383e-03,\n 0.3031319e-03,\n 0.2940574e-03,\n 0.2812067e-03,\n 0.2727531e-03,\n 0.2607960e-03,\n 0.2529247e-03,\n 0.2418025e-03,\n 0.2344753e-03,\n 0.2241307e-03,\n 0.2173091e-03,\n 0.2076865e-03,\n 0.2013343e-03,\n 0.1923831e-03,\n 0.1864691e-03,\n 0.1781456e-03,\n 0.1726433e-03,\n 0.1649075e-03,\n 0.1597912e-03,\n 0.1526032e-03,\n 0.1478454e-03,\n 0.1411648e-03,\n 0.1367381e-03,\n 0.1305278e-03,\n 0.1264096e-03,\n 0.1206392e-03,\n 0.1168121e-03,\n 0.1114548e-03,\n 0.1079011e-03,\n 0.1029286e-03,\n 0.9962750e-04,\n 0.9500928e-04,\n 0.9194008e-04,\n 0.8765030e-04,\n 0.8479853e-04,\n 0.8081775e-04,\n 0.7817267e-04,\n 0.7448255e-04,\n 0.7203030e-04,\n 0.6860758e-04,\n 0.6633051e-04,\n 0.6315326e-04,\n 
0.6103854e-04,\n 0.5809158e-04,\n 0.5613237e-04,\n 0.5340456e-04,\n 0.5159216e-04,\n 0.4906537e-04,\n 0.4738409e-04,\n 0.4504024e-04,\n 0.4348066e-04,\n 0.4130949e-04,\n 0.3986734e-04,\n 0.3786116e-04,\n 0.3653038e-04,\n 0.3467413e-04,\n 0.3343939e-04,\n 0.3171734e-04,\n 0.3057505e-04,\n 0.2898660e-04,\n 0.2793625e-04,\n 0.2646920e-04,\n 0.2549635e-04,\n 0.2413735e-04,\n 0.2323900e-04,\n 0.2198645e-04,\n 0.2116049e-04,\n 0.2000504e-04,\n 0.1924253e-04,\n 0.1817554e-04,\n 0.1747326e-04,\n 0.1649066e-04,\n 0.1584529e-04,\n 0.1494019e-04,\n 0.1434660e-04,\n 0.1351308e-04,\n 0.1296814e-04,\n 0.1220153e-04,\n 0.1170191e-04,\n 0.1099720e-04,\n 0.1053953e-04,\n 0.9892289e-05,\n ]\n\n return (\n ssalb,\n len(Legendre_coef),\n np.pad(Legendre_coef, (0, 700 - len(Legendre_coef))),\n )\n\n\ndef results():\n spherical_albedo = 0.1400516239529828\n\n albedo = [\n 0.57934552e00,\n 0.55945677e00,\n 0.53431237e00,\n 0.50788230e00,\n 0.48296762e00,\n 0.46127653e00,\n 0.44329438e00,\n 0.42849159e00,\n 0.41589457e00,\n 0.40462923e00,\n 0.39412692e00,\n 0.38407087e00,\n 0.37428829e00,\n 0.36468229e00,\n 0.35519615e00,\n 0.34579977e00,\n 0.33647874e00,\n 0.32722980e00,\n 0.31805637e00,\n 0.30896705e00,\n 0.29997292e00,\n 0.29108667e00,\n 0.28232241e00,\n 0.27369434e00,\n 0.26521713e00,\n 0.25690463e00,\n 0.24876949e00,\n 0.24082196e00,\n 0.23306957e00,\n 0.22551830e00,\n 0.21817389e00,\n 0.21104220e00,\n 0.20412904e00,\n 0.19744009e00,\n 0.19098036e00,\n 0.18475346e00,\n 0.17876221e00,\n 0.17300782e00,\n 0.16749054e00,\n 0.16220950e00,\n 0.15716265e00,\n 0.15234718e00,\n 0.14775957e00,\n 0.14339539e00,\n 0.13924994e00,\n 0.13531761e00,\n 0.13159263e00,\n 0.12806895e00,\n 0.12473993e00,\n 0.12159910e00,\n 0.11863959e00,\n 0.11585440e00,\n 0.11323670e00,\n 0.11077949e00,\n 0.10847593e00,\n 0.10631904e00,\n 0.10430222e00,\n 0.10241879e00,\n 0.10066233e00,\n 0.99026598e-01,\n 0.97505502e-01,\n 0.96093059e-01,\n 0.94783649e-01,\n 0.93571737e-01,\n 0.92452131e-01,\n 0.91419615e-01,\n 0.90469383e-01,\n 0.89596771e-01,\n 0.88797286e-01,\n 0.88066630e-01,\n 0.87400697e-01,\n 0.86795583e-01,\n 0.86247541e-01,\n 0.85752994e-01,\n 0.85308485e-01,\n 0.84910698e-01,\n 0.84556349e-01,\n 0.84242381e-01,\n 0.83965667e-01,\n 0.83723314e-01,\n 0.83512425e-01,\n 0.83330259e-01,\n 0.83174184e-01,\n 0.83041623e-01,\n 0.82930155e-01,\n 0.82837544e-01,\n 0.82761563e-01,\n 0.82700156e-01,\n 0.82651392e-01,\n 0.82613394e-01,\n 0.82584500e-01,\n 0.82563184e-01,\n 0.82548007e-01,\n 0.82537644e-01,\n 0.82530975e-01,\n 0.82526997e-01,\n 0.82524881e-01,\n 0.82523920e-01,\n 0.82523584e-01,\n 0.82523517e-01,\n ]\n\n expected_r1 = np.array(\n [\n 0.38368369e03,\n 0.25770578e03,\n 0.23945151e03,\n 0.16855780e03,\n 0.18244296e03,\n 0.16236093e03,\n 0.10911653e03,\n 0.12755070e03,\n 0.12647316e03,\n 0.10830920e03,\n 0.67582947e02,\n 0.83225288e02,\n 0.89081558e02,\n 0.82212189e02,\n 0.66570000e02,\n 0.39153576e02,\n 0.50326321e02,\n 0.57166462e02,\n 0.56031170e02,\n 0.47971886e02,\n 0.36434990e02,\n 0.21253523e02,\n 0.28237167e02,\n 0.33642113e02,\n 0.34688950e02,\n 0.31254200e02,\n 0.25037840e02,\n 0.18253815e02,\n 0.11248275e02,\n 0.15133494e02,\n 0.18625698e02,\n 0.20033745e02,\n 0.18957489e02,\n 0.16075739e02,\n 0.12519300e02,\n 0.92340946e01,\n 0.62269855e01,\n 0.82350597e01,\n 0.10240828e02,\n 0.11357998e02,\n 0.11247568e02,\n 0.10105079e02,\n 0.84183950e01,\n 0.66705170e01,\n 0.51578894e01,\n 0.37987945e01,\n 0.48435707e01,\n 0.59637489e01,\n 0.67243404e01,\n 0.69045143e01,\n 0.65221829e01,\n 0.57613211e01,\n 0.48524532e01,\n 
-0.24215246e-01,\n -0.26824960e-01,\n -0.25858946e-01,\n -0.22609137e-01,\n -0.19117815e-01,\n 0.54258847e-03,\n 0.61508559e-03,\n 0.70537691e-03,\n 0.79757918e-03,\n 0.88023918e-03,\n 0.94631047e-03,\n 0.99372561e-03,\n 0.10245563e-02,\n 0.10427287e-02,\n 0.10518713e-02,\n 0.10544811e-02,\n 0.10520979e-02,\n 0.10457006e-02,\n 0.10359808e-02,\n 0.10234818e-02,\n 0.10086508e-02,\n 0.99183375e-03,\n 0.97323948e-03,\n 0.95295400e-03,\n 0.93100907e-03,\n 0.90750493e-03,\n 0.88267523e-03,\n 0.85658359e-03,\n 0.82906929e-03,\n 0.80003683e-03,\n 0.76972524e-03,\n 0.73839410e-03,\n 0.70602290e-03,\n 0.67255105e-03,\n 0.63823280e-03,\n 0.60333178e-03,\n 0.56789321e-03,\n 0.53199811e-03,\n 0.49601536e-03,\n 0.45993822e-03,\n 0.42375299e-03,\n 0.38750842e-03,\n 0.35126708e-03,\n 0.31534981e-03,\n 0.27984436e-03,\n 0.24513889e-03,\n 0.21121174e-03,\n 0.17798673e-03,\n 0.14601613e-03,\n 0.11531570e-03,\n 0.85622109e-04,\n 0.56977922e-04,\n 0.29328881e-04,\n 0.23219493e-05,\n -0.24609051e-04,\n -0.51613693e-04,\n -0.78829580e-04,\n -0.10677023e-03,\n -0.13576847e-03,\n -0.16589028e-03,\n -0.19740456e-03,\n -0.23071076e-03,\n -0.26662330e-03,\n -0.30694614e-03,\n -0.35483547e-03,\n -0.41624482e-03,\n -0.50326047e-03,\n -0.63863368e-03,\n -0.86484943e-03,\n -0.12584554e-02,\n -0.19429043e-02,\n -0.30815345e-02,\n -0.48037078e-02,\n -0.70073316e-02,\n -0.90698786e-02,\n -0.97764749e-02,\n -0.79293735e-02,\n -0.35561759e-02,\n 0.14528841e-02,\n 0.44905520e-02,\n 0.43353909e-02,\n 0.19235054e-02,\n -0.88058808e-03,\n -0.28684181e-02,\n -0.39436910e-02,\n -0.46082577e-02,\n -0.53751953e-02,\n -0.65655066e-02,\n -0.84049981e-02,\n -0.11191580e-01,\n -0.15233751e-01,\n -0.20167785e-01,\n -0.24146404e-01,\n -0.24793619e-01,\n -0.21611178e-01,\n -0.16357245e-01,\n -0.11137053e-01,\n 0.42571200e-03,\n 0.48259774e-03,\n 0.55344484e-03,\n 0.62578957e-03,\n 0.69064385e-03,\n 0.74247847e-03,\n 0.77967369e-03,\n 0.80386118e-03,\n 0.81812969e-03,\n 0.82533201e-03,\n 0.82742231e-03,\n 0.82559290e-03,\n 0.82059979e-03,\n 0.81299205e-03,\n 0.80320571e-03,\n 0.79159206e-03,\n 0.77843288e-03,\n 0.76390296e-03,\n 0.74805156e-03,\n 0.73085778e-03,\n 0.71239163e-03,\n 0.69290656e-03,\n 0.67251193e-03,\n 0.65096776e-03,\n 0.62812673e-03,\n 0.60432340e-03,\n 0.57976606e-03,\n 0.55436889e-03,\n 0.52808766e-03,\n 0.50110236e-03,\n 0.47373594e-03,\n 0.44590727e-03,\n 0.41769189e-03,\n 0.38944930e-03,\n 0.36113514e-03,\n 0.33273242e-03,\n 0.30424978e-03,\n 0.27580233e-03,\n 0.24755922e-03,\n 0.21969499e-03,\n 0.19240828e-03,\n 0.16580452e-03,\n 0.13966703e-03,\n 0.11454133e-03,\n 0.90488385e-04,\n 0.67134846e-04,\n 0.44648674e-04,\n 0.22957218e-04,\n 0.17676108e-05,\n -0.19375148e-04,\n -0.40583618e-04,\n -0.61928760e-04,\n -0.83859930e-04,\n -0.10665116e-03,\n -0.13027368e-03,\n -0.15504849e-03,\n -0.18114736e-03,\n -0.20928915e-03,\n -0.24077985e-03,\n -0.27812496e-03,\n -0.32568991e-03,\n -0.39278640e-03,\n -0.49672712e-03,\n -0.66978729e-03,\n -0.97107701e-03,\n -0.14971532e-02,\n -0.23801890e-02,\n -0.37356641e-02,\n -0.55097779e-02,\n -0.72345133e-02,\n -0.79152975e-02,\n -0.64894208e-02,\n -0.28653429e-02,\n 0.13867198e-02,\n 0.39382749e-02,\n 0.36805528e-02,\n 0.15104047e-02,\n -0.85980649e-03,\n -0.24118538e-02,\n -0.31621221e-02,\n -0.36013129e-02,\n -0.41603735e-02,\n -0.50707515e-02,\n -0.64725522e-02,\n -0.85721835e-02,\n -0.11695998e-01,\n -0.15934667e-01,\n -0.20252047e-01,\n -0.22257976e-01,\n -0.20039089e-01,\n -0.14158701e-01,\n -0.70773158e-02,\n -0.11583769e-02,\n 0.32308648e-03,\n 0.36626059e-03,\n 
0.42002957e-03,\n 0.47493313e-03,\n 0.52414869e-03,\n 0.56347938e-03,\n 0.59169793e-03,\n 0.61004912e-03,\n 0.62088534e-03,\n 0.62637340e-03,\n 0.62798650e-03,\n 0.62661694e-03,\n 0.62283722e-03,\n 0.61707193e-03,\n 0.60965854e-03,\n 0.60086075e-03,\n 0.59088779e-03,\n 0.57988992e-03,\n 0.56789967e-03,\n 0.55486214e-03,\n 0.54083182e-03,\n 0.52603544e-03,\n 0.51060918e-03,\n 0.49428100e-03,\n 0.47690922e-03,\n 0.45883877e-03,\n 0.44021819e-03,\n 0.42093766e-03,\n 0.40098530e-03,\n 0.38046407e-03,\n 0.35971330e-03,\n 0.33858226e-03,\n 0.31714686e-03,\n 0.29569783e-03,\n 0.27421847e-03,\n 0.25265093e-03,\n 0.23099354e-03,\n 0.20943003e-03,\n 0.18794239e-03,\n 0.16680970e-03,\n 0.14604363e-03,\n 0.12587215e-03,\n 0.10602640e-03,\n 0.86910019e-04,\n 0.68677633e-04,\n 0.50912015e-04,\n 0.33862842e-04,\n 0.17410162e-04,\n 0.13113118e-05,\n -0.14745443e-04,\n -0.30839601e-04,\n -0.47037924e-04,\n -0.63687643e-04,\n -0.81004437e-04,\n -0.98914243e-04,\n -0.11776508e-03,\n -0.13753906e-03,\n -0.15888251e-03,\n -0.18270283e-03,\n -0.21094215e-03,\n -0.24672056e-03,\n -0.29703090e-03,\n -0.37476781e-03,\n -0.50383923e-03,\n -0.72860206e-03,\n -0.11220841e-02,\n -0.17865424e-02,\n -0.28166992e-02,\n -0.41856705e-02,\n -0.55507738e-02,\n -0.61370027e-02,\n -0.50718379e-02,\n -0.22190376e-02,\n 0.11870584e-02,\n 0.32180531e-02,\n 0.29439172e-02,\n 0.11399792e-02,\n -0.75004215e-03,\n -0.19179990e-02,\n -0.24332437e-02,\n -0.27225760e-02,\n -0.31262944e-02,\n -0.38075345e-02,\n -0.48529524e-02,\n -0.64000175e-02,\n -0.87149860e-02,\n -0.12041125e-01,\n -0.15928924e-01,\n -0.18521670e-01,\n -0.17406538e-01,\n -0.11980806e-01,\n -0.43471926e-02,\n 0.24430454e-02,\n 0.64657470e-02,\n 0.23461202e-03,\n 0.26596323e-03,\n 0.30500707e-03,\n 0.34487346e-03,\n 0.38060735e-03,\n 0.40916057e-03,\n 0.42964329e-03,\n 0.44296391e-03,\n 0.45083603e-03,\n 0.45483361e-03,\n 0.45601939e-03,\n 0.45503493e-03,\n 0.45229631e-03,\n 0.44811305e-03,\n 0.44273314e-03,\n 0.43635038e-03,\n 0.42911444e-03,\n 0.42114657e-03,\n 0.41246001e-03,\n 0.40299704e-03,\n 0.39280354e-03,\n 0.38204770e-03,\n 0.37087622e-03,\n 0.35903638e-03,\n 0.34639766e-03,\n 0.33326566e-03,\n 0.31975409e-03,\n 0.30575541e-03,\n 0.29127134e-03,\n 0.27634719e-03,\n 0.26128866e-03,\n 0.24593511e-03,\n 0.23035664e-03,\n 0.21476793e-03,\n 0.19919084e-03,\n 0.18352638e-03,\n 0.16776577e-03,\n 0.15213400e-03,\n 0.13650597e-03,\n 0.12117127e-03,\n 0.10604783e-03,\n 0.91420814e-04,\n 0.77014876e-04,\n 0.63102707e-04,\n 0.49865157e-04,\n 0.36943147e-04,\n 0.24579298e-04,\n 0.12646942e-04,\n 0.93583714e-06,\n -0.10726711e-04,\n -0.22409520e-04,\n -0.34169330e-04,\n -0.46268411e-04,\n -0.58847447e-04,\n -0.71843184e-04,\n -0.85561762e-04,\n -0.99899378e-04,\n -0.11538975e-03,\n -0.13264996e-03,\n -0.15311912e-03,\n -0.17894371e-03,\n -0.21519083e-03,\n -0.27111307e-03,\n -0.36378118e-03,\n -0.52517361e-03,\n -0.80816675e-03,\n -0.12878858e-02,\n -0.20363682e-02,\n -0.30407030e-02,\n -0.40585143e-02,\n -0.45180605e-02,\n -0.37541774e-02,\n -0.16340034e-02,\n 0.92716783e-03,\n 0.24486976e-02,\n 0.22101640e-02,\n 0.82245702e-03,\n -0.59355749e-03,\n -0.14349412e-02,\n -0.17818650e-02,\n -0.19710022e-02,\n -0.22550686e-02,\n -0.27460111e-02,\n -0.34975049e-02,\n -0.45994679e-02,\n -0.62465151e-02,\n -0.86843111e-02,\n -0.11772950e-01,\n -0.14247025e-01,\n -0.13897673e-01,\n -0.95402114e-02,\n -0.26456779e-02,\n 0.37637560e-02,\n 0.75113312e-02,\n 0.82785152e-02,\n 0.16024915e-03,\n 0.18166313e-03,\n 0.20833101e-03,\n 0.23556003e-03,\n 0.25996531e-03,\n 0.27946456e-03,\n 
0.29345075e-03,\n 0.30254686e-03,\n 0.30792650e-03,\n 0.31066421e-03,\n 0.31148014e-03,\n 0.31080900e-03,\n 0.30893844e-03,\n 0.30608277e-03,\n 0.30241298e-03,\n 0.29805984e-03,\n 0.29311786e-03,\n 0.28767882e-03,\n 0.28175494e-03,\n 0.27529473e-03,\n 0.26832905e-03,\n 0.26097632e-03,\n 0.25335979e-03,\n 0.24527783e-03,\n 0.23663967e-03,\n 0.22766842e-03,\n 0.21843993e-03,\n 0.20887410e-03,\n 0.19898124e-03,\n 0.18877641e-03,\n 0.17849728e-03,\n 0.16801062e-03,\n 0.15736978e-03,\n 0.14671541e-03,\n 0.13608270e-03,\n 0.12537625e-03,\n 0.11459773e-03,\n 0.10394175e-03,\n 0.93250273e-04,\n 0.82784871e-04,\n 0.72433824e-04,\n 0.62449340e-04,\n 0.52622323e-04,\n 0.43097367e-04,\n 0.34058194e-04,\n 0.25217514e-04,\n 0.16786646e-04,\n 0.86424679e-05,\n 0.63486573e-06,\n -0.73354054e-05,\n -0.15307762e-04,\n -0.23343211e-04,\n -0.31608852e-04,\n -0.40205730e-04,\n -0.49073769e-04,\n -0.58462469e-04,\n -0.68241599e-04,\n -0.78819096e-04,\n -0.90594622e-04,\n -0.10456181e-03,\n -0.12214016e-03,\n -0.14677636e-03,\n -0.18476187e-03,\n -0.24763195e-03,\n -0.35712289e-03,\n -0.54928858e-03,\n -0.87579578e-03,\n -0.13871513e-02,\n -0.20772433e-02,\n -0.27833104e-02,\n -0.31113259e-02,\n -0.25939168e-02,\n -0.11258019e-02,\n 0.66039932e-03,\n 0.17193498e-02,\n 0.15396239e-02,\n 0.55894168e-03,\n -0.42620959e-03,\n -0.99759083e-03,\n -0.12229718e-02,\n -0.13435301e-02,\n -0.15339972e-02,\n -0.18679998e-02,\n -0.23784370e-02,\n -0.31225604e-02,\n -0.42320755e-02,\n -0.58987723e-02,\n -0.81079528e-02,\n -0.10059725e-01,\n -0.10070107e-01,\n -0.69461395e-02,\n -0.15970924e-02,\n 0.35173711e-02,\n 0.64823525e-02,\n 0.69939806e-02,\n 0.58447313e-02,\n 0.99998928e-04,\n 0.11336149e-03,\n 0.13000234e-03,\n 0.14699313e-03,\n 0.16222145e-03,\n 0.17438777e-03,\n 0.18311353e-03,\n 0.18878833e-03,\n 0.19214548e-03,\n 0.19385564e-03,\n 0.19436714e-03,\n 0.19395056e-03,\n 0.19278521e-03,\n 0.19100346e-03,\n 0.18871219e-03,\n 0.18599514e-03,\n 0.18291226e-03,\n 0.17952279e-03,\n 0.17582905e-03,\n 0.17179834e-03,\n 0.16745202e-03,\n 0.16285951e-03,\n 0.15811202e-03,\n 0.15307403e-03,\n 0.14767774e-03,\n 0.14207418e-03,\n 0.13631718e-03,\n 0.13035099e-03,\n 0.12418116e-03,\n 0.11780974e-03,\n 0.11139606e-03,\n 0.10484913e-03,\n 0.98205564e-04,\n 0.91553651e-04,\n 0.84926301e-04,\n 0.78246710e-04,\n 0.71511990e-04,\n 0.64868305e-04,\n 0.58195605e-04,\n 0.51666491e-04,\n 0.45197361e-04,\n 0.38973347e-04,\n 0.32842243e-04,\n 0.26893933e-04,\n 0.21249898e-04,\n 0.15730866e-04,\n 0.10472757e-04,\n 0.53960257e-05,\n 0.39317513e-06,\n -0.45791730e-05,\n -0.95536843e-05,\n -0.14566379e-04,\n -0.19728041e-04,\n -0.25091455e-04,\n -0.30624815e-04,\n -0.36488018e-04,\n -0.42586758e-04,\n -0.49185081e-04,\n -0.56528501e-04,\n -0.65242508e-04,\n -0.76187855e-04,\n -0.91523340e-04,\n -0.11515653e-03,\n -0.15424404e-03,\n -0.22232164e-03,\n -0.34186023e-03,\n -0.54521387e-03,\n -0.86433312e-03,\n -0.12963127e-02,\n -0.17405468e-02,\n -0.19500151e-02,\n -0.16286803e-02,\n -0.70585759e-03,\n 0.42125979e-03,\n 0.10887617e-02,\n 0.97085984e-03,\n 0.34771912e-03,\n -0.27306599e-03,\n -0.62840234e-03,\n -0.76508470e-03,\n -0.83742116e-03,\n -0.95511600e-03,\n -0.11631359e-02,\n -0.14807530e-02,\n -0.19422875e-02,\n -0.26292126e-02,\n -0.36683388e-02,\n -0.50780545e-02,\n -0.63869823e-02,\n -0.64897770e-02,\n -0.44976156e-02,\n -0.92731958e-03,\n 0.25404119e-02,\n 0.45405924e-02,\n 0.48503033e-02,\n 0.40294230e-02,\n 0.27683314e-02,\n 0.53884014e-04,\n 0.61084211e-04,\n 0.70050810e-04,\n 0.79205915e-04,\n 0.87411223e-04,\n 0.93966446e-04,\n 
0.98667610e-04,\n 0.10172475e-03,\n 0.10353317e-03,\n 0.10445446e-03,\n 0.10473084e-03,\n 0.10450836e-03,\n 0.10388237e-03,\n 0.10292211e-03,\n 0.10168495e-03,\n 0.10021854e-03,\n 0.98558390e-04,\n 0.96735770e-04,\n 0.94745832e-04,\n 0.92573500e-04,\n 0.90232592e-04,\n 0.87755776e-04,\n 0.85199157e-04,\n 0.82488186e-04,\n 0.79575933e-04,\n 0.76551951e-04,\n 0.73451745e-04,\n 0.70241033e-04,\n 0.66919587e-04,\n 0.63485968e-04,\n 0.60029004e-04,\n 0.56497996e-04,\n 0.52914555e-04,\n 0.49328813e-04,\n 0.45763074e-04,\n 0.42167223e-04,\n 0.38533406e-04,\n 0.34952976e-04,\n 0.31360858e-04,\n 0.27841659e-04,\n 0.24351986e-04,\n 0.21002805e-04,\n 0.17696189e-04,\n 0.14492869e-04,\n 0.11447947e-04,\n 0.84757721e-05,\n 0.56412364e-05,\n 0.29090747e-05,\n 0.20986288e-06,\n -0.24671158e-05,\n -0.51494958e-05,\n -0.78480261e-05,\n -0.10632524e-04,\n -0.13520086e-04,\n -0.16503614e-04,\n -0.19662311e-04,\n -0.22948981e-04,\n -0.26503118e-04,\n -0.30458663e-04,\n -0.35155379e-04,\n -0.41044819e-04,\n -0.49301310e-04,\n -0.62018436e-04,\n -0.83043000e-04,\n -0.11966786e-03,\n -0.18399033e-03,\n -0.29346853e-03,\n -0.46542627e-03,\n -0.69852220e-03,\n -0.93878357e-03,\n -0.10528294e-02,\n -0.88006578e-03,\n -0.38117066e-03,\n 0.22924476e-03,\n 0.59057289e-03,\n 0.52561943e-03,\n 0.18708916e-03,\n -0.14889365e-03,\n -0.34005527e-03,\n -0.41272707e-03,\n -0.45099761e-03,\n -0.51413663e-03,\n -0.62613422e-03,\n -0.79706579e-03,\n -0.10450790e-02,\n -0.14138672e-02,\n -0.19733894e-02,\n -0.27403242e-02,\n -0.34682453e-02,\n -0.35488785e-02,\n -0.24659703e-02,\n -0.48296375e-03,\n 0.14571503e-02,\n 0.25734871e-02,\n 0.27371510e-02,\n 0.22676513e-02,\n 0.15553402e-02,\n 0.87312493e-03,\n 0.21932714e-04,\n 0.24863417e-04,\n 0.28513090e-04,\n 0.32239484e-04,\n 0.35579265e-04,\n 0.38247381e-04,\n 0.40160809e-04,\n 0.41405037e-04,\n 0.42140975e-04,\n 0.42515869e-04,\n 0.42628493e-04,\n 0.42538381e-04,\n 0.42284017e-04,\n 0.41893109e-04,\n 0.41388936e-04,\n 0.40791474e-04,\n 0.40115974e-04,\n 0.39374940e-04,\n 0.38564984e-04,\n 0.37680649e-04,\n 0.36728081e-04,\n 0.35719502e-04,\n 0.34679109e-04,\n 0.33576438e-04,\n 0.32390075e-04,\n 0.31158215e-04,\n 0.29896764e-04,\n 0.28590852e-04,\n 0.27239597e-04,\n 0.25841982e-04,\n 0.24434608e-04,\n 0.22996630e-04,\n 0.21537227e-04,\n 0.20077452e-04,\n 0.18627210e-04,\n 0.17164399e-04,\n 0.15684327e-04,\n 0.14226660e-04,\n 0.12765439e-04,\n 0.11332698e-04,\n 0.99115950e-05,\n 0.85493384e-05,\n 0.72026537e-05,\n 0.58994046e-05,\n 0.46591872e-05,\n 0.34499117e-05,\n 0.22957561e-05,\n 0.11843766e-05,\n 0.84998014e-07,\n -0.10040685e-05,\n -0.20963864e-05,\n -0.31941836e-05,\n -0.43282666e-05,\n -0.55030100e-05,\n -0.67179099e-05,\n -0.80033133e-05,\n -0.93413128e-05,\n -0.10787697e-04,\n -0.12397514e-04,\n -0.14309594e-04,\n -0.16705344e-04,\n -0.20065328e-04,\n -0.25239178e-04,\n -0.33791446e-04,\n -0.48691152e-04,\n -0.74860320e-04,\n -0.11940804e-03,\n -0.18940083e-03,\n -0.28432420e-03,\n -0.38224252e-03,\n -0.42882701e-03,\n -0.35856038e-03,\n -0.15526464e-03,\n 0.93624702e-04,\n 0.24092782e-03,\n 0.21429047e-03,\n 0.76112388e-04,\n -0.60849816e-04,\n -0.13861567e-03,\n -0.16805886e-03,\n -0.18353794e-03,\n -0.20919893e-03,\n -0.25477342e-03,\n -0.32431912e-03,\n -0.42517454e-03,\n -0.57509379e-03,\n -0.80277218e-03,\n -0.11159416e-02,\n -0.14153966e-02,\n -0.14518298e-02,\n -0.10097986e-02,\n -0.19427406e-03,\n 0.60561736e-03,\n 0.10654813e-02,\n 0.11315914e-02,\n 0.93659549e-03,\n 0.64201583e-03,\n 0.36030522e-03,\n 0.14866883e-03,\n 0.41634707e-05,\n 0.47198023e-05,\n 
0.54126149e-05,\n 0.61199912e-05,\n 0.67539763e-05,\n 0.72604594e-05,\n 0.76236788e-05,\n 0.78598650e-05,\n 0.79995607e-05,\n 0.80707214e-05,\n 0.80921054e-05,\n 0.80750178e-05,\n 0.80267509e-05,\n 0.79525425e-05,\n 0.78568110e-05,\n 0.77433706e-05,\n 0.76151509e-05,\n 0.74745158e-05,\n 0.73207625e-05,\n 0.71528843e-05,\n 0.69720704e-05,\n 0.67805959e-05,\n 0.65831077e-05,\n 0.63738212e-05,\n 0.61485744e-05,\n 0.59146905e-05,\n 0.56752488e-05,\n 0.54273883e-05,\n 0.51709094e-05,\n 0.49056021e-05,\n 0.46384298e-05,\n 0.43654286e-05,\n 0.40883574e-05,\n 0.38112394e-05,\n 0.35359881e-05,\n 0.32583384e-05,\n 0.29773378e-05,\n 0.27006156e-05,\n 0.24232718e-05,\n 0.21512824e-05,\n 0.18814886e-05,\n 0.16229329e-05,\n 0.13672592e-05,\n 0.11198919e-05,\n 0.88442783e-06,\n 0.65489411e-06,\n 0.43578387e-06,\n 0.22484058e-06,\n 0.16117639e-07,\n -0.19059540e-06,\n -0.39797052e-06,\n -0.60633920e-06,\n -0.82165008e-06,\n -0.10446255e-05,\n -0.12752704e-05,\n -0.15192638e-05,\n -0.17732648e-05,\n -0.20478196e-05,\n -0.23534012e-05,\n -0.27163851e-05,\n -0.31711115e-05,\n -0.38089131e-05,\n -0.47909707e-05,\n -0.64142382e-05,\n -0.92423534e-05,\n -0.14209581e-04,\n -0.22665536e-04,\n -0.35952206e-04,\n -0.53973064e-04,\n -0.72565315e-04,\n -0.81414371e-04,\n -0.68077694e-04,\n -0.29477938e-04,\n 0.17784128e-04,\n 0.45754921e-04,\n 0.40691128e-04,\n 0.14446910e-04,\n -0.11559983e-04,\n -0.26320620e-04,\n -0.31904819e-04,\n -0.34839621e-04,\n -0.39709423e-04,\n -0.48360336e-04,\n -0.61561077e-04,\n -0.80702943e-04,\n -0.10915505e-03,\n -0.15237252e-03,\n -0.21185735e-03,\n -0.26881794e-03,\n -0.27586648e-03,\n -0.19191137e-03,\n -0.36795147e-04,\n 0.11542100e-03,\n 0.20291690e-03,\n 0.21544744e-03,\n 0.17828873e-03,\n 0.12219916e-03,\n 0.68575428e-04,\n 0.28294970e-04,\n 0.53851386e-05,\n ]\n )\n return spherical_albedo, albedo, expected_r1\n"
] | [
[
"numpy.array",
"numpy.testing.assert_allclose"
]
] |
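The code field that closes above returns three hardcoded arrays (spherical_albedo, albedo, expected_r1), and the row lists numpy.array and numpy.testing.assert_allclose as the APIs it calls, so the arrays are evidently reference data for a numerical comparison. A minimal sketch of how such reference arrays are typically consumed, assuming a hypothetical compute_r1 function and a tolerance that is not taken from the row:

import numpy as np
from numpy.testing import assert_allclose

def check_r1(compute_r1, spherical_albedo, albedo, expected_r1):
    # Recompute the quantity under test from the stored inputs and compare
    # it elementwise against the hardcoded reference values.
    result = np.array(compute_r1(spherical_albedo, albedo))
    assert_allclose(result, np.array(expected_r1), rtol=1e-6)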
ririw/scipy | [
"680ecf8c52966343827903e6b7983b1ef7323fe2"
] | [
"scipy/sparse/compressed.py"
] | [
"\"\"\"Base class for sparse matrix formats using compressed storage.\"\"\"\nfrom __future__ import division, print_function, absolute_import\n\n__all__ = []\n\nfrom warnings import warn\nimport operator\n\nimport numpy as np\nfrom scipy._lib._util import _prune_array\n\nfrom .base import spmatrix, isspmatrix, SparseEfficiencyWarning\nfrom .data import _data_matrix, _minmax_mixin\nfrom .dia import dia_matrix\nfrom . import _sparsetools\nfrom ._sparsetools import (get_csr_submatrix, csr_sample_offsets, csr_todense,\n csr_sample_values, csr_row_index, csr_row_slice,\n csr_column_index1, csr_column_index2)\nfrom ._index import IndexMixin\nfrom .sputils import (upcast, upcast_char, to_native, isdense, isshape,\n getdtype, isscalarlike, isintlike, get_index_dtype,\n downcast_intp_index, get_sum_dtype, check_shape,\n matrix, asmatrix, is_pydata_spmatrix)\n\n\nclass _cs_matrix(_data_matrix, _minmax_mixin, IndexMixin):\n \"\"\"base matrix class for compressed row- and column-oriented matrices\"\"\"\n\n def __init__(self, arg1, shape=None, dtype=None, copy=False):\n _data_matrix.__init__(self)\n\n if isspmatrix(arg1):\n if arg1.format == self.format and copy:\n arg1 = arg1.copy()\n else:\n arg1 = arg1.asformat(self.format)\n self._set_self(arg1)\n\n elif isinstance(arg1, tuple):\n if isshape(arg1):\n # It's a tuple of matrix dimensions (M, N)\n # create empty matrix\n self._shape = check_shape(arg1)\n M, N = self.shape\n # Select index dtype large enough to pass array and\n # scalar parameters to sparsetools\n idx_dtype = get_index_dtype(maxval=max(M, N))\n self.data = np.zeros(0, getdtype(dtype, default=float))\n self.indices = np.zeros(0, idx_dtype)\n self.indptr = np.zeros(self._swap((M, N))[0] + 1,\n dtype=idx_dtype)\n else:\n if len(arg1) == 2:\n # (data, ij) format\n from .coo import coo_matrix\n other = self.__class__(coo_matrix(arg1, shape=shape))\n self._set_self(other)\n elif len(arg1) == 3:\n # (data, indices, indptr) format\n (data, indices, indptr) = arg1\n\n # Select index dtype large enough to pass array and\n # scalar parameters to sparsetools\n maxval = None\n if shape is not None:\n maxval = max(shape)\n idx_dtype = get_index_dtype((indices, indptr),\n maxval=maxval,\n check_contents=True)\n\n self.indices = np.array(indices, copy=copy,\n dtype=idx_dtype)\n self.indptr = np.array(indptr, copy=copy, dtype=idx_dtype)\n self.data = np.array(data, copy=copy, dtype=dtype)\n else:\n raise ValueError(\"unrecognized {}_matrix \"\n \"constructor usage\".format(self.format))\n\n else:\n # must be dense\n try:\n arg1 = np.asarray(arg1)\n except Exception:\n raise ValueError(\"unrecognized {}_matrix constructor usage\"\n \"\".format(self.format))\n from .coo import coo_matrix\n self._set_self(self.__class__(coo_matrix(arg1, dtype=dtype)))\n\n # Read matrix dimensions given, if any\n if shape is not None:\n self._shape = check_shape(shape)\n else:\n if self.shape is None:\n # shape not already set, try to infer dimensions\n try:\n major_dim = len(self.indptr) - 1\n minor_dim = self.indices.max() + 1\n except Exception:\n raise ValueError('unable to infer matrix dimensions')\n else:\n self._shape = check_shape(self._swap((major_dim,\n minor_dim)))\n\n if dtype is not None:\n self.data = self.data.astype(dtype, copy=False)\n\n self.check_format(full_check=False)\n\n def getnnz(self, axis=None):\n if axis is None:\n return int(self.indptr[-1])\n else:\n if axis < 0:\n axis += 2\n axis, _ = self._swap((axis, 1 - axis))\n _, N = self._swap(self.shape)\n if axis == 0:\n return 
np.bincount(downcast_intp_index(self.indices),\n minlength=N)\n elif axis == 1:\n return np.diff(self.indptr)\n raise ValueError('axis out of bounds')\n\n getnnz.__doc__ = spmatrix.getnnz.__doc__\n\n def _set_self(self, other, copy=False):\n \"\"\"take the member variables of other and assign them to self\"\"\"\n\n if copy:\n other = other.copy()\n\n self.data = other.data\n self.indices = other.indices\n self.indptr = other.indptr\n self._shape = check_shape(other.shape)\n\n def check_format(self, full_check=True):\n \"\"\"check whether the matrix format is valid\n\n Parameters\n ----------\n full_check : bool, optional\n If `True`, rigorous check, O(N) operations. Otherwise\n basic check, O(1) operations (default True).\n \"\"\"\n # use _swap to determine proper bounds\n major_name, minor_name = self._swap(('row', 'column'))\n major_dim, minor_dim = self._swap(self.shape)\n\n # index arrays should have integer data types\n if self.indptr.dtype.kind != 'i':\n warn(\"indptr array has non-integer dtype ({})\"\n \"\".format(self.indptr.dtype.name), stacklevel=3)\n if self.indices.dtype.kind != 'i':\n warn(\"indices array has non-integer dtype ({})\"\n \"\".format(self.indices.dtype.name), stacklevel=3)\n\n idx_dtype = get_index_dtype((self.indptr, self.indices))\n self.indptr = np.asarray(self.indptr, dtype=idx_dtype)\n self.indices = np.asarray(self.indices, dtype=idx_dtype)\n self.data = to_native(self.data)\n\n # check array shapes\n for x in [self.data.ndim, self.indices.ndim, self.indptr.ndim]:\n if x != 1:\n raise ValueError('data, indices, and indptr should be 1-D')\n\n # check index pointer\n if (len(self.indptr) != major_dim + 1):\n raise ValueError(\"index pointer size ({}) should be ({})\"\n \"\".format(len(self.indptr), major_dim + 1))\n if (self.indptr[0] != 0):\n raise ValueError(\"index pointer should start with 0\")\n\n # check index and data arrays\n if (len(self.indices) != len(self.data)):\n raise ValueError(\"indices and data should have the same size\")\n if (self.indptr[-1] > len(self.indices)):\n raise ValueError(\"Last value of index pointer should be less than \"\n \"the size of index and data arrays\")\n\n self.prune()\n\n if full_check:\n # check format validity (more expensive)\n if self.nnz > 0:\n if self.indices.max() >= minor_dim:\n raise ValueError(\"{} index values must be < {}\"\n \"\".format(minor_name, minor_dim))\n if self.indices.min() < 0:\n raise ValueError(\"{} index values must be >= 0\"\n \"\".format(minor_name))\n if np.diff(self.indptr).min() < 0:\n raise ValueError(\"index pointer values must form a \"\n \"non-decreasing sequence\")\n\n # if not self.has_sorted_indices():\n # warn('Indices were not in sorted order. Sorting indices.')\n # self.sort_indices()\n # assert(self.has_sorted_indices())\n # TODO check for duplicates?\n\n #######################\n # Boolean comparisons #\n #######################\n\n def _scalar_binopt(self, other, op):\n \"\"\"Scalar version of self._binopt, for cases in which no new nonzeros\n are added. 
Produces a new spmatrix in canonical form.\n \"\"\"\n self.sum_duplicates()\n res = self._with_data(op(self.data, other), copy=True)\n res.eliminate_zeros()\n return res\n\n def __eq__(self, other):\n # Scalar other.\n if isscalarlike(other):\n if np.isnan(other):\n return self.__class__(self.shape, dtype=np.bool_)\n\n if other == 0:\n warn(\"Comparing a sparse matrix with 0 using == is inefficient\"\n \", try using != instead.\", SparseEfficiencyWarning,\n stacklevel=3)\n all_true = self.__class__(np.ones(self.shape, dtype=np.bool_))\n inv = self._scalar_binopt(other, operator.ne)\n return all_true - inv\n else:\n return self._scalar_binopt(other, operator.eq)\n # Dense other.\n elif isdense(other):\n return self.todense() == other\n # Pydata sparse other.\n elif is_pydata_spmatrix(other):\n return NotImplemented\n # Sparse other.\n elif isspmatrix(other):\n warn(\"Comparing sparse matrices using == is inefficient, try using\"\n \" != instead.\", SparseEfficiencyWarning, stacklevel=3)\n # TODO sparse broadcasting\n if self.shape != other.shape:\n return False\n elif self.format != other.format:\n other = other.asformat(self.format)\n res = self._binopt(other, '_ne_')\n all_true = self.__class__(np.ones(self.shape, dtype=np.bool_))\n return all_true - res\n else:\n return False\n\n def __ne__(self, other):\n # Scalar other.\n if isscalarlike(other):\n if np.isnan(other):\n warn(\"Comparing a sparse matrix with nan using != is\"\n \" inefficient\", SparseEfficiencyWarning, stacklevel=3)\n all_true = self.__class__(np.ones(self.shape, dtype=np.bool_))\n return all_true\n elif other != 0:\n warn(\"Comparing a sparse matrix with a nonzero scalar using !=\"\n \" is inefficient, try using == instead.\",\n SparseEfficiencyWarning, stacklevel=3)\n all_true = self.__class__(np.ones(self.shape), dtype=np.bool_)\n inv = self._scalar_binopt(other, operator.eq)\n return all_true - inv\n else:\n return self._scalar_binopt(other, operator.ne)\n # Dense other.\n elif isdense(other):\n return self.todense() != other\n # Pydata sparse other.\n elif is_pydata_spmatrix(other):\n return NotImplemented\n # Sparse other.\n elif isspmatrix(other):\n # TODO sparse broadcasting\n if self.shape != other.shape:\n return True\n elif self.format != other.format:\n other = other.asformat(self.format)\n return self._binopt(other, '_ne_')\n else:\n return True\n\n def _inequality(self, other, op, op_name, bad_scalar_msg):\n # Scalar other.\n if isscalarlike(other):\n if 0 == other and op_name in ('_le_', '_ge_'):\n raise NotImplementedError(\" >= and <= don't work with 0.\")\n elif op(0, other):\n warn(bad_scalar_msg, SparseEfficiencyWarning)\n other_arr = np.empty(self.shape, dtype=np.result_type(other))\n other_arr.fill(other)\n other_arr = self.__class__(other_arr)\n return self._binopt(other_arr, op_name)\n else:\n return self._scalar_binopt(other, op)\n # Dense other.\n elif isdense(other):\n return op(self.todense(), other)\n # Sparse other.\n elif isspmatrix(other):\n # TODO sparse broadcasting\n if self.shape != other.shape:\n raise ValueError(\"inconsistent shapes\")\n elif self.format != other.format:\n other = other.asformat(self.format)\n if op_name not in ('_ge_', '_le_'):\n return self._binopt(other, op_name)\n\n warn(\"Comparing sparse matrices using >= and <= is inefficient, \"\n \"using <, >, or !=, instead.\", SparseEfficiencyWarning)\n all_true = self.__class__(np.ones(self.shape, dtype=np.bool_))\n res = self._binopt(other, '_gt_' if op_name == '_le_' else '_lt_')\n return all_true - res\n else:\n 
raise ValueError(\"Operands could not be compared.\")\n\n def __lt__(self, other):\n return self._inequality(other, operator.lt, '_lt_',\n \"Comparing a sparse matrix with a scalar \"\n \"greater than zero using < is inefficient, \"\n \"try using >= instead.\")\n\n def __gt__(self, other):\n return self._inequality(other, operator.gt, '_gt_',\n \"Comparing a sparse matrix with a scalar \"\n \"less than zero using > is inefficient, \"\n \"try using <= instead.\")\n\n def __le__(self, other):\n return self._inequality(other, operator.le, '_le_',\n \"Comparing a sparse matrix with a scalar \"\n \"greater than zero using <= is inefficient, \"\n \"try using > instead.\")\n\n def __ge__(self, other):\n return self._inequality(other, operator.ge, '_ge_',\n \"Comparing a sparse matrix with a scalar \"\n \"less than zero using >= is inefficient, \"\n \"try using < instead.\")\n\n #################################\n # Arithmetic operator overrides #\n #################################\n\n def _add_dense(self, other):\n if other.shape != self.shape:\n raise ValueError('Incompatible shapes.')\n dtype = upcast_char(self.dtype.char, other.dtype.char)\n order = self._swap('CF')[0]\n result = np.array(other, dtype=dtype, order=order, copy=True)\n M, N = self._swap(self.shape)\n y = result if result.flags.c_contiguous else result.T\n csr_todense(M, N, self.indptr, self.indices, self.data, y)\n return matrix(result, copy=False)\n\n def _add_sparse(self, other):\n return self._binopt(other, '_plus_')\n\n def _sub_sparse(self, other):\n return self._binopt(other, '_minus_')\n\n def multiply(self, other):\n \"\"\"Point-wise multiplication by another matrix, vector, or\n scalar.\n \"\"\"\n # Scalar multiplication.\n if isscalarlike(other):\n return self._mul_scalar(other)\n # Sparse matrix or vector.\n if isspmatrix(other):\n if self.shape == other.shape:\n other = self.__class__(other)\n return self._binopt(other, '_elmul_')\n # Single element.\n elif other.shape == (1, 1):\n return self._mul_scalar(other.toarray()[0, 0])\n elif self.shape == (1, 1):\n return other._mul_scalar(self.toarray()[0, 0])\n # A row times a column.\n elif self.shape[1] == 1 and other.shape[0] == 1:\n return self._mul_sparse_matrix(other.tocsc())\n elif self.shape[0] == 1 and other.shape[1] == 1:\n return other._mul_sparse_matrix(self.tocsc())\n # Row vector times matrix. other is a row.\n elif other.shape[0] == 1 and self.shape[1] == other.shape[1]:\n other = dia_matrix((other.toarray().ravel(), [0]),\n shape=(other.shape[1], other.shape[1]))\n return self._mul_sparse_matrix(other)\n # self is a row.\n elif self.shape[0] == 1 and self.shape[1] == other.shape[1]:\n copy = dia_matrix((self.toarray().ravel(), [0]),\n shape=(self.shape[1], self.shape[1]))\n return other._mul_sparse_matrix(copy)\n # Column vector times matrix. 
other is a column.\n elif other.shape[1] == 1 and self.shape[0] == other.shape[0]:\n other = dia_matrix((other.toarray().ravel(), [0]),\n shape=(other.shape[0], other.shape[0]))\n return other._mul_sparse_matrix(self)\n # self is a column.\n elif self.shape[1] == 1 and self.shape[0] == other.shape[0]:\n copy = dia_matrix((self.toarray().ravel(), [0]),\n shape=(self.shape[0], self.shape[0]))\n return copy._mul_sparse_matrix(other)\n else:\n raise ValueError(\"inconsistent shapes\")\n\n # Assume other is a dense matrix/array, which produces a single-item\n # object array if other isn't convertible to ndarray.\n other = np.atleast_2d(other)\n\n if other.ndim != 2:\n return np.multiply(self.toarray(), other)\n # Single element / wrapped object.\n if other.size == 1:\n return self._mul_scalar(other.flat[0])\n # Fast case for trivial sparse matrix.\n elif self.shape == (1, 1):\n return np.multiply(self.toarray()[0, 0], other)\n\n from .coo import coo_matrix\n ret = self.tocoo()\n # Matching shapes.\n if self.shape == other.shape:\n data = np.multiply(ret.data, other[ret.row, ret.col])\n # Sparse row vector times...\n elif self.shape[0] == 1:\n if other.shape[1] == 1: # Dense column vector.\n data = np.multiply(ret.data, other)\n elif other.shape[1] == self.shape[1]: # Dense matrix.\n data = np.multiply(ret.data, other[:, ret.col])\n else:\n raise ValueError(\"inconsistent shapes\")\n row = np.repeat(np.arange(other.shape[0]), len(ret.row))\n col = np.tile(ret.col, other.shape[0])\n return coo_matrix((data.view(np.ndarray).ravel(), (row, col)),\n shape=(other.shape[0], self.shape[1]),\n copy=False)\n # Sparse column vector times...\n elif self.shape[1] == 1:\n if other.shape[0] == 1: # Dense row vector.\n data = np.multiply(ret.data[:, None], other)\n elif other.shape[0] == self.shape[0]: # Dense matrix.\n data = np.multiply(ret.data[:, None], other[ret.row])\n else:\n raise ValueError(\"inconsistent shapes\")\n row = np.repeat(ret.row, other.shape[1])\n col = np.tile(np.arange(other.shape[1]), len(ret.col))\n return coo_matrix((data.view(np.ndarray).ravel(), (row, col)),\n shape=(self.shape[0], other.shape[1]),\n copy=False)\n # Sparse matrix times dense row vector.\n elif other.shape[0] == 1 and self.shape[1] == other.shape[1]:\n data = np.multiply(ret.data, other[:, ret.col].ravel())\n # Sparse matrix times dense column vector.\n elif other.shape[1] == 1 and self.shape[0] == other.shape[0]:\n data = np.multiply(ret.data, other[ret.row].ravel())\n else:\n raise ValueError(\"inconsistent shapes\")\n ret.data = data.view(np.ndarray).ravel()\n return ret\n\n ###########################\n # Multiplication handlers #\n ###########################\n\n def _mul_vector(self, other):\n M, N = self.shape\n\n # output array\n result = np.zeros(M, dtype=upcast_char(self.dtype.char,\n other.dtype.char))\n\n # csr_matvec or csc_matvec\n fn = getattr(_sparsetools, self.format + '_matvec')\n fn(M, N, self.indptr, self.indices, self.data, other, result)\n\n return result\n\n def _mul_multivector(self, other):\n M, N = self.shape\n n_vecs = other.shape[1] # number of column vectors\n\n result = np.zeros((M, n_vecs),\n dtype=upcast_char(self.dtype.char, other.dtype.char))\n\n # csr_matvecs or csc_matvecs\n fn = getattr(_sparsetools, self.format + '_matvecs')\n fn(M, N, n_vecs, self.indptr, self.indices, self.data,\n other.ravel(), result.ravel())\n\n return result\n\n def _mul_sparse_matrix(self, other):\n M, K1 = self.shape\n K2, N = other.shape\n\n major_axis = self._swap((M, N))[0]\n other = 
self.__class__(other) # convert to this format\n\n idx_dtype = get_index_dtype((self.indptr, self.indices,\n other.indptr, other.indices))\n\n fn = getattr(_sparsetools, self.format + '_matmat_maxnnz')\n nnz = fn(M, N,\n np.asarray(self.indptr, dtype=idx_dtype),\n np.asarray(self.indices, dtype=idx_dtype),\n np.asarray(other.indptr, dtype=idx_dtype),\n np.asarray(other.indices, dtype=idx_dtype))\n\n idx_dtype = get_index_dtype((self.indptr, self.indices,\n other.indptr, other.indices),\n maxval=nnz)\n\n indptr = np.empty(major_axis + 1, dtype=idx_dtype)\n indices = np.empty(nnz, dtype=idx_dtype)\n data = np.empty(nnz, dtype=upcast(self.dtype, other.dtype))\n\n fn = getattr(_sparsetools, self.format + '_matmat')\n fn(M, N, np.asarray(self.indptr, dtype=idx_dtype),\n np.asarray(self.indices, dtype=idx_dtype),\n self.data,\n np.asarray(other.indptr, dtype=idx_dtype),\n np.asarray(other.indices, dtype=idx_dtype),\n other.data,\n indptr, indices, data)\n\n return self.__class__((data, indices, indptr), shape=(M, N))\n\n def diagonal(self, k=0):\n rows, cols = self.shape\n if k <= -rows or k >= cols:\n raise ValueError(\"k exceeds matrix dimensions\")\n fn = getattr(_sparsetools, self.format + \"_diagonal\")\n y = np.empty(min(rows + min(k, 0), cols - max(k, 0)),\n dtype=upcast(self.dtype))\n fn(k, self.shape[0], self.shape[1], self.indptr, self.indices,\n self.data, y)\n return y\n\n diagonal.__doc__ = spmatrix.diagonal.__doc__\n\n #####################\n # Other binary ops #\n #####################\n\n def _maximum_minimum(self, other, npop, op_name, dense_check):\n if isscalarlike(other):\n if dense_check(other):\n warn(\"Taking maximum (minimum) with > 0 (< 0) number results\"\n \" to a dense matrix.\", SparseEfficiencyWarning,\n stacklevel=3)\n other_arr = np.empty(self.shape, dtype=np.asarray(other).dtype)\n other_arr.fill(other)\n other_arr = self.__class__(other_arr)\n return self._binopt(other_arr, op_name)\n else:\n self.sum_duplicates()\n new_data = npop(self.data, np.asarray(other))\n mat = self.__class__((new_data, self.indices, self.indptr),\n dtype=new_data.dtype, shape=self.shape)\n return mat\n elif isdense(other):\n return npop(self.todense(), other)\n elif isspmatrix(other):\n return self._binopt(other, op_name)\n else:\n raise ValueError(\"Operands not compatible.\")\n\n def maximum(self, other):\n return self._maximum_minimum(other, np.maximum,\n '_maximum_', lambda x: np.asarray(x) > 0)\n\n maximum.__doc__ = spmatrix.maximum.__doc__\n\n def minimum(self, other):\n return self._maximum_minimum(other, np.minimum,\n '_minimum_', lambda x: np.asarray(x) < 0)\n\n minimum.__doc__ = spmatrix.minimum.__doc__\n\n #####################\n # Reduce operations #\n #####################\n\n def sum(self, axis=None, dtype=None, out=None):\n \"\"\"Sum the matrix over the given axis. 
If the axis is None, sum\n over both rows and columns, returning a scalar.\n \"\"\"\n # The spmatrix base class already does axis=0 and axis=1 efficiently\n # so we only do the case axis=None here\n if (not hasattr(self, 'blocksize') and\n axis in self._swap(((1, -1), (0, 2)))[0]):\n # faster than multiplication for large minor axis in CSC/CSR\n res_dtype = get_sum_dtype(self.dtype)\n ret = np.zeros(len(self.indptr) - 1, dtype=res_dtype)\n\n major_index, value = self._minor_reduce(np.add)\n ret[major_index] = value\n ret = asmatrix(ret)\n if axis % 2 == 1:\n ret = ret.T\n\n if out is not None and out.shape != ret.shape:\n raise ValueError('dimensions do not match')\n\n return ret.sum(axis=(), dtype=dtype, out=out)\n # spmatrix will handle the remaining situations when axis\n # is in {None, -1, 0, 1}\n else:\n return spmatrix.sum(self, axis=axis, dtype=dtype, out=out)\n\n sum.__doc__ = spmatrix.sum.__doc__\n\n def _minor_reduce(self, ufunc, data=None):\n \"\"\"Reduce nonzeros with a ufunc over the minor axis when non-empty\n\n Can be applied to a function of self.data by supplying data parameter.\n\n Warning: this does not call sum_duplicates()\n\n Returns\n -------\n major_index : array of ints\n Major indices where nonzero\n\n value : array of self.dtype\n Reduce result for nonzeros in each major_index\n \"\"\"\n if data is None:\n data = self.data\n major_index = np.flatnonzero(np.diff(self.indptr))\n value = ufunc.reduceat(data,\n downcast_intp_index(self.indptr[major_index]))\n return major_index, value\n\n #######################\n # Getting and Setting #\n #######################\n\n def _get_intXint(self, row, col):\n M, N = self._swap(self.shape)\n major, minor = self._swap((row, col))\n indptr, indices, data = get_csr_submatrix(\n M, N, self.indptr, self.indices, self.data,\n major, major + 1, minor, minor + 1)\n return data.sum(dtype=self.dtype)\n\n def _get_sliceXslice(self, row, col):\n major, minor = self._swap((row, col))\n if major.step in (1, None) and minor.step in (1, None):\n return self._get_submatrix(major, minor, copy=True)\n return self._major_slice(major)._minor_slice(minor)\n\n def _get_arrayXarray(self, row, col):\n # inner indexing\n idx_dtype = self.indices.dtype\n M, N = self._swap(self.shape)\n major, minor = self._swap((row, col))\n major = np.asarray(major, dtype=idx_dtype)\n minor = np.asarray(minor, dtype=idx_dtype)\n\n val = np.empty(major.size, dtype=self.dtype)\n csr_sample_values(M, N, self.indptr, self.indices, self.data,\n major.size, major.ravel(), minor.ravel(), val)\n if major.ndim == 1:\n return asmatrix(val)\n return self.__class__(val.reshape(major.shape))\n\n def _get_columnXarray(self, row, col):\n # outer indexing\n major, minor = self._swap((row, col))\n return self._major_index_fancy(major)._minor_index_fancy(minor)\n\n def _major_index_fancy(self, idx):\n \"\"\"Index along the major axis where idx is an array of ints.\n \"\"\"\n idx_dtype = self.indices.dtype\n indices = np.asarray(idx, dtype=idx_dtype).ravel()\n\n _, N = self._swap(self.shape)\n M = len(indices)\n new_shape = self._swap((M, N))\n if M == 0:\n return self.__class__(new_shape)\n\n row_nnz = np.diff(self.indptr)\n idx_dtype = self.indices.dtype\n res_indptr = np.zeros(M+1, dtype=idx_dtype)\n np.cumsum(row_nnz[idx], out=res_indptr[1:])\n\n nnz = res_indptr[-1]\n res_indices = np.empty(nnz, dtype=idx_dtype)\n res_data = np.empty(nnz, dtype=self.dtype)\n csr_row_index(M, indices, self.indptr, self.indices, self.data,\n res_indices, res_data)\n\n return 
self.__class__((res_data, res_indices, res_indptr),\n shape=new_shape, copy=False)\n\n def _major_slice(self, idx, copy=False):\n \"\"\"Index along the major axis where idx is a slice object.\n \"\"\"\n if idx == slice(None):\n return self.copy() if copy else self\n\n M, N = self._swap(self.shape)\n start, stop, step = idx.indices(M)\n M = len(range(start, stop, step))\n new_shape = self._swap((M, N))\n if M == 0:\n return self.__class__(new_shape)\n\n row_nnz = np.diff(self.indptr)\n idx_dtype = self.indices.dtype\n res_indptr = np.zeros(M+1, dtype=idx_dtype)\n np.cumsum(row_nnz[idx], out=res_indptr[1:])\n\n if step == 1:\n all_idx = slice(self.indptr[start], self.indptr[stop])\n res_indices = np.array(self.indices[all_idx], copy=copy)\n res_data = np.array(self.data[all_idx], copy=copy)\n else:\n nnz = res_indptr[-1]\n res_indices = np.empty(nnz, dtype=idx_dtype)\n res_data = np.empty(nnz, dtype=self.dtype)\n csr_row_slice(start, stop, step, self.indptr, self.indices,\n self.data, res_indices, res_data)\n\n return self.__class__((res_data, res_indices, res_indptr),\n shape=new_shape, copy=False)\n\n def _minor_index_fancy(self, idx):\n \"\"\"Index along the minor axis where idx is an array of ints.\n \"\"\"\n idx_dtype = self.indices.dtype\n idx = np.asarray(idx, dtype=idx_dtype).ravel()\n\n M, N = self._swap(self.shape)\n k = len(idx)\n new_shape = self._swap((M, k))\n if k == 0:\n return self.__class__(new_shape)\n\n # pass 1: count idx entries and compute new indptr\n col_offsets = np.zeros(N, dtype=idx_dtype)\n res_indptr = np.empty_like(self.indptr)\n csr_column_index1(k, idx, M, N, self.indptr, self.indices,\n col_offsets, res_indptr)\n\n # pass 2: copy indices/data for selected idxs\n col_order = np.argsort(idx).astype(idx_dtype, copy=False)\n nnz = res_indptr[-1]\n res_indices = np.empty(nnz, dtype=idx_dtype)\n res_data = np.empty(nnz, dtype=self.dtype)\n csr_column_index2(col_order, col_offsets, len(self.indices),\n self.indices, self.data, res_indices, res_data)\n return self.__class__((res_data, res_indices, res_indptr),\n shape=new_shape, copy=False)\n\n def _minor_slice(self, idx, copy=False):\n \"\"\"Index along the minor axis where idx is a slice object.\n \"\"\"\n if idx == slice(None):\n return self.copy() if copy else self\n\n M, N = self._swap(self.shape)\n start, stop, step = idx.indices(N)\n N = len(range(start, stop, step))\n if N == 0:\n return self.__class__(self._swap((M, N)))\n if step == 1:\n return self._get_submatrix(minor=idx, copy=copy)\n # TODO: don't fall back to fancy indexing here\n return self._minor_index_fancy(np.arange(start, stop, step))\n\n def _get_submatrix(self, major=None, minor=None, copy=False):\n \"\"\"Return a submatrix of this matrix.\n\n major, minor: None, int, or slice with step 1\n \"\"\"\n M, N = self._swap(self.shape)\n i0, i1 = _process_slice(major, M)\n j0, j1 = _process_slice(minor, N)\n\n if i0 == 0 and j0 == 0 and i1 == M and j1 == N:\n return self.copy() if copy else self\n\n indptr, indices, data = get_csr_submatrix(\n M, N, self.indptr, self.indices, self.data, i0, i1, j0, j1)\n\n shape = self._swap((i1 - i0, j1 - j0))\n return self.__class__((data, indices, indptr), shape=shape,\n dtype=self.dtype, copy=False)\n\n def _set_intXint(self, row, col, x):\n i, j = self._swap((row, col))\n self._set_many(i, j, x)\n\n def _set_arrayXarray(self, row, col, x):\n i, j = self._swap((row, col))\n self._set_many(i, j, x)\n\n def _set_arrayXarray_sparse(self, row, col, x):\n # clear entries that will be overwritten\n 
self._zero_many(*self._swap((row, col)))\n\n M, N = row.shape # matches col.shape\n broadcast_row = M != 1 and x.shape[0] == 1\n broadcast_col = N != 1 and x.shape[1] == 1\n r, c = x.row, x.col\n x = np.asarray(x.data, dtype=self.dtype)\n if broadcast_row:\n r = np.repeat(np.arange(M), len(r))\n c = np.tile(c, M)\n x = np.tile(x, M)\n if broadcast_col:\n r = np.repeat(r, N)\n c = np.tile(np.arange(N), len(c))\n x = np.repeat(x, N)\n # only assign entries in the new sparsity structure\n i, j = self._swap((row[r, c], col[r, c]))\n self._set_many(i, j, x)\n\n def _setdiag(self, values, k):\n if 0 in self.shape:\n return\n\n M, N = self.shape\n broadcast = (values.ndim == 0)\n\n if k < 0:\n if broadcast:\n max_index = min(M + k, N)\n else:\n max_index = min(M + k, N, len(values))\n i = np.arange(max_index, dtype=self.indices.dtype)\n j = np.arange(max_index, dtype=self.indices.dtype)\n i -= k\n\n else:\n if broadcast:\n max_index = min(M, N - k)\n else:\n max_index = min(M, N - k, len(values))\n i = np.arange(max_index, dtype=self.indices.dtype)\n j = np.arange(max_index, dtype=self.indices.dtype)\n j += k\n\n if not broadcast:\n values = values[:len(i)]\n\n self[i, j] = values\n\n def _prepare_indices(self, i, j):\n M, N = self._swap(self.shape)\n\n def check_bounds(indices, bound):\n idx = indices.max()\n if idx >= bound:\n raise IndexError('index (%d) out of range (>= %d)' %\n (idx, bound))\n idx = indices.min()\n if idx < -bound:\n raise IndexError('index (%d) out of range (< -%d)' %\n (idx, bound))\n\n i = np.array(i, dtype=self.indices.dtype, copy=False, ndmin=1).ravel()\n j = np.array(j, dtype=self.indices.dtype, copy=False, ndmin=1).ravel()\n check_bounds(i, M)\n check_bounds(j, N)\n return i, j, M, N\n\n def _set_many(self, i, j, x):\n \"\"\"Sets value at each (i, j) to x\n\n Here (i,j) index major and minor respectively, and must not contain\n duplicate entries.\n \"\"\"\n i, j, M, N = self._prepare_indices(i, j)\n x = np.array(x, dtype=self.dtype, copy=False, ndmin=1).ravel()\n\n n_samples = x.size\n offsets = np.empty(n_samples, dtype=self.indices.dtype)\n ret = csr_sample_offsets(M, N, self.indptr, self.indices, n_samples,\n i, j, offsets)\n if ret == 1:\n # rinse and repeat\n self.sum_duplicates()\n csr_sample_offsets(M, N, self.indptr, self.indices, n_samples,\n i, j, offsets)\n\n if -1 not in offsets:\n # only affects existing non-zero cells\n self.data[offsets] = x\n return\n\n else:\n warn(\"Changing the sparsity structure of a {}_matrix is expensive.\"\n \" lil_matrix is more efficient.\".format(self.format),\n SparseEfficiencyWarning, stacklevel=3)\n # replace where possible\n mask = offsets > -1\n self.data[offsets[mask]] = x[mask]\n # only insertions remain\n mask = ~mask\n i = i[mask]\n i[i < 0] += M\n j = j[mask]\n j[j < 0] += N\n self._insert_many(i, j, x[mask])\n\n def _zero_many(self, i, j):\n \"\"\"Sets value at each (i, j) to zero, preserving sparsity structure.\n\n Here (i,j) index major and minor respectively.\n \"\"\"\n i, j, M, N = self._prepare_indices(i, j)\n\n n_samples = len(i)\n offsets = np.empty(n_samples, dtype=self.indices.dtype)\n ret = csr_sample_offsets(M, N, self.indptr, self.indices, n_samples,\n i, j, offsets)\n if ret == 1:\n # rinse and repeat\n self.sum_duplicates()\n csr_sample_offsets(M, N, self.indptr, self.indices, n_samples,\n i, j, offsets)\n\n # only assign zeros to the existing sparsity structure\n self.data[offsets[offsets > -1]] = 0\n\n def _insert_many(self, i, j, x):\n \"\"\"Inserts new nonzero at each (i, j) with value x\n\n Here 
(i,j) index major and minor respectively.\n i, j and x must be non-empty, 1d arrays.\n Inserts each major group (e.g. all entries per row) at a time.\n Maintains has_sorted_indices property.\n Modifies i, j, x in place.\n \"\"\"\n order = np.argsort(i, kind='mergesort') # stable for duplicates\n i = i.take(order, mode='clip')\n j = j.take(order, mode='clip')\n x = x.take(order, mode='clip')\n\n do_sort = self.has_sorted_indices\n\n # Update index data type\n idx_dtype = get_index_dtype((self.indices, self.indptr),\n maxval=(self.indptr[-1] + x.size))\n self.indptr = np.asarray(self.indptr, dtype=idx_dtype)\n self.indices = np.asarray(self.indices, dtype=idx_dtype)\n i = np.asarray(i, dtype=idx_dtype)\n j = np.asarray(j, dtype=idx_dtype)\n\n # Collate old and new in chunks by major index\n indices_parts = []\n data_parts = []\n ui, ui_indptr = np.unique(i, return_index=True)\n ui_indptr = np.append(ui_indptr, len(j))\n new_nnzs = np.diff(ui_indptr)\n prev = 0\n for c, (ii, js, je) in enumerate(zip(ui, ui_indptr, ui_indptr[1:])):\n # old entries\n start = self.indptr[prev]\n stop = self.indptr[ii]\n indices_parts.append(self.indices[start:stop])\n data_parts.append(self.data[start:stop])\n\n # handle duplicate j: keep last setting\n uj, uj_indptr = np.unique(j[js:je][::-1], return_index=True)\n if len(uj) == je - js:\n indices_parts.append(j[js:je])\n data_parts.append(x[js:je])\n else:\n indices_parts.append(j[js:je][::-1][uj_indptr])\n data_parts.append(x[js:je][::-1][uj_indptr])\n new_nnzs[c] = len(uj)\n\n prev = ii\n\n # remaining old entries\n start = self.indptr[ii]\n indices_parts.append(self.indices[start:])\n data_parts.append(self.data[start:])\n\n # update attributes\n self.indices = np.concatenate(indices_parts)\n self.data = np.concatenate(data_parts)\n nnzs = np.empty(self.indptr.shape, dtype=idx_dtype)\n nnzs[0] = idx_dtype(0)\n indptr_diff = np.diff(self.indptr)\n indptr_diff[ui] += new_nnzs\n nnzs[1:] = indptr_diff\n self.indptr = np.cumsum(nnzs, out=nnzs)\n\n if do_sort:\n # TODO: only sort where necessary\n self.has_sorted_indices = False\n self.sort_indices()\n\n self.check_format(full_check=False)\n\n ######################\n # Conversion methods #\n ######################\n\n def tocoo(self, copy=True):\n major_dim, minor_dim = self._swap(self.shape)\n minor_indices = self.indices\n major_indices = np.empty(len(minor_indices), dtype=self.indices.dtype)\n _sparsetools.expandptr(major_dim, self.indptr, major_indices)\n row, col = self._swap((major_indices, minor_indices))\n\n from .coo import coo_matrix\n return coo_matrix((self.data, (row, col)), self.shape, copy=copy,\n dtype=self.dtype)\n\n tocoo.__doc__ = spmatrix.tocoo.__doc__\n\n def toarray(self, order=None, out=None):\n if out is None and order is None:\n order = self._swap('cf')[0]\n out = self._process_toarray_args(order, out)\n if not (out.flags.c_contiguous or out.flags.f_contiguous):\n raise ValueError('Output array must be C or F contiguous')\n # align ideal order with output array order\n if out.flags.c_contiguous:\n x = self.tocsr()\n y = out\n else:\n x = self.tocsc()\n y = out.T\n M, N = x._swap(x.shape)\n csr_todense(M, N, x.indptr, x.indices, x.data, y)\n return out\n\n toarray.__doc__ = spmatrix.toarray.__doc__\n\n ##############################################################\n # methods that examine or modify the internal data structure #\n ##############################################################\n\n def eliminate_zeros(self):\n \"\"\"Remove zero entries from the matrix\n\n This is an *in 
place* operation\n \"\"\"\n M, N = self._swap(self.shape)\n _sparsetools.csr_eliminate_zeros(M, N, self.indptr, self.indices,\n self.data)\n self.prune() # nnz may have changed\n\n def __get_has_canonical_format(self):\n \"\"\"Determine whether the matrix has sorted indices and no duplicates\n\n Returns\n - True: if the above applies\n - False: otherwise\n\n has_canonical_format implies has_sorted_indices, so if the latter flag\n is False, so will the former be; if the former is found True, the\n latter flag is also set.\n \"\"\"\n\n # first check to see if result was cached\n if not getattr(self, '_has_sorted_indices', True):\n # not sorted => not canonical\n self._has_canonical_format = False\n elif not hasattr(self, '_has_canonical_format'):\n self.has_canonical_format = _sparsetools.csr_has_canonical_format(\n len(self.indptr) - 1, self.indptr, self.indices)\n return self._has_canonical_format\n\n def __set_has_canonical_format(self, val):\n self._has_canonical_format = bool(val)\n if val:\n self.has_sorted_indices = True\n\n has_canonical_format = property(fget=__get_has_canonical_format,\n fset=__set_has_canonical_format)\n\n def sum_duplicates(self):\n \"\"\"Eliminate duplicate matrix entries by adding them together\n\n The is an *in place* operation\n \"\"\"\n if self.has_canonical_format:\n return\n self.sort_indices()\n\n M, N = self._swap(self.shape)\n _sparsetools.csr_sum_duplicates(M, N, self.indptr, self.indices,\n self.data)\n\n self.prune() # nnz may have changed\n self.has_canonical_format = True\n\n def __get_sorted(self):\n \"\"\"Determine whether the matrix has sorted indices\n\n Returns\n - True: if the indices of the matrix are in sorted order\n - False: otherwise\n\n \"\"\"\n\n # first check to see if result was cached\n if not hasattr(self, '_has_sorted_indices'):\n self._has_sorted_indices = _sparsetools.csr_has_sorted_indices(\n len(self.indptr) - 1, self.indptr, self.indices)\n return self._has_sorted_indices\n\n def __set_sorted(self, val):\n self._has_sorted_indices = bool(val)\n\n has_sorted_indices = property(fget=__get_sorted, fset=__set_sorted)\n\n def sorted_indices(self):\n \"\"\"Return a copy of this matrix with sorted indices\n \"\"\"\n A = self.copy()\n A.sort_indices()\n return A\n\n # an alternative that has linear complexity is the following\n # although the previous option is typically faster\n # return self.toother().toother()\n\n def sort_indices(self):\n \"\"\"Sort the indices of this matrix *in place*\n \"\"\"\n\n if not self.has_sorted_indices:\n _sparsetools.csr_sort_indices(len(self.indptr) - 1, self.indptr,\n self.indices, self.data)\n self.has_sorted_indices = True\n\n def prune(self):\n \"\"\"Remove empty space after all non-zero elements.\n \"\"\"\n major_dim = self._swap(self.shape)[0]\n\n if len(self.indptr) != major_dim + 1:\n raise ValueError('index pointer has invalid length')\n if len(self.indices) < self.nnz:\n raise ValueError('indices array has fewer than nnz elements')\n if len(self.data) < self.nnz:\n raise ValueError('data array has fewer than nnz elements')\n\n self.indices = _prune_array(self.indices[:self.nnz])\n self.data = _prune_array(self.data[:self.nnz])\n\n def resize(self, *shape):\n shape = check_shape(shape)\n if hasattr(self, 'blocksize'):\n bm, bn = self.blocksize\n new_M, rm = divmod(shape[0], bm)\n new_N, rn = divmod(shape[1], bn)\n if rm or rn:\n raise ValueError(\"shape must be divisible into %s blocks. 
\"\n \"Got %s\" % (self.blocksize, shape))\n M, N = self.shape[0] // bm, self.shape[1] // bn\n else:\n new_M, new_N = self._swap(shape)\n M, N = self._swap(self.shape)\n\n if new_M < M:\n self.indices = self.indices[:self.indptr[new_M]]\n self.data = self.data[:self.indptr[new_M]]\n self.indptr = self.indptr[:new_M + 1]\n elif new_M > M:\n self.indptr = np.resize(self.indptr, new_M + 1)\n self.indptr[M + 1:].fill(self.indptr[M])\n\n if new_N < N:\n mask = self.indices < new_N\n if not np.all(mask):\n self.indices = self.indices[mask]\n self.data = self.data[mask]\n major_index, val = self._minor_reduce(np.add, mask)\n self.indptr.fill(0)\n self.indptr[1:][major_index] = val\n np.cumsum(self.indptr, out=self.indptr)\n\n self._shape = shape\n\n resize.__doc__ = spmatrix.resize.__doc__\n\n ###################\n # utility methods #\n ###################\n\n # needed by _data_matrix\n def _with_data(self, data, copy=True):\n \"\"\"Returns a matrix with the same sparsity structure as self,\n but with different data. By default the structure arrays\n (i.e. .indptr and .indices) are copied.\n \"\"\"\n if copy:\n return self.__class__((data, self.indices.copy(),\n self.indptr.copy()),\n shape=self.shape,\n dtype=data.dtype)\n else:\n return self.__class__((data, self.indices, self.indptr),\n shape=self.shape, dtype=data.dtype)\n\n def _binopt(self, other, op):\n \"\"\"apply the binary operation fn to two sparse matrices.\"\"\"\n other = self.__class__(other)\n\n # e.g. csr_plus_csr, csr_minus_csr, etc.\n fn = getattr(_sparsetools, self.format + op + self.format)\n\n maxnnz = self.nnz + other.nnz\n idx_dtype = get_index_dtype((self.indptr, self.indices,\n other.indptr, other.indices),\n maxval=maxnnz)\n indptr = np.empty(self.indptr.shape, dtype=idx_dtype)\n indices = np.empty(maxnnz, dtype=idx_dtype)\n\n bool_ops = ['_ne_', '_lt_', '_gt_', '_le_', '_ge_']\n if op in bool_ops:\n data = np.empty(maxnnz, dtype=np.bool_)\n else:\n data = np.empty(maxnnz, dtype=upcast(self.dtype, other.dtype))\n\n fn(self.shape[0], self.shape[1],\n np.asarray(self.indptr, dtype=idx_dtype),\n np.asarray(self.indices, dtype=idx_dtype),\n self.data,\n np.asarray(other.indptr, dtype=idx_dtype),\n np.asarray(other.indices, dtype=idx_dtype),\n other.data,\n indptr, indices, data)\n\n A = self.__class__((data, indices, indptr), shape=self.shape)\n A.prune()\n\n return A\n\n def _divide_sparse(self, other):\n \"\"\"\n Divide this matrix by a second sparse matrix.\n \"\"\"\n if other.shape != self.shape:\n raise ValueError('inconsistent shapes')\n\n r = self._binopt(other, '_eldiv_')\n\n if np.issubdtype(r.dtype, np.inexact):\n # Eldiv leaves entries outside the combined sparsity\n # pattern empty, so they must be filled manually.\n # Everything outside of other's sparsity is NaN, and everything\n # inside it is either zero or defined by eldiv.\n out = np.empty(self.shape, dtype=self.dtype)\n out.fill(np.nan)\n row, col = other.nonzero()\n out[row, col] = 0\n r = r.tocoo()\n out[r.row, r.col] = r.data\n out = matrix(out)\n else:\n # integers types go with nan <-> 0\n out = r\n\n return out\n\n\ndef _process_slice(sl, num):\n if sl is None:\n i0, i1 = 0, num\n elif isinstance(sl, slice):\n i0, i1, stride = sl.indices(num)\n if stride != 1:\n raise ValueError('slicing with step != 1 not supported')\n i0 = min(i0, i1) # give an empty slice when i0 > i1\n elif isintlike(sl):\n if sl < 0:\n sl += num\n i0, i1 = sl, sl + 1\n if i0 < 0 or i1 > num:\n raise IndexError('index out of bounds: 0 <= %d < %d <= %d' %\n (i0, i1, num))\n 
else:\n raise TypeError('expected slice or scalar')\n\n return i0, i1\n"
] | [
[
"numpy.ones",
"numpy.multiply",
"numpy.diff",
"numpy.issubdtype",
"numpy.argsort",
"numpy.asarray",
"scipy._lib._util._prune_array",
"numpy.empty_like",
"numpy.isnan",
"numpy.unique",
"numpy.tile",
"numpy.atleast_2d",
"numpy.zeros",
"numpy.repeat",
"numpy.arange",
"numpy.all",
"numpy.resize",
"numpy.cumsum",
"numpy.empty",
"numpy.array",
"numpy.result_type",
"numpy.concatenate"
]
] |
athatheo/House-GANs-Reproduction | [
"00cc807f1e74f88eef5ed81615bfd87a39c52f94"
] | [
"src/models.py"
] | [
"import torch\nfrom torch import cat\nfrom torch.nn import Conv2d\nfrom torch.nn import Linear\nfrom torch.nn import Module\nfrom torch.nn import ConvTranspose2d\nfrom torch.nn import LeakyReLU\nfrom torch.nn import Tanh\nfrom torch.nn import MaxPool2d\nfrom torch import zeros_like\n\n\nclass ConvMPN(Module):\n def __init__(self):\n super().__init__()\n self.conv1 = Conv2d(in_channels=3*16, out_channels=2*16, kernel_size=(3, 3), stride=(1, 1), padding=1)\n self.conv2 = Conv2d(in_channels=2*16, out_channels=2*16, kernel_size=(3, 3), stride=(1, 1), padding=1)\n self.conv3 = Conv2d(in_channels=2*16, out_channels=16, kernel_size=(3, 3), stride=(1, 1), padding=1)\n self.leaky_relu = LeakyReLU(0.1)\n\n def get_nodes(self, feature_vectors, edges, include_neighbours=True):\n device = feature_vectors.device\n nodes = zeros_like(feature_vectors, device=device)\n if include_neighbours:\n index = torch.where(edges[:, 1] > 0)\n else:\n index = torch.where(edges[:, 1] < 0)\n\n src = torch.cat([edges[index[0], 0], edges[index[0], 2]]).long()\n dst = torch.cat([edges[index[0], 2], edges[index[0], 0]]).long()\n src = feature_vectors[src.contiguous()]\n dst = dst.view(-1, 1, 1, 1).expand_as(src).to(device)\n return nodes.scatter_add(0, dst, src)\n\n def cat_nodes(self, feature_vectors, edges):\n neighbouring_nodes = self.get_nodes(feature_vectors, edges, include_neighbours=True, )\n non_neighbouring_nodes = self.get_nodes(feature_vectors, edges, include_neighbours=False)\n\n encoding = torch.cat([feature_vectors, neighbouring_nodes, non_neighbouring_nodes], 1)\n return encoding\n\n def forward(self, x, edges):\n x = self.cat_nodes(x, edges)\n x = self.conv1(x)\n x = self.leaky_relu(x)\n x = self.conv2(x)\n x = self.leaky_relu(x)\n x = self.conv3(x)\n x = self.leaky_relu(x)\n return x\n\n\nclass Generator(Module):\n def __init__(self):\n super().__init__()\n self.linear_reshape_1 = Linear(138, 1024)\n self.conv_mpn_1 = ConvMPN()\n self.upsample_1 = ConvTranspose2d(16, 16, 4, 2, 1)\n self.conv_mpn_2 = ConvMPN()\n self.upsample_2 = ConvTranspose2d(16, 16, 4, 2, 1)\n self.conv_1 = Conv2d(16, 256, 3, 1, 1)\n self.leaky_relu = LeakyReLU(0.1)\n self.conv_2 = Conv2d(256, 128, 3, 1, 1)\n self.conv_3 = Conv2d(128, 1, 3, 1, 1)\n self.tanh = Tanh()\n\n def forward(self, z, t, edges):\n z = z.view(-1, 128)#\n t = t.view(-1, 10) #\n x = cat([z, t], 1)\n x = self.linear_reshape_1(x)\n x = x.view(-1, 16, 8, 8)\n x = self.conv_mpn_1(x, edges).view(-1, *x.shape[1:])\n x = self.upsample_1(x)\n x = self.leaky_relu(x)\n x = self.conv_mpn_2(x, edges).view(-1, *x.shape[1:])\n x = self.upsample_2(x)\n x = self.leaky_relu(x)\n x = self.conv_1(x.view(-1, x.shape[1], *x.shape[2:]))\n x = self.leaky_relu(x)\n x = self.conv_2(x)\n x = self.leaky_relu(x)\n x = self.conv_3(x)\n x = self.tanh(x)\n x = x.view(-1, *x.shape[2:])\n return x\n\n\nclass Discriminator(Module):\n def __init__(self):\n super().__init__()\n self.linear_reshape_1 = Linear(10, 8192)\n self.leaky_relu = LeakyReLU(0.1)\n self.conv_1 = Conv2d(9, 16, 3, 1, 1, bias=True)\n self.conv_2 = Conv2d(16, 16, 3, 1, 1)\n self.conv_3 = Conv2d(16, 16, 3, 1, 1)\n self.conv_mpn_1 = ConvMPN()\n self.downsample_1 = Conv2d(16, 16, 3, 2, 1)\n self.conv_mpn_2 = ConvMPN()\n self.downsample_2 = Conv2d(16, 16, 3, 2, 1)\n self.dec_conv_1 = Conv2d(16, 256, 3, 2, 1)\n self.dec_conv_2 = Conv2d(256, 128, 3, 2, 1)\n self.dec_conv_3 = Conv2d(128, 128, 3, 2, 1)\n self.pool_reshape_linear = Linear(128, 1)\n\n def add_pool(self, x, nd_to_sample):\n dtype, device = x.dtype, x.device\n batch_size = 
torch.max(nd_to_sample) + 1\n pooled_x = torch.zeros(batch_size, x.shape[-1], device=device).float()\n pool_to = nd_to_sample.view(-1, 1).expand_as(x).to(device)\n pooled_x = pooled_x.scatter_add(0, pool_to, x)\n return pooled_x\n\n def forward(self, x, t, edges, nd_to_sample):\n x = x.view(-1, 1, 32, 32)\n t = self.linear_reshape_1(t)\n t = t.view(-1, 8, 32, 32)\n x = cat([x, t], 1)\n x = self.conv_1(x)\n x = self.leaky_relu(x)\n x = self.conv_2(x)\n x = self.leaky_relu(x)\n x = self.conv_3(x)\n x = self.leaky_relu(x)\n x = self.conv_mpn_1(x, edges)\n x = self.downsample_1(x)\n x = self.leaky_relu(x)\n x = self.conv_mpn_2(x, edges)\n x = self.downsample_2(x)\n x = self.leaky_relu(x)\n x = self.dec_conv_1(x)\n x = self.leaky_relu(x)\n x = self.dec_conv_2(x)\n x = self.leaky_relu(x)\n x = self.dec_conv_3(x)\n x = self.leaky_relu(x)\n x = x.view(-1, x.shape[1])\n x = self.add_pool(x, nd_to_sample)\n x = self.pool_reshape_linear(x)\n return x\n"
] | [
[
"torch.nn.Linear",
"torch.zeros_like",
"torch.zeros",
"torch.nn.Tanh",
"torch.where",
"torch.nn.Conv2d",
"torch.max",
"torch.nn.ConvTranspose2d",
"torch.cat",
"torch.nn.LeakyReLU"
]
] |
dyahadila/ood_cartography | [
"ff65bf2b1a170e2913f0019a15af3398a1808f0f"
] | [
"cartography/classification/glue_utils.py"
] | [
"import logging\nimport os\n\nfrom transformers import glue_compute_metrics\nfrom transformers import glue_convert_examples_to_features as convert_examples_to_features\nfrom transformers import glue_output_modes\nfrom transformers import glue_processors\n\nfrom transformers.data.processors.glue import MnliMismatchedProcessor\nfrom transformers.data.processors.utils import InputFeatures\nfrom transformers.file_utils import is_tf_available\nif is_tf_available():\n import tensorflow as tf\n\nlogging.basicConfig(\n format=\"%(asctime)s - %(levelname)s - %(name)s - %(message)s\", level=logging.INFO\n)\nlogger = logging.getLogger(__name__)\n\nfrom cartography.data_utils_glue import convert_string_to_unique_number\nfrom cartography.classification.mnli_utils import AdaptedMnliMismatchedProcessor, AdaptedMnliProcessor\nfrom cartography.classification.qnli_utils import AdaptedQnliProcessor\nfrom cartography.classification.snli_utils import SNLIProcessor\nfrom cartography.classification.winogrande_utils import WinograndeProcessor\nfrom cartography.classification.wnli_utils import AdaptedWnliProcessor\nfrom cartography.classification.rte_utils import AdaptedRteProcessor\n\nimport pandas as pd\n\n\nglue_processors[\"snli\"] = SNLIProcessor\nglue_processors[\"mnli\"] = AdaptedMnliProcessor\nglue_processors[\"mnli-mm\"] = AdaptedMnliMismatchedProcessor\nglue_processors[\"qnli\"] = AdaptedQnliProcessor\nglue_processors[\"winogrande\"] = WinograndeProcessor\nglue_processors[\"wnli\"] = AdaptedWnliProcessor\nglue_processors[\"rte\"] = AdaptedRteProcessor\n\nglue_output_modes[\"snli\"] = \"classification\"\nglue_output_modes[\"winogrande\"] = \"classification\"\n\n\n\nclass AdaptedInputFeatures(InputFeatures):\n def __init__(self, input_ids, attention_mask=None, token_type_ids=None, label=None, example_id=None, lex = None,\n const=None, subs=None, original_idx=None):\n self.input_ids = input_ids\n self.attention_mask = attention_mask\n self.token_type_ids = token_type_ids\n self.label = label\n self.example_id = example_id\n self.lex = lex\n self.const = const\n self.subs = subs\n self.original_idx = original_idx\n\n\ndef get_instance_heuristics(task, data_split):\n mode = data_split\n ### UNCOMMENT FOR MNLI\n if 'dev' in data_split:\n mode = 'dev'\n if task.upper() == 'MNLI':\n mode = 'dev_matched'\n\n df = pd.read_csv(\"/home/jusun/adila001/{}/{}_heuristic.tsv\".format(task.upper(), mode), delimiter=\"\\t|\\n\")\n lexical = df[\"lexical\"].tolist()\n if 'constituent' in set(df.columns):\n constituent = df[\"constituent\"].tolist()\n else:\n constituent = [0 for i in range(df.shape[0])]\n subsequence = df[\"subsequence\"].tolist()\n return lexical, constituent, subsequence\n\ndef adapted_glue_convert_examples_to_features(\n examples,\n tokenizer,\n max_length=512,\n task=None,\n label_list=None,\n output_mode=None,\n pad_on_left=False,\n pad_token=0,\n pad_token_segment_id=0,\n mask_padding_with_zero=True,\n heuristics=True,\n data_split='train',\n):\n \"\"\"\n Adapted from `transformers`. New functionality: also return an integer ID for each example.\n Loads a data file into a list of ``InputFeatures``\n\n Args:\n examples: List of ``InputExamples`` or ``tf.data.Dataset`` containing the examples.\n tokenizer: Instance of a tokenizer that will tokenize the examples\n max_length: Maximum example length\n task: GLUE task\n label_list: List of labels. Can be obtained from the processor using the ``processor.get_labels()`` method\n output_mode: String indicating the output mode. 
Either ``regression`` or ``classification``\n pad_on_left: If set to ``True``, the examples will be padded on the left rather than on the right (default)\n pad_token: Padding token\n pad_token_segment_id: The segment ID for the padding token (It is usually 0, but can vary such as for XLNet where it is 4)\n mask_padding_with_zero: If set to ``True``, the attention mask will be filled by ``1`` for actual values\n and by ``0`` for padded values. If set to ``False``, inverts it (``1`` for padded values, ``0`` for\n actual values)\n\n Returns:\n If the ``examples`` input is a ``tf.data.Dataset``, will return a ``tf.data.Dataset``\n containing the task-specific features. If the input is a list of ``InputExamples``, will return\n a list of task-specific ``InputFeatures`` which can be fed to the model.\n\n \"\"\"\n is_tf_dataset = False\n if is_tf_available() and isinstance(examples, tf.data.Dataset):\n is_tf_dataset = True\n\n if task is not None:\n processor = glue_processors[task]()\n if label_list is None:\n label_list = processor.get_labels()\n logger.info(\"Using label list %s for task %s\" % (label_list, task))\n if output_mode is None:\n output_mode = glue_output_modes[task]\n logger.info(\"Using output mode %s for task %s\" % (output_mode, task))\n\n label_map = {label: i for i, label in enumerate(label_list)}\n\n features = []\n lex = []\n const= []\n subs = []\n if heuristics==True:\n lex, const, subs = get_instance_heuristics(task, data_split)\n\n for (ex_index, example) in enumerate(examples):\n len_examples = 0\n if is_tf_dataset:\n example = processor.get_example_from_tensor_dict(example)\n example = processor.tfds_map(example)\n len_examples = tf.data.experimental.cardinality(examples)\n else:\n len_examples = len(examples)\n if ex_index % 10000 == 0:\n logger.info(\"Writing example %d/%d\" % (ex_index, len_examples))\n\n inputs = tokenizer.encode_plus(example.text_a, example.text_b, add_special_tokens=True, max_length=max_length,)\n input_ids, token_type_ids = inputs[\"input_ids\"], inputs[\"token_type_ids\"]\n\n # The mask has 1 for real tokens and 0 for padding tokens. 
Only real\n # tokens are attended to.\n attention_mask = [1 if mask_padding_with_zero else 0] * len(input_ids)\n\n # Zero-pad up to the sequence length.\n padding_length = max_length - len(input_ids)\n if pad_on_left:\n input_ids = ([pad_token] * padding_length) + input_ids\n attention_mask = ([0 if mask_padding_with_zero else 1] * padding_length) + attention_mask\n token_type_ids = ([pad_token_segment_id] * padding_length) + token_type_ids\n else:\n input_ids = input_ids + ([pad_token] * padding_length)\n attention_mask = attention_mask + ([0 if mask_padding_with_zero else 1] * padding_length)\n token_type_ids = token_type_ids + ([pad_token_segment_id] * padding_length)\n\n assert len(input_ids) == max_length, \"Error with input length {} vs {}\".format(len(input_ids), max_length)\n assert len(attention_mask) == max_length, \"Error with input length {} vs {}\".format(\n len(attention_mask), max_length\n )\n assert len(token_type_ids) == max_length, \"Error with input length {} vs {}\".format(\n len(token_type_ids), max_length\n )\n\n if output_mode == \"classification\":\n label = label_map[example.label]\n elif output_mode == \"regression\":\n label = float(example.label)\n else:\n raise KeyError(output_mode)\n\n example_int_id = convert_string_to_unique_number(example.guid)\n if ex_index < 5:\n logger.info(\"*** Example ***\")\n logger.info(f\"guid: {example_int_id}\")\n logger.info(\"input_ids: %s\" % \" \".join([str(x) for x in input_ids]))\n logger.info(\"attention_mask: %s\" % \" \".join([str(x) for x in attention_mask]))\n logger.info(\"token_type_ids: %s\" % \" \".join([str(x) for x in token_type_ids]))\n logger.info(\"label: %s (id = %d)\" % (example.label, label))\n\n features.append(\n AdaptedInputFeatures(input_ids=input_ids,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n label=label,\n example_id=example_int_id,\n lex=None if len(lex)==0 else lex[ex_index],\n const=None if len(const)==0 else const[ex_index],\n subs=None if len(const)==0else subs[ex_index],\n original_idx=ex_index))\n if is_tf_available() and is_tf_dataset:\n\n def gen():\n for ex in features:\n yield (\n {\n \"input_ids\": ex.input_ids,\n \"attention_mask\": ex.attention_mask,\n \"token_type_ids\": ex.token_type_ids,\n },\n ex.label,\n )\n\n return tf.data.Dataset.from_generator(\n gen,\n ({\"input_ids\": tf.int32, \"attention_mask\": tf.int32, \"token_type_ids\": tf.int32}, tf.int64),\n (\n {\n \"input_ids\": tf.TensorShape([None]),\n \"attention_mask\": tf.TensorShape([None]),\n \"token_type_ids\": tf.TensorShape([None]),\n },\n tf.TensorShape([]),\n ),\n )\n return features\n\n\ndef adapted_glue_compute_metrics(task_name, preds, labels):\n \"Adapted from `glue_compute_metrics` to also handle SNLI.\"\n try:\n return glue_compute_metrics(task_name, preds, labels)\n except KeyError:\n if task_name in [\"snli\", \"winogrande\", \"toxic\"]:\n # Since MNLI also uses accuracy.\n return glue_compute_metrics(\"mnli\", preds, labels)\n raise KeyError(task_name)\n\n"
] | [
[
"tensorflow.data.experimental.cardinality",
"tensorflow.TensorShape"
]
] |
Forest216/BigDL | [
"840da9a2eaf395978dd83730b02aa5e5dfbd7989",
"840da9a2eaf395978dd83730b02aa5e5dfbd7989"
] | [
"python/nano/src/bigdl/nano/automl/tf/objective.py",
"python/chronos/src/bigdl/chronos/autots/tspipeline.py"
] | [
"#\n# Copyright 2016 The BigDL Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\n\nfrom selectors import EpollSelector\nfrom tensorflow.keras.backend import clear_session\nfrom tensorflow.keras.models import clone_model\nimport tensorflow as tf\nimport inspect\nimport copy\n\nfrom bigdl.nano.automl.hpo.backend import create_tfkeras_pruning_callback\nfrom bigdl.nano.utils.log4Error import invalidInputError\n\n\ndef _is_creator(model):\n return inspect.ismethod(model) or inspect.isfunction(model)\n\n\nclass Objective(object):\n \"\"\"The Tuning objective for HPO.\"\"\"\n\n def __init__(self,\n model=None,\n target_metric=None,\n pruning=False,\n backend=None,\n **kwargs\n ):\n \"\"\"\n Init the objective.\n\n :param: model: a model instance or a creator function.\n Defaults to None.\n :param: target_metric: str(optional): target metric to optimize.\n Defaults to None.\n :param: pruning: bool (optional): whether to enable pruning.\n Defaults to False.\n throw: ValueError: _description_\n \"\"\"\n if not _is_creator(model) and not isinstance(model, tf.keras.Model):\n invalidInputError(False,\n \"You should either pass a Tensorflo Keras model, or \"\n \"a model_creator to the Tuning objective.\")\n\n self.model_ = model\n self.target_metric_ = target_metric\n self.pruning = pruning\n self.backend = backend\n self.kwargs = kwargs\n\n @property\n def target_metric(self):\n \"\"\"Get the target metric.\"\"\"\n return self.target_metric_\n\n @target_metric.setter\n def target_metric(self, value):\n \"\"\"Set the target metric.\"\"\"\n # TODO add more validity check here\n self.target_metric_ = value\n\n def _prepare_fit_args(self, trial):\n # only do shallow copy and process/duplicate\n # specific args TODO: may need to handle more cases\n new_kwargs = copy.copy(self.kwargs)\n new_kwargs['verbose'] = 2\n\n # process batch size\n new_kwargs = self.backend.instantiate_param(trial, new_kwargs, 'batch_size')\n\n # process callbacks\n callbacks = new_kwargs.get('callbacks', None)\n callbacks = callbacks() if inspect.isfunction(callbacks) else callbacks\n\n if self.pruning:\n callbacks = callbacks or []\n prune_callback = create_tfkeras_pruning_callback(trial, self.target_metric)\n callbacks.append(prune_callback)\n\n new_kwargs['callbacks'] = callbacks\n return new_kwargs\n\n def __call__(self, trial):\n \"\"\"\n Execute Training and return target metric in each trial.\n\n :param: trial: the trial object which provides the hyperparameter combinition.\n :return: the target metric value.\n \"\"\"\n # Clear clutter from previous Keras session graphs.\n clear_session()\n # TODO may add data creator here, e.g. 
refresh data, reset generators, etc.\n # create model\n if _is_creator(self.model_):\n model = self.model_(trial)\n else:\n # copy model so that the original model is not changed\n # Need tests to check this path\n model = clone_model(self.model_)\n\n # fit\n new_kwargs = self._prepare_fit_args(trial)\n hist = model.fit(**new_kwargs)\n\n score = hist.history.get(self.target_metric, None)\n if score is not None:\n if isinstance(score, list):\n # score = score[-1]\n score = max(score)\n return score\n",
"#\n# Copyright 2016 The BigDL Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nimport os\nimport torch\nimport types\nimport numpy as np\n\nfrom bigdl.chronos.data import TSDataset\nfrom bigdl.chronos.metric.forecast_metrics import Evaluator\n\nDEFAULT_MODEL_INIT_DIR = \"model_init.ckpt\"\nDEFAULT_BEST_MODEL_DIR = \"best_model.ckpt\"\nDEFAULT_DATA_PROCESS_DIR = \"data_process.ckpt\"\nDEFAULT_BEST_CONFIG_DIR = \"best_config.ckpt\"\n\n\nclass TSPipeline:\n '''\n TSPipeline is an E2E solution for time series analysis (only forecasting task for now).\n You can use TSPipeline to:\n\n 1. Further development on the prototype. (predict, evaluate, incremental fit)\n\n 2. Deploy the model to their scenario. (save, load)\n '''\n def __init__(self,\n model,\n loss,\n optimizer,\n model_creator,\n loss_creator,\n optimizer_creator,\n best_config,\n **kwargs):\n from bigdl.nano.pytorch.trainer import Trainer\n\n # for runtime fit/predict/evaluate\n self._best_model = Trainer.compile(model=model,\n loss=loss,\n optimizer=optimizer)\n self._best_config = best_config\n self._onnxruntime_fp32 = None\n self._onnxruntime_int8 = None\n self._pytorch_int8 = None\n\n # for data postprocessing\n self._scaler = None\n self._scaler_index = None\n if \"scaler\" in kwargs.keys():\n self._scaler = kwargs[\"scaler\"]\n self._scaler_index = kwargs[\"scaler_index\"]\n\n # for save/load\n self.model_creator = model_creator\n self.loss_creator = loss_creator\n self.optimizer_creator = optimizer_creator\n\n def evaluate(self, data, metrics=['mse'], multioutput=\"uniform_average\",\n batch_size=32, quantize=False):\n '''\n Evaluate the time series pipeline.\n\n :param data: data can be a TSDataset or data creator.\n The TSDataset should follow the same operations as the training\n TSDataset used in AutoTSEstimator.fit.\n :param metrics: list of string or callable. e.g. ['mse'] or [customized_metrics]\n If callable function, it signature should be func(y_true, y_pred), where y_true and\n y_pred are numpy ndarray. The function should return a float value as evaluation\n result.\n :param multioutput: Defines aggregating of multiple output values.\n String in ['raw_values', 'uniform_average']. The value defaults to\n 'uniform_average'.\n :param batch_size: predict batch_size, the process will cost more time\n if batch_size is small while cost less memory. The param is only\n effective when data is a TSDataset. 
The values defaults to 32.\n :param quantize: if use the quantized model to predict.\n '''\n from bigdl.chronos.pytorch.utils import _pytorch_fashion_inference\n\n # predict\n if isinstance(data, TSDataset):\n x, y = self._tsdataset_to_numpy(data, is_predict=False)\n if quantize:\n yhat = _pytorch_fashion_inference(model=self._pytorch_int8,\n input_data=x,\n batch_size=batch_size)\n else:\n self._best_model.eval()\n yhat = _pytorch_fashion_inference(model=self._best_model,\n input_data=x,\n batch_size=batch_size)\n # unscale\n yhat = self._tsdataset_unscale(yhat)\n y = self._tsdataset_unscale(y)\n elif isinstance(data, types.FunctionType):\n yhat_list, y_list = [], []\n self._best_config.update({'batch_size': batch_size})\n for x, y in data(self._best_config):\n if quantize:\n yhat = _pytorch_fashion_inference(model=self._pytorch_int8,\n input_data=x.numpy())\n else:\n self._best_model.eval()\n yhat = _pytorch_fashion_inference(model=self._best_model,\n input_data=x.numpy())\n yhat_list.append(yhat)\n y_list.append(y)\n yhat = np.concatenate(yhat_list, axis=0)\n y = torch.cat(y_list, dim=0).numpy()\n else:\n from bigdl.nano.utils.log4Error import invalidInputError\n invalidInputError(False,\n \"We only support input tsdataset or data creator, \"\n f\"but found {data.__class__.__name__}.\")\n\n # evaluate\n aggregate = 'mean' if multioutput == 'uniform_average' else None\n eval_result = Evaluator.evaluate(metrics, y, yhat, aggregate=aggregate)\n return eval_result\n\n def evaluate_with_onnx(self, data, metrics=['mse'], multioutput=\"uniform_average\",\n batch_size=32, quantize=False):\n '''\n Evaluate the time series pipeline with onnx.\n\n :param data: data can be a TSDataset or data creator.\n The TSDataset should follow the same operations as the training\n TSDataset used in AutoTSEstimator.fit.\n :param metrics: list of string or callable. e.g. ['mse'] or [customized_metrics]\n If callable function, it signature should be func(y_true, y_pred), where y_true and\n y_pred are numpy ndarray. The function should return a float value as evaluation\n result.\n :param multioutput: Defines aggregating of multiple output values.\n String in ['raw_values', 'uniform_average']. The value defaults to\n 'uniform_average'.\n :param batch_size: predict batch_size, the process will cost more time\n if batch_size is small while cost less memory. The param is only\n effective when data is a TSDataset. 
The values defaults to 32.\n :param quantize: if use the quantized model to predict.\n '''\n from bigdl.chronos.pytorch import TSTrainer as Trainer\n from bigdl.chronos.pytorch.utils import _pytorch_fashion_inference\n from bigdl.nano.utils.log4Error import invalidInputError\n # predict with onnx\n if isinstance(data, TSDataset):\n x, y = self._tsdataset_to_numpy(data, is_predict=False)\n yhat = None\n if quantize:\n yhat = _pytorch_fashion_inference(model=self._onnxruntime_int8,\n input_data=x,\n batch_size=batch_size)\n else:\n if self._onnxruntime_fp32 is None:\n self._onnxruntime_fp32 = Trainer.trace(self._best_model,\n input_sample=torch.from_numpy(x[0:1]),\n accelerator=\"onnxruntime\")\n yhat = _pytorch_fashion_inference(model=self._onnxruntime_fp32,\n input_data=x,\n batch_size=batch_size)\n yhat = self._tsdataset_unscale(yhat)\n # unscale\n y = self._tsdataset_unscale(y)\n elif isinstance(data, types.FunctionType):\n yhat_list, y_list = [], []\n self._best_config.update({'batch_size': batch_size})\n yhat = None\n for x, y in data(self._best_config):\n if quantize:\n yhat = _pytorch_fashion_inference(model=self._onnxruntime_int8,\n input_data=x.numpy(),\n batch_size=batch_size)\n else:\n if self._onnxruntime_fp32 is None:\n self._onnxruntime_fp32 = Trainer.trace(self._best_model,\n input_sample=x[0:1],\n accelerator=\"onnxruntime\")\n yhat = _pytorch_fashion_inference(model=self._onnxruntime_fp32,\n input_data=x.numpy(),\n batch_size=batch_size)\n yhat_list.append(yhat)\n y_list.append(y)\n yhat = np.concatenate(yhat_list, axis=0)\n y = torch.cat(y_list, dim=0).numpy()\n else:\n invalidInputError(False,\n \"We only support input tsdataset or data creator, \"\n f\"but found {data.__class__.__name__}.\")\n # evaluate\n aggregate = 'mean' if multioutput == 'uniform_average' else None\n eval_result = Evaluator.evaluate(metrics, y, yhat, aggregate=aggregate)\n return eval_result\n\n def predict(self, data, batch_size=32, quantize=False):\n '''\n Rolling predict with time series pipeline.\n\n :param data: data can be a TSDataset or data creator.\n The TSDataset should follow the same operations as the training\n TSDataset used in AutoTSEstimator.fit.\n :param batch_size: predict batch_size, the process will cost more time\n if batch_size is small while cost less memory. The param is only\n effective when data is a TSDataset. 
The values defaults to 32.\n :param quantize: if use the quantized model to predict.\n '''\n from bigdl.chronos.pytorch.utils import _pytorch_fashion_inference\n from bigdl.nano.utils.log4Error import invalidInputError\n if isinstance(data, TSDataset):\n x, _ = self._tsdataset_to_numpy(data, is_predict=True)\n if quantize:\n yhat = _pytorch_fashion_inference(model=self._pytorch_int8,\n input_data=x,\n batch_size=batch_size)\n else:\n self._best_model.eval()\n yhat = _pytorch_fashion_inference(model=self._best_model,\n input_data=x,\n batch_size=batch_size)\n yhat = self._tsdataset_unscale(yhat)\n elif isinstance(data, types.FunctionType):\n yhat_list = []\n self._best_config.update({'batch_size': batch_size})\n for x, _ in data(self._best_config):\n if quantize:\n yhat = _pytorch_fashion_inference(model=self._pytorch_int8,\n input_data=x.numpy())\n else:\n self._best_model.eval()\n yhat = _pytorch_fashion_inference(model=self._best_model,\n input_data=x.numpy())\n yhat_list.append(yhat)\n yhat = np.concatenate(yhat_list, axis=0)\n else:\n invalidInputError(False,\n \"We only support input tsdataset or data creator, \"\n f\"but found {data.__class__.__name__}\")\n return yhat\n\n def predict_with_onnx(self, data, batch_size=32, quantize=False):\n '''\n Rolling predict with onnx with time series pipeline.\n\n :param data: data can be a TSDataset or data creator.\n The TSDataset should follow the same operations as the training\n TSDataset used in AutoTSEstimator.fit.\n :param batch_size: predict batch_size, the process will cost more time\n if batch_size is small while cost less memory. The param is only\n effective when data is a TSDataset. The values defaults to 32.\n :param quantize: if use the quantized model to predict.\n '''\n from bigdl.chronos.pytorch import TSTrainer as Trainer\n from bigdl.chronos.pytorch.utils import _pytorch_fashion_inference\n from bigdl.nano.utils.log4Error import invalidInputError\n if isinstance(data, TSDataset):\n x, _ = self._tsdataset_to_numpy(data, is_predict=True)\n yhat = None\n if quantize:\n yhat = _pytorch_fashion_inference(model=self._onnxruntime_int8,\n input_data=x,\n batch_size=batch_size)\n else:\n if self._onnxruntime_fp32 is None:\n self._onnxruntime_fp32 = Trainer.trace(self._best_model,\n input_sample=torch.from_numpy(x[0:1]),\n accelerator=\"onnxruntime\")\n yhat = _pytorch_fashion_inference(model=self._onnxruntime_fp32,\n input_data=x,\n batch_size=batch_size)\n yhat = self._tsdataset_unscale(yhat)\n elif isinstance(data, types.FunctionType):\n yhat = None\n yhat_list = []\n self._best_config.update({'batch_size': batch_size})\n for x, _ in data(self._best_config):\n if quantize:\n yhat = _pytorch_fashion_inference(model=self._onnxruntime_int8,\n input_data=x.numpy(),\n batch_size=batch_size)\n else:\n if self._onnxruntime_fp32 is None:\n self._onnxruntime_fp32 = Trainer.trace(self._best_model,\n input_sample=x[0:1],\n accelerator=\"onnxruntime\")\n yhat = _pytorch_fashion_inference(model=self._onnxruntime_fp32,\n input_data=x.numpy(),\n batch_size=batch_size)\n yhat_list.append(yhat)\n yhat = np.concatenate(yhat_list, axis=0)\n else:\n invalidInputError(False,\n \"We only support input tsdataset or data creator, \"\n f\"but found {data.__class__.__name__}\")\n return yhat\n\n def fit(self,\n data,\n validation_data=None,\n epochs=1,\n batch_size=None,\n **kwargs):\n '''\n Incremental fitting\n\n :param data: The data support following formats:\n\n | 1. 
data creator:\n | a function that takes a config dictionary as parameter and\n | returns a PyTorch DataLoader.\n |\n | 2. a bigdl.chronos.data.TSDataset:\n | the TSDataset should follow the same operations as the training\n | TSDataset used in `AutoTSEstimator.fit`.\n\n :param validation_data: validation data, same format as data.\n :param epochs: incremental fitting epoch. The value defaults to 1.\n :param metric: evaluate metric.\n :param batch_size: batch size, defaults to None, which takes the searched best batch_size.\n :param **kwargs: args to be passed to bigdl-nano trainer.\n '''\n from bigdl.chronos.pytorch import TSTrainer as Trainer\n from bigdl.nano.utils.log4Error import invalidInputError\n train_loader = None\n valid_loader = None\n if isinstance(data, TSDataset):\n if batch_size is None:\n batch_size = self._best_config[\"batch_size\"]\n train_loader = self._tsdataset_to_loader(data, batch_size=batch_size)\n if validation_data:\n valid_loader = self._tsdataset_to_loader(validation_data, batch_size=batch_size)\n elif isinstance(data, types.FunctionType):\n if batch_size:\n self._best_config.update({'batch_size': batch_size})\n train_loader = data(self._best_config)\n if validation_data:\n valid_loader = validation_data(self._best_config)\n else:\n invalidInputError(False,\n \"We only support input TSDataset or data creator, \"\n f\"but found {data.__class__.__name__}.\")\n\n self.trainer = Trainer(max_epochs=epochs, **kwargs)\n self.trainer.fit(self._best_model,\n train_dataloaders=train_loader,\n val_dataloaders=valid_loader)\n\n def save(self, file_path):\n '''\n Save the TSPipeline to a folder\n\n :param file_path: the folder location to save the pipeline\n '''\n import pickle\n if not os.path.isdir(file_path):\n os.mkdir(file_path)\n model_init_path = os.path.join(file_path, DEFAULT_MODEL_INIT_DIR)\n model_path = os.path.join(file_path, DEFAULT_BEST_MODEL_DIR)\n data_process_path = os.path.join(file_path, DEFAULT_DATA_PROCESS_DIR)\n best_config_path = os.path.join(file_path, DEFAULT_BEST_CONFIG_DIR)\n model_init = {\"model_creator\": self.model_creator,\n \"optimizer_creator\": self.optimizer_creator,\n \"loss_creator\": self.loss_creator}\n data_process = {\"scaler\": self._scaler,\n \"scaler_index\": self._scaler_index}\n with open(model_init_path, \"wb\") as f:\n pickle.dump(model_init, f)\n with open(data_process_path, \"wb\") as f:\n pickle.dump(data_process, f)\n with open(best_config_path, \"wb\") as f:\n pickle.dump(self._best_config, f)\n # self._best_model.save(model_path)\n torch.save(self._best_model.model.state_dict(), model_path)\n\n @staticmethod\n def load(file_path):\n '''\n Load the TSPipeline to a folder\n\n :param file_path: the folder location to load the pipeline\n '''\n import pickle\n model_init_path = os.path.join(file_path, DEFAULT_MODEL_INIT_DIR)\n model_path = os.path.join(file_path, DEFAULT_BEST_MODEL_DIR)\n data_process_path = os.path.join(file_path, DEFAULT_DATA_PROCESS_DIR)\n best_config_path = os.path.join(file_path, DEFAULT_BEST_CONFIG_DIR)\n with open(model_init_path, \"rb\") as f:\n model_init = pickle.load(f)\n with open(data_process_path, \"rb\") as f:\n data_process = pickle.load(f)\n with open(best_config_path, \"rb\") as f:\n best_config = pickle.load(f)\n\n model_creator = model_init[\"model_creator\"]\n optimizer_creator = model_init[\"optimizer_creator\"]\n loss_creator = model_init[\"loss_creator\"]\n\n model = model_creator(best_config)\n model.load_state_dict(torch.load(model_path))\n\n if isinstance(optimizer_creator, 
types.FunctionType):\n optimizer = optimizer_creator(model, best_config)\n else:\n optimizer = optimizer_creator(model.parameters(),\n lr=best_config.get('lr', 0.001))\n\n if isinstance(loss_creator, torch.nn.modules.loss._Loss):\n loss = loss_creator\n else:\n loss = loss_creator(best_config)\n\n return TSPipeline(model=model,\n loss=loss,\n optimizer=optimizer,\n model_creator=model_creator,\n loss_creator=loss_creator,\n optimizer_creator=optimizer_creator,\n best_config=best_config,\n **data_process)\n\n def quantize(self,\n calib_data,\n metric=None,\n conf=None,\n framework='pytorch_fx',\n approach='static',\n tuning_strategy='bayesian',\n relative_drop=None,\n absolute_drop=None,\n timeout=0,\n max_trials=1):\n \"\"\"\n Quantization TSPipeline.\n\n :param calib_data: Required for static quantization or evaluation.\n\n | 1. data creator:\n | a function that takes a config dictionary as parameter and\n | returns a PyTorch DataLoader.\n |\n | 2. a bigdl.chronos.data.TSDataset:\n | the TSDataset should follow the same operations as the training\n | TSDataset used in `AutoTSEstimator.fit`.\n |\n | 3. A torch.utils.data.dataloader.DataLoader object for calibration,\n | Users should set the configs correctly (e.g. past_seq_len, ...).\n | They can be found in TSPipeline._best_config.\n |\n | 4. A numpy ndarray tuple (x, y).\n | x's shape is (num_samples, past_seq_len, input_feature_dim).\n | y's shape is (num_samples, future_seq_len, output_feature_dim).\n | They can be found in TSPipeline._best_config.\n\n :param metric: A str represent the metrics for tunning the quality of\n quantization. You may choose from \"mse\", \"mae\", \"rmse\", \"r2\", \"mape\", \"smape\".\n :param conf: A path to conf yaml file for quantization. Default to None,\n using default config.\n :param framework: string or list, [{'pytorch'|'pytorch_fx'|'pytorch_ipex'},\n {'onnxrt_integerops'|'onnxrt_qlinearops'}]. Default: 'pytorch_fx'.\n Consistent with Intel Neural Compressor.\n :param approach: str, 'static' or 'dynamic'. Default to 'static'.\n :param tuning_strategy: str, 'bayesian', 'basic', 'mse' or 'sigopt'. Default to 'bayesian'.\n :param relative_drop: Float, tolerable ralative accuracy drop. Default to None,\n e.g. set to 0.1 means that we accept a 10% increase in the metrics error.\n :param absolute_drop: Float, tolerable ralative accuracy drop. Default to None,\n e.g. set to 5 means that we can only accept metrics smaller than 5.\n :param timeout: Tuning timeout (seconds). Default to 0, which means early stop.\n Combine with max_trials field to decide when to exit.\n :param max_trials: Max tune times. Default to 1. Combine with timeout field to\n decide when to exit. 
\"timeout=0, max_trials=1\" means it will try quantization\n only once and return satisfying best model.\n \"\"\"\n from torch.utils.data import DataLoader, TensorDataset\n from bigdl.chronos.data import TSDataset\n from bigdl.nano.utils.log4Error import invalidInputError\n # check model support for quantization\n from bigdl.chronos.autots.utils import check_quantize_available\n check_quantize_available(self._best_model.model)\n # calib data should be set if the forecaster is just loaded\n if calib_data is None and approach.startswith(\"static\"):\n invalidInputError(False,\n \"You must set a `calib_data` \"\n \"for quantization When you use 'static'.\")\n elif calib_data and approach.startswith(\"dynamic\"):\n invalidInputError(False,\n \"`calib_data` should be None When you use 'dynamic'.\")\n\n # preprocess data.\n from .utils import preprocess_quantize_data\n calib_data = preprocess_quantize_data(self, calib_data)\n\n # map metric str to function\n from bigdl.chronos.metric.forecast_metrics import TORCHMETRICS_REGRESSION_MAP\n if isinstance(metric, str):\n metric = TORCHMETRICS_REGRESSION_MAP[metric]\n\n # init acc criterion\n accuracy_criterion = None\n if relative_drop and absolute_drop:\n invalidInputError(False, \"Please unset either `relative_drop` or `absolute_drop`.\")\n if relative_drop:\n accuracy_criterion = {'relative': relative_drop, 'higher_is_better': False}\n if absolute_drop:\n accuracy_criterion = {'absolute': absolute_drop, 'higher_is_better': False}\n\n from bigdl.nano.pytorch.trainer import Trainer\n self._trainer = Trainer(logger=False, max_epochs=1,\n checkpoint_callback=False,\n use_ipex=False)\n\n # quantize\n framework = [framework] if isinstance(framework, str) else framework\n temp_quantized_model = None\n for framework_item in framework:\n accelerator, method = framework_item.split('_')\n if accelerator == 'pytorch':\n accelerator = None\n else:\n accelerator = 'onnxruntime'\n method = method[:-3]\n q_model = self._trainer.quantize(self._best_model,\n precision='int8',\n accelerator=accelerator,\n method=method,\n calib_dataloader=calib_data,\n metric=metric,\n conf=conf,\n approach=approach,\n tuning_strategy=tuning_strategy,\n accuracy_criterion=accuracy_criterion,\n timeout=timeout,\n max_trials=max_trials)\n if accelerator == \"onnxruntime\":\n self._onnxruntime_int8 = q_model\n if accelerator is None:\n self._pytorch_int8 = q_model\n\n def _tsdataset_to_loader(self, data, is_predict=False, batch_size=32):\n self._check_mixed_data_type_usage()\n lookback = self._best_config[\"past_seq_len\"]\n horizon = 0 if is_predict else self._best_config[\"future_seq_len\"]\n selected_features = self._best_config[\"selected_features\"]\n data_loader = data.to_torch_data_loader(batch_size=batch_size,\n roll=True,\n lookback=lookback,\n horizon=horizon,\n feature_col=selected_features)\n return data_loader\n\n def _tsdataset_to_numpy(self, data, is_predict=False):\n self._check_mixed_data_type_usage()\n lookback = self._best_config[\"past_seq_len\"]\n horizon = 0 if is_predict else self._best_config[\"future_seq_len\"]\n selected_features = self._best_config[\"selected_features\"]\n data.roll(lookback=lookback,\n horizon=horizon,\n feature_col=selected_features)\n return data.to_numpy()\n\n def _check_mixed_data_type_usage(self):\n from bigdl.nano.utils.log4Error import invalidInputError\n for key in (\"past_seq_len\", \"future_seq_len\", \"selected_features\"):\n if key not in self._best_config:\n invalidInputError(False,\n \"You use a data creator to fit your 
AutoTSEstimator, \"\n \"and use a TSDataset to predict/evaluate/fit on the TSPipeline.\"\n \"Please stick to the same data type.\")\n\n def _tsdataset_unscale(self, y):\n if self._scaler:\n from bigdl.chronos.data.utils.scale import unscale_timeseries_numpy\n y = unscale_timeseries_numpy(y, self._scaler, self._scaler_index)\n return y\n"
] | [
[
"tensorflow.keras.models.clone_model",
"tensorflow.keras.backend.clear_session"
],
[
"numpy.concatenate",
"torch.from_numpy",
"torch.cat",
"torch.load"
]
] |
Khanhnn00/blind_sr_denoise | [
"3153f90d20fd884ab69b47c30c685e0175276055"
] | [
"DNCNN/common.py"
] | [
"import os\nimport random\nimport numpy as np\nimport scipy.misc as misc\nimport imageio\nfrom tqdm import tqdm\nimport cv2\nfrom PIL import Image\n\nimport torch\nimport torch.nn.functional as F\n\nIMG_EXTENSIONS = ['.jpg', '.JPG', '.jpeg', '.JPEG', '.png', '.PNG', '.ppm', '.PPM', '.bmp', '.BMP']\nBINARY_EXTENSIONS = ['.npy']\nBENCHMARK = ['Set5', 'Set14', 'B100', 'Urban100', 'Manga109', 'DIV2K', 'DF2K']\n\n\n####################\n# Files & IO\n####################\ndef is_image_file(filename):\n return any(filename.endswith(extension) for extension in IMG_EXTENSIONS)\n\n\ndef is_binary_file(filename):\n return any(filename.endswith(extension) for extension in BINARY_EXTENSIONS)\n\n\ndef _get_paths_from_images(path):\n assert os.path.isdir(path), '[Error] [%s] is not a valid directory' % path\n images = []\n for dirpath, _, fnames in sorted(os.walk(path)):\n for fname in sorted(fnames):\n if is_image_file(fname):\n img_path = os.path.join(dirpath, fname)\n images.append(img_path)\n assert images, '[%s] has no valid image file' % path\n return images\n\n\ndef _get_paths_from_binary(path):\n assert os.path.isdir(path), '[Error] [%s] is not a valid directory' % path\n files = []\n for dirpath, _, fnames in sorted(os.walk(path)):\n for fname in sorted(fnames):\n if is_binary_file(fname):\n binary_path = os.path.join(dirpath, fname)\n files.append(binary_path)\n assert files, '[%s] has no valid binary file' % path\n return files\n\n\ndef find_benchmark(dataroot):\n bm_list = [dataroot.find(bm)>=0 for bm in BENCHMARK]\n if not sum(bm_list) == 0:\n bm_idx = bm_list.index(True)\n bm_name = BENCHMARK[bm_idx]\n else:\n bm_name = 'MyImage'\n return bm_name\n\n\ndef read_img(path):\n # read image by misc or from .npy\n # return: Numpy float32, HWC, RGB, [0,255]\n img = imageio.imread(path, pilmode='RGB')\n if img.ndim == 2:\n img = np.expand_dims(img, axis=2)\n return img\n\n# image processing\n# process on numpy image\n####################\ndef im2tensor01(im_np):\n \"\"\"Convert numpy to tensor to the gpu\"\"\"\n im_np = im_np / 255.0 if im_np.dtype == 'uint8' else im_np\n im_np = np.ascontiguousarray(im_np)\n return torch.FloatTensor(np.transpose(im_np, (2, 0, 1)))\n\ndef tensor2im(im_t):\n \"\"\"Copy the tensor to the cpu & convert to range [0,255]\"\"\"\n im_np = np.clip(np.round((np.transpose(im_t.squeeze(0).detach().cpu().float().numpy(), (1, 2, 0)) + 1) / 2.0 * 255.0), 0, 255)\n return im_np.astype(np.uint8)\n\ndef get_patch(img_tar, patch_size):\n oh, ow = img_tar.shape[:2]\n\n ip = patch_size\n tp = ip\n ix = random.randrange(0, ow - ip + 1)\n iy = random.randrange(0, oh - ip + 1)\n tx, ty = ix, iy\n\n img_tar = img_tar[ty:ty + tp, tx:tx + tp, :]\n\n return img_tar\n\ndef augment(img_list, hflip=True, rot=True):\n # horizontal flip OR rotate\n hflip = hflip and random.random() < 0.5\n vflip = rot and random.random() < 0.5\n rot90 = rot and random.random() < 0.5\n\n def _augment(img):\n if hflip: img = img[:, ::-1, :]\n if vflip: img = img[::-1, :, :]\n if rot90: img = img.transpose(1, 0, 2)\n return img\n\n return [_augment(img) for img in img_list]\n\n\ndef modcrop(img_in, scale):\n img = np.copy(img_in)\n if img.ndim == 2:\n H, W = img.shape\n H_r, W_r = H % scale, W % scale\n img = img[:H - H_r, :W - W_r]\n elif img.ndim == 3:\n H, W, C = img.shape\n H_r, W_r = H % scale, W % scale\n img = img[:H - H_r, :W - W_r, :]\n else:\n raise ValueError('Wrong img ndim: [%d].' % img.ndim)\n return img\n"
] | [
[
"numpy.ascontiguousarray",
"numpy.expand_dims",
"numpy.transpose",
"numpy.copy"
]
] |
tinyrobots/Generalized-PixelVAE | [
"ee99634be08c726c3da7e8ba2675c8d1448e15af"
] | [
"fast_pixel_cnn_pp/test_end_to_end.py"
] | [
"from . import model\nfrom . import fast_nn\n\nimport tensorflow as tf\nimport numpy as np\n\nimport os\nimport unittest\n\n\nclass FastPixelCNNPPEndToEndTest(tf.test.TestCase):\n def test_end_to_end(self):\n with self.test_session() as sess:\n print('Creating model')\n image_size = (10, 32, 32, 4)\n batch_size, image_height, image_width, image_channels = image_size\n\n # Create placeholders.\n row_input = tf.placeholder(\n tf.float32, [batch_size, 1, image_width, image_channels],\n name='row_input')\n pixel_input = tf.placeholder(\n tf.float32, [batch_size, 1, 1, image_channels],\n name='pixel_input')\n row_id = tf.placeholder(tf.int32, [], name='row_id')\n col_id = tf.placeholder(tf.int32, [], name='col_id')\n ema = tf.train.ExponentialMovingAverage(0.9995)\n\n # Create the model.\n model_spec = tf.make_template('model', model.model_spec)\n sample, fast_nn_out, v_stack = model_spec(\n row_input, pixel_input, row_id, col_id, image_size)\n\n # Initialize the caches.\n cache_variables = [\n v for v in tf.global_variables() if 'cache' in v.name\n ]\n sess.run(tf.variables_initializer(cache_variables))\n\n # Load the pretrained model\n print('Restoring variables')\n vars_to_restore = {\n k: v\n for k, v in ema.variables_to_restore().items()\n if 'cache' not in k\n }\n saver = tf.train.Saver(vars_to_restore)\n ckpt_path = None\n assert ckpt_path, 'Provide a path to the checkpoint in this file'\n saver.restore(sess, ckpt_path)\n\n # Create the fixed random input.\n np.random.seed(2702)\n x = np.random.randint(0, 256, size=(10, 32, 32, 3))\n x = np.cast[np.float32]((x - 127.5) / 127.5)\n x_pad = np.concatenate(\n (x, np.ones((batch_size, 32, 32, 1))), axis=3)\n x_downshift = fast_nn.down_shift(x_pad)\n x_rightshift = fast_nn.right_shift(x_pad)\n\n # Holds the output.\n num_output_features = 10 * 10\n output_features = np.zeros(\n (batch_size, 32, 32, num_output_features))\n\n # Compute all features.\n print('Computing features')\n sess.run(fast_nn.reset_cache_op())\n for row in range(image_height):\n x_row_input = x_downshift[:, row:(row + 1), :, :]\n sess.run(v_stack, {row_input: x_row_input, row_id: row})\n\n for col in range(image_width):\n x_pixel_input = x_rightshift[:, row:(row + 1),\n col:(col + 1), :]\n feed_dict = {\n row_id: row,\n col_id: col,\n pixel_input: x_pixel_input\n }\n pixel_features = sess.run(fast_nn_out, feed_dict)\n output_features[:, row:(row + 1), col:(\n col + 1), :] = pixel_features\n\n ground_truth_file = os.path.join(os.path.dirname(os.path.realpath(__file__)),\n 'ground_truth_output.npy')\n ground_truth_features = np.load(ground_truth_file)\n total_features = np.prod(output_features[0].shape)\n for i in range(batch_size):\n self.assertTrue(\n np.allclose(\n output_features[i, :, :, :],\n ground_truth_features[i, :, :, :],\n atol=1e-4))\n"
] | [
[
"numpy.load",
"tensorflow.placeholder",
"tensorflow.train.ExponentialMovingAverage",
"numpy.ones",
"numpy.allclose",
"numpy.zeros",
"numpy.random.seed",
"tensorflow.global_variables",
"tensorflow.train.Saver",
"numpy.prod",
"tensorflow.make_template",
"numpy.random.randint",
"tensorflow.variables_initializer"
]
] |
zhangyanyu0722/EC523_Project | [
"72673713bb798023e82ccc257e8c05459c34a4b9"
] | [
"carla-data-export/dataexport.py"
] | [
"\"\"\"\nThis file contains all the methods responsible for saving the generated data in the correct output format.\n\n\"\"\"\nimport cv2\nimport numpy as np\nimport os\nimport logging\nfrom utils import degrees_to_radians\nimport json\n\n\ndef save_groundplanes(planes_fname, player_measurements, lidar_height):\n from math import cos, sin\n \"\"\" Saves the groundplane vector of the current frame.\n The format of the ground plane file is first three lines describing the file (number of parameters).\n The next line is the three parameters of the normal vector, and the last is the height of the normal vector,\n which is the same as the distance to the camera in meters.\n \"\"\"\n rotation = player_measurements.transform.rotation\n pitch, roll = rotation.pitch, rotation.roll\n # Since measurements are in degrees, convert to radians\n pitch = degrees_to_radians(pitch)\n roll = degrees_to_radians(roll)\n # Rotate normal vector (y) wrt. pitch and yaw\n normal_vector = [cos(pitch)*sin(roll),\n -cos(pitch)*cos(roll),\n sin(pitch)\n ]\n normal_vector = map(str, normal_vector)\n with open(planes_fname, 'w') as f:\n f.write(\"# Plane\\n\")\n f.write(\"Width 4\\n\")\n f.write(\"Height 1\\n\")\n f.write(\"{} {}\\n\".format(\" \".join(normal_vector), lidar_height))\n logging.info(\"Wrote plane data to %s\", planes_fname)\n\n\ndef save_ref_files(OUTPUT_FOLDER, TIME_ON_NEW_EPISODE, PHASE, id):\n \"\"\" Appends the id of the given record to the files \"\"\"\n # for name in ['train.txt', 'val.txt', 'trainval.txt']:\n # path = os.path.join(OUTPUT_FOLDER, name)\n # with open(path, 'a') as f:\n # f.write(\"{0:06}\".format(id) + '\\n')\n # logging.info(\"Wrote reference files to %s\", path)\n\n prefix = os.path.join(\"\\\".\", \"data\", \"carla\", PHASE, \"label\", TIME_ON_NEW_EPISODE)\n name = \"{0:06}.json\\\"\".format(id)\n path = os.path.join(OUTPUT_FOLDER, \"label\", \"{}.json\".format(TIME_ON_NEW_EPISODE))\n\n with open(path, \"a\") as f:\n filePath = os.path.join(prefix, name)\n\n f.write(filePath + \"\\n\")\n\n logging.info(\"Wrote reference files to %s\", path)\n\n\ndef save_image_data(filename, image):\n logging.info(\"Wrote image data to %s\", filename)\n # Convert to correct color format\n color_fmt = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)\n cv2.imwrite(filename, color_fmt)\n\n\ndef save_lidar_data(filename, point_cloud, LIDAR_HEIGHT, format=\"bin\"):\n \"\"\" Saves lidar data to given filename, according to the lidar data format.\n bin is used for KITTI-data format, while .ply is the regular point cloud format\n In Unreal, the coordinate system of the engine is defined as, which is the same as the lidar points\n z\n ^ ^ x\n | /\n | /\n |/____> y\n This is a left-handed coordinate system, with x being forward, y to the right and z up\n See also https://github.com/carla-simulator/carla/issues/498\n However, the lidar coordinate system from KITTI is defined as\n z\n ^ ^ x\n | /\n | /\n y<____|/\n Which is a right handed coordinate sylstem\n Therefore, we need to flip the y axis of the lidar in order to get the correct lidar format for kitti.\n\n This corresponds to the following changes from Carla to Kitti\n Carla: X Y Z\n KITTI: X -Y Z\n NOTE: We do not flip the coordinate system when saving to .ply.\n \"\"\"\n logging.info(\"Wrote lidar data to %s\", filename)\n\n if format == \"bin\":\n lidar_array = [[point[0], -point[1], point[2], 1.0]\n for point in point_cloud]\n lidar_array = np.array(lidar_array).astype(np.float32)\n logging.debug(\"Lidar min/max of x: {} {}\".format(\n lidar_array[:, 
0].min(), lidar_array[:, 0].max()))\n logging.debug(\"Lidar min/max of y: {} {}\".format(\n lidar_array[:, 1].min(), lidar_array[:, 0].max()))\n logging.debug(\"Lidar min/max of z: {} {}\".format(\n lidar_array[:, 2].min(), lidar_array[:, 0].max()))\n lidar_array.tofile(filename)\n else:\n lidar_measurement.point_cloud.save_to_disk(filename)\n\n\ndef save_kitti_data(filename, datapoints):\n with open(filename, 'w') as f:\n # out_str = \"\\n\".join([str(point) for point in datapoints if point])\n # f.write(out_str)\n json.dump(datapoints, f)\n logging.info(\"Wrote kitti data to %s\", filename)\n\n\ndef save_calibration_matrices(filename, intrinsic_mat, extrinsic_mat):\n \"\"\" Saves the calibration matrices to a file.\n AVOD (and KITTI) refers to P as P=K*[R;t], so we will just store P.\n The resulting file will contain:\n 3x4 p0-p3 Camera P matrix. Contains extrinsic\n and intrinsic parameters. (P=K*[R;t])\n 3x3 r0_rect Rectification matrix, required to transform points\n from velodyne to camera coordinate frame.\n 3x4 tr_velodyne_to_cam Used to transform from velodyne to cam\n coordinate frame according to:\n Point_Camera = P_cam * R0_rect *\n Tr_velo_to_cam *\n Point_Velodyne.\n 3x4 tr_imu_to_velo Used to transform from imu to velodyne coordinate frame. This is not needed since we do not export\n imu data.\n \"\"\"\n # KITTI format demands that we flatten in row-major order\n ravel_mode = 'C'\n P0 = intrinsic_mat\n P0 = np.column_stack((P0, np.array([0, 0, 0])))\n P0 = np.ravel(P0, order=ravel_mode)\n R0 = np.identity(3)\n TR_velodyne = np.array([[0, -1, 0],\n [0, 0, -1],\n [1, 0, 0]])\n # Add translation vector from velo to camera. This is 0 because the position of camera and lidar is equal in our configuration.\n TR_velodyne = np.column_stack((TR_velodyne, np.array([0, 0, 0])))\n TR_imu_to_velo = np.identity(3)\n TR_imu_to_velo = np.column_stack((TR_imu_to_velo, np.array([0, 0, 0])))\n\n def write_flat(f, name, arr):\n f.write(\"{}: {}\\n\".format(name, ' '.join(\n map(str, arr.flatten(ravel_mode).squeeze()))))\n\n # All matrices are written on a line with spacing\n with open(filename, 'w') as f:\n for i in range(4): # Avod expects all 4 P-matrices even though we only use the first\n write_flat(f, \"P\" + str(i), P0)\n write_flat(f, \"R0_rect\", R0)\n write_flat(f, \"Tr_velo_to_cam\", TR_velodyne)\n write_flat(f, \"TR_imu_to_velo\", TR_imu_to_velo)\n logging.info(\"Wrote all calibration matrices to %s\", filename)\n"
] | [
[
"numpy.ravel",
"numpy.identity",
"numpy.array"
]
] |
lizhipengTouch/CSA-inpainting | [
"50602607ddc9153af5bfe627e355b0466fc4944f"
] | [
"models/vgg16.py"
] | [
"import torch\nimport torchvision\nfrom torchvision import models\nfrom collections import namedtuple\n\nclass Vgg16(torch.nn.Module):\n def __init__(self, requires_grad=False):\n super(Vgg16, self).__init__()\n vgg_pretrained_features = models.vgg16(pretrained=True).features # 获取预训练vgg网络层\n self.slice1 = torch.nn.Sequential()\n self.slice2 = torch.nn.Sequential()\n self.slice3 = torch.nn.Sequential()\n self.slice4 = torch.nn.Sequential()\n for x in range(5):\n self.slice1.add_module(str(x), vgg_pretrained_features[x])\n for x in range(5, 10):\n self.slice2.add_module(str(x), vgg_pretrained_features[x])\n for x in range(10, 17):\n self.slice3.add_module(str(x), vgg_pretrained_features[x])\n for x in range(17, 23):\n self.slice4.add_module(str(x), vgg_pretrained_features[x])\n if not requires_grad:\n for param in self.parameters():\n param.requires_grad = False\n\n def forward(self, X):\n h = self.slice1(X)\n h_relu1_2 = h\n h = self.slice2(h)\n h_relu2_2 = h\n h = self.slice3(h)\n h_relu3_3 = h\n h = self.slice4(h)\n h_relu4_3 = h\n vgg_outputs = namedtuple(\"VggOutputs\", ['relu1_2', 'relu2_2', 'relu3_3', 'relu4_3'])\n # 定义一个namedtuple类型数据,并包含列表中的属性。\n out = vgg_outputs(h_relu1_2, h_relu2_2, h_relu3_3, h_relu4_3)\n return out # 得到经过不同层的特征值"
] | [
[
"torch.nn.Sequential"
]
] |
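A short, hedged usage sketch for the Vgg16 feature extractor recorded above (models/vgg16.py); the dummy input and eval-mode call are illustrative, and loading the pretrained weights needs network access or a local torchvision cache.

```python
# Sketch only: run a dummy batch through the Vgg16 wrapper above and read the
# four intermediate activations it returns as a namedtuple.
import torch
from models.vgg16 import Vgg16  # path as recorded above

vgg = Vgg16(requires_grad=False).eval()
x = torch.randn(1, 3, 224, 224)      # dummy RGB batch in NCHW layout
with torch.no_grad():
    feats = vgg(x)                   # VggOutputs(relu1_2, relu2_2, relu3_3, relu4_3)
print(feats.relu1_2.shape, feats.relu4_3.shape)
```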
alphaciel/Balancing-Robot-Raspberry-Pi-DIY | [
"8a61acf688ea0915017c40eaff3841a9b219f9b7"
] | [
"matplotlib/matplotlib_test/plot_lib_test.py"
] | [
"import matplotlib.pyplot as plt\nimport numpy as np\nfrom matplotlib.widgets import Slider, Button, RadioButtons\n\nfig = plt.figure()\nax = fig.add_subplot(111)\nfig.subplots_adjust(left=0.25, bottom=0.25)\nmin0 = 0\nmax0 = 25000\n\nim = max0 * np.random.random((10,10))\nim1 = ax.imshow(im)\nfig.colorbar(im1)\n\naxcolor = 'lightgoldenrodyellow'\naxmin = fig.add_axes([0.25, 0.1, 0.65, 0.03], axisbg=axcolor)\naxmax = fig.add_axes([0.25, 0.15, 0.65, 0.03], axisbg=axcolor)\n\nsmin = Slider(axmin, 'Min', 0, 30000, valinit=min0)\nsmax = Slider(axmax, 'Max', 0, 30000, valinit=max0)\n\ndef update(val):\n im1.set_clim([smin.val,smax.val])\n fig.canvas.draw()\nsmin.on_changed(update)\nsmax.on_changed(update)\n\nplt.show()"
] | [
[
"numpy.random.random",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.show",
"matplotlib.widgets.Slider"
]
] |
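One portability note on the slider demo above: the `axisbg` keyword it passes to `fig.add_axes` was deprecated in Matplotlib 2.0 and removed in later releases in favour of `facecolor`, so the script fails on a current Matplotlib. A hedged sketch of the newer spelling:

```python
import matplotlib.pyplot as plt

fig = plt.figure()
axcolor = 'lightgoldenrodyellow'
# `facecolor` replaces the removed `axisbg` keyword used in the script above.
axmin = fig.add_axes([0.25, 0.10, 0.65, 0.03], facecolor=axcolor)
axmax = fig.add_axes([0.25, 0.15, 0.65, 0.03], facecolor=axcolor)
```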
Honghe/AnchorDETR | [
"fc3d45441241cd689b28878d3aa4b0bffb33a8b8"
] | [
"models/transformer.py"
] | [
"# ------------------------------------------------------------------------\n# Copyright (c) 2021 megvii-model. All Rights Reserved.\n# ------------------------------------------------------------------------\n# Modified from Deformable DETR (https://github.com/fundamentalvision/Deformable-DETR)\n# Copyright (c) 2020 SenseTime. All Rights Reserved.\n# ------------------------------------------------------------------------\n# Modified from DETR (https://github.com/facebookresearch/detr)\n# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved\n# ------------------------------------------------------------------------\nimport copy\nfrom typing import Optional, List\nimport math\n\nimport torch\nimport torch.nn.functional as F\nfrom torch import nn, Tensor\n\nfrom util.misc import inverse_sigmoid\n\n\nfrom models.row_column_decoupled_attention import MultiheadRCDA\n\nclass Transformer(nn.Module):\n def __init__(self, d_model=256, nhead=8,\n num_encoder_layers=6, num_decoder_layers=6, dim_feedforward=1024, dropout=0.,\n activation=\"relu\", num_feature_levels=3,num_query_position = 300,num_query_pattern=3,\n spatial_prior=\"learned\",attention_type=\"RCDA\"):\n super().__init__()\n\n self.d_model = d_model\n self.nhead = nhead\n\n self.attention_type = attention_type\n encoder_layer = TransformerEncoderLayerSpatial(d_model, dim_feedforward,\n dropout, activation, nhead , attention_type)\n encoder_layer_level = TransformerEncoderLayerLevel(d_model, dim_feedforward,\n dropout, activation, nhead)\n\n decoder_layer = TransformerDecoderLayer(d_model, dim_feedforward,\n dropout, activation, nhead,\n num_feature_levels, attention_type)\n\n if num_feature_levels == 1:\n self.num_encoder_layers_level = 0\n else:\n self.num_encoder_layers_level = num_encoder_layers // 2\n self.num_encoder_layers_spatial = num_encoder_layers - self.num_encoder_layers_level\n\n self.encoder_layers = _get_clones(encoder_layer, self.num_encoder_layers_spatial)\n self.encoder_layers_level = _get_clones(encoder_layer_level, self.num_encoder_layers_level)\n self.decoder_layers = _get_clones(decoder_layer, num_decoder_layers)\n\n self.spatial_prior=spatial_prior\n\n if num_feature_levels>1:\n self.level_embed = nn.Embedding(num_feature_levels, d_model)\n self.num_pattern = num_query_pattern\n self.pattern = nn.Embedding(self.num_pattern, d_model)\n\n self.num_position = num_query_position\n if self.spatial_prior == \"learned\":\n self.position = nn.Embedding(self.num_position, 2)\n\n self.adapt_pos2d = nn.Sequential(\n nn.Linear(d_model, d_model),\n nn.ReLU(),\n nn.Linear(d_model, d_model),\n )\n self.adapt_pos1d = nn.Sequential(\n nn.Linear(d_model, d_model),\n nn.ReLU(),\n nn.Linear(d_model, d_model),\n )\n\n self.num_layers = num_decoder_layers\n num_classes = 91\n\n self.class_embed = nn.Linear(d_model, num_classes)\n self.bbox_embed = MLP(d_model, d_model, 4, 3)\n\n self._reset_parameters()\n\n def _reset_parameters(self):\n\n num_pred = self.num_layers\n num_classes = 91\n prior_prob = 0.01\n bias_value = -math.log((1 - prior_prob) / prior_prob)\n self.class_embed.bias.data = torch.ones(num_classes) * bias_value\n\n nn.init.constant_(self.bbox_embed.layers[-1].weight.data, 0)\n nn.init.constant_(self.bbox_embed.layers[-1].bias.data, 0)\n if self.spatial_prior == \"learned\":\n nn.init.uniform_(self.position.weight.data, 0, 1)\n\n nn.init.constant_(self.bbox_embed.layers[-1].bias.data[2:], -2.0)\n self.class_embed = nn.ModuleList([self.class_embed for _ in range(num_pred)])\n self.bbox_embed = 
nn.ModuleList([self.bbox_embed for _ in range(num_pred)])\n\n\n def forward(self, srcs, masks):\n\n # prepare input for decoder\n bs, l, c, h, w = srcs.shape\n\n if self.spatial_prior == \"learned\":\n reference_points = self.position.weight.unsqueeze(0).repeat(bs, self.num_pattern, 1)\n elif self.spatial_prior == \"grid\":\n nx=ny=round(math.sqrt(self.num_position))\n self.num_position=nx*ny\n x = (torch.arange(nx) + 0.5) / nx\n y = (torch.arange(ny) + 0.5) / ny\n xy=torch.meshgrid(x,y)\n reference_points=torch.cat([xy[0].reshape(-1)[...,None],xy[1].reshape(-1)[...,None]],-1).cuda()\n reference_points = reference_points.unsqueeze(0).repeat(bs, self.num_pattern, 1)\n else:\n raise ValueError(f'unknown {self.spatial_prior} spatial prior')\n\n tgt = self.pattern.weight.reshape(1, self.num_pattern, 1, c).repeat(bs, 1, self.num_position, 1).reshape(\n bs, self.num_pattern * self.num_position, c)\n\n\n mask = masks[-1].unsqueeze(1).repeat(1,l,1,1).reshape(bs*l,h,w)\n pos_col, pos_row = mask2pos(mask)\n if self.attention_type==\"RCDA\":\n posemb_row = self.adapt_pos1d(pos2posemb1d(pos_row))\n posemb_col = self.adapt_pos1d(pos2posemb1d(pos_col))\n posemb_2d = None\n else:\n pos_2d = torch.cat([pos_row.unsqueeze(1).repeat(1, h, 1).unsqueeze(-1), pos_col.unsqueeze(2).repeat(1, 1, w).unsqueeze(-1)],dim=-1)\n posemb_2d = self.adapt_pos2d(pos2posemb2d(pos_2d))\n posemb_row = posemb_col = None\n\n outputs = srcs.reshape(bs * l, c, h, w)\n\n for idx in range(len(self.encoder_layers)):\n outputs = self.encoder_layers[idx](outputs, mask, posemb_row, posemb_col,posemb_2d)\n if idx < self.num_encoder_layers_level:\n outputs = self.encoder_layers_level[idx](outputs, level_emb=self.level_embed.weight.unsqueeze(1).unsqueeze(0).repeat(bs,1,1,1).reshape(bs*l,1,c))\n\n srcs = outputs.reshape(bs, l, c, h, w)\n\n output = tgt\n\n outputs_classes = []\n outputs_coords = []\n for lid, layer in enumerate(self.decoder_layers):\n output = layer(output, reference_points, srcs, mask, adapt_pos2d=self.adapt_pos2d,\n adapt_pos1d=self.adapt_pos1d, posemb_row=posemb_row, posemb_col=posemb_col,posemb_2d=posemb_2d)\n reference = inverse_sigmoid(reference_points)\n outputs_class = self.class_embed[lid](output)\n tmp = self.bbox_embed[lid](output)\n if reference.shape[-1] == 4:\n tmp += reference\n else:\n assert reference.shape[-1] == 2\n tmp[..., :2] += reference\n outputs_coord = tmp.sigmoid()\n outputs_classes.append(outputs_class[None,])\n outputs_coords.append(outputs_coord[None,])\n\n output = torch.cat(outputs_classes, dim=0), torch.cat(outputs_coords, dim=0)\n\n return output\n\n\nclass TransformerEncoderLayerSpatial(nn.Module):\n def __init__(self,\n d_model=256, d_ffn=1024,\n dropout=0., activation=\"relu\",\n n_heads=8, attention_type=\"RCDA\"):\n super().__init__()\n\n self.attention_type = attention_type\n if attention_type==\"RCDA\":\n attention_module=MultiheadRCDA\n elif attention_type == \"nn.MultiheadAttention\":\n attention_module=nn.MultiheadAttention\n else:\n raise ValueError(f'unknown {attention_type} attention_type')\n\n # self attention\n self.self_attn = attention_module(d_model, n_heads, dropout=dropout)\n self.dropout1 = nn.Dropout(dropout)\n self.norm1 = nn.LayerNorm(d_model)\n\n # ffn\n self.ffn = FFN(d_model, d_ffn, dropout, activation)\n\n @staticmethod\n def with_pos_embed(tensor, pos):\n return tensor if pos is None else tensor + pos\n\n def forward(self, src, padding_mask=None, posemb_row=None, posemb_col=None,posemb_2d=None):\n # self attention\n bz, c, h, w = src.shape\n src = src.permute(0, 
2, 3, 1)\n\n if self.attention_type==\"RCDA\":\n posemb_row = posemb_row.unsqueeze(1).repeat(1, h, 1, 1)\n posemb_col = posemb_col.unsqueeze(2).repeat(1, 1, w, 1)\n src2 = self.self_attn((src + posemb_row).reshape(bz, h * w, c), (src + posemb_col).reshape(bz, h * w, c),\n src + posemb_row, src + posemb_col,\n src, key_padding_mask=padding_mask)[0].transpose(0, 1).reshape(bz, h, w, c)\n else:\n src2 = self.self_attn((src + posemb_2d).reshape(bz, h * w, c).transpose(0, 1),\n (src + posemb_2d).reshape(bz, h * w, c).transpose(0, 1),\n src.reshape(bz, h * w, c).transpose(0, 1))[0].transpose(0, 1).reshape(bz, h, w, c)\n\n src = src + self.dropout1(src2)\n src = self.norm1(src)\n\n # ffn\n src = self.ffn(src)\n src = src.permute(0, 3, 1, 2)\n return src\n\n\nclass TransformerEncoderLayerLevel(nn.Module):\n def __init__(self,\n d_model=256, d_ffn=1024,\n dropout=0., activation=\"relu\",\n n_heads=8):\n super().__init__()\n\n # self attention\n self.self_attn_level = nn.MultiheadAttention(d_model, n_heads, dropout=dropout)\n self.dropout1 = nn.Dropout(dropout)\n self.norm1 = nn.LayerNorm(d_model)\n\n # ffn\n self.ffn = FFN(d_model, d_ffn, dropout, activation)\n\n @staticmethod\n def with_pos_embed(tensor, pos):\n return tensor if pos is None else tensor + pos\n\n def forward(self, src, level_emb=0):\n # self attention\n bz, c, h, w = src.shape\n src = src.permute(0, 2, 3, 1)\n\n src2 = self.self_attn_level(src.reshape(bz, h * w, c) + level_emb, src.reshape(bz, h * w, c) + level_emb,\n src.reshape(bz, h * w, c))[0].reshape(bz, h, w, c)\n\n src = src + self.dropout1(src2)\n src = self.norm1(src)\n\n # ffn\n src = self.ffn(src)\n src = src.permute(0, 3, 1, 2)\n return src\n\n\n\nclass TransformerDecoderLayer(nn.Module):\n def __init__(self, d_model=256, d_ffn=1024,\n dropout=0., activation=\"relu\", n_heads=8,\n n_levels=3, attention_type=\"RCDA\"):\n super().__init__()\n\n self.attention_type = attention_type\n self.attention_type = attention_type\n if attention_type==\"RCDA\":\n attention_module=MultiheadRCDA\n elif attention_type == \"nn.MultiheadAttention\":\n attention_module=nn.MultiheadAttention\n else:\n raise ValueError(f'unknown {attention_type} attention_type')\n\n # cross attention\n self.cross_attn = attention_module(d_model, n_heads, dropout=dropout)\n self.dropout1 = nn.Dropout(dropout)\n self.norm1 = nn.LayerNorm(d_model)\n\n # self attention\n self.self_attn = nn.MultiheadAttention(d_model, n_heads, dropout=dropout)\n self.dropout2 = nn.Dropout(dropout)\n self.norm2 = nn.LayerNorm(d_model)\n\n\n # level combination\n if n_levels>1:\n self.level_fc = nn.Linear(d_model * n_levels, d_model)\n\n # ffn\n self.ffn = FFN(d_model, d_ffn, dropout, activation)\n\n @staticmethod\n def with_pos_embed(tensor, pos):\n return tensor if pos is None else tensor + pos\n\n def forward(self, tgt, reference_points, srcs, src_padding_masks=None, adapt_pos2d=None,\n adapt_pos1d=None, posemb_row=None, posemb_col=None, posemb_2d=None):\n tgt_len = tgt.shape[1]\n\n query_pos = pos2posemb2d(reference_points.squeeze(2))\n query_pos = adapt_pos2d(query_pos)\n # self attention\n q = k = self.with_pos_embed(tgt, query_pos)\n tgt2 = self.self_attn(q.transpose(0, 1), k.transpose(0, 1), tgt.transpose(0, 1))[0].transpose(0, 1)\n tgt = tgt + self.dropout2(tgt2)\n tgt = self.norm2(tgt)\n\n bz, l, c, h, w = srcs.shape\n srcs = srcs.reshape(bz * l, c, h, w).permute(0, 2, 3, 1)\n\n if self.attention_type == \"RCDA\":\n query_pos_x = adapt_pos1d(pos2posemb1d(reference_points[..., 0]))\n query_pos_y = 
adapt_pos1d(pos2posemb1d(reference_points[..., 1]))\n posemb_row = posemb_row.unsqueeze(1).repeat(1, h, 1, 1)\n posemb_col = posemb_col.unsqueeze(2).repeat(1, 1, w, 1)\n src_row = src_col = srcs\n k_row = src_row + posemb_row\n k_col = src_col + posemb_col\n tgt2 = self.cross_attn((tgt + query_pos_x).repeat(l, 1, 1), (tgt + query_pos_y).repeat(l, 1, 1), k_row, k_col,\n srcs, key_padding_mask=src_padding_masks)[0].transpose(0, 1)\n else:\n tgt2 = self.cross_attn((tgt + query_pos).repeat(l, 1, 1).transpose(0, 1),\n (srcs + posemb_2d).reshape(bz * l, h * w, c).transpose(0,1),\n srcs.reshape(bz * l, h * w, c).transpose(0, 1))[0].transpose(0,1)\n\n if l > 1:\n tgt2 = self.level_fc(tgt2.reshape(bz, l, tgt_len, c).permute(0, 2, 3, 1).reshape(bz, tgt_len, c * l))\n\n tgt = tgt + self.dropout1(tgt2)\n tgt = self.norm1(tgt)\n\n # ffn\n tgt = self.ffn(tgt)\n\n return tgt\n\n\nclass FFN(nn.Module):\n\n def __init__(self, d_model=256, d_ffn=1024, dropout=0., activation='relu'):\n super().__init__()\n self.linear1 = nn.Linear(d_model, d_ffn)\n self.activation = _get_activation_fn(activation)\n self.dropout2 = nn.Dropout(dropout)\n self.linear2 = nn.Linear(d_ffn, d_model)\n self.dropout3 = nn.Dropout(dropout)\n self.norm2 = nn.LayerNorm(d_model)\n\n def forward(self, src):\n src2 = self.linear2(self.dropout2(self.activation(self.linear1(src))))\n src = src + self.dropout3(src2)\n src = self.norm2(src)\n return src\n\n\nclass MLP(nn.Module):\n\n def __init__(self, input_dim, hidden_dim, output_dim, num_layers):\n super().__init__()\n self.num_layers = num_layers\n h = [hidden_dim] * (num_layers - 1)\n self.layers = nn.ModuleList(nn.Linear(n, k) for n, k in zip([input_dim] + h, h + [output_dim]))\n\n def forward(self, x):\n for i, layer in enumerate(self.layers):\n x = F.relu(layer(x)) if i < self.num_layers - 1 else layer(x)\n return x\n\n\ndef _get_clones(module, N):\n return nn.ModuleList([copy.deepcopy(module) for i in range(N)])\n\n\ndef _get_activation_fn(activation):\n \"\"\"Return an activation function given a string\"\"\"\n if activation == \"relu\":\n return F.relu\n if activation == \"gelu\":\n return F.gelu\n if activation == \"glu\":\n return F.glu\n raise RuntimeError(F\"activation should be relu/gelu, not {activation}.\")\n\n\ndef build_transformer(args):\n return Transformer(\n d_model=args.hidden_dim,\n nhead=args.nheads,\n num_encoder_layers=args.enc_layers,\n num_decoder_layers=args.dec_layers,\n dim_feedforward=args.dim_feedforward,\n dropout=args.dropout,\n activation=\"relu\",\n num_feature_levels=args.num_feature_levels,\n num_query_position=args.num_query_position,\n num_query_pattern=args.num_query_pattern,\n spatial_prior=args.spatial_prior,\n attention_type=args.attention_type,\n)\n\n\n\n\n\ndef pos2posemb2d(pos, num_pos_feats=128, temperature=10000):\n scale = 2 * math.pi\n pos = pos * scale\n dim_t = torch.arange(num_pos_feats, dtype=torch.float32, device=pos.device)\n dim_t = temperature ** (2 * (dim_t // 2) / num_pos_feats)\n pos_x = pos[..., 0, None] / dim_t\n pos_y = pos[..., 1, None] / dim_t\n pos_x = torch.stack((pos_x[..., 0::2].sin(), pos_x[..., 1::2].cos()), dim=-1).flatten(-2)\n pos_y = torch.stack((pos_y[..., 0::2].sin(), pos_y[..., 1::2].cos()), dim=-1).flatten(-2)\n posemb = torch.cat((pos_y, pos_x), dim=-1)\n return posemb\n\n\ndef pos2posemb1d(pos, num_pos_feats=256, temperature=10000):\n scale = 2 * math.pi\n pos = pos * scale\n dim_t = torch.arange(num_pos_feats, dtype=torch.float32, device=pos.device)\n dim_t = temperature ** (2 * (dim_t // 2) / 
num_pos_feats)\n pos_x = pos[..., None] / dim_t\n posemb = torch.stack((pos_x[..., 0::2].sin(), pos_x[..., 1::2].cos()), dim=-1).flatten(-2)\n return posemb\n\n\ndef mask2pos(mask):\n not_mask = ~mask\n y_embed = not_mask[:, :, 0].cumsum(1, dtype=torch.float32)\n x_embed = not_mask[:, 0, :].cumsum(1, dtype=torch.float32)\n y_embed = (y_embed - 0.5) / y_embed[:, -1:]\n x_embed = (x_embed - 0.5) / x_embed[:, -1:]\n return y_embed, x_embed\n"
] | [
[
"torch.nn.MultiheadAttention",
"torch.ones",
"torch.nn.Linear",
"torch.nn.init.constant_",
"torch.nn.init.uniform_",
"torch.nn.Embedding",
"torch.nn.ReLU",
"torch.arange",
"torch.nn.LayerNorm",
"torch.meshgrid",
"torch.cat",
"torch.nn.Dropout"
]
] |
SIMEXP/nilearn | [
"4f51aea58f38689ca32c2edd748528d521e6cfb0"
] | [
"examples/01_plotting/plot_colormaps.py"
] | [
"\"\"\"\nMatplotlib colormaps in Nilearn\n================================\n\nVisualize HCP connectome workbench color maps shipped with Nilearn\nwhich can be used for plotting brain images on surface.\n\nSee :ref:`surface-plotting` for surface plotting details.\n\"\"\"\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom nilearn.plotting.cm import _cmap_d as nilearn_cmaps\nfrom nilearn.plotting import show\n\n###########################################################################\n# Plot color maps\n# ----------------\n\nnmaps = len(nilearn_cmaps)\na = np.outer(np.arange(0, 1, 0.01), np.ones(10))\n\n# Initialize the figure\nplt.figure(figsize=(10, 4.2))\nplt.subplots_adjust(top=0.4, bottom=0.05, left=0.01, right=0.99)\n\nfor index, cmap in enumerate(nilearn_cmaps):\n plt.subplot(1, nmaps + 1, index + 1)\n plt.imshow(a, cmap=nilearn_cmaps[cmap])\n plt.axis('off')\n plt.title(cmap, fontsize=10, va='bottom', rotation=90)\n\n###########################################################################\n# Plot matplotlib color maps\n# --------------------------\nplt.figure(figsize=(10, 5))\nplt.subplots_adjust(top=0.8, bottom=0.05, left=0.01, right=0.99)\ndeprecated_cmaps = ['Vega10', 'Vega20', 'Vega20b', 'Vega20c', 'spectral']\nm_cmaps = []\nfor m in plt.cm.datad:\n if not m.endswith(\"_r\") and m not in deprecated_cmaps:\n m_cmaps.append(m)\nm_cmaps.sort()\n\nfor index, cmap in enumerate(m_cmaps):\n plt.subplot(1, len(m_cmaps) + 1, index + 1)\n plt.imshow(a, cmap=plt.get_cmap(cmap), aspect='auto')\n plt.axis('off')\n plt.title(cmap, fontsize=10, va='bottom', rotation=90)\n\nshow()\n"
] | [
[
"numpy.ones",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.axis",
"numpy.arange",
"matplotlib.pyplot.title",
"matplotlib.pyplot.subplots_adjust",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.imshow",
"matplotlib.pyplot.get_cmap"
]
] |
raznem/sac_ppo | [
"c18e9bd32a70fcc4bc413565c6b885d7560b8b5a"
] | [
"rltoolkit/rl.py"
] | [
"import logging\nfrom pathlib import Path\nfrom typing import Any, Optional, Tuple, Union\n\nimport gym\nimport torch\nimport pickle as pkl\n\nfrom rltoolkit import config, utils\nfrom rltoolkit.buffer import Memory\nfrom rltoolkit.stats_logger import StatsLogger\nfrom rltoolkit.tensorboard_logger import TensorboardWriter\n\nlogger = logging.getLogger(__name__)\n\n\nclass MetaLearner:\n def __init__(\n self,\n env_name: str,\n use_gpu: bool,\n debug_mode: bool = config.DEBUG_MODE,\n tensorboard_dir: Union[str, None] = config.TENSORBOARD_DIR,\n tensorboard_comment: str = config.TENSORBOARD_COMMENT,\n ):\n f\"\"\"Class with parameters common for RL and other interactions with environment\n\n Args:\n env_name (str): Name of the gym environment.\n use_gpu (bool): Use CUDA.\n debug_mode (bool, optional): Log additional info.\n Defaults to { config.DEBUG_MODE }\n tensorboard_dir (Union[str, None], optional): Path to tensorboard logs.\n Defaults to { config.TENSORBOARD_DIR }.\n tensorboard_comment (str, optional): Comment for tensorboard files.\n Defaults to { config.TENSORBOARD_COMMENT }.\n \"\"\"\n self.env_name = env_name\n if use_gpu and torch.cuda.is_available():\n self.device = torch.device(\"cuda\")\n else:\n self.device = torch.device(\"cpu\")\n\n self.env = gym.make(self.env_name)\n self.discrete = isinstance(self.env.action_space, gym.spaces.Discrete)\n self.ob_dim = self.env.observation_space.shape[0]\n if self.discrete:\n self.ac_dim = self.env.action_space.n\n self.ac_lim = None\n else:\n self.ac_dim = self.env.action_space.shape[0]\n self.ac_lim = torch.tensor(self.env.action_space.high, device=self.device)\n\n self.obs_mean = torch.zeros(self.ob_dim, device=self.device)\n self.obs_std = torch.ones(self.ob_dim, device=self.device)\n\n self.iteration = 0 # used in tensorboard\n\n self.opt = torch.optim.Adam\n self.loss = {}\n\n self.debug_mode = debug_mode\n\n self.tensorboard_writer = None\n self.tensorboard_comment = (\n \"_\" + tensorboard_comment if tensorboard_comment else \"\"\n )\n self.tensorboard_dir = tensorboard_dir\n\n def run_tensorboard_if_needed(self):\n if self.tensorboard_writer is None and (self.tensorboard_dir is not None):\n self.tensorboard_writer = TensorboardWriter(\n env_name=self.env_name,\n log_dir=self.tensorboard_dir,\n filename=self.filename,\n render=self.render,\n )\n\n def log_obs_mean_std_tensorboard(self):\n \"\"\"\n Log mean and std of observations in the tensorboard.\n \"\"\"\n self.run_tensorboard_if_needed()\n self.tensorboard_writer.log_obs_mean_std(\n self.iteration, self.obs_mean, self.obs_std\n )\n\n def update_obs_mean_std(self, buffer: Memory) -> Memory:\n \"\"\"\n Update running average of mean and stds based on the buffer.\n\n Args:\n buffer (Memory)\n\n Returns:\n Memory\n \"\"\"\n buffer.update_obs_mean_std()\n self.obs_mean = buffer.obs_mean\n self.obs_std = buffer.obs_std\n\n if self.debug_mode and self.tensorboard_dir is not None:\n self.log_obs_mean_std_tensorboard()\n return buffer\n\n\nclass RL(MetaLearner):\n def __init__(\n self,\n env_name: str = config.ENV_NAME,\n gamma: float = config.GAMMA,\n stats_freq: int = config.STATS_FREQ,\n test_episodes: int = config.TEST_EPISODES,\n batch_size: int = config.BATCH_SIZE,\n iterations: int = config.ITERATIONS,\n max_frames: int = None,\n return_done: Union[int, None] = config.RETURN_DONE,\n log_dir: str = config.LOG_DIR,\n use_gpu: bool = config.USE_GPU,\n verbose: int = config.VERBOSE,\n render: bool = config.RENDER,\n *args,\n **kwargs,\n ):\n f\"\"\"Basic parent class for 
reinforcement learning algorithms.\n\n Args:\n env_name (str, optional): Name of the gym environment.\n Defaults to { config.ENV_NAME }.\n gamma (float, optional): Discount factor. Defaults to { config.GAMMA }.\n stats_freq (int, optional): Frequency of logging the progress.\n Defaults to { config.STATS_FREQ }.\n batch_size (int, optional): Number of frames used for one algorithm step\n (could be higher because batch collection stops when rollout ends).\n Defaults to { config.BATCH_SIZE }.\n iterations (int, optional): Number of algorithms iterations.\n Defaults to { config.ITERATIONS }.\n max_frames (int, optional): Limit of frames for training. Defaults to\n { None }.\n return_done (Union[int, None], optional): target return, which will stop\n training if reached. Defaults to { config.RETURN_DONE }.\n log_dir (str, optional): Path for basic logs which includes final model.\n Defaults to { config.LOG_DIR }.\n use_gpu (bool, optional): Use CUDA. Defaults to { config.USE_GPU }.\n verbose (int, optional): Verbose level. Defaults to { config.VERBOSE }.\n render (bool, optional): Render rollouts to tensorboard.\n Defaults to { config.RENDER }.\n debug_mode (bool, optional): Log additional info.\n Defaults to { config.DEBUG_MODE }\n tensorboard_dir (Union[str, None], optional): Path to tensorboard logs.\n Defaults to { config.TENSORBOARD_DIR }.\n tensorboard_comment (str, optional): Comment for tensorboard files.\n Defaults to { config.TENSORBOARD_COMMENT }.\n \"\"\"\n super().__init__(env_name, use_gpu, *args, **kwargs)\n assert iterations > 0, f\"Iteration has to be positive not {iterations}\"\n if max_frames is not None:\n assert (\n max_frames <= iterations * batch_size\n ), \"max_frames should be smaller or equal than iterations * batch_size\"\n\n self.max_frames = max_frames\n self.gamma = gamma\n self.stats_freq = stats_freq\n self.test_episodes = test_episodes\n self.batch_size = batch_size\n self.iterations = iterations\n self.return_done = return_done\n if log_dir is not None:\n self.log_dir = Path(log_dir)\n self.log_dir.mkdir(parents=True, exist_ok=True)\n else:\n self.log_dir = log_dir\n self.verbose = verbose\n self.render = render\n\n self.max_ep_len = self.env._max_episode_steps\n\n self.start_time = utils.get_time()\n\n self.hparams = {\n \"hparams/gamma\": self.gamma,\n \"hparams/batch_size\": self.batch_size,\n \"hparams/type\": utils.get_pretty_type_name(self),\n }\n self.shortnames = config.SHORTNAMES\n self.stats_logger = StatsLogger()\n\n def train(self, iterations=None):\n f\"\"\" Train RL model\n\n Args:\n iterations ([type], optional): Number of additional training iterations.\n If None performs number of iterations defined in self.iterations.\n Otherwise increase global counter by this value to run additional steps.\n Defaults to { None }.\n \"\"\"\n self.run_tensorboard_if_needed()\n if iterations:\n self.iterations += iterations\n\n while self.iteration < self.iterations:\n buffer, time_diff = self.perform_iteration()\n self.stats_logger.time_list.append(time_diff)\n running_return = self.stats_logger.calc_running_return(buffer)\n\n if self.return_done is not None and running_return >= self.return_done:\n break\n\n if self.iteration % self.stats_freq == 0:\n self.logs_after_iteration(buffer)\n\n if self.log_dir is not None:\n self.stats_logger.dump_stats(self.log_path)\n\n self.iteration += 1 # used also for logs\n if (\n self.max_frames is not None\n and self.max_frames < self.stats_logger.frames\n ):\n logger.info(f\"Reached max_frames at {self.iteration} 
iteration\") # INFO\n break\n\n self.logs_after_iteration(buffer, done=True)\n\n if self.log_dir is not None:\n self.save()\n\n def test(self, episodes=None):\n f\"\"\"Test policy\n\n Args:\n episodes (int): Number of episodes. Defaults to { None }.\n\n Returns:\n float: mean episode reward\n \"\"\"\n mean_reward = None\n return mean_reward\n\n @utils.measure_time\n def perform_iteration(self):\n raise NotImplementedError\n\n def save_model(self):\n raise NotImplementedError\n\n def check_path(self, path):\n if self.filename is None and path is None:\n raise AttributeError\n elif path is None:\n path = str(self.log_path) + \".pkl\"\n return path\n\n def collect_params_dict(self):\n params_dict = {}\n params_dict[\"actor\"] = self.actor.state_dict()\n params_dict[\"critic\"] = self.critic.state_dict()\n params_dict[\"obs_mean\"] = self.obs_mean\n params_dict[\"obs_std\"] = self.obs_std\n return params_dict\n\n def apply_params_dict(self, params_dict):\n self.actor.load_state_dict(params_dict[\"actor\"])\n self.critic.load_state_dict(params_dict[\"critic\"])\n self.obs_mean = params_dict[\"obs_mean\"]\n self.obs_std = params_dict[\"obs_std\"]\n\n def save(self, path: str = None):\n f\"\"\"Save RL object\n\n Args:\n path (str): Path to save\n \"\"\"\n path = self.check_path(path)\n with open(path, \"wb\") as f:\n params_dict = self.collect_params_dict()\n pkl.dump(params_dict, f)\n\n def load(self, path: str):\n \"\"\"Load RL object\n\n Args:\n path (str): Path to saved RL object\n \"\"\"\n path = self.check_path(path)\n with open(path, \"rb\") as f:\n params_dict = pkl.load(f)\n self.apply_params_dict(params_dict)\n\n @property\n def log_iteration(self):\n return self.iteration // self.stats_freq\n\n @property\n def filename(self):\n suffix = self.get_tensorboard_hparams_suffix()\n suffix += self.tensorboard_comment\n filename = self.start_time + suffix\n return filename\n\n @property\n def log_path(self):\n log_path = Path(self.log_dir)\n log_path = log_path / self.filename\n return log_path\n\n def logs_after_iteration(self, buffer: Memory, done: bool = False):\n f\"\"\"Logs writer\n\n Args:\n buffer (Memory): Buffer used for tensorboard\n done (bool, optional): Finalize tensorboard logging due to last iteration.\n Defaults to { False }.\n \"\"\"\n if self.test_episodes is not None:\n self.stats_logger.test_return = self.test()\n\n running_return = self.stats_logger.running_return\n if self.verbose:\n if done:\n self.stats_logger.task_done(self.iteration)\n else:\n self.stats_logger.log_stats(self.iteration)\n\n self.stats_logger.stats.append([self.iteration, running_return])\n self.stats_logger.reset_time_list()\n\n if self.tensorboard_writer is not None:\n self.add_tensorboard_logs(buffer, done)\n\n def add_tensorboard_logs(self, buffer: Memory, done: bool):\n self.tensorboard_writer.log_running_return(\n self.iteration,\n self.stats_logger.frames,\n self.stats_logger.rollouts,\n self.stats_logger.running_return,\n )\n if self.test_episodes:\n self.tensorboard_writer.log_test_return(\n self.iteration,\n self.stats_logger.frames,\n self.stats_logger.rollouts,\n self.stats_logger.test_return,\n )\n\n if (self.log_iteration % 5) == 0 or done:\n _, rendering_time = self.tensorboard_writer.record_episode(\n self, self.iteration, done\n )\n self.tensorboard_writer.log_returns(self.iteration, buffer)\n self.tensorboard_writer.log_actions(self.iteration, buffer)\n self.tensorboard_writer.log_observations(self.iteration, buffer)\n self.tensorboard_writer.log_loss(self.iteration, self.loss)\n\n 
def get_tensorboard_hparams_suffix(self):\n suffix = \"\"\n for key, val in self.hparams.items():\n if key in self.shortnames.keys():\n key = self.shortnames[key]\n else:\n key = key.split(\"/\")[1]\n if isinstance(val, float):\n val = f\"{val:.2}\"\n else:\n val = str(val)\n suffix += f\"-{key}{val}\"\n\n return suffix\n\n def _get_initial_obs_mean_std(\n self, obs_norm: Any\n ) -> Tuple[Optional[torch.tensor], Optional[torch.tensor]]:\n f\"\"\"\n Check if observations are normalized and if so return initial mean and std,\n None otherwise.\n\n Returns:\n Tuple[Optional[torch.tensor], Optional[torch.tensor]]: obs mean and std\n \"\"\"\n if obs_norm:\n obs_mean = torch.zeros(self.ob_dim, device=self.device)\n obs_std = torch.ones(self.ob_dim, device=self.device)\n else:\n obs_mean = None\n obs_std = None\n return obs_mean, obs_std\n"
] | [
[
"torch.ones",
"torch.tensor",
"torch.cuda.is_available",
"torch.zeros",
"torch.device"
]
] |
samggreenberg/pynndescent | [
"f97bc2fe01e4e59c5dad20ed23b9cb47e8182b6c"
] | [
"pynndescent/utils.py"
] | [
"# Author: Leland McInnes <[email protected]>\n#\n# License: BSD 2 clause\n\nimport time\n\nimport numba\nfrom numba.core import types\nimport numba.experimental.structref as structref\nimport numpy as np\n\n\[email protected](\"void(i8[:], i8)\", cache=True)\ndef seed(rng_state, seed):\n \"\"\"Seed the random number generator with a given seed.\"\"\"\n rng_state.fill(seed + 0xFFFF)\n\n\[email protected](\"i4(i8[:])\", cache=True)\ndef tau_rand_int(state):\n \"\"\"A fast (pseudo)-random number generator.\n\n Parameters\n ----------\n state: array of int64, shape (3,)\n The internal state of the rng\n\n Returns\n -------\n A (pseudo)-random int32 value\n \"\"\"\n state[0] = (((state[0] & 4294967294) << 12) & 0xFFFFFFFF) ^ (\n (((state[0] << 13) & 0xFFFFFFFF) ^ state[0]) >> 19\n )\n state[1] = (((state[1] & 4294967288) << 4) & 0xFFFFFFFF) ^ (\n (((state[1] << 2) & 0xFFFFFFFF) ^ state[1]) >> 25\n )\n state[2] = (((state[2] & 4294967280) << 17) & 0xFFFFFFFF) ^ (\n (((state[2] << 3) & 0xFFFFFFFF) ^ state[2]) >> 11\n )\n\n return state[0] ^ state[1] ^ state[2]\n\n\[email protected](\"f4(i8[:])\", cache=True)\ndef tau_rand(state):\n \"\"\"A fast (pseudo)-random number generator for floats in the range [0,1]\n\n Parameters\n ----------\n state: array of int64, shape (3,)\n The internal state of the rng\n\n Returns\n -------\n A (pseudo)-random float32 in the interval [0, 1]\n \"\"\"\n integer = tau_rand_int(state)\n return abs(float(integer) / 0x7FFFFFFF)\n\n\[email protected](\n [\n \"f4(f4[::1])\",\n numba.types.float32(\n numba.types.Array(numba.types.float32, 1, \"C\", readonly=True)\n ),\n ],\n locals={\n \"dim\": numba.types.intp,\n \"i\": numba.types.uint32,\n # \"result\": numba.types.float32, # This provides speed, but causes errors in corner cases\n },\n fastmath=True,\n cache=True,\n)\ndef norm(vec):\n \"\"\"Compute the (standard l2) norm of a vector.\n\n Parameters\n ----------\n vec: array of shape (dim,)\n\n Returns\n -------\n The l2 norm of vec.\n \"\"\"\n result = 0.0\n dim = vec.shape[0]\n for i in range(dim):\n result += vec[i] * vec[i]\n return np.sqrt(result)\n\n\[email protected](cache=True)\ndef rejection_sample(n_samples, pool_size, rng_state):\n \"\"\"Generate n_samples many integers from 0 to pool_size such that no\n integer is selected twice. 
The duplication constraint is achieved via\n rejection sampling.\n\n Parameters\n ----------\n n_samples: int\n The number of random samples to select from the pool\n\n pool_size: int\n The size of the total pool of candidates to sample from\n\n rng_state: array of int64, shape (3,)\n Internal state of the random number generator\n\n Returns\n -------\n sample: array of shape(n_samples,)\n The ``n_samples`` randomly selected elements from the pool.\n \"\"\"\n result = np.empty(n_samples, dtype=np.int64)\n for i in range(n_samples):\n reject_sample = True\n j = 0\n while reject_sample:\n j = tau_rand_int(rng_state) % pool_size\n for k in range(i):\n if j == result[k]:\n break\n else:\n reject_sample = False\n result[i] = j\n return result\n\n\[email protected]\nclass HeapType(types.StructRef):\n pass\n\n\nclass Heap(structref.StructRefProxy):\n @property\n def indices(self):\n return Heap_get_indices(self)\n\n @property\n def distances(self):\n return Heap_get_distances(self)\n\n @property\n def flags(self):\n return Heap_get_flags(self)\n\n\[email protected](cache=True)\ndef Heap_get_flags(self):\n return self.flags\n\n\[email protected](cache=True)\ndef Heap_get_distances(self):\n return self.distances\n\n\[email protected](cache=True)\ndef Heap_get_indices(self):\n return self.indices\n\n\nstructref.define_proxy(Heap, HeapType, [\"indices\", \"distances\", \"flags\"])\n\n# Heap = namedtuple(\"Heap\", (\"indices\", \"distances\", \"flags\"))\n\n\[email protected](cache=True)\ndef make_heap(n_points, size):\n \"\"\"Constructor for the numba enabled heap objects. The heaps are used\n for approximate nearest neighbor search, maintaining a list of potential\n neighbors sorted by their distance. We also flag if potential neighbors\n are newly added to the list or not. Internally this is stored as\n a single ndarray; the first axis determines whether we are looking at the\n array of candidate graph_indices, the array of distances, or the flag array for\n whether elements are new or not. Each of these arrays are of shape\n (``n_points``, ``size``)\n\n Parameters\n ----------\n n_points: int\n The number of graph_data points to track in the heap.\n\n size: int\n The number of items to keep on the heap for each graph_data point.\n\n Returns\n -------\n heap: An ndarray suitable for passing to other numba enabled heap functions.\n \"\"\"\n indices = np.full((int(n_points), int(size)), -1, dtype=np.int32)\n distances = np.full((int(n_points), int(size)), np.infty, dtype=np.float32)\n flags = np.zeros((int(n_points), int(size)), dtype=np.uint8)\n result = (indices, distances, flags)\n\n return result\n\n\[email protected](cache=True)\ndef siftdown(heap1, heap2, elt):\n \"\"\"Restore the heap property for a heap with an out of place element\n at position ``elt``. This works with a heap pair where heap1 carries\n the weights and heap2 holds the corresponding elements.\"\"\"\n while elt * 2 + 1 < heap1.shape[0]:\n left_child = elt * 2 + 1\n right_child = left_child + 1\n swap = elt\n\n if heap1[swap] < heap1[left_child]:\n swap = left_child\n\n if right_child < heap1.shape[0] and heap1[swap] < heap1[right_child]:\n swap = right_child\n\n if swap == elt:\n break\n else:\n heap1[elt], heap1[swap] = heap1[swap], heap1[elt]\n heap2[elt], heap2[swap] = heap2[swap], heap2[elt]\n elt = swap\n\n\[email protected](parallel=True, cache=False)\ndef deheap_sort(indices, distances):\n \"\"\"Given two arrays representing a heap (indices and distances), reorder the \n arrays by increasing distance. 
This is effectively just the second half of\n heap sort (the first half not being required since we already have the\n graph_data in a heap).\n\n Note that this is done in-place.\n\n Parameters\n ----------\n indices : array of shape (n_samples, n_neighbors)\n The graph indices to sort by distance.\n distances : array of shape (n_samples, n_neighbors)\n The corresponding edge distance.\n\n Returns\n -------\n indices, distances: arrays of shape (n_samples, n_neighbors)\n The indices and distances sorted by increasing distance.\n \"\"\"\n for i in numba.prange(indices.shape[0]):\n # starting from the end of the array and moving back\n for j in range(indices.shape[1] - 1, 0, -1):\n indices[i, 0], indices[i, j] = indices[i, j], indices[i, 0]\n distances[i, 0], distances[i, j] = distances[i, j], distances[i, 0]\n\n siftdown(distances[i, :j], indices[i, :j], 0)\n\n return indices, distances\n\n\n# @numba.njit()\n# def smallest_flagged(heap, row):\n# \"\"\"Search the heap for the smallest element that is\n# still flagged.\n#\n# Parameters\n# ----------\n# heap: array of shape (3, n_samples, n_neighbors)\n# The heaps to search\n#\n# row: int\n# Which of the heaps to search\n#\n# Returns\n# -------\n# index: int\n# The index of the smallest flagged element\n# of the ``row``th heap, or -1 if no flagged\n# elements remain in the heap.\n# \"\"\"\n# ind = heap[0][row]\n# dist = heap[1][row]\n# flag = heap[2][row]\n#\n# min_dist = np.inf\n# result_index = -1\n#\n# for i in range(ind.shape[0]):\n# if flag[i] == 1 and dist[i] < min_dist:\n# min_dist = dist[i]\n# result_index = i\n#\n# if result_index >= 0:\n# flag[result_index] = 0.0\n# return int(ind[result_index])\n# else:\n# return -1\n\n\[email protected](parallel=True, locals={\"idx\": numba.types.int64}, cache=False)\ndef new_build_candidates(current_graph, max_candidates, rng_state, n_threads):\n \"\"\"Build a heap of candidate neighbors for nearest neighbor descent. 
For\n each vertex the candidate neighbors are any current neighbors, and any\n vertices that have the vertex as one of their nearest neighbors.\n\n Parameters\n ----------\n current_graph: heap\n The current state of the graph for nearest neighbor descent.\n\n max_candidates: int\n The maximum number of new candidate neighbors.\n\n rng_state: array of int64, shape (3,)\n The internal state of the rng\n\n Returns\n -------\n candidate_neighbors: A heap with an array of (randomly sorted) candidate\n neighbors for each vertex in the graph.\n \"\"\"\n current_indices = current_graph[0]\n current_flags = current_graph[2]\n\n n_vertices = current_indices.shape[0]\n n_neighbors = current_indices.shape[1]\n\n new_candidate_indices = np.full((n_vertices, max_candidates), -1, dtype=np.int32)\n new_candidate_priority = np.full(\n (n_vertices, max_candidates), np.inf, dtype=np.float32\n )\n\n old_candidate_indices = np.full((n_vertices, max_candidates), -1, dtype=np.int32)\n old_candidate_priority = np.full(\n (n_vertices, max_candidates), np.inf, dtype=np.float32\n )\n\n for n in numba.prange(n_threads):\n local_rng_state = rng_state + n\n for i in range(n_vertices):\n for j in range(n_neighbors):\n idx = current_indices[i, j]\n isn = current_flags[i, j]\n\n if idx < 0:\n continue\n\n d = tau_rand(local_rng_state)\n\n if isn:\n if i % n_threads == n:\n checked_heap_push(\n new_candidate_priority[i], new_candidate_indices[i], d, idx\n )\n if idx % n_threads == n:\n checked_heap_push(\n new_candidate_priority[idx],\n new_candidate_indices[idx],\n d,\n i,\n )\n else:\n if i % n_threads == n:\n checked_heap_push(\n old_candidate_priority[i], old_candidate_indices[i], d, idx\n )\n if idx % n_threads == n:\n checked_heap_push(\n old_candidate_priority[idx],\n old_candidate_indices[idx],\n d,\n i,\n )\n\n indices = current_graph[0]\n flags = current_graph[2]\n\n for i in numba.prange(n_vertices):\n for j in range(n_neighbors):\n idx = indices[i, j]\n\n for k in range(max_candidates):\n if new_candidate_indices[i, k] == idx:\n flags[i, j] = 0\n break\n\n return new_candidate_indices, old_candidate_indices\n\n\[email protected](\"b1(u1[::1],i4)\", cache=True)\ndef has_been_visited(table, candidate):\n loc = candidate >> 3\n mask = 1 << (candidate & 7)\n return table[loc] & mask\n\n\[email protected](\"void(u1[::1],i4)\", cache=True)\ndef mark_visited(table, candidate):\n loc = candidate >> 3\n mask = 1 << (candidate & 7)\n table[loc] |= mask\n return\n\n\[email protected](\n \"i4(f4[::1],i4[::1],f4,i4)\",\n fastmath=True,\n locals={\n \"size\": numba.types.intp,\n \"i\": numba.types.uint16,\n \"ic1\": numba.types.uint16,\n \"ic2\": numba.types.uint16,\n \"i_swap\": numba.types.uint16,\n },\n cache=True,\n)\ndef simple_heap_push(priorities, indices, p, n):\n if p >= priorities[0]:\n return 0\n\n size = priorities.shape[0]\n\n # insert val at position zero\n priorities[0] = p\n indices[0] = n\n\n # descend the heap, swapping values until the max heap criterion is met\n i = 0\n while True:\n ic1 = 2 * i + 1\n ic2 = ic1 + 1\n\n if ic1 >= size:\n break\n elif ic2 >= size:\n if priorities[ic1] > p:\n i_swap = ic1\n else:\n break\n elif priorities[ic1] >= priorities[ic2]:\n if p < priorities[ic1]:\n i_swap = ic1\n else:\n break\n else:\n if p < priorities[ic2]:\n i_swap = ic2\n else:\n break\n\n priorities[i] = priorities[i_swap]\n indices[i] = indices[i_swap]\n\n i = i_swap\n\n priorities[i] = p\n indices[i] = n\n\n return 1\n\n\[email protected](\n \"i4(f4[::1],i4[::1],f4,i4)\",\n fastmath=True,\n locals={\n 
\"size\": numba.types.intp,\n \"i\": numba.types.uint16,\n \"ic1\": numba.types.uint16,\n \"ic2\": numba.types.uint16,\n \"i_swap\": numba.types.uint16,\n },\n cache=True,\n)\ndef checked_heap_push(priorities, indices, p, n):\n if p >= priorities[0]:\n return 0\n\n size = priorities.shape[0]\n\n # break if we already have this element.\n for i in range(size):\n if n == indices[i]:\n return 0\n\n # insert val at position zero\n priorities[0] = p\n indices[0] = n\n\n # descend the heap, swapping values until the max heap criterion is met\n i = 0\n while True:\n ic1 = 2 * i + 1\n ic2 = ic1 + 1\n\n if ic1 >= size:\n break\n elif ic2 >= size:\n if priorities[ic1] > p:\n i_swap = ic1\n else:\n break\n elif priorities[ic1] >= priorities[ic2]:\n if p < priorities[ic1]:\n i_swap = ic1\n else:\n break\n else:\n if p < priorities[ic2]:\n i_swap = ic2\n else:\n break\n\n priorities[i] = priorities[i_swap]\n indices[i] = indices[i_swap]\n\n i = i_swap\n\n priorities[i] = p\n indices[i] = n\n\n return 1\n\n\[email protected](\n \"i4(f4[::1],i4[::1],u1[::1],f4,i4,u1)\",\n fastmath=True,\n locals={\n \"size\": numba.types.intp,\n \"i\": numba.types.uint16,\n \"ic1\": numba.types.uint16,\n \"ic2\": numba.types.uint16,\n \"i_swap\": numba.types.uint16,\n },\n cache=True,\n)\ndef checked_flagged_heap_push(priorities, indices, flags, p, n, f):\n if p >= priorities[0]:\n return 0\n\n size = priorities.shape[0]\n\n # break if we already have this element.\n for i in range(size):\n if n == indices[i]:\n return 0\n\n # insert val at position zero\n priorities[0] = p\n indices[0] = n\n flags[0] = f\n\n # descend the heap, swapping values until the max heap criterion is met\n i = 0\n while True:\n ic1 = 2 * i + 1\n ic2 = ic1 + 1\n\n if ic1 >= size:\n break\n elif ic2 >= size:\n if priorities[ic1] > p:\n i_swap = ic1\n else:\n break\n elif priorities[ic1] >= priorities[ic2]:\n if p < priorities[ic1]:\n i_swap = ic1\n else:\n break\n else:\n if p < priorities[ic2]:\n i_swap = ic2\n else:\n break\n\n priorities[i] = priorities[i_swap]\n indices[i] = indices[i_swap]\n flags[i] = flags[i_swap]\n\n i = i_swap\n\n priorities[i] = p\n indices[i] = n\n flags[i] = f\n\n return 1\n\n\[email protected](\n parallel=True,\n locals={\n \"p\": numba.int32,\n \"q\": numba.int32,\n \"d\": numba.float32,\n \"added\": numba.uint8,\n \"n\": numba.uint32,\n \"i\": numba.uint32,\n \"j\": numba.uint32,\n },\n cache=False,\n)\ndef apply_graph_updates_low_memory(current_graph, updates, n_threads):\n\n n_changes = 0\n priorities = current_graph[1]\n indices = current_graph[0]\n flags = current_graph[2]\n # n_threads = numba.get_num_threads()\n\n for n in numba.prange(n_threads):\n for i in range(len(updates)):\n for j in range(len(updates[i])):\n p, q, d = updates[i][j]\n\n if p == -1 or q == -1:\n continue\n\n if p % n_threads == n:\n added = checked_flagged_heap_push(\n priorities[p], indices[p], flags[p], d, q, 1\n )\n n_changes += added\n\n if q % n_threads == n:\n added = checked_flagged_heap_push(\n priorities[q], indices[q], flags[q], d, p, 1\n )\n n_changes += added\n\n return n_changes\n\n\[email protected](locals={\"p\": numba.types.int64, \"q\": numba.types.int64}, cache=True)\ndef apply_graph_updates_high_memory(current_graph, updates, in_graph):\n\n n_changes = 0\n\n for i in range(len(updates)):\n for j in range(len(updates[i])):\n p, q, d = updates[i][j]\n\n if p == -1 or q == -1:\n continue\n\n if q in in_graph[p] and p in in_graph[q]:\n continue\n elif q in in_graph[p]:\n pass\n else:\n added = checked_flagged_heap_push(\n 
current_graph[1][p],\n current_graph[0][p],\n current_graph[2][p],\n d,\n q,\n 1,\n )\n\n if added > 0:\n in_graph[p].add(q)\n n_changes += added\n\n if p == q or p in in_graph[q]:\n pass\n else:\n added = checked_flagged_heap_push(\n current_graph[1][p],\n current_graph[0][p],\n current_graph[2][p],\n d,\n q,\n 1,\n )\n\n if added > 0:\n in_graph[q].add(p)\n n_changes += added\n\n return n_changes\n\n\[email protected](cache=True)\ndef initalize_heap_from_graph_indices(heap, graph_indices, data, metric):\n\n for i in range(graph_indices.shape[0]):\n for idx in range(graph_indices.shape[1]):\n j = graph_indices[i, idx]\n if j >= 0:\n d = metric(data[i], data[j])\n checked_flagged_heap_push(heap[1][i], heap[0][i], heap[2][i], d, j, 1)\n\n return heap\n\n\[email protected](parallel=True, cache=False)\ndef sparse_initalize_heap_from_graph_indices(\n heap, graph_indices, data_indptr, data_indices, data_vals, metric\n):\n\n for i in numba.prange(graph_indices.shape[0]):\n for idx in range(graph_indices.shape[1]):\n j = graph_indices[i, idx]\n ind1 = data_indices[data_indptr[i] : data_indptr[i + 1]]\n data1 = data_vals[data_indptr[i] : data_indptr[i + 1]]\n ind2 = data_indices[data_indptr[j] : data_indptr[j + 1]]\n data2 = data_vals[data_indptr[j] : data_indptr[j + 1]]\n d = metric(ind1, data1, ind2, data2)\n checked_flagged_heap_push(heap[1][i], heap[0][i], heap[2][i], d, j, 1)\n\n return heap\n\n\n# Generates a timestamp for use in logging messages when verbose=True\ndef ts():\n return time.ctime(time.time())\n"
] | [
[
"numpy.sqrt",
"numpy.full",
"numpy.empty"
]
] |
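A small, hedged sketch of the flat-heap helpers in the pynndescent/utils.py record above; the call pattern (push candidates into row 0, then sort) is illustrative only.

```python
# Sketch: make_heap() returns (indices, distances, flags) arrays, simple_heap_push()
# keeps the `size` smallest distances per row, and deheap_sort() orders each row.
import numpy as np
from pynndescent.utils import make_heap, simple_heap_push, deheap_sort

indices, distances, flags = make_heap(3, 4)          # 3 points, 4 neighbours each
for d, j in [(0.9, 7), (0.2, 11), (0.5, 3), (0.1, 40), (0.8, 5)]:
    simple_heap_push(distances[0], indices[0], np.float32(d), np.int32(j))
indices, distances = deheap_sort(indices, distances)
print(indices[0], distances[0])                      # row 0, sorted by distance
```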
RonnyLV/PRNet | [
"0c2ded7042ceee2b2f9bba02bc19d91d4c3993c5"
] | [
"prnet/utils/render_app.py"
] | [
"import numpy as np\nfrom prnet.utils.render import vis_of_vertices, render_texture\nfrom scipy import ndimage\n\ndef get_visibility(vertices, triangles, h, w):\n triangles = triangles.T\n vertices_vis = vis_of_vertices(vertices.T, triangles, h, w)\n vertices_vis = vertices_vis.astype(bool)\n for k in range(2):\n tri_vis = vertices_vis[triangles[0,:]] | vertices_vis[triangles[1,:]] | vertices_vis[triangles[2,:]]\n ind = triangles[:, tri_vis]\n vertices_vis[ind] = True\n # for k in range(2):\n # tri_vis = vertices_vis[triangles[0,:]] & vertices_vis[triangles[1,:]] & vertices_vis[triangles[2,:]]\n # ind = triangles[:, tri_vis]\n # vertices_vis[ind] = True\n vertices_vis = vertices_vis.astype(np.float32) #1 for visible and 0 for non-visible\n return vertices_vis\n\ndef get_uv_mask(vertices_vis, triangles, uv_coords, h, w, resolution):\n triangles = triangles.T\n vertices_vis = vertices_vis.astype(np.float32)\n uv_mask = render_texture(uv_coords.T, vertices_vis[np.newaxis, :], triangles, resolution, resolution, 1)\n uv_mask = np.squeeze(uv_mask > 0)\n uv_mask = ndimage.binary_closing(uv_mask)\n uv_mask = ndimage.binary_erosion(uv_mask, structure = np.ones((4,4))) \n uv_mask = ndimage.binary_closing(uv_mask)\n uv_mask = ndimage.binary_erosion(uv_mask, structure = np.ones((4,4))) \n uv_mask = ndimage.binary_erosion(uv_mask, structure = np.ones((4,4))) \n uv_mask = ndimage.binary_erosion(uv_mask, structure = np.ones((4,4))) \n uv_mask = uv_mask.astype(np.float32)\n\n return np.squeeze(uv_mask)\n\ndef get_depth_image(vertices, triangles, h, w, isShow = False):\n z = vertices[:, 2:]\n if isShow:\n z = z/max(z)\n depth_image = render_texture(vertices.T, z.T, triangles.T, h, w, 1)\n return np.squeeze(depth_image)"
] | [
[
"scipy.ndimage.binary_closing",
"numpy.ones",
"numpy.squeeze"
]
] |
miramirakim227/SwapNeRF_single_GT | [
"55a842ec4155fa782ca1c48b5c6863aeca8ca295"
] | [
"im2scene/camera.py"
] | [
"import numpy as np\nimport torch\nfrom scipy.spatial.transform import Rotation as Rot\nimport pdb \nimport math \n\ndef get_camera_mat(fov=49.13, invert=True):\n # fov = 2 * arctan( sensor / (2 * focal))\n # focal = (sensor / 2) * 1 / (tan(0.5 * fov))\n # in our case, sensor = 2 as pixels are in [-1, 1]\n focal = 1. / np.tan(0.5 * fov * np.pi/180.)\n focal = focal.astype(np.float32)\n mat = torch.tensor([\n [focal, 0., 0., 0.],\n [0., focal, 0., 0.],\n [0., 0., 1, 0.],\n [0., 0., 0., 1.]\n ]).reshape(1, 4, 4)\n\n if invert:\n mat = torch.inverse(mat)\n return mat\n\n\ndef get_random_pose(u, v, range_radius, batch_size=16, # batch size 유동적으로 바꿀 수 있도록!\n invert=False): \n # edit mira start \n if isinstance(u, int):\n device = 'cpu'\n u = torch.zeros(batch_size,).to(device)\n v = torch.ones(batch_size,).to(device) * 0.25\n loc = sample_on_sphere(u, v, size=(batch_size))\n radius = range_radius[0] + \\\n torch.rand(batch_size) * (range_radius[1] - range_radius[0])\n if loc.is_cuda:\n radius = radius.cuda()\n loc = loc * radius.unsqueeze(-1)\n R = look_at(loc)\n RT = torch.eye(4).reshape(1, 4, 4).repeat(batch_size, 1, 1)\n RT[:, :3, :3] = R\n RT[:, :3, -1] = loc\n\n if invert:\n RT = torch.inverse(RT)\n return radius, RT\n\n\ndef get_middle_pose(range_u, range_v, range_radius, batch_size=32,\n invert=False):\n u_m, u_v, r_v = sum(range_u) * 0.5, sum(range_v) * \\\n 0.5, sum(range_radius) * 0.5\n loc = sample_on_sphere((u_m, u_m), (u_v, u_v), size=(batch_size))\n radius = torch.ones(batch_size) * r_v\n loc = loc * radius.unsqueeze(-1)\n R = look_at(loc)\n RT = torch.eye(4).reshape(1, 4, 4).repeat(batch_size, 1, 1)\n RT[:, :3, :3] = R\n RT[:, :3, -1] = loc\n\n if invert:\n RT = torch.inverse(RT)\n return RT\n\n\ndef get_camera_pose(range_u, range_v, range_r, val_u=0.5, val_v=0.5, val_r=0.5,\n batch_size=32, invert=False):\n u0, ur = range_u[0], range_u[1] - range_u[0]\n v0, vr = range_v[0], range_v[1] - range_v[0]\n r0, rr = range_r[0], range_r[1] - range_r[0]\n u = u0 + val_u * ur\n v = v0 + val_v * vr\n r = r0 + val_r * rr\n\n loc = sample_on_sphere((u, u), (v, v), size=(batch_size))\n radius = torch.ones(batch_size) * r\n loc = loc * radius.unsqueeze(-1)\n R = look_at(loc)\n RT = torch.eye(4).reshape(1, 4, 4).repeat(batch_size, 1, 1)\n RT[:, :3, :3] = R\n RT[:, :3, -1] = loc\n\n if invert:\n RT = torch.inverse(RT)\n return RT\n\n# edit: np -> torch \ndef to_sphere(u, v):\n theta = 2 * math.pi * u\n phi = torch.arccos(1 - 2 * v)\n cx = torch.sin(phi) * torch.cos(theta)\n cy = torch.sin(phi) * torch.sin(theta)\n cz = torch.cos(phi)\n return torch.stack([cx, cy, cz], dim=-1)\n\n\ndef sample_on_sphere(u=None, v=None, size=(1,),\n to_pytorch=True): # range_u (0, 0) range_v (0.25, 0.25)\n sample = to_sphere(u, v) # sample expect to be (16, 3)\n if to_pytorch:\n sample = torch.tensor(sample).float()\n\n return sample\n\n\ndef look_at(eye, at=np.array([0, 0, 0]), up=np.array([0, 0, 1]), eps=1e-5,\n to_pytorch=True):\n at = at.reshape(1, 3)\n up = up.reshape(1, 3)\n eye = eye.reshape(-1, 3)\n if isinstance(eye, torch.Tensor):\n if eye.is_cuda:\n device=torch.device('cuda:0')\n else:\n device=torch.device('cpu') # array \n at = torch.tensor(at).to(device).float()\n up = torch.tensor(up).to(device).float()\n \n up = up.repeat(eye.shape[0] // up.shape[0], 1)\n eps = torch.tensor([eps]).reshape(1, 1).repeat(up.shape[0], 1).to(device).float()\n\n z_axis = eye - at\n z_axis = z_axis / torch.max(torch.stack([torch.norm(z_axis,\n dim=1, keepdim=True), eps]))\n\n x_axis = torch.cross(up, z_axis)\n x_axis = 
x_axis / torch.max(torch.stack([torch.norm(x_axis,\n dim=1, keepdim=True), eps]))\n\n y_axis = torch.cross(z_axis, x_axis)\n y_axis = y_axis / torch.max(torch.stack([torch.norm(y_axis,\n dim=1, keepdim=True), eps]))\n\n r_mat = torch.cat(\n (x_axis.reshape(-1, 3, 1), y_axis.reshape(-1, 3, 1), z_axis.reshape(\n -1, 3, 1)), dim=2)\n\n else:\n print('pass here? oh my gaadd....') # 여기 안들어간다 오우쨔쓰!!\n up = up.repeat(eye.shape[0] // up.shape[0], axis = 0)\n eps = np.array([eps]).reshape(1, 1).repeat(up.shape[0], axis=0)\n\n z_axis = eye - at\n z_axis /= np.max(np.stack([np.linalg.norm(z_axis,\n axis=1, keepdims=True), eps]))\n\n x_axis = np.cross(up, z_axis)\n x_axis /= np.max(np.stack([np.linalg.norm(x_axis,\n axis=1, keepdims=True), eps]))\n\n y_axis = np.cross(z_axis, x_axis)\n y_axis /= np.max(np.stack([np.linalg.norm(y_axis,\n axis=1, keepdims=True), eps]))\n\n r_mat = np.concatenate(\n (x_axis.reshape(-1, 3, 1), y_axis.reshape(-1, 3, 1), z_axis.reshape(\n -1, 3, 1)), axis=2)\n\n if to_pytorch:\n r_mat = torch.tensor(r_mat).float()\n\n return r_mat\n\n\ndef get_rotation_matrix(axis='z', value=0., batch_size=32):\n r = Rot.from_euler(axis, value * 2 * np.pi).as_dcm()\n r = torch.from_numpy(r).reshape(1, 3, 3).repeat(batch_size, 1, 1)\n return r\n"
] | [
[
"torch.inverse",
"torch.stack",
"torch.rand",
"numpy.cross",
"torch.cross",
"torch.eye",
"torch.cos",
"torch.norm",
"torch.sin",
"torch.from_numpy",
"torch.arccos",
"scipy.spatial.transform.Rotation.from_euler",
"torch.device",
"torch.ones",
"torch.tensor",
"numpy.tan",
"numpy.linalg.norm",
"numpy.array",
"torch.zeros"
]
] |
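A hedged numeric check (not part of the record above) of the focal-length relation quoted in get_camera_mat(), where the sensor spans [-1, 1]:

```python
# fov = 2 * arctan(sensor / (2 * focal)) with sensor = 2  <=>  focal = 1 / tan(fov / 2)
import numpy as np

fov_deg = 49.13                                     # default fov in get_camera_mat()
focal = 1.0 / np.tan(0.5 * fov_deg * np.pi / 180.0)
recovered = 2.0 * np.arctan(1.0 / focal) * 180.0 / np.pi
print(round(float(focal), 3), round(float(recovered), 2))   # roughly 2.188 and 49.13
```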
gerritholl/typhon | [
"dbde147be12922ec730bd072dc4797c9da9a6d6b"
] | [
"typhon/retrieval/common.py"
] | [
"from ast import literal_eval\nimport copy\nfrom importlib import import_module\nimport json\n\nimport numpy as np\nimport pandas as pd\nfrom sklearn.pipeline import Pipeline\nfrom typhon.utils import to_array\n\n__all__ = [\n 'RetrievalProduct',\n]\n\n\nclass NotTrainedError(Exception):\n \"\"\"Should be raised if someone runs a non-trained retrieval product\n \"\"\"\n def __init__(self, *args):\n message = \"You must train this retrieval product before running it!\"\n Exception.__init__(self, message, *args)\n\n\nclass RetrievalProduct:\n \"\"\"Retrieval that can be trained with data and stored to json files\n\n This is basically a wrapper around the scikit-learn estimator and trainer\n classes and makes it possible to save the trained models as json file.\n\n To save this object to a json file, the additional package json_tricks is\n required.\n \"\"\"\n\n def __init__(self, verbose=False):\n \"\"\"Initialize a Retriever object\n\n Args:\n verbose: The higher this value is the more debug messages are\n printed. Default is False.\n \"\"\"\n\n # The trainer and/or model for this retriever:\n self.estimator = None\n self.verbose = verbose\n self._inputs = []\n self._outputs = []\n\n @property\n def inputs(self):\n return self._inputs\n\n @property\n def outputs(self):\n return self._outputs\n\n @staticmethod\n def _import_class(module_name, class_name):\n \"\"\"Import a class dynamically to the namespace\"\"\"\n mod = import_module(module_name)\n klass = getattr(mod, class_name)\n return klass\n\n @staticmethod\n def _encode_numpy(obj):\n def _to_dict(item):\n if isinstance(item, np.ndarray):\n return {\n \"__ndarray__\": item.tolist(),\n \"__dtype__\": str(item.dtype),\n \"__shape__\": item.shape,\n }\n else:\n return np.asscalar(item)\n\n def _is_numpy(item):\n return type(item).__module__ == np.__name__\n\n if isinstance(obj, dict):\n obj = obj.copy()\n iterator = obj.items()\n elif isinstance(obj, list):\n obj = obj.copy()\n iterator = enumerate(obj)\n else:\n return obj\n\n for key, value in iterator:\n if _is_numpy(value):\n obj[key] = _to_dict(value)\n elif isinstance(value, (list, dict)):\n obj[key] = RetrievalProduct._encode_numpy(value)\n\n return obj\n\n @staticmethod\n def _decode_numpy(obj):\n def _from_dict(item):\n try:\n return np.array(\n item[\"__ndarray__\"],\n dtype=item[\"__dtype__\"],\n )\n except TypeError:\n return np.array(\n item[\"__ndarray__\"],\n dtype=literal_eval(item[\"__dtype__\"]),\n )\n\n def _is_numpy(item):\n return isinstance(item, dict) and \"__ndarray__\" in item\n\n if isinstance(obj, dict):\n obj = obj.copy()\n iterator = obj.items()\n elif isinstance(obj, list):\n obj = obj.copy()\n iterator = enumerate(obj)\n else:\n return obj\n\n for key, value in iterator:\n if _is_numpy(value):\n obj[key] = _from_dict(value)\n elif isinstance(value, (list, tuple, dict)):\n obj[key] = RetrievalProduct._decode_numpy(value)\n\n return obj\n\n @staticmethod\n def _tree_to_dict(tree):\n return {\n \"module\": type(tree).__module__,\n \"class\": type(tree).__name__,\n \"coefs\": tree.__getstate__(),\n }\n\n @staticmethod\n def _tree_from_dict(dictionary, coefs):\n instance = RetrievalProduct._import_class(\n dictionary[\"module\"], dictionary[\"class\"]\n )\n tree = instance(\n to_array(coefs[\"n_features_\"]),\n to_array(coefs[\"n_classes_\"]),\n to_array(coefs[\"n_outputs_\"])\n )\n tree.__setstate__(dictionary[\"coefs\"])\n return tree\n\n @staticmethod\n def _model_to_dict(model):\n \"\"\"Convert a sklearn model object to a dictionary\"\"\"\n dictionary = {\n 
\"module\": type(model).__module__,\n \"class\": type(model).__name__,\n \"params\": model.get_params(deep=True),\n \"coefs\": {\n attr: copy.deepcopy(getattr(model, attr))\n for attr in model.__dir__()\n if not attr.startswith(\"__\") and attr.endswith(\"_\")\n }\n }\n\n if \"tree_\" in dictionary[\"coefs\"]:\n # Not funny. sklearn.tree objects are not directly\n # serializable to json. Hence, we must dump them by ourselves.\n dictionary[\"coefs\"][\"tree_\"] = RetrievalProduct._tree_to_dict(\n dictionary[\"coefs\"][\"tree_\"]\n )\n\n return RetrievalProduct._encode_numpy(dictionary)\n\n @staticmethod\n def _model_from_dict(dictionary):\n \"\"\"Create a sklearn model object from a dictionary\"\"\"\n dictionary = RetrievalProduct._decode_numpy(dictionary)\n instance = RetrievalProduct._import_class(\n dictionary[\"module\"], dictionary[\"class\"]\n )\n model = instance(**dictionary[\"params\"])\n for attr, value in dictionary[\"coefs\"].items():\n if attr == \"tree_\":\n # We must treat a tree specially:\n value = RetrievalProduct._tree_from_dict(\n value, dictionary[\"coefs\"]\n )\n try:\n setattr(model, attr, value)\n except AttributeError:\n # Some attributes cannot be set such as feature_importances_\n pass\n return model\n\n @staticmethod\n def _pipeline_to_dict(pipeline):\n \"\"\"Convert a pipeline object to a dictionary\"\"\"\n if pipeline is None:\n raise ValueError(\"No object trained!\")\n\n all_steps = {}\n for name, model in pipeline.steps:\n all_steps[name] = RetrievalProduct._model_to_dict(model)\n return all_steps\n\n @staticmethod\n def _pipeline_from_dict(dictionary):\n \"\"\"Create a pipeline object from a dictionary\"\"\"\n all_steps = []\n for name, step in dictionary.items():\n model = RetrievalProduct._model_from_dict(step)\n all_steps.append([name, model])\n\n return Pipeline(all_steps)\n\n def is_trained(self):\n \"\"\"Return true if RetrievalProduct is trained\"\"\"\n return self.estimator is not None\n\n @classmethod\n def from_dict(cls, parameter, *args, **kwargs):\n \"\"\"Load a retrieval product from a dictionary\n\n Args:\n parameter: A dictionary with the training parameters. 
Simply the\n output of :meth:`to_dict`.\n *args: Positional arguments allowed for :meth:`__init__`.\n **kwargs Keyword arguments allowed for :meth:`__init__`.\n\n Returns:\n A new :class:`RetrievalProduct` object.\n \"\"\"\n\n self = cls(*args, **kwargs)\n\n estimator = parameter.get(\"estimator\", None)\n if estimator is None:\n raise ValueError(\"Found no coefficients for estimator!\")\n\n is_pipeline = parameter[\"estimator_is_pipeline\"]\n\n if is_pipeline:\n self.estimator = self._pipeline_from_dict(estimator)\n else:\n self.estimator = self._model_from_dict(estimator)\n\n self._inputs = parameter[\"inputs\"]\n self._outputs = parameter[\"outputs\"]\n return self\n\n def to_dict(self):\n \"\"\"Dump this retrieval product to a dictionary\"\"\"\n parameter = {}\n if isinstance(self.estimator, Pipeline):\n parameter[\"estimator\"] = self._pipeline_to_dict(self.estimator)\n parameter[\"estimator_is_pipeline\"] = True\n else:\n parameter[\"estimator\"] = self._model_to_dict(self.estimator)\n parameter[\"estimator_is_pipeline\"] = False\n\n parameter[\"inputs\"] = self.inputs\n parameter[\"outputs\"] = self.outputs\n return parameter\n\n @classmethod\n def from_txt(cls, filename, *args, **kwargs):\n \"\"\"Load a retrieval product from a txt file\n\n Notes:\n The output format is not standard json!\n\n Training parameters are:\n * weights of the estimator\n * names of the input and target fields\n\n Args:\n filename: The name of file from where to load the training\n parameters.\n *args: Positional arguments allowed for :meth:`__init__`.\n **kwargs Keyword arguments allowed for :meth:`__init__`.\n\n Returns:\n A new :class:`RetrievalProduct` object.\n \"\"\"\n\n with open(filename, 'r') as infile:\n parameter = literal_eval(infile.read())\n return cls.from_dict(parameter, *args, **kwargs)\n\n def to_txt(self, filename):\n \"\"\"Save this retrieval product to a txt file\n\n Training parameters are:\n * configuration of the used estimator\n * names of the input, output, and target fields\n\n Args:\n filename: The name of the file where to store the training\n parameters.\n\n Returns:\n None\n \"\"\"\n\n with open(filename, 'w') as outfile:\n outfile.write(repr(self.to_dict()))\n\n def retrieve(self, inputs):\n \"\"\"Predict the target values for data coming from arrays\n\n Args:\n inputs: A pandas.DataFrame object. The keys must be the\n same labels as used in :meth:`train`.\n\n Returns:\n A pandas.DataFrame object with the retrieved data.\n\n Examples:\n\n .. :code-block:: python\n\n # TODO\n \"\"\"\n\n if self.estimator is None:\n raise NotTrainedError()\n\n # Skip empty datasets\n if inputs.empty:\n return None\n\n # Retrieve the data from the neural network:\n output_data = self.estimator.predict(inputs)\n\n return pd.DataFrame(data=output_data, columns=self.outputs)\n\n def score(self, inputs, targets):\n \"\"\"\n\n Args:\n inputs: A pandas.DataFrame with input data.\n targets: A pandas.DataFrame with target data.\n\n Returns:\n The metric score as a number\n \"\"\"\n if self.estimator is None:\n raise NotTrainedError()\n\n return self.estimator.score(inputs.squeeze(), targets.squeeze())\n\n def train(self, estimator, inputs, targets):\n \"\"\"Train this retriever with data from arrays\n\n Args:\n estimator: The object that will be trained. If it is a trainer\n object such as a GridSearchCV, the best estimator will be\n chosen after training. 
Can also be a Pipeline or a standard\n Estimator from scikit-learn.\n inputs: A pandas.DataFrame with input data.\n targets: A pandas.DataFrame with target data.\n\n Returns:\n A float number indicating the training score.\n \"\"\"\n\n # The input and target labels will be saved because to know what this\n # product retrieves and from what:\n self._inputs = inputs.columns.tolist()\n self._outputs = targets.columns.tolist()\n\n # Start to train!\n estimator.fit(inputs.squeeze(), targets.squeeze())\n\n # Let's check whether the estimator was a trainer object such as\n # GridSearchCV, etc. Then we save only the best estimator.\n if hasattr(estimator, \"best_estimator_\"):\n # Use the best estimator from now on:\n self.estimator = estimator.best_estimator_\n else:\n self.estimator = estimator\n\n return self.score(inputs, targets)\n"
] | [
[
"numpy.array",
"numpy.asscalar",
"sklearn.pipeline.Pipeline",
"pandas.DataFrame"
]
] |
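Illustrative aside on the row above: a minimal, standalone sketch of the "__ndarray__" dict round trip performed by _encode_numpy/_decode_numpy (the encode/decode helpers are hypothetical names; numpy.asscalar, tagged for this file, was removed in NumPy 1.23, so arr.item() is the modern scalar conversion).

import numpy as np

def encode(arr):
    # Mirror _encode_numpy: store values, dtype and shape as plain JSON-friendly types.
    return {"__ndarray__": arr.tolist(), "__dtype__": str(arr.dtype), "__shape__": arr.shape}

def decode(d):
    # Mirror _decode_numpy: tolist() preserved the nesting, so np.array restores the shape.
    return np.array(d["__ndarray__"], dtype=d["__dtype__"])

a = np.arange(6, dtype=np.float32).reshape(2, 3)
assert np.array_equal(decode(encode(a)), a)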
BernhardRiemann/iree | [
"471349762b316f7d6b83eb5f9089255d78052758"
] | [
"integrations/tensorflow/e2e/broadcasting_test.py"
] | [
"# Lint as: python3\n# Copyright 2020 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Test broadcasting support.\"\"\"\n\nfrom absl import app\nimport numpy as np\nfrom pyiree.tf.support import tf_test_utils\nfrom pyiree.tf.support import tf_utils\nimport tensorflow.compat.v2 as tf\n\n\nclass BroadcastingModule(tf.Module):\n\n @tf.function(input_signature=[\n tf.TensorSpec([None], tf.float32),\n tf.TensorSpec([None], tf.float32),\n ])\n def add(self, lhs, rhs):\n return lhs + rhs\n\n\nclass BroadcastingTest(tf_test_utils.TracedModuleTestCase):\n\n def __init__(self, methodName=\"runTest\"):\n super(BroadcastingTest, self).__init__(methodName)\n self._modules = tf_test_utils.compile_tf_module(BroadcastingModule)\n\n def test_add_same_shape(self):\n\n def add_same_shape(module):\n lhs = tf_utils.uniform([4])\n rhs = tf_utils.uniform([4])\n module.add(lhs, rhs)\n\n self.compare_backends(add_same_shape, self._modules)\n\n def test_add_broadcast_lhs(self):\n\n def add_broadcast_lhs(module):\n lhs = tf_utils.uniform([1])\n rhs = tf_utils.uniform([4])\n module.add(lhs, rhs)\n\n self.compare_backends(add_broadcast_lhs, self._modules)\n\n def test_add_broadcast_rhs(self):\n\n def add_broadcast_rhs(module):\n lhs = tf_utils.uniform([4])\n rhs = tf_utils.uniform([1])\n module.add(lhs, rhs)\n\n self.compare_backends(add_broadcast_rhs, self._modules)\n\n\ndef main(argv):\n del argv # Unused\n if hasattr(tf, 'enable_v2_behavior'):\n tf.enable_v2_behavior()\n tf.test.main()\n\n\nif __name__ == '__main__':\n app.run(main)\n"
] | [
[
"tensorflow.compat.v2.TensorSpec",
"tensorflow.compat.v2.enable_v2_behavior",
"tensorflow.compat.v2.test.main"
]
] |
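Aside: the broadcasting pattern this test exercises, reduced to plain TensorFlow without the pyiree harness (assumes TensorFlow 2.x is installed; the dynamic [None] signatures match BroadcastingModule above).

import tensorflow.compat.v2 as tf

@tf.function(input_signature=[
    tf.TensorSpec([None], tf.float32),
    tf.TensorSpec([None], tf.float32),
])
def add(lhs, rhs):
    # A shape-[1] operand broadcasts against a shape-[4] one, as in test_add_broadcast_lhs.
    return lhs + rhs

print(add(tf.constant([1.0]), tf.constant([1.0, 2.0, 3.0, 4.0])).numpy())  # [2. 3. 4. 5.]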
mannyray/sort | [
"f0ee0488aa4e7213d30ff50bcb848a843fedde42"
] | [
"python_implementation/example/example7.py"
] | [
"import commonExample\nimport math\nimport sys\nsys.path.insert(0,'..')\nimport generate\nimport constants\nfrom numpy import random\nimport intersection\nfrom PIL import Image, ImageDraw, ImageFont\n\ngif_file=\"example7\"\n\n\nxcoords = [constants.width,constants.width,constants.width,100,400,700,1000,1300]\nycoords = [50,350,700,constants.height, constants.height, constants.height, constants.height, constants.height]\n\ndef updateCoords(xCor,yCor,frameNumber):\n lastFrame = False\n turnBackHorizontal = False\n turnBackVertical = False\n if frameNumber > constants.width/constants.step_size:\n turnBackHorizontal = True\n if frameNumber > constants.height/constants.step_size:\n turnBackVertical = True\n if yCor[3] > constants.height + 10:\n lastFrame = True\n for i in range(0,len(xCor)):\n if i < 3:\n if turnBackHorizontal == False:\n xCor[i] = xCor[i] - constants.step_size\n else:\n xCor[i] = xCor[i] + constants.step_size\n else:\n if turnBackVertical == False:\n yCor[i] = yCor[i] - constants.step_size\n else:\n yCor[i] = yCor[i] + constants.step_size\n return lastFrame, xCor, yCor\n\ndef drawImage7(image,draw,xcoords,ycoords,index):\n if index == 0:\n original = Image.open('assets/orange.jpg')\n elif index == 1:\n original = Image.open('assets/apple.jpg')\n elif index == 2:\n original = Image.open('assets/watermellon.jpg')\n elif index == 3:\n original = Image.open('assets/orange.jpg')\n elif index == 4:\n original = Image.open('assets/apple.jpg')\n elif index == 5:\n original = Image.open('assets/watermellon.jpg')\n elif index == 6:\n original = Image.open('assets/apple.jpg')\n elif index == 7:\n original = Image.open('assets/watermellon.jpg')\n \n font = ImageFont.truetype(\"/usr/share/fonts/truetype/dejavu/DejaVuSans.ttf\", 20)\n image.paste(original, box=(xcoords[index],ycoords[index]))\n draw.text((xcoords[index]+constants.orange_width/2,ycoords[index]+constants.orange_width/2),str(index+1),fill=(0,0,0), font=font)\n\ndef boundBoxNoNoise7(x,y,index):\n center = 25\n objectType = None\n if index == 0:\n objectType = \"orange\"\n if index == 1:\n objectType = \"apple\"\n if index == 2:\n objectType = \"watermellon\"\n if index == 3:\n objectType = \"orange\"\n if index == 4:\n objectType = \"apple\"\n if index == 5:\n objectType = \"watermellon\"\n elif index == 6:\n objectType = \"apple\"\n elif index == 7:\n objectType = \"watermellon\"\n return x+center, y+center, constants.orange_width - center*2, constants.orange_width - center*2.5,objectType\n\ndef boundBoxNoise7(x,y,index):\n multiplier = 10\n x,y,w,h,objectType = boundBoxNoNoise7(x,y,index)\n arr = random.normal(size=(4,1))*multiplier\n return x+arr[0], y+arr[1], w+arr[2], h+arr[3], objectType\n\ncommonExample.common_run(updateCoords,gif_file,xcoords,ycoords,boundBoxNoise=boundBoxNoise7,boundBoxNoNoise=boundBoxNoNoise7,drawImage=drawImage7,saveData=True)\n"
] | [
[
"numpy.random.normal"
]
] |
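Aside: the Gaussian bounding-box jitter from boundBoxNoise7, isolated into a hypothetical add_noise helper (size=4 replaces the original size=(4,1) so the outputs stay scalars).

from numpy import random

def add_noise(x, y, w, h, multiplier=10):
    # Zero-mean Gaussian noise on position and size, scaled by `multiplier` pixels.
    dx, dy, dw, dh = random.normal(size=4) * multiplier
    return x + dx, y + dy, w + dw, h + dh

print(add_noise(100, 200, 50, 50))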
FastSense/rosbot-ros2 | [
"063c897a16129d9aa88c2c7c52bdf6547af894e4"
] | [
"ros2_ws/src/utils/logger/logger/logger.py"
] | [
"import os\nimport pandas as pd\nfrom matplotlib import pyplot as plt\nimport rclpy\nimport numpy as np\n\nfrom rclpy.node import Node\nfrom geometry_msgs.msg import Twist\nfrom nav_msgs.msg import Odometry\nfrom std_srvs.srv import Empty\n\nfrom logger.utils import convert_ros2_time_to_float\nfrom logger.create_graphs import build_general_graph_for_rosbot\nfrom scipy.spatial.transform import Rotation\n\nclass Logger(Node):\n \"\"\"\n Class for logging the state of the rosbot\n Node for logging the state of the robot,\n kinematic model (optional) and neural network\n model (optional), control and time stamps\n :Attributes:\n :first_tick: (bool) srue if it is first callbcak\n :init_time: (float) node start time (time of the first callback)\n :curr_control: (list) current control [u_v, u_w]\n :output_path: (str) Absolute path to the directory\n where the logged data will be saved\n :control_topic: (str) nam of the control topic (/cmd_vel)\n :parent_frame: (str) name of the origin tf frame\n :kinetic_model_frame: (str) name of the kinematic model tf frame\n :nn_model_frame: (str) name of the NN model tf frame\n :robot_state: (pandas.DataFrame) container for rosbot state\n :kinetic_model_state: (pandas.DataFrame) container for\n kinematic model state\n :nn_model_state: (pandas.DataFrame) container for NN model state\n :robot_control: (pandas.DataFrame) container for rosbot control\n :time: (list) container for time stamps\n :odom_sub: subscriber to /odom topic\n :control_sub: subscriber to control topic\n \"\"\"\n def __init__(self):\n \"\"\"\n \"\"\"\n super().__init__('logger')\n \n self.init_parameters()\n self.get_node_parametes()\n self.init_subs()\n self.init_containers()\n \n self.first_tick = True\n self.init_time = None\n self.curr_control = list()\n self.srv = self.create_service(Empty, 'shutdown_logger', self.shutdown_logger_callback)\n rclpy.get_default_context().on_shutdown(self.on_shutdown)\n \n\n def init_parameters(self):\n \"\"\"\n Declares node parameters\n \"\"\"\n self.declare_parameter('output_path', \"\")\n self.declare_parameter('control_topic', '/cmd_vel')\n self.declare_parameter('parent_frame', 'odom')\n self.declare_parameter('kinetic_model_frame', 'model_link')\n self.declare_parameter('nn_model_frame', 'nn_model_link')\n # self.declare_parameter('tf_topic', '/tf')\n\n def get_node_parametes(self):\n \"\"\"\n Gets node parameters\n \"\"\"\n self.output_path = self.get_parameter('output_path').get_parameter_value().string_value\n self.control_topic = self.get_parameter('control_topic').get_parameter_value().string_value\n self.parent_frame = self.get_parameter('parent_frame').get_parameter_value().string_value\n self.kinetic_model_frame = self.get_parameter('kinetic_model_frame').get_parameter_value().string_value\n self.nn_model_frame = self.get_parameter('nn_model_frame').get_parameter_value().string_value\n # self.tf_topic = self.get_parameter('tf_topic').get_parameter_value().string_value\n\n def init_containers(self):\n \"\"\"\n Declares containers for logged data\n \"\"\"\n self.robot_state = pd.DataFrame(\n columns=[\n 'x', 'y', 'z', 'roll', 'pitch', 'yaw',\n 'v_x', 'v_y', 'v_z', 'w_x', 'w_y', 'w_z',\n ]\n )\n self.kinetic_model_state = pd.DataFrame(\n columns=[\n 'x', 'y', 'z', 'roll', 'pitch', 'yaw',\n 'v_x', 'v_y', 'v_z', 'w_x', 'w_y', 'w_z',\n ]\n )\n self.nn_model_state = pd.DataFrame(\n columns=[\n 'x', 'y', 'z', 'roll', 'pitch', 'yaw',\n 'v_x', 'v_y', 'v_z', 'w_x', 'w_y', 'w_z',\n ]\n )\n self.robot_control = pd.DataFrame(\n columns=[\n 'v_x', 'w_z'\n 
]\n )\n self.time = list()\n\n def init_subs(self):\n \"\"\"\n Declares node subscribers\n \"\"\"\n self.odom_sub = self.create_subscription(\n Odometry,\n '/odom',\n self.odom_callback,\n 1\n )\n self.control_sub = self.create_subscription(\n Twist,\n self.control_topic,\n self.control_callback,\n 1\n )\n\n # prevent unused variable warning\n self.control_sub\n self.odom_sub\n\n def odom_callback(self, odom_msg):\n \"\"\"\n Callback on odom message\n Robot position, current time and control are logged\n Args:\n :odom_msg: (nav_msgs.msg.Odometry): odom msg\n \"\"\"\n\n if (len(self.curr_control) == 0):\n return \n\n curr_time = convert_ros2_time_to_float(\n self.get_clock().now().seconds_nanoseconds()\n ) \n # update time container\n self.time.append(curr_time - self.init_time)\n # update control container\n self.robot_control.loc[len(self.robot_control)] = self.curr_control\n # update robot_state container\n rosbot_pose = odom_msg.pose.pose\n rosbot_velocities = odom_msg.twist.twist\n x, y, z = rosbot_pose.position.x, rosbot_pose.position.y, rosbot_pose.position.z\n rpy = Rotation.from_quat([\n np.float(rosbot_pose.orientation.x),\n np.float(rosbot_pose.orientation.y),\n np.float(rosbot_pose.orientation.z),\n np.float(rosbot_pose.orientation.w)]\n ).as_euler('xyz')\n rpy = list(rpy)\n\n v_x = rosbot_velocities.linear.x # Linear velocity\n v_y = rosbot_velocities.linear.y\n v_z = rosbot_velocities.linear.z\n\n w_x = rosbot_velocities.angular.x\n w_y = rosbot_velocities.angular.y\n w_z = rosbot_velocities.angular.z # YAW velocity\n\n last_row = len(self.robot_state)\n self.robot_state.loc[last_row] = [x,y,z] + rpy + [v_x, v_y, v_z, w_x, w_y, w_z]\n\n def control_callback(self, control):\n \"\"\"\n Updates the current control\n Args:\n :control: (geometry_msgs.msg.Twist) control msg\n \"\"\"\n if self.first_tick:\n self.first_tick = False\n self.init_time = convert_ros2_time_to_float(\n self.get_clock().now().seconds_nanoseconds()\n )\n\n self.curr_control = [control.linear.x, control.angular.z]\n\n def save_collected_data_to_csv(self):\n \"\"\"\n Saves logged data in csv format\n \"\"\"\n # if not os.path.exists(self.output_path):\n # os.makedirs(self.output_path)\n\n self.robot_state.to_csv(\n path_or_buf=os.path.join(self.output_path, \"rosbot_state.csv\"),\n sep=' ',\n index=False\n )\n\n self.kinetic_model_state.to_csv(\n path_or_buf=os.path.join(self.output_path, \"kinematic_model_state.csv\"),\n sep=' ',\n index=False\n )\n\n self.nn_model_state.to_csv(\n path_or_buf=os.path.join(self.output_path, \"nn_model_state.csv\"),\n sep=' ',\n index=False\n )\n\n self.robot_control.to_csv(\n path_or_buf= os.path.join(self.output_path,\"control.csv\"),\n sep=' ',\n index=False\n )\n\n pd.DataFrame(data=self.time, columns=['t']).to_csv(\n path_or_buf= os.path.join(self.output_path, \"time.csv\"),\n sep=' ',\n index=False\n )\n \n\n def shutdown_logger_callback(self):\n \"\"\"\n Callback for the shutdown_logger service, \n turns off the logger node\n \"\"\"\n rclpy.try_shutdown()\n\n def on_shutdown(self):\n \"\"\"\n A function that is executed when a node shutdown.\n Plots a graph of all collected data, saves it in csv format.\n \"\"\"\n\n if not os.path.exists(self.output_path):\n os.makedirs(self.output_path)\n \n data_plots = build_general_graph_for_rosbot(\n robot_state_df=self.robot_state,\n control_df=self.robot_control,\n time_list=self.time,\n save_to_png=True,\n path=self.output_path\n )\n self.save_collected_data_to_csv()\n\n self.get_logger().warn(\"Output path = 
{}\".format(self.output_path))\n\ndef main():\n \"\"\"\n Declares the logger node.\n Node works \n \"\"\"\n rclpy.init()\n logger = Logger()\n\n try:\n rclpy.spin(logger)\n except:\n pass\n\n logger.destroy_node()\n rclpy.shutdown()\n\n\nif __name__ == '__main__':\n main()\n\n\n\n"
] | [
[
"numpy.float",
"pandas.DataFrame"
]
] |
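Aside: the DataFrame row-append idiom the Logger uses for its state containers, shown standalone with a trimmed column set (numpy.float, tagged for this file, was removed in NumPy 1.24, so the built-in float is used instead).

import pandas as pd

state = pd.DataFrame(columns=['x', 'y', 'yaw', 'v_x', 'w_z'])
# Assigning at index len(df) appends one row per callback, as in odom_callback.
state.loc[len(state)] = [float(v) for v in (0.1, 0.2, 1.57, 0.5, 0.0)]
state.loc[len(state)] = [0.2, 0.3, 1.60, 0.5, 0.1]
print(state)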
bgpeyton/QCElemental | [
"2e84cd686d5fff0fc79accb28ffa985de4684704"
] | [
"qcelemental/util/misc.py"
] | [
"import math\nimport re\nfrom typing import Dict, List\n\nimport numpy as np\n\nfrom ..physical_constants import constants\n\n\ndef distance_matrix(a: np.ndarray, b: np.ndarray) -> np.ndarray:\n \"\"\"Euclidean distance matrix between rows of arrays `a` and `b`. Equivalent to\n `scipy.spatial.distance.cdist(a, b, 'euclidean')`. Returns a.shape[0] x b.shape[0] array.\n\n \"\"\"\n assert a.shape[1] == b.shape[1], \"\"\"Inner dimensions do not match\"\"\"\n distm = np.zeros([a.shape[0], b.shape[0]])\n for i in range(a.shape[0]):\n distm[i] = np.linalg.norm(a[i] - b, axis=1)\n return distm\n\n\ndef update_with_error(a: Dict, b: Dict, path=None) -> Dict:\n \"\"\"Merges `b` into `a` like dict.update; however, raises KeyError if values of a\n key shared by `a` and `b` conflict.\n\n Adapted from: https://stackoverflow.com/a/7205107\n\n \"\"\"\n if path is None:\n path = []\n for key in b:\n if key in a:\n if isinstance(a[key], dict) and isinstance(b[key], dict):\n update_with_error(a[key], b[key], path + [str(key)])\n elif a[key] == b[key]:\n pass # same leaf value\n elif a[key] is None:\n a[key] = b[key]\n elif (\n isinstance(a[key], (list, tuple))\n and not isinstance(a[key], str)\n and isinstance(b[key], (list, tuple))\n and not isinstance(b[key], str)\n and len(a[key]) == len(b[key])\n and all((av is None or av == bv) for av, bv in zip(a[key], b[key]))\n ): # yapf: disable\n a[key] = b[key]\n else:\n raise KeyError(\"Conflict at {}: {} vs. {}\".format(\".\".join(path + [str(key)]), a[key], b[key]))\n else:\n a[key] = b[key]\n return a\n\n\ndef standardize_efp_angles_units(units: str, geom_hints: List[List[float]]) -> List[List[float]]:\n \"\"\"Applies to the pre-validated xyzabc or points hints in `geom_hints`\n the libefp default (1) units of [a0] and (2) radian angle range of\n (-pi, pi]. 
The latter is handy since this is how libefp returns hints\n\n \"\"\"\n\n def radrge(radang):\n \"\"\"Adjust `radang` by 2pi into (-pi, pi] range.\"\"\"\n if radang > math.pi:\n return radang - 2 * math.pi\n elif radang <= -math.pi:\n return radang + 2 * math.pi\n else:\n return radang\n\n if units == \"Angstrom\":\n iutau = 1.0 / constants.bohr2angstroms\n else:\n iutau = 1.0\n\n hints = []\n for hint in geom_hints:\n if len(hint) == 6:\n x, y, z = [i * iutau for i in hint[:3]]\n a, b, c = [radrge(i) for i in hint[3:]]\n hints.append([x, y, z, a, b, c])\n if len(hint) == 9:\n points = [i * iutau for i in hint]\n hints.append(points)\n\n return hints\n\n\ndef filter_comments(string: str) -> str:\n \"\"\"Remove from `string` any Python-style comments ('#' to end of line).\"\"\"\n\n return re.sub(r\"(^|[^\\\\])#.*\", \"\", string)\n\n\ndef unnp(dicary: Dict, _path=None, *, flat: bool = False) -> Dict:\n \"\"\"Return `dicary` with any ndarray values replaced by lists.\n\n Parameters\n ----------\n dicary: dict\n Dictionary where any internal iterables are dict or list.\n flat : bool, optional\n Whether the returned lists are flat or nested.\n\n Returns\n -------\n dict\n Input with any ndarray values replaced by lists.\n\n \"\"\"\n\n if _path is None:\n _path = []\n\n ndicary: Dict = {}\n for k, v in dicary.items():\n if isinstance(v, dict):\n ndicary[k] = unnp(v, _path + [str(k)], flat=flat)\n elif isinstance(v, list):\n # relying on Py3.6+ ordered dict here\n fakedict = {kk: vv for kk, vv in enumerate(v)}\n tolisted = unnp(fakedict, _path + [str(k)], flat=flat)\n ndicary[k] = list(tolisted.values())\n else:\n try:\n v.shape\n except AttributeError:\n ndicary[k] = v\n else:\n if flat:\n ndicary[k] = v.ravel().tolist()\n else:\n ndicary[k] = v.tolist()\n return ndicary\n\n\ndef _norm(points) -> float:\n \"\"\"\n Return the Frobenius norm across axis=-1, NumPy's internal norm is crazy slow (~4x)\n \"\"\"\n\n tmp = np.atleast_2d(points)\n return np.sqrt(np.einsum(\"ij,ij->i\", tmp, tmp))\n\n\ndef measure_coordinates(coordinates, measurements, degrees=False):\n \"\"\"\n Measures a geometry array based on 0-based indices provided, automatically detects distance, angle,\n and dihedral based on length of measurement input.\n \"\"\"\n\n coordinates = np.atleast_2d(coordinates)\n num_coords = coordinates.shape[0]\n\n single = False\n if isinstance(measurements[0], int):\n measurements = [measurements]\n single = True\n\n ret = []\n for num, m in enumerate(measurements):\n if any(x >= num_coords for x in m):\n raise ValueError(f\"An index of measurement {num} is out of bounds.\")\n\n kwargs = {}\n if len(m) == 2:\n func = compute_distance\n elif len(m) == 3:\n func = compute_angle\n kwargs = {\"degrees\": degrees}\n elif len(m) == 4:\n func = compute_dihedral\n kwargs = {\"degrees\": degrees}\n else:\n raise KeyError(f\"Unrecognized number of arguments for measurement {num}, found {len(m)}, expected 2-4.\")\n\n val = func(*[coordinates[x] for x in m], **kwargs)\n ret.append(float(val))\n\n if single:\n return ret[0]\n else:\n return ret\n\n\ndef compute_distance(points1, points2) -> np.ndarray:\n \"\"\"\n Computes the distance between the provided points on a per-row basis.\n\n Parameters\n ----------\n points1 : array-like\n The first list of points, can be 1D or 2D\n points2 : array-like\n The second list of points, can be 1D or 2D\n\n Returns\n -------\n distances : np.ndarray\n The array of distances between points1 and points2\n\n Notes\n -----\n Units are not considered inside these expressions, 
please preconvert to the same units before using.\n\n See Also\n --------\n distance_matrix\n Computes the distance between the provided points in all rows.\n compute_distance result is the diagonal of the distance_matrix result.\n\n \"\"\"\n points1 = np.atleast_2d(points1)\n points2 = np.atleast_2d(points2)\n\n return _norm(points1 - points2)\n\n\ndef compute_angle(points1, points2, points3, *, degrees: bool = False) -> np.ndarray:\n \"\"\"\n Computes the angle (p1, p2 [vertex], p3) between the provided points on a per-row basis.\n\n Parameters\n ----------\n points1 : np.ndarray\n The first list of points, can be 1D or 2D\n points2 : np.ndarray\n The second list of points, can be 1D or 2D\n points3 : np.ndarray\n The third list of points, can be 1D or 2D\n degrees : bool, options\n Returns the angle in degrees rather than radians if True\n\n Returns\n -------\n angles : np.ndarray\n The angle between the three points in radians\n\n Notes\n -----\n Units are not considered inside these expressions, please preconvert to the same units before using.\n \"\"\"\n\n points1 = np.atleast_2d(points1)\n points2 = np.atleast_2d(points2)\n points3 = np.atleast_2d(points3)\n\n v12 = points1 - points2\n v23 = points2 - points3\n\n denom = _norm(v12) * _norm(v23)\n cosine_angle = np.einsum(\"ij,ij->i\", v12, v23) / denom\n\n angle = np.pi - np.arccos(cosine_angle)\n\n if degrees:\n return np.degrees(angle)\n else:\n return angle\n\n\ndef compute_dihedral(points1, points2, points3, points4, *, degrees: bool = False) -> np.ndarray:\n \"\"\"\n Computes the dihedral angle (p1, p2, p3, p4) between the provided points on a per-row basis using the Praxeolitic formula.\n\n Parameters\n ----------\n points1 : np.ndarray\n The first list of points, can be 1D or 2D\n points2 : np.ndarray\n The second list of points, can be 1D or 2D\n points3 : np.ndarray\n The third list of points, can be 1D or 2D\n points4 : np.ndarray\n The third list of points, can be 1D or 2D\n degrees : bool, options\n Returns the dihedral angle in degrees rather than radians if True\n\n Returns\n -------\n dihedrals : np.ndarray\n The dihedral angle between the four points in radians\n\n Notes\n -----\n Units are not considered inside these expressions, please preconvert to the same units before using.\n \"\"\"\n\n # FROM: https://stackoverflow.com/questions/20305272/\n\n points1 = np.atleast_2d(points1)\n points2 = np.atleast_2d(points2)\n points3 = np.atleast_2d(points3)\n points4 = np.atleast_2d(points4)\n\n # Build the three vectors\n v1 = -1.0 * (points2 - points1)\n v2 = points3 - points2\n v3 = points4 - points3\n\n # Normalize the central vector\n v2 = v2 / _norm(v2)\n\n # v = projection of b0 onto plane perpendicular to b1\n # = b0 minus component that aligns with b1\n # w = projection of b2 onto plane perpendicular to b1\n # = b2 minus component that aligns with b1\n v = v1 - np.einsum(\"ij,ij->i\", v1, v1) * v2\n w = v3 - np.einsum(\"ij,ij->i\", v3, v2) * v2\n\n # angle between v and w in a plane is the torsion angle\n # v and w may not be normalized but that's fine since tan is y/x\n x = np.einsum(\"ij,ij->i\", v, w)\n y = np.einsum(\"ij,ij->i\", np.cross(v2, v), w)\n angle = np.arctan2(y, x)\n\n if degrees:\n return np.degrees(angle)\n else:\n return angle\n"
] | [
[
"numpy.degrees",
"numpy.arctan2",
"numpy.atleast_2d",
"numpy.zeros",
"numpy.arccos",
"numpy.cross",
"numpy.einsum",
"numpy.linalg.norm"
]
] |
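Aside: a standalone check of the einsum row-norm trick behind _norm above, confirming it matches numpy.linalg.norm along the last axis.

import numpy as np

pts = np.atleast_2d([[3.0, 4.0, 0.0], [1.0, 2.0, 2.0]])
fast = np.sqrt(np.einsum("ij,ij->i", pts, pts))  # Frobenius norm per row
ref = np.linalg.norm(pts, axis=-1)
assert np.allclose(fast, ref)
print(fast)  # [5. 3.]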
DavidHurst/palbolts | [
"72f9ca3f82499b532f14d0e797426e1b425d3efe"
] | [
"conduit/fair/models/gpd.py"
] | [
"\"\"\"Zhang Gradient Projection Debiasing Baseline Model.\"\"\"\nfrom __future__ import annotations\nfrom typing import NamedTuple, cast\n\nimport ethicml as em\nfrom kit import implements\nfrom kit.torch import CrossEntropyLoss, TrainingMode\nimport pandas as pd\nimport pytorch_lightning as pl\nfrom pytorch_lightning.utilities.types import EPOCH_OUTPUT\nimport torch\nfrom torch import Tensor, nn\nfrom torch.optim.optimizer import Optimizer\n\nfrom conduit.data.structures import TernarySample\nfrom conduit.models.base import CdtModel\nfrom conduit.models.utils import aggregate_over_epoch, prediction, prefix_keys\nfrom conduit.types import LRScheduler, Stage\n\n__all__ = [\"GPD\"]\n\n\ndef compute_proj_grads(*, model: nn.Module, loss_p: Tensor, loss_a: Tensor, alpha: float) -> None:\n \"\"\"Computes the adversarial-gradient projection term.\n\n :param model: Model whose parameters the gradients are to be computed w.r.t.\n :param loss_p: Prediction loss.\n :param loss_a: Adversarial loss.\n :param alpha: Pre-factor for adversarial loss.\n \"\"\"\n grad_p = torch.autograd.grad(loss_p, tuple(model.parameters()), retain_graph=True)\n grad_a = torch.autograd.grad(loss_a, tuple(model.parameters()), retain_graph=True)\n\n def _proj(a: Tensor, b: Tensor) -> Tensor:\n return b * torch.sum(a * b) / torch.sum(b * b).clamp(min=torch.finfo(b.dtype).eps)\n\n grad_p = [p - _proj(p, a) - alpha * a for p, a in zip(grad_p, grad_a)]\n\n for param, grad in zip(model.parameters(), grad_p):\n param.grad = grad\n\n\ndef compute_grad(*, model: nn.Module, loss: Tensor) -> None:\n \"\"\"Computes the adversarial gradient projection term.\n\n :param model: Model whose parameters the gradients are to be computed w.r.t.\n :param loss: Adversarial loss.\n \"\"\"\n grad_list = torch.autograd.grad(loss, tuple(model.parameters()), retain_graph=True)\n\n for param, grad in zip(model.parameters(), grad_list):\n param.grad = grad\n\n\nclass ModelOut(NamedTuple):\n s: Tensor\n y: Tensor\n\n\nclass GPD(CdtModel):\n \"\"\"Zhang Mitigating Unwanted Biases.\"\"\"\n\n def __init__(\n self,\n *,\n adv: nn.Module,\n enc: nn.Module,\n clf: nn.Module,\n lr: float = 3.0e-4,\n weight_decay: float = 0.0,\n lr_initial_restart: int = 10,\n lr_restart_mult: int = 2,\n lr_sched_interval: TrainingMode = TrainingMode.epoch,\n lr_sched_freq: int = 1,\n ) -> None:\n super().__init__(\n lr=lr,\n weight_decay=weight_decay,\n lr_initial_restart=lr_initial_restart,\n lr_restart_mult=lr_restart_mult,\n lr_sched_interval=lr_sched_interval,\n lr_sched_freq=lr_sched_freq,\n )\n\n self.adv = adv\n self.enc = enc\n self.clf = clf\n\n self._loss_adv_fn = CrossEntropyLoss()\n self._loss_clf_fn = CrossEntropyLoss()\n\n self.automatic_optimization = False # Mark for manual optimization\n\n @implements(CdtModel)\n @torch.no_grad()\n def inference_step(self, batch: TernarySample, *, stage: Stage) -> dict[str, Tensor]:\n assert isinstance(batch.x, Tensor)\n model_out = self.forward(batch.x)\n loss_adv, loss_clf, loss = self._get_losses(model_out=model_out, batch=batch)\n logging_dict = {\n \"loss\": loss.item(),\n \"loss_adv\": loss_adv.item(),\n \"loss_clf\": loss_clf.item(),\n }\n logging_dict = prefix_keys(dict_=logging_dict, prefix=str(stage), sep=\"/\")\n self.log_dict(logging_dict)\n\n return {\n \"targets\": batch.y.view(-1),\n \"subgroup_inf\": batch.s.view(-1),\n \"logits_y\": model_out.y,\n }\n\n @implements(CdtModel)\n def inference_epoch_end(self, outputs: EPOCH_OUTPUT, stage: Stage) -> dict[str, float]:\n targets_all = 
aggregate_over_epoch(outputs=outputs, metric=\"targets\")\n subgroup_inf_all = aggregate_over_epoch(outputs=outputs, metric=\"subgroup_inf\")\n logits_y_all = aggregate_over_epoch(outputs=outputs, metric=\"logits_y\")\n\n preds_y_all = prediction(logits_y_all)\n\n dt = em.DataTuple(\n x=pd.DataFrame(\n torch.rand_like(subgroup_inf_all).detach().cpu().numpy(),\n columns=[\"x0\"],\n ),\n s=pd.DataFrame(subgroup_inf_all.detach().cpu().numpy(), columns=[\"s\"]),\n y=pd.DataFrame(targets_all.detach().cpu().numpy(), columns=[\"y\"]),\n )\n\n return em.run_metrics(\n predictions=em.Prediction(hard=pd.Series(preds_y_all.detach().cpu().numpy())),\n actual=dt,\n metrics=[em.Accuracy(), em.RenyiCorrelation(), em.Yanovich()],\n per_sens_metrics=[em.Accuracy(), em.ProbPos(), em.TPR()],\n )\n\n def _get_losses(\n self, model_out: ModelOut, *, batch: TernarySample\n ) -> tuple[Tensor, Tensor, Tensor]:\n loss_adv = self._loss_adv_fn(model_out.s, target=batch.s)\n loss_clf = self._loss_clf_fn(model_out.y, target=batch.y)\n return loss_adv, loss_clf, loss_adv + loss_clf\n\n @implements(pl.LightningModule)\n def training_step(self, batch: TernarySample, batch_idx: int) -> None:\n assert isinstance(batch.x, Tensor)\n opt = cast(Optimizer, self.optimizers())\n\n opt.zero_grad()\n\n model_out: ModelOut = self.forward(batch.x)\n loss_adv, loss_clf, loss = self._get_losses(model_out=model_out, batch=batch)\n\n logging_dict = {\n \"adv_loss\": loss_adv.item(),\n \"clf_loss\": loss_clf.item(),\n \"loss\": loss.item(),\n }\n logging_dict = prefix_keys(dict_=logging_dict, prefix=\"train\", sep=\"/\")\n self.log_dict(logging_dict)\n\n compute_proj_grads(model=self.enc, loss_p=loss_clf, loss_a=loss_adv, alpha=1.0)\n compute_grad(model=self.adv, loss=loss_adv)\n compute_grad(model=self.clf, loss=loss_clf)\n\n opt.step()\n\n if (self.lr_sched_interval is TrainingMode.step) and (\n self.global_step % self.lr_sched_freq == 0\n ):\n sch = cast(LRScheduler, self.lr_schedulers())\n sch.step()\n if (self.lr_sched_interval is TrainingMode.epoch) and self.trainer.is_last_batch:\n sch = cast(LRScheduler, self.lr_schedulers())\n sch.step()\n\n @implements(nn.Module)\n def forward(self, x: Tensor) -> ModelOut:\n embedding = self.enc(x)\n y_pred = self.clf(embedding)\n s_pred = self.adv(embedding)\n return ModelOut(y=y_pred, s=s_pred)\n"
] | [
[
"torch.sum",
"torch.finfo",
"torch.no_grad",
"torch.rand_like"
]
] |
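Aside: the projection identity used by compute_proj_grads, checked on toy vectors (the proj helper name is hypothetical; assumes PyTorch is installed).

import torch

def proj(a, b):
    # Component of `a` along `b`; eps guards against division by a zero-norm gradient.
    return b * torch.sum(a * b) / torch.sum(b * b).clamp(min=torch.finfo(b.dtype).eps)

a = torch.tensor([1.0, 1.0])
b = torch.tensor([1.0, 0.0])
print(proj(a, b))      # tensor([1., 0.])
print(a - proj(a, b))  # tensor([0., 1.]) -- the part of `a` orthogonal to `b`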
Tulioas/dfp_analyser | [
"d66ff94ba0b88a5d421a992ad27661011db36091"
] | [
"primary_info.py"
] | [
"import pandas as pd\nfrom zipfile import ZipFile\nimport numpy as np\nimport re\nimport os\n\n\ndef year_identifier(file_name):\n\n '''\n Abstrait: identify the year of the file\n '''\n\n folder_regex = re.compile(r'20\\d\\d')\n match = folder_regex.search(str(file_name))\n year = match.group()\n return year\n\n\ndef debt_correction(dataframe):\n\n debt_ident_list = ['Empréstimos e Financiamentos']\n lpa_ident_list = ['ON']\n count_debt = 1\n count_lpa = 1\n\n for row in range(len(dataframe)):\n for col in range(len(dataframe.columns)):\n if dataframe.iloc[row,col] in debt_ident_list:\n prev_name = dataframe.iloc[row,col]\n dataframe.iat[row, col] = f'{prev_name} {count_debt}'\n count_debt += 1\n if dataframe.iloc[row,col] in lpa_ident_list:\n prev_name = dataframe.iloc[row,col]\n dataframe.iat[row, col] = f'{prev_name} {count_lpa}'\n count_lpa += 1 \n\n return dataframe\n\n\ndef dataframe_filtering(folder, file_name_list, company_list, prev=False):\n\n '''\n Input: folder name, list with important files in the folder and list with companies of interest\n Output: \n '''\n\n dataframe_general = []\n\n for company in company_list:\n\n dataframe_company = []\n dataframe_list = []\n\n for file in file_name_list:\n\n # Create BPA DataFrame\n file_raw = pd.read_csv(f'raw_dfp\\\\{folder}\\\\{file}', encoding='iso-8859-1', delimiter=';', skiprows=0, low_memory=False)\n\n # Filter year and last year results\n if prev is False:\n file_1 = file_raw[~file_raw['ORDEM_EXERC'].str.startswith('P')]\n folder_year = year_identifier(file_name_list)\n else:\n file_1 = file_raw[file_raw['ORDEM_EXERC'].str.startswith('P')]\n folder_year = int(year_identifier(file_name_list)) - 1\n\n # Filter the right columns\n file_2 = file_1[['DENOM_CIA', 'CD_CONTA','DS_CONTA', 'VL_CONTA']]\n\n # Filter the right companies\n file_3 = file_2[file_2['DENOM_CIA'].isin([company])]\n\n # Filter the right data\n\n if file.find('DRE') != -1:\n interest_data = ['Receita de Venda de Bens e/ou Serviços', 'Resultado Bruto', 'Despesas com Vendas', 'Despesas com Pesquisa e Desenvolvimento',\n 'Custos com Pesquisa e Desenvolvimento', 'Despesas com pesquisas e desenvolvimento', 'Pesquisa e Desenvolvimento', 'Pesquisa', 'Despesas com Pesquisas e Desenvolvimento',\n 'Custo com Pesquisa e Desenvolvimento Tecnológico', 'Despesas com gastos com desenvolvimento', 'Despesas com desenvolvimento de tecnologia e produtos', 'Com estudos em desenvolvimento',\n 'Despesas Gerais e Administrativas', 'Despesas de Depreciação', 'Despesas/Receitas Operacionais',\n 'Resultado Antes do Resultado Financeiro e dos Tributos', 'Resultado Financeiro', 'Resultado Antes dos Tributos sobre o Lucro',\n 'Resultado Líquido das Operações Continuadas', 'Lucro Básico por Ação', 'ON']\n\n elif file.find('BPA') != -1:\n interest_data = ['Ativo Total', 'Ativo Circulante', 'Imobilizado']\n\n elif file.find('BPP') != -1:\n interest_data = ['Passivo Circulante', 'Empréstimos e Financiamentos', 'Passivo Não Circulante', 'Patrimônio Líquido Consolidado',\n 'Reservas de Lucros', 'Lucros/Prejuízos Acumulados']\n\n elif file.find('DFC_MI') != -1:\n interest_data = ['Lucro Líquido do exercício', 'Depreciação, Amortização e Impairment', 'Depreciação e amortização', 'Depreciação de arrendamento', 'Depreciação e Amortização', 'Depreciações e Amortizações', 'Amortização e Depreciação', 'Depreciação/amortização', 'Depreciações', 'Depreciação e Amortizações', 'Depreciação do imobilizado', 'Depreciação e depleção do imobilizado', 'Depreciação, exaustão e amortização', 'Depreciação, 
Amortização e Exaustão', 'Depreciação, Exaustão e Amortização',\n 'Aquisição de Imobilizado e Intangíveis', 'Adições de imobilizado', 'Compras de ativo imobilizado', 'Aquisições de imobilizado', 'Aquisições de Imobilizado',\n 'Aquisições de Imobilizado e Intangível', 'Aquisições de imobilizado e intangível', 'Aquisições de Imobilizados e Intangíveis (Exceto pelo Excedente de Cessão Onerosa)',\n 'Aquisições de imobilizados e intangíveis', 'Aquisições de imobilizado veículos frota', 'Aquisições de imobilizado de uso', 'Aquisições de Imobilizado de Uso', 'Aquisição de ativos imobilizados, intangível e propriedade para investimento', 'Aquisição de imobilizado e intangível']\n\n file_4 = file_3[file_3['DS_CONTA'].isin(interest_data)]\n\n dataframe_list.append(file_4)\n\n # Concatenate each file dataframe into one and add year column\n dataframe_company = pd.concat(dataframe_list)\n dataframe_company = dataframe_company.rename(columns={\"VL_CONTA\": f\"{folder_year}\"})\n\n # Append to general list\n dataframe_general.append(dataframe_company)\n\n return dataframe_general\n\n\ndef primary_info(companies, clear_prev_folder=False):\n\n company_frames = []\n for company in companies:\n company_frames.append(pd.DataFrame())\n\n # Identify zip year\n for file in os.listdir('raw_dfp\\\\raw_zip'):\n zip_year = year_identifier(f'raw_dfp\\\\raw_zip\\\\{file}')\n\n # Create or clear the folder of the year\n output_folder = zip_year\n directory_elements = os.listdir('raw_dfp')\n if output_folder not in directory_elements:\n os.mkdir(f'raw_dfp\\\\{output_folder}')\n elif os.listdir(f'raw_dfp\\\\{output_folder}') != [] and clear_prev_folder is True:\n output_folder_elements = os.listdir(f'raw_dfp\\\\{output_folder}')\n for element in output_folder_elements:\n os.remove(f'raw_dfp\\\\{output_folder}\\\\{element}')\n\n # Extract files from zip\n if os.listdir(f'raw_dfp\\\\{output_folder}') == []:\n with ZipFile(f'raw_dfp\\\\raw_zip\\\\{file}', 'r') as zip:\n zip.extractall(path=f'raw_dfp\\\\{output_folder}')\n else:\n print(f\"A pasta \\\"raw_dfp/{zip_year}\\\" ja tem arquivos internos. 
Confira a necessidade de descompactar o .zip.\")\n print('Prosseguindo ...')\n\n # List folders in 'raw_dfp' and remove 'raw_zip'\n raw_folders = os.listdir('raw_dfp')\n raw_folders.remove('raw_zip')\n\n # Travel around raw_dfp folders excluding \"raw_zip\"\n for folder in raw_folders:\n\n # Remove all individual reports, aiming only consolidated reports\n file_list = os.listdir(f'raw_dfp\\\\{folder}')\n for file in file_list:\n file_regex = re.compile(r'ind_20\\d\\d')\n mo = file_regex.search(str(file))\n if mo is not None:\n os.remove(f'raw_dfp\\\\{folder}\\\\{file}')\n\n # Travel around folder files\n for file in file_list:\n\n # Save DRE file name in a variable\n dre_regex = re.compile(r'DRE_con_20\\d\\d')\n mo_dre = dre_regex.search(str(file))\n if mo_dre is not None:\n dre = file\n\n # Save BPA file name in a variable\n bpa_regex = re.compile(r'BPA_con_20\\d\\d')\n mo_bpa = bpa_regex.search(str(file))\n if mo_bpa is not None:\n bpa = file\n\n # Save BPP file name in a variable\n bpp_regex = re.compile(r'BPP_con_20\\d\\d')\n mo_bpp = bpp_regex.search(str(file))\n if mo_bpp is not None:\n bpp = file\n\n # Save DFC_MI file name in a variable\n dfc_regex = re.compile(r'DFC_MI_con_20\\d\\d')\n mo_dfc = dfc_regex.search(str(file))\n if mo_dfc is not None:\n dfc = file\n\n folder_list = dataframe_filtering(folder, [dre, bpa, bpp, dfc], companies)\n\n # Create datframe for 2016 based on 2017 folder\n if int(folder) == 2017:\n folder_list_2 = dataframe_filtering(folder, [dre, bpa, bpp, dfc], companies, prev=True)\n\n for company_index in range(len(companies)):\n if len(folder_list_2[company_index]) == 0: # Do not add empty dataframe\n pass\n else:\n company_frames[company_index] = debt_correction(folder_list_2[company_index])\n \n # Construct and append a final dataframe for each company with all years information\n for company_index in range(len(companies)):\n if len(folder_list[company_index]) == 0:\n pass\n elif len(company_frames[company_index]) == 0:\n company_frames[company_index] = debt_correction(folder_list[company_index])\n\n else:\n main = company_frames[company_index]\n serie_corrected = debt_correction(folder_list[company_index][['DS_CONTA', str(folder)]])\n serie = serie_corrected.set_index('DS_CONTA')\n\n #serie_no_dups = serie\n company_frames[company_index] = pd.merge(main, serie, on=['DS_CONTA'])\n\n return company_frames\n\n\ndef worked_info(companies=['AMBEV S.A.'], clear_prev_folder=False):\n\n # Create return variable\n return_dict_list = []\n\n # Extract primary information\n prim_info = primary_info(companies, clear_prev_folder=False)\n\n print('-+-' * 20)\n print('CARREGANDO DATAFFRAME ...')\n\n # Travel throught companies\n for comp_index in range(len(companies)):\n\n # Extract list of years collected\n year_columns = []\n for column in prim_info[comp_index].columns:\n if '20' in column:\n year_columns.append(column)\n\n # Extract company frame\n primary_frame = prim_info[comp_index]\n #pd.set_option('display.expand_frame_repr', False)\n #print(primary_frame)\n #primary_frame.to_csv('primary_csv.csv',sep=' ')\n\n # Duplicate checker\n imobilizado_duplicate = 0\n desp_ga_duplicate = 0\n lucro_acumul_duplicate = 0\n dai_duplicate = 0\n ped_duplicate = 0\n vendas_duplicate = 0\n divida_curto_duplicate = 0\n divida_longo_duplicate = 0\n receita_duplicate = 0\n\n # Initialize primary variables lists\n receita_list = []\n lucro_brut_list = []\n desp_vendas_list = []\n desp_ga_list = []\n dai_list = []\n desp_oper_list = []\n financeiro_list = []\n lucropreimp_list = []\n 
lucro_liq_list = []\n lucro_oper_list = []\n lucroporacao_list = []\n\n ativo_total_list = []\n ativo_circ_list = []\n imobilizado_list = []\n passivo_circ_list = []\n divida_curto_list = []\n divida_longo_list = []\n passivo_ncirc_list = []\n patr_liq_list = []\n lucro_acumul_list = []\n\n lucro_liq_exerc_list = []\n desp_ativo_fixo_list = []\n\n # Initialize intermediate variables\n desp_vga_list = []\n desp_ped_list = []\n\n # Travel trought cells\n for row in range(len(primary_frame)):\n\n col = 'DS_CONTA'\n \n # Fill primary variable lists (DRE)\n if primary_frame.iloc[row][col] == 'Receita de Venda de Bens e/ou Serviços':\n if receita_duplicate == 0:\n receita_duplicate += 1\n for year in year_columns:\n receita_list.append(primary_frame.iloc[row][year])\n else:\n pass\n\n elif primary_frame.iloc[row][col] == 'Resultado Bruto':\n for year in year_columns:\n lucro_brut_list.append(primary_frame.iloc[row][year])\n\n elif primary_frame.iloc[row][col] == 'Despesas com Vendas':\n if vendas_duplicate == 0:\n vendas_duplicate += 1\n for year in year_columns:\n desp_vendas_list.append(primary_frame.iloc[row][year])\n else:\n pass\n\n elif primary_frame.iloc[row][col] == 'Despesas Gerais e Administrativas':\n if desp_ga_duplicate == 0:\n desp_ga_duplicate += 1\n for year in year_columns:\n desp_ga_list.append(primary_frame.iloc[row][year])\n else:\n pass\n\n elif primary_frame.iloc[row][col] in ['Despesas de Depreciação', 'Depreciação, Amortização e Impairment', 'Depreciação e amortização', 'Depreciação de arrendamento',\n 'Depreciação e Amortização', 'Depreciações e Amortizações', 'Amortização e Depreciação', 'Depreciação/amortização',\n 'Depreciações', 'Depreciação e Amortizações', 'Depreciação do imobilizado', 'Depreciação e depleção do imobilizado', 'Depreciação, exaustão e amortização',\n 'Depreciação, Amortização e Exaustão', 'Depreciação, Exaustão e Amortização']:\n if dai_duplicate == 0:\n dai_duplicate += 1\n for year in year_columns:\n dai_list.append(primary_frame.iloc[row][year])\n else:\n pass\n\n elif primary_frame.iloc[row][col] in ['Despesas com Pesquisa e Desenvolvimento',\n 'Custos com Pesquisa e Desenvolvimento', 'Despesas com pesquisas e desenvolvimento', 'Pesquisa e Desenvolvimento', 'Pesquisa', 'Despesas com Pesquisas e Desenvolvimento',\n 'Custo com Pesquisa e Desenvolvimento Tecnológico', 'Despesas com gastos com desenvolvimento', 'Despesas com desenvolvimento de tecnologia e produtos', 'Com estudos em desenvolvimento']:\n if ped_duplicate == 0:\n ped_duplicate += 1\n for year in year_columns:\n desp_ped_list.append(primary_frame.iloc[row][year])\n else:\n pass\n\n elif primary_frame.iloc[row][col] == 'Despesas/Receitas Operacionais':\n for year in year_columns:\n desp_oper_list.append(primary_frame.iloc[row][year])\n\n elif primary_frame.iloc[row][col] == 'Resultado Antes do Resultado Financeiro e dos Tributos':\n for year in year_columns:\n lucro_oper_list.append(primary_frame.iloc[row][year]) \n\n elif primary_frame.iloc[row][col] == 'Resultado Financeiro':\n for year in year_columns:\n financeiro_list.append(primary_frame.iloc[row][year])\n\n elif primary_frame.iloc[row][col] == 'Resultado Antes dos Tributos sobre o Lucro':\n for year in year_columns:\n lucropreimp_list.append(primary_frame.iloc[row][year])\n\n elif primary_frame.iloc[row][col] == 'Resultado Líquido das Operações Continuadas':\n for year in year_columns:\n lucro_liq_list.append(primary_frame.iloc[row][year])\n\n elif primary_frame.iloc[row][col] == 'ON 1':\n for year in year_columns:\n 
lucroporacao_list.append(primary_frame.iloc[row][year])\n\n # Fill primary variable lists (BPA and BPP)\n if primary_frame.iloc[row][col] == 'Ativo Total':\n for year in year_columns:\n ativo_total_list.append(primary_frame.iloc[row][year])\n\n elif primary_frame.iloc[row][col] == 'Ativo Circulante':\n for year in year_columns:\n ativo_circ_list.append(primary_frame.iloc[row][year])\n\n elif primary_frame.iloc[row][col] == 'Imobilizado':\n if imobilizado_duplicate == 0:\n imobilizado_duplicate += 1\n for year in year_columns:\n imobilizado_list.append(primary_frame.iloc[row][year])\n else:\n pass\n\n elif primary_frame.iloc[row][col] == 'Passivo Circulante':\n for year in year_columns:\n passivo_circ_list.append(primary_frame.iloc[row][year])\n\n elif primary_frame.iloc[row][col] == 'Empréstimos e Financiamentos 1':\n if divida_curto_duplicate == 0:\n divida_curto_duplicate += 1\n for year in year_columns:\n divida_curto_list.append(primary_frame.iloc[row][year])\n else:\n pass\n\n elif primary_frame.iloc[row][col] == 'Empréstimos e Financiamentos 3':\n if divida_longo_duplicate == 0:\n divida_longo_duplicate += 1\n for year in year_columns:\n divida_longo_list.append(primary_frame.iloc[row][year])\n else:\n pass\n\n elif primary_frame.iloc[row][col] == 'Passivo Não Circulante':\n for year in year_columns:\n passivo_ncirc_list.append(primary_frame.iloc[row][year])\n\n elif primary_frame.iloc[row][col] == 'Patrimônio Líquido Consolidado':\n for year in year_columns:\n patr_liq_list.append(primary_frame.iloc[row][year])\n\n elif primary_frame.iloc[row][col] == 'Reservas de Lucros' or primary_frame.iloc[row][col] == 'Lucros/Prejuízos Acumulados':\n if lucro_acumul_duplicate == 0:\n lucro_acumul_duplicate += 1\n for year in year_columns:\n lucro_acumul_list.append(primary_frame.iloc[row][year])\n else:\n pass\n\n # Fill primary variable lists (DFC)\n elif primary_frame.iloc[row][col] == 'Lucro Líquido do exercício':\n for year in year_columns:\n lucro_liq_exerc_list.append(primary_frame.iloc[row][year])\n\n elif primary_frame.iloc[row][col] in ['Aquisição de Imobilizado e Intangíveis',\n 'Adições de imobilizado', 'Compras de ativo imobilizado', 'Aquisições de imobilizado', 'Aquisições de Imobilizado',\n 'Aquisições de Imobilizado e Intangível', 'Aquisições de imobilizado e intangível', 'Aquisições de Imobilizados e Intangíveis (Exceto pelo Excedente de Cessão Onerosa)',\n 'Aquisições de imobilizados e intangíveis', 'Aquisições de imobilizado veículos frota', 'Aquisições de imobilizado de uso', 'Aquisições de Imobilizado de Uso',\n 'Aquisição de ativos imobilizados, intangível e propriedade para investimento', 'Aquisição de imobilizado e intangível']:\n for year in year_columns:\n desp_ativo_fixo_list.append(primary_frame.iloc[row][year])\n\n # Build intermediate Variables\n desp_vga_list = np.array(desp_vendas_list) + np.array(desp_ga_list)\n divida_tot_list = np.array(divida_curto_list) + np.array(divida_longo_list)\n\n if lucro_brut_list == []:\n lucro_brut_list = np.zeros(len(year_columns))\n if desp_ped_list == []:\n desp_ped_list = np.zeros(len(year_columns))\n if dai_list == []:\n dai_list = np.zeros(len(year_columns))\n if desp_ativo_fixo_list == []:\n desp_ativo_fixo_list = np.zeros(len(year_columns))\n if lucro_liq_exerc_list == []:\n lucro_liq_exerc_list = lucro_liq_list\n\n # Build worked info\n marg_brut_list = 100 * np.divide(np.array(lucro_brut_list), np.array(receita_list))\n marg_liq_list = 100 * np.divide(np.array(lucro_liq_list), np.array(receita_list))\n vga_lucro_brut_list 
= 100 * np.divide(np.array(desp_vga_list), np.array(lucro_brut_list))\n ped_lucro_brut_list = 100 * np.divide(np.array(desp_ped_list), np.array(lucro_brut_list))\n deprec_lucro_brut_list = 100 * np.divide(np.array(dai_list), np.array(lucro_brut_list))\n juros_lucro_oper_list = 100 * np.divide(np.array(financeiro_list), np.array(lucro_oper_list))\n coef_liquidez_list = np.divide(np.array(ativo_circ_list), np.array(passivo_circ_list))\n passivo_tot_patrliq_list = np.divide((np.array(passivo_circ_list) + np.array(passivo_ncirc_list)), np.array(patr_liq_list))\n roe_list = 100 * np.divide(np.array(lucro_liq_list), np.array(patr_liq_list))\n roa_list = 100 * np.divide(np.array(lucro_liq_list), np.array(ativo_total_list))\n desp_ativo_fixo_lucro_liq_exerc_list = 100 * np.divide(np.array(desp_ativo_fixo_list), np.array(lucro_liq_exerc_list))\n divida_curto_tot_list = 100 * np.divide(np.array(divida_curto_list), np.array(divida_tot_list))\n divida_tot_lucro_oper_list = np.divide(np.array(divida_tot_list), np.array(lucro_oper_list))\n\n company_dict = {\n 'year_columns': year_columns,\n 'marg_brut_list': marg_brut_list,\n 'marg_liq_list': marg_liq_list,\n 'vga_lucro_brut_list': vga_lucro_brut_list,\n 'ped_lucro_brut_list': ped_lucro_brut_list,\n 'deprec_lucro_brut_list': deprec_lucro_brut_list,\n 'juros_lucro_oper_list': juros_lucro_oper_list,\n 'lucro_brut_list': lucro_brut_list,\n 'lucro_liq_list': lucro_liq_list,\n 'lucroporacao_list':lucroporacao_list,\n 'coef_liquidez_list': coef_liquidez_list,\n 'imobilizado_list': imobilizado_list,\n 'passivo_tot_patrliq_list': passivo_tot_patrliq_list,\n 'roe_list': roe_list,\n 'roa_list': roa_list,\n 'lucro_acumul_list': lucro_acumul_list,\n 'desp_ativo_fixo_lucro_liq_exerc_list': desp_ativo_fixo_lucro_liq_exerc_list,\n 'divida_curto_tot_list': divida_curto_tot_list,\n 'divida_tot_lucro_oper_list': divida_tot_lucro_oper_list\n }\n\n return_dict_list.append(company_dict)\n\n return return_dict_list\n"
] | [
[
"pandas.read_csv",
"pandas.DataFrame",
"pandas.merge",
"pandas.concat",
"numpy.array"
]
] |
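Aside: a toy sketch of the per-year pd.merge on 'DS_CONTA' that primary_info performs when stitching yearly folders together (the numbers are invented, not CVM data).

import pandas as pd

y2017 = pd.DataFrame({'DS_CONTA': ['Resultado Bruto', 'Ativo Total'], '2017': [100.0, 500.0]})
y2018 = pd.DataFrame({'DS_CONTA': ['Resultado Bruto', 'Ativo Total'], '2018': [120.0, 550.0]})
print(pd.merge(y2017, y2018, on=['DS_CONTA']))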
rabbitsun2/toy_python | [
"32f84b4d15b13c4daa4fa212a40e685abc0d2a5d"
] | [
"practice/3_basic_tensorflow/Keras/Example_simpleLinearRegression.py"
] | [
"import numpy as np\nimport matplotlib.pyplot as plt\n\nx = np.arange(1, 6)\n\ny = 3 * x + 2\n\nprint(x)\nprint(y)\n\n# 시각화\nplt.plot(x, y)\nplt.title('y = 3x + 2')\nplt.show()\n"
] | [
[
"numpy.arange",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.title",
"matplotlib.pyplot.show"
]
] |
manuelciosici/DeepSpeed | [
"3da841853ca07abf3a09e7bd325a576c4e642c11"
] | [
"deepspeed/runtime/zero/linear.py"
] | [
"#Linear Module to use with ZeRO Stage 3 to allow for parameter memory release\n#after the module execution during forward\n#Instead of saving variables using save_for_backward, we save variable ids\n#Allowing us to retrieve the variable without creating pointer to it\n#Which allows for underlying tensor to be garbage collected\n#When partitioned as needed by the Zero Stage 3 optimizer\n#TODO instead of patching Linear module, we could patch the ctx.save_for_backward\n#ctx.saved_tensors so that this approach works for all nn modules that are built upon\n#torch.nn.function. However the issue is that many modules uses C++ implementations\n#which does not have pytorch implementation. Eg torch.addmm which acts as a functional\n#when implemented outside of torch.autograd.Function\n\nimport math\n\nimport torch\nfrom torch import Tensor\nfrom torch.nn.parameter import Parameter\nfrom torch.nn import init\nfrom torch.nn.modules.module import Module\nfrom deepspeed.runtime.utils import noop_decorator\n\ntensor_map = {}\n\n\ndef print_rank_0(message, debug=False, force=False):\n if torch.distributed.get_rank() == 0 and (debug or force):\n print(message)\n\n\ntry:\n autocast_custom_fwd = torch.cuda.amp.custom_fwd\n autocast_custom_bwd = torch.cuda.amp.custom_bwd\nexcept (ImportError, AttributeError) as exp:\n autocast_custom_fwd = noop_decorator\n autocast_custom_bwd = noop_decorator\n\n\nclass LinearFunctionForZeroStage3(torch.autograd.Function):\n\n # Note that both forward and backward are @staticmethods\n @staticmethod\n @autocast_custom_fwd\n # bias is an optional argument\n def forward(ctx, input, weight, bias=None):\n #print(\"In ZeRO Linear Function\")\n\n weight_id = id(weight)\n bias_id = id(bias)\n\n #ctx.save_for_backward(input, weight, bias)\n ctx.save_for_backward(input, torch.tensor(weight_id), torch.tensor(bias_id))\n\n tensor_map[weight_id] = weight\n tensor_map[bias_id] = bias\n\n if input.dim() == 2 and bias is not None:\n # fused op is marginally faster\n ret = torch.addmm(bias, input, weight.t())\n else:\n output = input.matmul(weight.t())\n if bias is not None:\n output += bias\n ret = output\n\n return ret\n\n # This function has only a single output, so it gets only one gradient\n @staticmethod\n @autocast_custom_bwd\n def backward(ctx, grad_output):\n # This is a pattern that is very convenient - at the top of backward\n # unpack saved_tensors and initialize all gradients w.r.t. inputs to\n # None. Thanks to the fact that additional trailing Nones are\n # ignored, the return statement is simple even when the function has\n # optional inputs.\n #input, weight, bias = ctx.saved_tensors\n\n input, weight_id, bias_id = ctx.saved_tensors\n weight = tensor_map[weight_id.item()]\n bias = tensor_map[bias_id.item()]\n\n grad_input = grad_weight = grad_bias = None\n\n #print(f\"backward shaped grad_output {grad_output.shape}, input {input.shape}, weight {weight.shape} and bias {bias.shape if bias is not None else None}\")\n # These needs_input_grad checks are optional and there only to\n # improve efficiency. If you want to make your code simpler, you can\n # skip them. 
Returning gradients for inputs that don't require it is\n # not an error.\n if ctx.needs_input_grad[0]:\n #print(f\"Computing grad input weight {weight.shape} grad_output {grad_output.shape}\")\n grad_input = grad_output.matmul(weight)\n #print(f\"Computed grad input {grad_input.shape}\")\n if ctx.needs_input_grad[1]:\n #print(\"Computing grad weight\")\n dim = grad_output.dim()\n if dim > 2:\n grad_weight = grad_output.reshape(-1,\n grad_output.shape[-1]).t().matmul(\n input.reshape(-1,\n input.shape[-1]))\n else:\n grad_weight = grad_output.t().matmul(input)\n #print(f\"Computed grad weight grad_weight {grad_weight.shape}\")\n if bias is not None and ctx.needs_input_grad[2]:\n #print(\"Computing grad bias\")\n grad_bias = grad_output.sum(0)\n #print(\"Done computing grad bias\")\n #print(\"needs bias\")\n #print(f\"backward shaped grad_input {grad_input.shape}, grad_weight {grad_weight.shape}, grad_bias {grad_bias.shape if grad_bias is not None else None}\")\n return grad_input, grad_weight, grad_bias\n\n\ndef zero3_linear_wrap(input, weight, bias=None):\n if bias is None:\n return LinearFunctionForZeroStage3.apply(input, weight)\n else:\n return LinearFunctionForZeroStage3.apply(input, weight, bias)\n\n\nclass LinearModuleForZeroStage3(Module):\n r\"\"\"Applies a linear transformation to the incoming data: :math:`y = xA^T + b`.\n The weights are pre-transposed and stored as A^T instead of transposing during each\n forward. Memory savings proportional to the parameter size.\n\n Args:\n in_features: size of each input sample\n out_features: size of each output sample\n bias: If set to ``False``, the layer will not learn an additive bias.\n Default: ``True``\n\n Shape:\n - Input: :math:`(N, *, H_{in})` where :math:`*` means any number of\n additional dimensions and :math:`H_{in} = \\text{in\\_features}`\n - Output: :math:`(N, *, H_{out})` where all but the last dimension\n are the same shape as the input and :math:`H_{out} = \\text{out\\_features}`.\n\n Attributes:\n weight: the learnable weights of the module of shape\n :math:`(\\text{out\\_features}, \\text{in\\_features})`. 
The values are\n initialized from :math:`\\mathcal{U}(-\\sqrt{k}, \\sqrt{k})`, where\n :math:`k = \\frac{1}{\\text{in\\_features}}`\n bias: the learnable bias of the module of shape :math:`(\\text{out\\_features})`.\n If :attr:`bias` is ``True``, the values are initialized from\n :math:`\\mathcal{U}(-\\sqrt{k}, \\sqrt{k})` where\n :math:`k = \\frac{1}{\\text{in\\_features}}`\n\n Examples::\n\n >>> m = nn.Linear(20, 30)\n >>> input = torch.randn(128, 20)\n >>> output = m(input)\n >>> print(output.size())\n torch.Size([128, 30])\n \"\"\"\n __constants__ = ['in_features', 'out_features']\n in_features: int\n out_features: int\n weight: Tensor\n\n def __init__(self, in_features: int, out_features: int, bias: bool = True) -> None:\n super(LinearModuleForZeroStage3, self).__init__()\n print(\"Building ZeRO module\")\n self.in_features = in_features\n self.out_features = out_features\n self.weight = Parameter(torch.Tensor(out_features, in_features))\n if bias:\n self.bias = Parameter(torch.Tensor(out_features))\n else:\n self.register_parameter('bias', None)\n self.reset_parameters()\n\n def reset_parameters(self) -> None:\n init.kaiming_uniform_(self.weight, a=math.sqrt(5))\n if self.bias is not None:\n fan_in, _ = init._calculate_fan_in_and_fan_out(self.weight)\n bound = 1 / math.sqrt(fan_in)\n init.uniform_(self.bias, -bound, bound)\n\n def forward(self, input: Tensor) -> Tensor:\n return LinearFunctionForZeroStage3.apply(input, self.weight, self.bias)\n\n def extra_repr(self) -> str:\n return 'in_features={}, out_features={}, bias={}'.format(\n self.in_features,\n self.out_features,\n self.bias is not None)\n"
] | [
[
"torch.distributed.get_rank",
"torch.nn.init._calculate_fan_in_and_fan_out",
"torch.nn.init.uniform_",
"torch.tensor",
"torch.Tensor"
]
] |
daniel-falk/nnabla | [
"3fe132ea52dc10521cc029a5d6ba8f565cf65ccf"
] | [
"python/test/function/refs.py"
] | [
"# Copyright 2017,2018,2019,2020,2021 Sony Corporation.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import division\nfrom six.moves import range\n\nimport itertools\nimport numpy as np\n\n\ndef get_conv_out_size(w, k, p, s, d=1):\n return (w + 2 * p - (d * (k - 1) + 1)) // s + 1\n\n\ndef get_deconv_out_size(w, k, p, s, d):\n return s * (w - 1) - 2 * p + (d * (k - 1) + 1)\n\n\ndef get_pool_out_size(w, k, p, s, ignore_border):\n return (w + p - ((k - p) if ignore_border else 1)) // s + 1\n\n\nclass ChannelLastToFirstTranspose(object):\n\n def __init__(self, dim, kdim):\n base_axis = dim - kdim - 1\n up_to_base = tuple(range(0, base_axis))\n self.axes = up_to_base + (dim - 1,) + tuple(range(base_axis, dim - 1))\n self.inv_axes = up_to_base + \\\n tuple(range(base_axis + 1, dim)) + (base_axis,)\n\n def __call__(self, x):\n return x.transpose(self.axes).copy()\n\n def inv(self, x):\n return x.transpose(self.inv_axes).copy()\n\n\ndef convolution_1d(x, w, b, pad, stride, dilation, group, dtype=np.float32):\n \"\"\"\n \"\"\"\n C, H = x.shape\n K, Cg, M = w.shape\n\n Ho = get_conv_out_size(H, M, pad[0], stride[0], dilation[0])\n x_pad = np.zeros((C, H + pad[0] * 2), dtype=dtype)\n x_pad[:, pad[0]:pad[0] + H] = x\n y = np.zeros((K, Ho), dtype=dtype)\n for k in range(K):\n g = int(k // (K // group))\n for ho in range(Ho):\n hi = ho * stride[0] + np.arange(0, M) * dilation[0]\n ci = np.arange(g * Cg, (g + 1) * Cg)\n y[k, ho] = (w[k] * x_pad[np.ix_(ci, hi)]).sum()\n if b is not None:\n y += b[..., np.newaxis]\n return y\n\n\ndef convolution_2d(x, w, b, pad, stride, dilation, group, dtype=np.float32):\n \"\"\"\n \"\"\"\n C, H, W = x.shape\n K, Cg, M, N = w.shape\n\n Ho = get_conv_out_size(H, M, pad[0], stride[0], dilation[0])\n Wo = get_conv_out_size(W, N, pad[1], stride[1], dilation[1])\n x_pad = np.zeros((C, H + pad[0] * 2, W + pad[1] * 2), dtype=dtype)\n x_pad[:, pad[0]:pad[0] + H, pad[1]:pad[1] + W] = x\n y = np.zeros((K, Ho, Wo), dtype=dtype)\n for k in range(K):\n g = int(k // (K // group))\n for ho in range(Ho):\n for wo in range(Wo):\n hi = ho * stride[0] + np.arange(0, M) * dilation[0]\n wi = wo * stride[1] + np.arange(0, N) * dilation[1]\n ci = np.arange(g * Cg, (g + 1) * Cg)\n y[k, ho, wo] = (w[k] * x_pad[np.ix_(ci, hi, wi)]).sum()\n if b is not None:\n y += b[..., np.newaxis, np.newaxis]\n return y\n\n\ndef convolution_nd(x, w, b, pad, stride, dilation, group, dtype=np.float32):\n \"\"\"\n \"\"\"\n C = x.shape[0]\n inshape = x.shape[1:]\n ndim = len(inshape)\n assert w.ndim == ndim + 2\n K, Cg = w.shape[:2]\n kshape = w.shape[2:]\n\n def get_conv_out_size_recursive(d, ndim):\n if d == ndim:\n return []\n s = get_conv_out_size(\n inshape[d], kshape[d], pad[d], stride[d], dilation[d])\n return [s] + get_conv_out_size_recursive(d + 1, ndim)\n\n outshape = get_conv_out_size_recursive(0, ndim)\n inshape_pad = [C] + [inshape[d] + 2 * pad[d] for d in range(ndim)]\n x_pad = np.zeros(inshape_pad, dtype=dtype)\n x_pad[[slice(None,)] + [slice(pad[d], pad[d] + inshape[d])\n for d 
in range(ndim)]] = x\n y = np.zeros([K] + outshape, dtype=dtype)\n for k in range(K):\n g = int(k // (K // group))\n for outindex in itertools.product(*map(range, outshape)):\n inindex = [outindex[d] * stride[d] +\n np.arange(0, kshape[d]) * dilation[d] for d in range(ndim)]\n ci = np.arange(g * Cg, (g + 1) * Cg)\n y[(k,) + tuple(outindex)] = (w[k] *\n x_pad[np.ix_(ci, *inindex)]).sum()\n if b is not None:\n y += b[[Ellipsis] + [np.newaxis for d in range(ndim)]]\n return y\n\n\ndef deconvolution_1d(x, w, b, pad, stride, dilation, group, dtype=np.float32,\n output_padding=(0,)):\n y = x\n K, Ho = y.shape\n K, Cg, M = w.shape\n C = Cg * group\n\n H = (get_deconv_out_size(Ho, M, pad[0], stride[0], dilation[0])\n + output_padding[0])\n x_pad = np.zeros((C, H + pad[0] * 2), dtype=dtype)\n for k in range(K):\n g = int(k // (K // group))\n for ho in range(Ho):\n hi = ho * stride[0] + np.arange(0, M) * dilation[0]\n ci = np.arange(g * Cg, (g + 1) * Cg)\n x_pad[np.ix_(ci, hi)] += w[k] * y[k, ho]\n x = x_pad[:, pad[0]:pad[0] + H]\n if b is not None:\n x += b[..., np.newaxis]\n return x\n\n\ndef deconvolution_2d(x, w, b, pad, stride, dilation, group, dtype=np.float32,\n output_padding=(0, 0)):\n y = x\n K, Ho, Wo = y.shape\n K, Cg, M, N = w.shape\n C = Cg * group\n\n H = (get_deconv_out_size(Ho, M, pad[0], stride[0], dilation[0])\n + output_padding[0])\n W = (get_deconv_out_size(Wo, N, pad[1], stride[1], dilation[1])\n + output_padding[1])\n x_pad = np.zeros((C, H + pad[0] * 2, W + pad[1] * 2), dtype=dtype)\n for k in range(K):\n g = int(k // (K // group))\n for ho in range(Ho):\n for wo in range(Wo):\n hi = ho * stride[0] + np.arange(0, M) * dilation[0]\n wi = wo * stride[1] + np.arange(0, N) * dilation[1]\n ci = np.arange(g * Cg, (g + 1) * Cg)\n x_pad[np.ix_(ci, hi, wi)] += w[k] * y[k, ho, wo]\n x = x_pad[:, pad[0]:pad[0] + H, pad[1]:pad[1] + W]\n if b is not None:\n x += b[..., np.newaxis, np.newaxis]\n return x\n\n\ndef deformable_convolution_2d(x, w, offset, mask, b, pad, stride,\n dilation, group, deformable_group,\n channel_last, dtype=np.float32):\n \"\"\"\n Deformable convlution 2D for a single batch data\n \"\"\"\n C, H, W = x.shape # without batch dimension\n K, Cg, M, N = w.shape\n\n assert C == Cg * \\\n group, \"Wrong shape, x: {}, w: {}\".format(x.shape, w.shape)\n assert offset.shape[0] == 2 * deformable_group * M * N, \\\n \"Wrong shape offset: {}, 2 * deformable_group * Kw * Kh: {}\".format(\n offset.shape, 2 * deformable_group * M * N)\n assert offset.shape[1:] == (\n H, W), \"Wrong shape, offset: {}, w: {}\".format(offset.shape, w.shape)\n assert mask.shape[0] == deformable_group * M * N, \\\n \"Wrong shape mask: {}, deformable_group * Kw * Kh: {}\".format(\n mask.shape, deformable_group * M * N)\n assert mask.shape[1:] == (\n H, W), \"Wrong shape, mask: {}, w: {}\".format(mask.shape, w.shape)\n assert pad[0] < (w.shape[2] + 1)//2 and pad[1] < (w.shape[3] +\n 1)//2, \"Wrong shape, kernel: {}, pad: {}\".format(w.shape[2:], pad)\n\n # Zero padding\n x_pad = np.zeros((C, H + pad[0] * 2, W + pad[1] * 2), dtype=dtype)\n x_pad[:, pad[0]:pad[0] + H, pad[1]:pad[1] + W] = x\n\n # Create and initialize output variable\n Ho = get_conv_out_size(H, M, pad[0], stride[0], dilation[0])\n Wo = get_conv_out_size(W, N, pad[1], stride[1], dilation[1])\n y = np.zeros((K, Ho, Wo), dtype=dtype)\n\n _, Hp, Wp = x_pad.shape\n\n # Deformable Convolution\n for k in range(K):\n for c in range(C//group):\n g = k // (K//group)\n ci = Cg * g + c\n dg = ci // (C // deformable_group)\n\n for ho in range(Ho):\n 
for wo in range(Wo):\n # Get the input coordinates {(hi, wi)} which are\n # mapped to the output coordinate (ho, wo) by the kernel.\n hi = ho * stride[0] + np.arange(0, M) * dilation[0]\n wi = wo * stride[1] + np.arange(0, N) * dilation[1]\n\n # Apply the kernel\n modulated_x = np.zeros((M, N), dtype=dtype)\n\n for m in range(M):\n for n in range(N):\n # Shift (hi, wi) to (ph, pw) by using offset\n ph = hi[m] + offset[2*((dg*M*N) + (m * N) + n),\n ho * stride[0], wo * stride[1]]\n pw = wi[n] + offset[2*((dg*M*N) + (m * N) + n) + 1,\n ho * stride[0], wo * stride[1]]\n\n # Bilinear interpolation\n h_low = int(np.floor(ph))\n w_low = int(np.floor(pw))\n h_high = h_low + 1\n w_high = w_low + 1\n\n if h_low >= Hp or w_low >= Wp or \\\n h_high < 0 or w_high < 0:\n # Out of bounds.\n # Interpolation cannot be perform.\n val = 0\n else:\n v1 = 0 # (h_low, w_low)\n v2 = 0 # (h_low, w_high)\n v3 = 0 # (h_high, w_low)\n v4 = 0 # (h_high, w_high)\n if h_low >= 0 and w_low >= 0:\n v1 = x_pad[ci, h_low, w_low]\n if h_low >= 0 and w_high < Wp:\n v2 = x_pad[ci, h_low, w_high]\n if h_high < Hp and w_low >= 0:\n v3 = x_pad[ci, h_high, w_low]\n if h_high < Hp and w_high < Wp:\n v4 = x_pad[ci, h_high, w_high]\n\n lh = ph - h_low\n lw = pw - w_low\n hh = 1 - lh\n hw = 1 - lw\n w1 = hh * hw\n w2 = hh * lw\n w3 = lh * hw\n w4 = lh * lw\n val = w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4\n\n # Apply mask\n val *= mask[(dg*M*N) + (m * N) + n,\n ho * stride[0], wo * stride[1]]\n\n modulated_x[m, n] = val\n\n y[k, ho, wo] += (w[k, c] * modulated_x).sum()\n\n if b is not None:\n y += b[..., np.newaxis, np.newaxis]\n\n return y\n\n\ndef pooling_2d(x, mode, kernel, stride, pad, ignore_border=True,\n including_pad=True, dtype=np.float32):\n \"\"\"\n \"\"\"\n assert mode in ['average', 'sum', 'max']\n\n C, H, W = x.shape\n Ho = get_pool_out_size(H, kernel[0], pad[0], stride[0], ignore_border)\n Wo = get_pool_out_size(W, kernel[1], pad[1], stride[1], ignore_border)\n Hi = H + pad[0] + (pad[0] if ignore_border else kernel[0] - 1)\n Wi = W + pad[1] + (pad[1] if ignore_border else kernel[1] - 1)\n\n x_pad = np.ones((C, Hi, Wi), dtype=dtype)\n x_pad *= x.min() if mode == 'max' else 0\n x_pad[:, pad[0]:pad[0] + H, pad[1]:pad[1] + W] = x\n\n if mode == 'average':\n b_pad = np.zeros((C, Hi, Wi), dtype=np.uint)\n h_beg = int(not including_pad) * pad[0]\n w_beg = int(not including_pad) * pad[1]\n h_end = H + (1 + int(including_pad)) * pad[0]\n w_end = W + (1 + int(including_pad)) * pad[1]\n b_pad[:, h_beg:h_end, w_beg:w_end] = 1\n\n y = np.zeros((C, Ho, Wo), dtype=dtype)\n\n for c in range(C):\n for ho in range(Ho):\n for wo in range(Wo):\n hi = ho * stride[0] + np.arange(0, kernel[0])\n wi = wo * stride[1] + np.arange(0, kernel[1])\n yy = y[c]\n xx = x_pad[c]\n if mode == \"max\":\n yy[ho, wo] = xx[np.ix_(hi, wi)].max()\n elif mode == \"sum\":\n yy[ho, wo] = xx[np.ix_(hi, wi)].sum()\n elif mode == \"average\":\n pad_sum = xx[np.ix_(hi, wi)].sum()\n pad_cnt = b_pad[c][np.ix_(hi, wi)].sum()\n yy[ho, wo] = pad_sum / pad_cnt\n return y\n\n\ndef pooling_3d(x, mode, kernel, stride, pad, ignore_border=True,\n including_pad=True, dtype=np.float32):\n \"\"\"\n \"\"\"\n assert mode in ['average', 'sum', 'max']\n\n C, Z, H, W = x.shape\n Zo = get_pool_out_size(Z, kernel[0], pad[0], stride[0], ignore_border)\n Ho = get_pool_out_size(H, kernel[1], pad[1], stride[1], ignore_border)\n Wo = get_pool_out_size(W, kernel[2], pad[2], stride[2], ignore_border)\n Zi = Z + pad[0] + (pad[0] if ignore_border else kernel[0] - 1)\n Hi = H + pad[1] + (pad[1] if 
ignore_border else kernel[1] - 1)\n Wi = W + pad[2] + (pad[2] if ignore_border else kernel[2] - 1)\n\n x_pad = np.ones((C, Zi, Hi, Wi), dtype=dtype)\n x_pad *= x.min() if mode == 'max' else 0\n x_pad[:, pad[0]:pad[0] + Z, pad[1]:pad[1] + H, pad[2]:pad[2] + W] = x\n\n if mode == 'average':\n b_pad = np.zeros((C, Zi, Hi, Wi), dtype=np.uint)\n z_beg = int(not including_pad) * pad[0]\n h_beg = int(not including_pad) * pad[1]\n w_beg = int(not including_pad) * pad[2]\n z_end = Z + (1 + int(including_pad)) * pad[0]\n h_end = H + (1 + int(including_pad)) * pad[1]\n w_end = W + (1 + int(including_pad)) * pad[2]\n b_pad[:, z_beg:z_end, h_beg:h_end, w_beg:w_end] = 1\n #b_pad[:, pad[0]:pad[0] + Z, pad[1]:pad[1] + H, pad[2]:pad[2] + W] = 1\n\n y = np.zeros((C, Zo, Ho, Wo), dtype=dtype)\n\n for c in range(C):\n for zo in range(Zo):\n for ho in range(Ho):\n for wo in range(Wo):\n zi = zo * stride[0] + np.arange(0, kernel[0])\n hi = ho * stride[1] + np.arange(0, kernel[1])\n wi = wo * stride[2] + np.arange(0, kernel[2])\n yy = y[c]\n xx = x_pad[c]\n if mode == \"max\":\n yy[zo, ho, wo] = xx[np.ix_(zi, hi, wi)].max()\n elif mode == \"sum\":\n yy[zo, ho, wo] = xx[np.ix_(zi, hi, wi)].sum()\n elif mode == \"average\":\n pool_sum = xx[np.ix_(zi, hi, wi)].sum()\n pool_cnt = b_pad[c][np.ix_(zi, hi, wi)].sum()\n yy[zo, ho, wo] = pool_sum / pool_cnt\n return y\n\n\ndef generate_rotation_2d(rng, B):\n rotates = []\n for i in range(B):\n degree = 2 * np.pi * (2.0 * rng.rand() - 1.0)\n c, s = np.cos(degree), np.sin(degree)\n rotate = np.asarray([[c, -s],\n [s, c]])\n rotates.append(rotate)\n return np.asarray(rotates)\n\n\ndef generate_rotation_3d(rng, B):\n rotates = []\n for i in range(B):\n alpha = np.pi * (2.0 * rng.rand() - 1.0)\n beta = np.pi / 2.0 * (2.0 * rng.rand() - 1.0)\n gamma = np.pi * (2.0 * rng.rand() - 1.0)\n\n c, s = np.cos(alpha), np.sin(alpha)\n Ra = np.asarray([[c, -s, 0],\n [s, c, 0],\n [0, 0, 1]])\n c, s = np.cos(beta), np.sin(beta)\n Rb = np.asarray([[c, 0, s],\n [0, 1, 0],\n [-s, 0, c]])\n c, s = np.cos(gamma), np.sin(gamma)\n Rg = np.asarray([[1, 0, 0],\n [0, c, -s],\n [0, s, c]])\n rotate = Ra.dot(Rb).dot(Rg)\n rotates.append(rotate)\n return np.asarray(rotates)\n\n\ndef generate_transformation_2d(rng, batch_size):\n rotate = generate_rotation_2d(rng, batch_size)\n translate = (2.0 * rng.rand(batch_size, 2, 1) - 1.0) * 0.001\n theta = np.concatenate([rotate, translate], axis=2)\n return theta.astype(np.float32)\n\n\ndef generate_transformation_3d(rng, batch_size):\n rotate = generate_rotation_3d(rng, batch_size)\n translate = (2.0 * rng.rand(batch_size, 3, 1) - 1.0) * 0.001\n theta = np.concatenate([rotate, translate], axis=2)\n return theta.astype(np.float32)\n\n\ndef generate_normalized_grid_2d(B, size, align_corners):\n H, W = size\n hgrid = np.linspace(-1.0, 1.0, H)\n wgrid = np.linspace(-1.0, 1.0, W)\n hgrid = hgrid if align_corners else hgrid * (H - 1) / H\n wgrid = wgrid if align_corners else wgrid * (W - 1) / W\n w, h = np.meshgrid(wgrid, hgrid)\n\n x = w.reshape(-1)\n y = h.reshape(-1)\n t = np.ones(len(x))\n normalized_grid = np.stack((x, y, t), axis=1)\n normalized_grid = normalized_grid.reshape(H, W, 3)\n normalized_grid = np.repeat(\n normalized_grid[np.newaxis, :, :, :], B, axis=0)\n return normalized_grid.astype(np.float32)\n\n\ndef generate_normalized_grid_3d(B, size, align_corners):\n D, H, W = size\n dgrid = np.linspace(-1.0, 1.0, D)\n hgrid = np.linspace(-1.0, 1.0, H)\n wgrid = np.linspace(-1.0, 1.0, W)\n dgrid = dgrid if align_corners else dgrid * (D - 1) / D\n hgrid = 
hgrid if align_corners else hgrid * (H - 1) / H\n wgrid = wgrid if align_corners else wgrid * (W - 1) / W\n h, d, w = np.meshgrid(hgrid, dgrid, wgrid)\n\n x = w.reshape(-1)\n y = h.reshape(-1)\n z = d.reshape(-1)\n t = np.ones(len(x))\n normalized_grid = np.stack((x, y, z, t), axis=1)\n normalized_grid = normalized_grid.reshape(D, H, W, 4)\n normalized_grid = np.repeat(\n normalized_grid[np.newaxis, :, :, :, :], B, axis=0)\n return normalized_grid.astype(np.float32)\n\n\ndef affine_grid_2d(affine, size, align_corners):\n B = affine.shape[0]\n H, W = size\n grid_t = generate_normalized_grid_2d(B, size, align_corners)\n grid_s = np.matmul(grid_t.reshape(B, H * W, 3),\n affine.transpose((0, 2, 1)))\n grid_s = grid_s.reshape(B, H, W, 2)\n return grid_s.astype(np.float32)\n\n\ndef affine_grid_3d(affine, size, align_corners):\n B = affine.shape[0]\n D, H, W = size\n grid_t = generate_normalized_grid_3d(B, size, align_corners)\n grid_s = np.matmul(grid_t.reshape(B, D * H * W, 4),\n affine.transpose((0, 2, 1)))\n grid_s = grid_s.reshape(B, D, H, W, 3)\n return grid_s.astype(np.float32)\n\n\ndef pad_sequence(sequences, batch_first):\n # sequences: list of nparray\n # sequences[i]: (T_i, D_1, ..., D_M)\n Ds = () if len(sequences[0].shape) == 1 else sequences[0].shape[1:]\n B = len(sequences)\n T = max([seq.shape[0] for seq in sequences])\n data = np.zeros((B, T) + Ds) if batch_first else np.zeros((T, B) + Ds)\n for b, seq in enumerate(sequences):\n l = seq.shape[0]\n if batch_first:\n data[b, :l] = seq\n else:\n data[:l, b] = seq\n return data\n"
] | [
[
"numpy.ones",
"numpy.ix_",
"numpy.zeros",
"numpy.floor",
"numpy.asarray",
"numpy.repeat",
"numpy.cos",
"numpy.arange",
"numpy.stack",
"numpy.sin",
"numpy.concatenate",
"numpy.meshgrid",
"numpy.linspace"
]
] |
prannayk/MSRASI17 | [
"f7277d90ffdd062c1ba94391b7f82c621e619743"
] | [
"models/wc3.py"
] | [
"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nimport operator\nimport collections\nimport math\nimport time\nimport os\nimport random\nimport zipfile\nimport time\nimport numpy as np\nimport sys\nfrom six.moves import urllib\nfrom six.moves import xrange # pylint: disable=redefined-builtin\nimport tensorflow as tf\nsys.path.append( '../util/')\nfrom generators import *\nfrom loader import *\nfrom print_tweets import *\nfrom similar_tokens import * \nfrom training import *\nfrom similar_tokens import *\nfrom expand_query import *\nfrom argument_loader import *\nfrom setup import *\nfrom LSTM import *\n\ndataset, query_type, filename, num_steps, num_steps_roll, num_steps_train, expand_flag,lr_, matchname = import_arguments(sys.argv)\n\nchar_batch_dict, word_batch_dict,data, count, dictionary, reverse_dictionary, word_max_len, char_max_len, vocabulary_size, char_dictionary, reverse_char_dictionary, data_index, char_data_index, buffer_index, batch_list, char_batch_list, word_batch_list, char_data = build_everything(dataset)\n\ndata_index, batch, labels = generate_batch(data, data_index, batch_size=8, num_skips=2, skip_window=1,)\nfor i in range(8):\n print(batch[i], reverse_dictionary[batch[i]],\n '->', labels[i, 0], reverse_dictionary[labels[i, 0]])\nchar_data_index, batch, labels = generate_batch_char(char_data, char_data_index, batch_size=8, num_skips=2, skip_window=1)\nfor i in range(8):\n print(batch[i], reverse_char_dictionary[batch[i]],\n '->', labels[i, 0], reverse_char_dictionary[labels[i, 0]])\n\nlambda_1, tweet_batch_size, expand_start_count, query_name, query_tokens, query_tokens_alternate, char_batch_size, num_sampled, valid_examples, valid_window, valid_size, skip_window, num_skips, embedding_size, char_vocabulary_size, batch_size, num_char_skips, skip_char_window = setup(char_dictionary, dictionary, query_type)\nlearning_rate = lr_\n\ngraph = tf.Graph()\nwith graph.as_default():\n\n # Input data.\n need_constant = tf.constant(query_tokens,dtype=tf.int32)\n avail_constant = tf.constant(query_tokens_alternate, dtype=tf.int32)\n train_inputs = tf.placeholder(tf.int32, shape=[batch_size])\n train_input_chars = tf.placeholder(tf.int32, shape=[char_batch_size])\n train_labels = tf.placeholder(tf.int32, shape=[batch_size, 1])\n train_char_labels = tf.placeholder(tf.int32, shape=[char_batch_size, 1])\n word_char_embeddings = tf.placeholder(tf.int32, shape=[batch_size, char_max_len])\n valid_dataset = tf.constant(valid_examples[0], dtype=tf.int32)\n valid_char_dataset = tf.constant(valid_examples[1], dtype=tf.int32)\n query_ints = tf.placeholder(tf.int32, shape=len(query_tokens))\n expanded_query_ints = tf.placeholder(tf.int32, shape=(len(query_tokens)+3))\n tquery_word_holder = tf.placeholder(tf.int32, shape=[word_max_len],name=\"tweet_query_word_holder\")\n tquery_char_holder = tf.placeholder(tf.int32, shape=[word_max_len, char_max_len],name=\"tweet_query_char_holder\")\n # Ops and variables pinned to the CPU because of missing GPU implementation\n tweet_char_holder = tf.placeholder(tf.int32, shape=[tweet_batch_size,word_max_len,char_max_len],name=\"tweet_char_holder\")\n tweet_word_holder = tf.placeholder(tf.int32, shape=[tweet_batch_size, word_max_len],name=\"tweet_word_holder\")\n with tf.device('/gpu:0'):\n # Look up embeddings for inputs.\n embeddings = tf.Variable(tf.random_uniform([vocabulary_size, embedding_size], -1.0, 1.0))\n char_embeddings = tf.Variable(tf.random_uniform([char_vocabulary_size, embedding_size 
// 2],-1.0,1.0))\n embed = tf.nn.embedding_lookup(embeddings, train_inputs)\n char_embed = tf.nn.embedding_lookup(char_embeddings,train_input_chars)\n lambda_2 = tf.Variable(tf.random_normal([1],stddev=1.0))\n\n # weight variables\n w1 = tf.Variable(tf.random_normal([embedding_size,embedding_size // 4],stddev=1.0/math.sqrt(embedding_size)))\n w2 = tf.Variable(tf.random_normal([embedding_size // 4,1],stddev=1.0/math.sqrt(embedding_size)))\n weights = tf.stack([w1]*batch_size)\n vvector = tf.stack([w2]*batch_size)\n weights_tweet = tf.stack([w1]*tweet_batch_size*word_max_len)\n vvector_tweet = tf.stack([w2]*tweet_batch_size*word_max_len)\n\n # Construct the variables for the NCE loss\n nce_weights = tf.Variable(\n tf.truncated_normal([vocabulary_size, embedding_size],\n stddev=1.0 / math.sqrt(embedding_size)))\n nce_biases = tf.Variable(tf.zeros([vocabulary_size]))\n # character weights\n nce_char_weights = tf.Variable(\n tf.truncated_normal([vocabulary_size, embedding_size // 2],\n stddev=1.0 / math.sqrt(embedding_size // 2)))\n nce_char_biases = tf.Variable(tf.zeros([vocabulary_size]))\n\n nce_train_weights = tf.Variable(\n tf.truncated_normal([vocabulary_size, embedding_size],\n stddev=1.0 / math.sqrt(embedding_size)))\n nce_train_biases = tf.Variable(tf.zeros([vocabulary_size]))\n \n loss = tf.reduce_mean(\n tf.nn.nce_loss(weights=nce_weights,\n biases=nce_biases,\n labels=train_labels,\n inputs=embed,\n num_sampled=num_sampled,\n num_classes=vocabulary_size))\n\n loss_char = tf.reduce_mean(\n tf.nn.nce_loss(weights=nce_char_weights,\n biases=nce_char_biases,\n labels=train_char_labels,\n inputs=char_embed,\n num_sampled=10,\n num_classes=char_vocabulary_size))\n\n # Construct the SGD optimizer using a learning rate of 1.0.\n optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss)\n optimizer_char = tf.train.AdamOptimizer(learning_rate /5).minimize(loss_char)\n\n # Compute the cosine similarity between minibatch examples and all embeddings.\n norm = tf.sqrt(tf.reduce_sum(tf.square(embeddings), 1, keep_dims=True))\n normalized_embeddings = embeddings / norm\n valid_embeddings = tf.nn.embedding_lookup(\n normalized_embeddings, valid_dataset)\n similarity = tf.matmul(\n valid_embeddings, normalized_embeddings, transpose_b=True)\n query_embedding_token = tf.reshape(tf.reduce_mean(tf.nn.embedding_lookup(normalized_embeddings,query_ints),axis=0),shape=[1,embedding_size])\n expanded_query_embedding_token = tf.reshape(tf.reduce_mean(tf.nn.embedding_lookup(normalized_embeddings,expanded_query_ints),axis=0),shape=[1,embedding_size])\n similarity_query = tf.reshape(tf.matmul(\n query_embedding_token, normalized_embeddings, transpose_b=True),shape=[int(normalized_embeddings.shape[0])])\n similarity_expanded_query = tf.reshape(tf.matmul(\n expanded_query_embedding_token, normalized_embeddings, transpose_b=True),shape=[int(normalized_embeddings.shape[0])])\n\n norm_char = tf.sqrt(tf.reduce_sum(tf.square(char_embeddings), 1, keep_dims=True))\n normalized_char_embeddings = char_embeddings / norm_char\n valid_embeddings_char = tf.nn.embedding_lookup(\n normalized_char_embeddings, valid_char_dataset)\n similarity_char = tf.matmul(\n valid_embeddings_char, normalized_char_embeddings, transpose_b=True)\n \n bilstm = biLSTM_setup(embedding_size)\n character_word_embeddings = tf.nn.embedding_lookup(normalized_char_embeddings, word_char_embeddings)\n intermediate = biLSTM_implementation(character_word_embeddings, bilstm, False)\n output = attention(w1, w2, intermediate)\n\n word_embeddings 
= tf.nn.embedding_lookup(normalized_embeddings, train_inputs)\n final_embedding = lambda_2*word_embeddings + (1-lambda_2)*output\n with tf.variable_scope(tf.get_variable_scope(), reuse=None):\n\n loss_char_train = tf.reduce_mean(\n tf.nn.nce_loss(weights=nce_train_weights,\n biases=nce_train_biases,\n labels=train_labels,\n inputs=final_embedding,\n num_sampled=64,\n num_classes=vocabulary_size))\n\n optimizer_train = tf.train.AdamOptimizer(learning_rate/5).minimize(loss_char_train)\n\n tweet_word_embed = tf.nn.embedding_lookup(normalized_embeddings, tweet_word_holder)\n tweet_char_embeddings = tf.reshape(tf.nn.embedding_lookup(normalized_char_embeddings, tweet_char_holder),shape=[tweet_batch_size*word_max_len, char_max_len, embedding_size//2])\n intermediate = biLSTM_implementation(tweet_char_embeddings, bilstm)\n tweet_char_embed = tf.reshape(attention(w1,w2,intermediate),shape=[tweet_batch_size, word_max_len, embedding_size])\n tweet_embedding = tf.reduce_mean(lambda_1*tweet_word_embed + (1-lambda_1)*tweet_char_embed,axis=1)\n # query embeddings\n query_embedding = tf.reshape(tf.reduce_mean(tf.nn.embedding_lookup(normalized_embeddings,query_ints),axis=0),shape=[1,embedding_size])\n expanded_query_embedding = tf.reshape(tf.reduce_mean(tf.nn.embedding_lookup(normalized_embeddings,expanded_query_ints),axis=0),shape=[1,embedding_size],name=\"similarity_normal\")\n query_similarity = tf.reshape(tf.matmul(tweet_embedding, query_embedding, transpose_b=True),shape=[tweet_batch_size])\n expanded_query_similarity = tf.reshape(tf.matmul(tweet_embedding, expanded_query_embedding, transpose_b=True),shape=[tweet_batch_size],name=\"similarity_expanded\")\n # tweet level query : for matching / extraction\n tquery_word_embed = tf.nn.embedding_lookup(normalized_embeddings, tquery_word_holder)\n tquery_char_embeddings = tf.reshape(tf.nn.embedding_lookup(normalized_char_embeddings, tquery_char_holder),shape=[word_max_len, char_max_len, embedding_size//2])\n intermediate = biLSTM_implementation(tquery_char_embeddings, bilstm)\n tquery_char_embed = attention(w1, w2, intermediate)\n tquery_embedding = tf.reshape(tf.reduce_mean(lambda_1*tquery_word_embed + (1-lambda_1)*tquery_char_embed,axis=0),shape=[1,embedding_size])\n\n norm_query = tf.sqrt(tf.reduce_sum(tf.square(tquery_embedding), 1, keep_dims=True))\n tquery_embedding_norm = tquery_embedding / norm_query\n cosine = tf.matmul(tweet_embedding, tquery_embedding_norm, transpose_b=True)\n tweet_query_similarity = tf.reshape(cosine, shape=[tweet_batch_size], name=\"tweet_query_similarity\")\n \n tquery_embedding_norm_dim = tf.reshape(tquery_embedding_norm, shape=[1,embedding_size])\n query_need_embedding = tf.reshape(tf.reduce_mean(tf.nn.embedding_lookup(normalized_embeddings, need_constant),axis=0),shape=[1,embedding_size])\n cosine_need = tf.matmul(tquery_embedding_norm_dim, query_need_embedding, transpose_b=True)\n tquery_embedding_reqd = tf.reshape(tquery_embedding_norm_dim - (cosine_need*tquery_embedding_norm_dim),shape=[1,embedding_size])\n # we have the need vector without the need vector\n query_avail_embedding = tf.reshape(tf.reduce_mean(tf.nn.embedding_lookup(normalized_embeddings,avail_constant),axis=0),shape=[1,embedding_size])\n query_norm = tf.sqrt(tf.reduce_sum(tf.square(query_avail_embedding),1,keep_dims=True))\n query_avail_embedding_norm = query_embedding / query_norm\n cosine_avail = tf.matmul(tweet_embedding, query_avail_embedding_norm, transpose_b=True)\n reduced_tweet_embedding = tweet_embedding - (tweet_embedding*cosine_avail)\n 
match_similarity = tf.reshape(tf.matmul(reduced_tweet_embedding, tquery_embedding_reqd, transpose_b=True),shape=[tweet_batch_size],name=\"match_similarity\")\n # Add variable initializer.\n init = tf.global_variables_initializer()\n saver = tf.train.Saver()\n\n# Step 5: Begin training.\n# loading tweet list in integer marking form\n# load more data\nexpand_count = 3\nwith tf.Session(graph=graph) as session:\n # We must initialize all variables before we use them.\n init.run()\n count = 0\n print(\"Initialized\")\n\n generators = [generate_batch, generate_batch_char]\n similarities = [similarity, similarity_char]\n placeholders = [[train_inputs,train_labels],[train_input_chars,train_char_labels]]\n losses = [loss, loss_char]\n optimizers = [optimizer, optimizer_char]\n interval1 = 800\n interval2 = 8000\n datas = [data,char_data]\n data_index = [data_index, char_data_index, buffer_index]\n reverse_dictionaries = [reverse_dictionary, reverse_char_dictionary]\n if query_type == 0:\n query_name = 'Need'\n else :\n query_name = 'Avail'\n print(query_tokens)\n print(query_name)\n count_ = train_model(session, dataset,query_similarity, query_tokens, query_ints, query_name, word_batch_list, char_batch_list, tweet_word_holder, tweet_char_holder, generators, similarities, num_steps, placeholders,losses, optimizers, interval1, interval2, valid_size, valid_examples, reverse_dictionaries, batch_size, num_skips, skip_window, filename, datas, data_index, tweet_batch_size)\n placeholders += [[train_inputs, word_char_embeddings, train_labels]]\n losses += [loss_char_train]\n optimizers += [optimizer_train]\n datas += [[word_batch_list, char_batch_list]]\n count_ = train_model(session, dataset,query_similarity, query_tokens ,query_ints, query_name, word_batch_list, char_batch_list, tweet_word_holder, tweet_char_holder, generators, similarities, num_steps_roll, placeholders,losses, optimizers, interval1, interval2, valid_size, valid_examples, reverse_dictionaries, batch_size, num_skips, skip_window, filename, datas, data_index, tweet_batch_size, count_)\n \n expanded_query_tokens, expanded_query_holder, final_query_similarity= expand_query(expand_flag, session,query_ints, np.array(query_tokens),dataset ,similarity_query, word_batch_dict, 100, query_ints, expanded_query_ints, query_similarity, expanded_query_similarity, expand_start_count, expand_count)\n expanded_query_tokens = query_tokens + expanded_query_tokens\n print(expanded_query_tokens)\n \n count_ = train_model(session, dataset, final_query_similarity, expanded_query_tokens, expanded_query_holder, query_name, word_batch_list, char_batch_list, tweet_word_holder, tweet_char_holder, generators, similarities, num_steps_train , placeholders,losses, optimizers, interval1, interval2, valid_size, valid_examples, reverse_dictionaries, batch_size, num_skips, skip_window, filename, datas, data_index, tweet_batch_size, count_)\n folder_name = './%s/%s/'%(dataset, query_type)\n final_embeddings = normalized_embeddings.eval()\n final_char_embedding = normalized_char_embeddings.eval()\n np.save('../results/%s/%s/%s_word_embeddings.npy'%(dataset, query_name, filename), final_embeddings)\n np.save('../results/%s/%s/%s_char_embeddings.npy'%(dataset, query_name, filename), final_char_embedding)\n saver.save(session, '../results/%s/%s/%s_model.ckpt'%(dataset, query_name, filename))\n"
] | [
[
"numpy.save",
"tensorflow.reshape",
"tensorflow.matmul",
"tensorflow.get_variable_scope",
"tensorflow.random_normal",
"tensorflow.global_variables_initializer",
"tensorflow.device",
"tensorflow.Graph",
"tensorflow.constant",
"tensorflow.stack",
"tensorflow.nn.nce_loss",
"tensorflow.random_uniform",
"tensorflow.train.Saver",
"tensorflow.Session",
"tensorflow.nn.embedding_lookup",
"tensorflow.placeholder",
"tensorflow.zeros",
"tensorflow.train.AdamOptimizer",
"tensorflow.reduce_mean",
"tensorflow.train.GradientDescentOptimizer",
"tensorflow.square",
"numpy.array"
]
] |
ruohoruotsi/pyro | [
"b54a4b42b9474eb3ecee11505e45fde85b1cdc54"
] | [
"pyro/distributions/relaxed_straight_through.py"
] | [
"from __future__ import absolute_import, division, print_function\n\nimport torch\n\nfrom pyro.distributions.torch import RelaxedOneHotCategorical, RelaxedBernoulli\nfrom pyro.distributions.util import copy_docs_from\nfrom torch.distributions.utils import clamp_probs\n\n\n@copy_docs_from(RelaxedOneHotCategorical)\nclass RelaxedOneHotCategoricalStraightThrough(RelaxedOneHotCategorical):\n \"\"\"\n An implementation of\n :class:`~torch.distributions.relaxed_categorical.RelaxedOneHotCategorical`\n with a straight-through gradient estimator.\n\n This distribution has the following properties:\n\n - The samples returned by the :meth:`rsample` method are discrete/quantized.\n - The :meth:`log_prob` method returns the log probability of the\n relaxed/unquantized sample using the GumbelSoftmax distribution.\n - In the backward pass the gradient of the sample with respect to the\n parameters of the distribution uses the relaxed/unquantized sample.\n\n References:\n\n [1] The Concrete Distribution: A Continuous Relaxation of Discrete Random Variables,\n Chris J. Maddison, Andriy Mnih, Yee Whye Teh\n [2] Categorical Reparameterization with Gumbel-Softmax,\n Eric Jang, Shixiang Gu, Ben Poole\n \"\"\"\n def rsample(self, sample_shape=torch.Size()):\n soft_sample = super(RelaxedOneHotCategoricalStraightThrough, self).rsample(sample_shape)\n soft_sample = clamp_probs(soft_sample)\n hard_sample = QuantizeCategorical.apply(soft_sample)\n return hard_sample\n\n def log_prob(self, value):\n value = getattr(value, '_unquantize', value)\n return super(RelaxedOneHotCategoricalStraightThrough, self).log_prob(value)\n\n\nclass QuantizeCategorical(torch.autograd.Function):\n @staticmethod\n def forward(ctx, soft_value):\n argmax = soft_value.max(-1)[1]\n hard_value = torch.zeros_like(soft_value)\n hard_value._unquantize = soft_value\n if argmax.dim() < hard_value.dim():\n argmax = argmax.unsqueeze(-1)\n return hard_value.scatter_(-1, argmax, 1)\n\n @staticmethod\n def backward(ctx, grad):\n return grad\n\n\n@copy_docs_from(RelaxedBernoulli)\nclass RelaxedBernoulliStraightThrough(RelaxedBernoulli):\n \"\"\"\n An implementation of\n :class:`~torch.distributions.relaxed_bernoulli.RelaxedBernoulli`\n with a straight-through gradient estimator.\n\n This distribution has the following properties:\n\n - The samples returned by the :meth:`rsample` method are discrete/quantized.\n - The :meth:`log_prob` method returns the log probability of the\n relaxed/unquantized sample using the GumbelSoftmax distribution.\n - In the backward pass the gradient of the sample with respect to the\n parameters of the distribution uses the relaxed/unquantized sample.\n\n References:\n\n [1] The Concrete Distribution: A Continuous Relaxation of Discrete Random Variables,\n Chris J. 
Maddison, Andriy Mnih, Yee Whye Teh\n [2] Categorical Reparameterization with Gumbel-Softmax,\n Eric Jang, Shixiang Gu, Ben Poole\n \"\"\"\n def rsample(self, sample_shape=torch.Size()):\n soft_sample = super(RelaxedBernoulliStraightThrough, self).rsample(sample_shape)\n soft_sample = clamp_probs(soft_sample)\n hard_sample = QuantizeBernoulli.apply(soft_sample)\n return hard_sample\n\n def log_prob(self, value):\n value = getattr(value, '_unquantize', value)\n return super(RelaxedBernoulliStraightThrough, self).log_prob(value)\n\n\nclass QuantizeBernoulli(torch.autograd.Function):\n @staticmethod\n def forward(ctx, soft_value):\n hard_value = soft_value.round()\n hard_value._unquantize = soft_value\n return hard_value\n\n @staticmethod\n def backward(ctx, grad):\n return grad\n"
] | [
[
"torch.zeros_like",
"torch.Size",
"torch.distributions.utils.clamp_probs"
]
] |
tsingqguo/ABA | [
"c32edbbe5705b0332a08951b5ee436b5f58c2e70"
] | [
"ltr/dataset/lasot.py"
] | [
"import os\nimport os.path\nimport torch\nimport numpy as np\nimport pandas\nimport csv\nimport random\nfrom collections import OrderedDict\nfrom .base_video_dataset import BaseVideoDataset\nfrom ltr.data.image_loader import jpeg4py_loader\nfrom ltr.admin.environment import env_settings\n\n\nclass Lasot(BaseVideoDataset):\n \"\"\" LaSOT dataset.\n\n Publication:\n LaSOT: A High-quality Benchmark for Large-scale Single Object Tracking\n Heng Fan, Liting Lin, Fan Yang, Peng Chu, Ge Deng, Sijia Yu, Hexin Bai, Yong Xu, Chunyuan Liao and Haibin Ling\n CVPR, 2019\n https://arxiv.org/pdf/1809.07845.pdf\n\n Download the dataset from https://cis.temple.edu/lasot/download.html\n \"\"\"\n\n def __init__(self, root=None, image_loader=jpeg4py_loader, vid_ids=None, split=None, data_fraction=None):\n \"\"\"\n args:\n root - path to the lasot dataset.\n image_loader (jpeg4py_loader) - The function to read the images. jpeg4py (https://github.com/ajkxyz/jpeg4py)\n is used by default.\n vid_ids - List containing the ids of the videos (1 - 20) used for training. If vid_ids = [1, 3, 5], then the\n videos with subscripts -1, -3, and -5 from each class will be used for training.\n split - If split='train', the official train split (protocol-II) is used for training. Note: Only one of\n vid_ids or split option can be used at a time.\n data_fraction - Fraction of dataset to be used. The complete dataset is used by default\n \"\"\"\n root = env_settings().lasot_dir if root is None else root\n super().__init__('LaSOT', root, image_loader)\n\n # Keep a list of all classes\n self.class_list = [f for f in os.listdir(self.root)]\n self.class_to_id = {cls_name: cls_id for cls_id, cls_name in enumerate(self.class_list)}\n\n self.sequence_list = self._build_sequence_list(vid_ids, split)\n\n if data_fraction is not None:\n self.sequence_list = random.sample(self.sequence_list, int(len(self.sequence_list)*data_fraction))\n\n self.seq_per_class = self._build_class_list()\n\n def _build_sequence_list(self, vid_ids=None, split=None):\n if split is not None:\n if vid_ids is not None:\n raise ValueError('Cannot set both split_name and vid_ids.')\n ltr_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), '..')\n if split == 'train':\n file_path = os.path.join(ltr_path, 'data_specs', 'lasot_train_split.txt')\n else:\n raise ValueError('Unknown split name.')\n sequence_list = pandas.read_csv(file_path, header=None, squeeze=True).values.tolist()\n elif vid_ids is not None:\n sequence_list = [c+'-'+str(v) for c in self.class_list for v in vid_ids]\n else:\n raise ValueError('Set either split_name or vid_ids.')\n\n return sequence_list\n\n def _build_class_list(self):\n seq_per_class = {}\n for seq_id, seq_name in enumerate(self.sequence_list):\n class_name = seq_name.split('-')[0]\n if class_name in seq_per_class:\n seq_per_class[class_name].append(seq_id)\n else:\n seq_per_class[class_name] = [seq_id]\n\n return seq_per_class\n\n def get_name(self):\n return 'lasot'\n\n def has_class_info(self):\n return True\n\n def has_occlusion_info(self):\n return True\n\n def get_num_sequences(self):\n return len(self.sequence_list)\n\n def get_num_classes(self):\n return len(self.class_list)\n\n def get_sequences_in_class(self, class_name):\n return self.seq_per_class[class_name]\n\n def _read_bb_anno(self, seq_path):\n bb_anno_file = os.path.join(seq_path, \"groundtruth.txt\")\n gt = pandas.read_csv(bb_anno_file, delimiter=',', header=None, dtype=np.float32, na_filter=False, low_memory=False).values\n return torch.tensor(gt)\n\n 
def _read_target_visible(self, seq_path):\n # Read full occlusion and out_of_view\n occlusion_file = os.path.join(seq_path, \"full_occlusion.txt\")\n out_of_view_file = os.path.join(seq_path, \"out_of_view.txt\")\n\n with open(occlusion_file, 'r', newline='') as f:\n occlusion = torch.ByteTensor([int(v) for v in list(csv.reader(f))[0]])\n with open(out_of_view_file, 'r') as f:\n out_of_view = torch.ByteTensor([int(v) for v in list(csv.reader(f))[0]])\n\n target_visible = ~occlusion & ~out_of_view\n\n return target_visible\n\n def _get_sequence_path(self, seq_id):\n seq_name = self.sequence_list[seq_id]\n class_name = seq_name.split('-')[0]\n vid_id = seq_name.split('-')[1]\n\n return os.path.join(self.root, class_name, class_name + '-' + vid_id)\n\n def get_sequence_info(self, seq_id):\n seq_path = self._get_sequence_path(seq_id)\n bbox = self._read_bb_anno(seq_path)\n\n valid = (bbox[:, 2] > 0) & (bbox[:, 3] > 0)\n visible = self._read_target_visible(seq_path) & valid.byte()\n\n return {'bbox': bbox, 'valid': valid, 'visible': visible}\n\n def _get_frame_path(self, seq_path, frame_id):\n return os.path.join(seq_path, 'img', '{:08}.jpg'.format(frame_id+1)) # frames start from 1\n\n def _get_frame(self, seq_path, frame_id):\n return self.image_loader(self._get_frame_path(seq_path, frame_id))\n\n def _get_class(self, seq_path):\n raw_class = seq_path.split('/')[-2]\n return raw_class\n\n def get_class_name(self, seq_id):\n seq_path = self._get_sequence_path(seq_id)\n obj_class = self._get_class(seq_path)\n\n return obj_class\n\n def get_frames(self, seq_id, frame_ids, anno=None):\n seq_path = self._get_sequence_path(seq_id)\n #print(seq_path)\n obj_class = self._get_class(seq_path)\n frame_list = [self._get_frame(seq_path, f_id) for f_id in frame_ids]\n\n if anno is None:\n anno = self.get_sequence_info(seq_id)\n\n anno_frames = {}\n for key, value in anno.items():\n anno_frames[key] = [value[f_id, ...].clone() for f_id in frame_ids]\n\n object_meta = OrderedDict({'object_class_name': obj_class,\n 'motion_class': None,\n 'major_class': None,\n 'root_class': None,\n 'motion_adverb': None})\n\n return frame_list, anno_frames, object_meta\n"
] | [
[
"pandas.read_csv",
"torch.tensor"
]
] |
anton-potapov/openvino | [
"84119afe9a8c965e0a0cd920fff53aee67b05108",
"84119afe9a8c965e0a0cd920fff53aee67b05108"
] | [
"model-optimizer/mo/middle/passes/fusing/decomposition_test.py",
"model-optimizer/extensions/back/InterpolateReshape_test.py"
] | [
"\"\"\"\n Copyright (C) 2018-2020 Intel Corporation\n\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n\"\"\"\n\nimport unittest\n\nimport numpy as np\n\nfrom mo.middle.passes.fusing.decomposition import convert_scale_shift_to_mul_add, convert_batch_norm\nfrom mo.utils.ir_engine.compare_graphs import compare_graphs\nfrom mo.utils.unittest.graph import build_graph\n\nnodes_attributes = {\n 'placeholder_1': {'shape': None, 'type': 'Parameter', 'kind': 'op', 'op': 'Parameter'},\n 'placeholder_1_data': {'value': None, 'shape': None, 'kind': 'data', 'data_type': None},\n 'placeholder_2': {'shape': None, 'type': 'Parameter', 'kind': 'op', 'op': 'Parameter'},\n 'placeholder_2_data': {'value': None, 'shape': None, 'kind': 'data', 'data_type': None},\n # ScaleShift layer\n 'scaleshift_1': {'type': 'ScaleShift', 'kind': 'op', 'op': 'ScaleShift', 'axis': 0},\n 'const_scaleshift_1_w': {'value': None, 'shape': None, 'kind': 'op'},\n 'scaleshift_1_w': {'value': None, 'shape': None, 'kind': 'data'},\n 'const_scaleshift_1_b': {'value': None, 'shape': None, 'kind': 'op'},\n 'scaleshift_1_b': {'value': None, 'shape': None, 'kind': 'data'},\n 'scaleshift_1_data': {'value': None, 'shape': None, 'kind': 'data'},\n # Mul and Add operations\n 'mul_1': {'type': None, 'value': None, 'kind': 'op', 'op': 'Mul'},\n 'const_mul_1_w': {'value': None, 'shape': None, 'kind': 'op'},\n 'mul_1_w': {'value': None, 'shape': None, 'kind': 'data'},\n 'mul_1_data': {'value': None, 'shape': None, 'kind': 'data'},\n 'add_1': {'type': None, 'kind': 'op', 'op': 'Add'},\n 'const_add_1_w': {'value': None, 'shape': None, 'kind': 'op'},\n 'add_1_w': {'value': None, 'shape': None, 'kind': 'data'},\n 'add_1_data': {'value': None, 'shape': None, 'kind': 'data'},\n # Mul and Add operations\n 'mul_2': {'type': None, 'kind': 'op', 'op': 'Mul'},\n 'const_mul_2_w': {'value': None, 'shape': None, 'kind': 'op'},\n 'mul_2_w': {'value': None, 'shape': None, 'kind': 'data'},\n 'mul_2_data': {'value': None, 'shape': None, 'kind': 'data'},\n 'add_2': {'type': None, 'kind': 'op', 'op': 'Add'},\n 'const_add_2_w': {'value': None, 'shape': None, 'kind': 'op'},\n 'add_2_w': {'value': None, 'shape': None, 'kind': 'data'},\n 'add_2_data': {'value': None, 'shape': None, 'kind': 'data'},\n # Reshape\n 'placeholder_2/Reshape_': {'type': 'Reshape', 'kind': 'op', 'op': 'Reshape'},\n 'placeholder_2/Reshape_data': {'value': None, 'shape': None, 'kind': 'data'},\n 'placeholder_2/Reshape_const': {'type': 'Const', 'kind': 'op', 'op': 'Const', 'value': None},\n 'placeholder_2/Reshape_const_data': {'kind': 'data', 'value': None, 'shape': None},\n # BatchNorm operation\n 'bn_op': {'type': None, 'kind': 'op', 'op': 'BatchNorm', 'can_be_fused': True},\n 'const_bn_const': {'value': None, 'shape': None, 'kind': 'op'},\n 'bn_const': {'value': None, 'shape': None, 'kind': 'data'},\n 'const_bn_beta': {'value': None, 'shape': None, 'kind': 'op'},\n 'bn_beta': {'value': None, 'shape': None, 'kind': 'data'},\n 'const_bn_mean': {'value': None, 'shape': None, 'kind': 'op'},\n 'bn_mean': {'value': 
None, 'shape': None, 'kind': 'data'},\n 'const_bn_var': {'value': None, 'shape': None, 'kind': 'op'},\n 'bn_var': {'value': None, 'shape': None, 'kind': 'data'},\n 'bn_data': {'value': None, 'shape': None, 'kind': 'data'},\n # Concat1 operation\n 'concat': {'type': 'Concat', 'kind': 'op', 'op': 'Concat'},\n 'concat_data': {'value': None, 'shape': None, 'kind': 'data'},\n 'op_output': {'kind': 'op', 'op': 'Result'}\n}\n\n\nclass ScaleShiftToMulAdd(unittest.TestCase):\n # ScaleShift -> Mul\n def test_scaleshift_to_mul_1(self):\n graph = build_graph(nodes_attributes,\n [('placeholder_1', 'placeholder_1_data'),\n ('placeholder_1_data', 'scaleshift_1'),\n ('const_scaleshift_1_w', 'scaleshift_1_w'),\n ('scaleshift_1_w', 'scaleshift_1'),\n ('scaleshift_1', 'scaleshift_1_data'),\n ('scaleshift_1_data', 'op_output')\n ],\n {'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])},\n 'scaleshift_1_w': {'shape': np.array([3]), 'value': np.array([1, 2, 3])},\n 'scaleshift_1_data': {}\n })\n\n graph_ref = build_graph(nodes_attributes,\n [('placeholder_1', 'placeholder_1_data'),\n ('placeholder_1_data', 'mul_1'),\n ('const_mul_1_w', 'mul_1_w'),\n ('mul_1_w', 'mul_1'),\n ('mul_1', 'scaleshift_1_data'),\n ('scaleshift_1_data', 'op_output')\n ],\n {'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])},\n 'const_mul_1_w': {'shape': np.array([3]), 'value': np.array([1, 2, 3])},\n 'mul_1_w': {'shape': np.array([3]), 'value': np.array([1, 2, 3])},\n 'mul_1': {'can_be_fused': True},\n 'scaleshift_1_data': {}\n })\n\n graph.graph['layout'] = 'NHWC'\n convert_scale_shift_to_mul_add(graph)\n graph.clean_up()\n (flag, resp) = compare_graphs(graph, graph_ref, 'placeholder_1')\n self.assertTrue(flag, resp)\n\n # ScaleShift 2 inputs-> Mul\n def test_scaleshift2_to_mul(self):\n graph = build_graph(nodes_attributes,\n [('placeholder_1', 'placeholder_1_data'),\n ('placeholder_2', 'placeholder_2_data'),\n ('placeholder_1_data', 'scaleshift_1'),\n ('placeholder_2_data', 'scaleshift_1'),\n ('scaleshift_1', 'scaleshift_1_data'),\n ('scaleshift_1_data', 'op_output')\n ],\n {'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])},\n 'placeholder_2_data': {'shape': np.array([1, 227])},\n 'scaleshift_1_data': {}\n })\n\n graph_ref = build_graph(nodes_attributes,\n [('placeholder_1', 'placeholder_1_data'),\n ('placeholder_2', 'placeholder_2_data'),\n ('placeholder_2_data', 'placeholder_2/Reshape_'),\n ('placeholder_2/Reshape_const', 'placeholder_2/Reshape_const_data'),\n ('placeholder_2/Reshape_const_data', 'placeholder_2/Reshape_'),\n ('placeholder_2/Reshape_', 'placeholder_2/Reshape_data'),\n ('placeholder_1_data', 'mul_1'),\n ('placeholder_2/Reshape_data', 'mul_1'),\n ('mul_1', 'scaleshift_1_data'),\n ('scaleshift_1_data', 'op_output')\n ],\n {'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])},\n 'placeholder_2_data': {'shape': np.array([1, 227])},\n 'placeholder_2/Reshape_const': {'value': np.array([1, 227, 1, 1]), 'shape': [4]},\n 'placeholder_2/Reshape_const_data': {'value': np.array([1, 227, 1, 1]), 'shape': [4]},\n 'placeholder_2/Reshape_data': {'shape': np.array([1, 227, 1, 1])},\n 'mul_1': {'can_be_fused': True},\n 'scaleshift_1_data': {}\n })\n\n graph.graph['layout'] = 'NHWC'\n convert_scale_shift_to_mul_add(graph)\n graph.clean_up()\n (flag, resp) = compare_graphs(graph, graph_ref, 'placeholder_1')\n self.assertTrue(flag, resp)\n\n # ScaleShift 2 inputs-> Mul (axis = 1)\n def test_scaleshift2_axis1_to_mul(self):\n graph = build_graph(nodes_attributes,\n [('placeholder_1', 
'placeholder_1_data'),\n ('placeholder_2', 'placeholder_2_data'),\n ('placeholder_1_data', 'scaleshift_1'),\n ('placeholder_2_data', 'scaleshift_1'),\n ('scaleshift_1', 'scaleshift_1_data'),\n ('scaleshift_1_data', 'op_output')\n ],\n {'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])},\n 'placeholder_2_data': {'shape': np.array([227])},\n 'scaleshift_1': {'axis': 1},\n 'scaleshift_1_data': {}\n })\n\n graph_ref = build_graph(nodes_attributes,\n [('placeholder_1', 'placeholder_1_data'),\n ('placeholder_2', 'placeholder_2_data'),\n ('placeholder_2_data', 'placeholder_2/Reshape_'),\n ('placeholder_2/Reshape_const', 'placeholder_2/Reshape_const_data'),\n ('placeholder_2/Reshape_const_data', 'placeholder_2/Reshape_'),\n ('placeholder_2/Reshape_', 'placeholder_2/Reshape_data'),\n ('placeholder_1_data', 'mul_1'),\n ('placeholder_2/Reshape_data', 'mul_1'),\n ('mul_1', 'scaleshift_1_data'),\n ('scaleshift_1_data', 'op_output')\n ],\n {'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])},\n 'placeholder_2_data': {'shape': np.array([227])},\n 'placeholder_2/Reshape_const': {'value': np.array([1, 227, 1, 1]), 'shape': [4]},\n 'placeholder_2/Reshape_const_data': {'value': np.array([1, 227, 1, 1]), 'shape': [4]},\n 'placeholder_2/Reshape_data': {'shape': np.array([1, 227, 1, 1])},\n 'mul_1': {'can_be_fused': True},\n 'scaleshift_1_data': {}\n })\n\n graph.graph['layout'] = 'NHWC'\n convert_scale_shift_to_mul_add(graph)\n graph.clean_up()\n (flag, resp) = compare_graphs(graph, graph_ref, 'placeholder_1')\n self.assertTrue(flag, resp)\n\n # ScaleShift -> Mul (Zero biases)\n def test_scaleshift_to_mul_2(self):\n graph = build_graph(nodes_attributes,\n [('placeholder_1', 'placeholder_1_data'),\n ('placeholder_1_data', 'scaleshift_1'),\n ('const_scaleshift_1_w', 'scaleshift_1_w'),\n ('const_scaleshift_1_b', 'scaleshift_1_b'),\n ('scaleshift_1_w', 'scaleshift_1'),\n ('scaleshift_1_b', 'scaleshift_1'),\n ('scaleshift_1', 'scaleshift_1_data'),\n ('scaleshift_1_data', 'op_output')\n ],\n {'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])},\n 'scaleshift_1_w': {'shape': np.array([3]), 'value': np.array([1, 2, 3])},\n 'scaleshift_1_b': {'shape': np.array([3]), 'value': np.array([0, 0, 0])},\n 'scaleshift_1_data': {}\n })\n\n graph_ref = build_graph(nodes_attributes,\n [('placeholder_1', 'placeholder_1_data'),\n ('placeholder_1_data', 'mul_1'),\n ('const_mul_1_w', 'mul_1_w'),\n ('mul_1_w', 'mul_1'),\n ('mul_1', 'scaleshift_1_data'),\n ('scaleshift_1_data', 'op_output')\n ],\n {'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])},\n 'const_mul_1_w': {'shape': np.array([3]), 'value': np.array([1, 2, 3])},\n 'mul_1_w': {'shape': np.array([3]), 'value': np.array([1, 2, 3])},\n 'mul_1': {'can_be_fused': True},\n 'scaleshift_1_data': {}\n })\n\n graph.graph['layout'] = 'NHWC'\n convert_scale_shift_to_mul_add(graph)\n graph.clean_up()\n (flag, resp) = compare_graphs(graph, graph_ref, 'placeholder_1')\n self.assertTrue(flag, resp)\n\n # ScaleShift -> Mul->Add\n def test_scaleshift_to_mul_add(self):\n graph = build_graph(nodes_attributes,\n [('placeholder_1', 'placeholder_1_data'),\n ('placeholder_1_data', 'scaleshift_1'),\n ('const_scaleshift_1_w', 'scaleshift_1_w'),\n ('const_scaleshift_1_b', 'scaleshift_1_b'),\n ('scaleshift_1_w', 'scaleshift_1'),\n ('scaleshift_1_b', 'scaleshift_1'),\n ('scaleshift_1', 'scaleshift_1_data'),\n ('scaleshift_1_data', 'op_output')\n ],\n {'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])},\n 'scaleshift_1_w': {'shape': np.array([3]), 'value': 
np.array([1, 2, 3])},\n 'scaleshift_1_b': {'shape': np.array([3]), 'value': np.array([3, 2, 1])},\n 'scaleshift_1_data': {}\n })\n\n graph_ref = build_graph(nodes_attributes,\n [('placeholder_1', 'placeholder_1_data'),\n ('placeholder_1_data', 'mul_1'),\n ('const_mul_1_w', 'mul_1_w'),\n ('mul_1_w', 'mul_1'),\n ('mul_1', 'mul_1_data'),\n ('mul_1_data', 'add_1'),\n ('const_add_1_w', 'add_1_w'),\n ('add_1_w', 'add_1'),\n ('add_1', 'scaleshift_1_data'),\n ('scaleshift_1_data', 'op_output')\n ],\n {'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])},\n 'const_mul_1_w': {'shape': np.array([3]), 'value': np.array([1, 2, 3])},\n 'mul_1_w': {'shape': np.array([3]), 'value': np.array([1, 2, 3])},\n 'const_add_1_w': {'shape': np.array([3]), 'value': np.array([3, 2, 1])},\n 'add_1_w': {'shape': np.array([3]), 'value': np.array([3, 2, 1])},\n 'mul_1_data': {'shape': np.array([1, 227, 227, 3])},\n 'add_1': {'can_be_fused': True},\n 'mul_1': {'can_be_fused': True},\n 'scaleshift_1_data': {}\n })\n\n graph.graph['layout'] = 'NHWC'\n convert_scale_shift_to_mul_add(graph)\n graph.clean_up()\n (flag, resp) = compare_graphs(graph, graph_ref, 'placeholder_1')\n self.assertTrue(flag, resp)\n\n # ScaleShift -> None (Zero weights and biases)\n def test_scaleshift_to_nothing(self):\n graph = build_graph(nodes_attributes,\n [('placeholder_1', 'placeholder_1_data'),\n ('placeholder_1_data', 'scaleshift_1'),\n ('const_scaleshift_1_w', 'scaleshift_1_w'),\n ('const_scaleshift_1_b', 'scaleshift_1_b'),\n ('scaleshift_1_w', 'scaleshift_1'),\n ('scaleshift_1_b', 'scaleshift_1'),\n ('scaleshift_1', 'scaleshift_1_data'),\n ('scaleshift_1_data', 'op_output')\n ],\n {'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])},\n 'scaleshift_1_w': {'shape': np.array([3]), 'value': np.array([1, 1, 1])},\n 'scaleshift_1_b': {'shape': np.array([3]), 'value': np.array([0, 0, 0])},\n 'scaleshift_1_data': {'shape': np.array([1, 227, 227, 3])}\n }, nodes_with_edges_only=True)\n\n graph_ref = build_graph(nodes_attributes,\n [('placeholder_1', 'placeholder_1_data'),\n ('placeholder_1_data', 'op_output')\n ],\n {'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])}}\n ,nodes_with_edges_only=True)\n\n graph.graph['layout'] = 'NHWC'\n convert_scale_shift_to_mul_add(graph)\n graph.clean_up()\n (flag, resp) = compare_graphs(graph, graph_ref, 'placeholder_1')\n self.assertTrue(flag, resp)\n\n # ScaleShift -> ScaleShift (can_be_fused=False)\n def test_scaleshift_can_be_fused(self):\n graph = build_graph(nodes_attributes,\n [('placeholder_1', 'placeholder_1_data'),\n ('placeholder_1_data', 'scaleshift_1'),\n ('const_scaleshift_1_w', 'scaleshift_1_w'),\n ('const_scaleshift_1_b', 'scaleshift_1_b'),\n ('scaleshift_1_w', 'scaleshift_1'),\n ('scaleshift_1_b', 'scaleshift_1'),\n ('scaleshift_1', 'scaleshift_1_data'),\n ('scaleshift_1_data', 'op_output')\n ],\n {'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])},\n 'scaleshift_1_w': {'shape': np.array([3]), 'value': np.array([1, 1, 1])},\n 'scaleshift_1_b': {'shape': np.array([3]), 'value': np.array([0, 0, 0])},\n 'scaleshift_1': {'can_be_fused': False},\n 'scaleshift_1_data': {'shape': np.array([1, 227, 227, 3])}\n })\n\n graph_ref = build_graph(nodes_attributes,\n [('placeholder_1', 'placeholder_1_data'),\n ('placeholder_1_data', 'scaleshift_1'),\n ('const_scaleshift_1_w', 'scaleshift_1_w'),\n ('const_scaleshift_1_b', 'scaleshift_1_b'),\n ('scaleshift_1_w', 'scaleshift_1'),\n ('scaleshift_1_b', 'scaleshift_1'),\n ('scaleshift_1', 'scaleshift_1_data'),\n ('scaleshift_1_data', 
'op_output')\n ],\n {'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])},\n 'const_scaleshift_1_w': {'shape': np.array([3]), 'value': np.array([1, 1, 1])},\n 'scaleshift_1_w': {'shape': np.array([3]), 'value': np.array([1, 1, 1])},\n 'const_scaleshift_1_b': {'shape': np.array([3]), 'value': np.array([0, 0, 0])},\n 'scaleshift_1_b': {'shape': np.array([3]), 'value': np.array([0, 0, 0])},\n 'scaleshift_1': {'can_be_fused': False},\n 'scaleshift_1_data': {'shape': np.array([1, 227, 227, 3])}\n })\n\n convert_scale_shift_to_mul_add(graph)\n graph.clean_up()\n\n (flag, resp) = compare_graphs(graph, graph_ref, 'scaleshift_1_data')\n self.assertTrue(flag, resp)\n\n\nclass BatchNormDecomposition(unittest.TestCase):\n def test_bn_decomposition_1(self):\n graph = build_graph(nodes_attributes,\n [('placeholder_1', 'placeholder_1_data'),\n ('placeholder_1_data', 'bn_op'),\n ('const_bn_const', 'bn_const'),\n ('const_bn_beta', 'bn_beta'),\n ('const_bn_mean', 'bn_mean'),\n ('const_bn_var', 'bn_var'),\n ('bn_const', 'bn_op'),\n ('bn_beta', 'bn_op'),\n ('bn_mean', 'bn_op'),\n ('bn_var', 'bn_op'),\n ('bn_op', 'bn_data'),\n ('concat', 'concat_data'),\n ('bn_data', 'concat'),\n ('concat_data', 'op_output')\n ],\n {'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])},\n 'bn_op': {'eps': 1.2},\n 'bn_const': {'shape': np.array([3]), 'value': np.array([1, 2, 3])},\n 'bn_beta': {'shape': np.array([3]), 'value': np.array([1, 2, 3])},\n 'bn_mean': {'shape': np.array([3]), 'value': np.array([1, 2, 3])},\n 'bn_var': {'shape': np.array([3]), 'value': np.array([1, 2, 3])},\n 'bn_data': {'shape': np.array([1, 227, 227, 3])},\n 'concat_data': {}\n }, nodes_with_edges_only=True)\n\n graph_ref = build_graph(nodes_attributes,\n [('placeholder_1', 'placeholder_1_data'),\n ('placeholder_1_data', 'mul_1'),\n ('const_mul_1_w', 'mul_1_w'),\n ('mul_1_w', 'mul_1'),\n ('mul_1', 'mul_1_data'),\n ('mul_1_data', 'add_1'),\n ('const_add_1_w', 'add_1_w'),\n ('add_1_w', 'add_1'),\n ('add_1', 'add_1_data'),\n ('add_1_data', 'mul_2'),\n ('const_mul_2_w', 'mul_2_w'),\n ('mul_2_w', 'mul_2'),\n ('mul_2', 'mul_2_data'),\n ('mul_2_data', 'add_2'),\n ('const_add_2_w', 'add_2_w'),\n ('add_2_w', 'add_2'),\n ('add_2', 'add_2_data'),\n ('concat', 'concat_data'),\n ('add_2_data', 'concat'),\n ('concat_data', 'op_output')\n ],\n {'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])},\n 'const_mul_1_w': {'shape': np.array([3]),\n 'value': np.array([0.67419986, 0.55901699, 0.48795004])},\n 'mul_1_w': {'shape': np.array([3]),\n 'value': np.array([0.67419986, 0.55901699, 0.48795004])},\n 'const_mul_2_w': {'shape': np.array([3]), 'value': np.array([1, 2, 3])},\n 'mul_2_w': {'shape': np.array([3]), 'value': np.array([1, 2, 3])},\n 'const_add_1_w': {'shape': np.array([3]),\n 'value': np.array([-0.67419986, -1.11803399, -1.46385011])},\n 'add_1_w': {'shape': np.array([3]),\n 'value': np.array([-0.67419986, -1.11803399, -1.46385011])},\n 'const_add_2_w': {'shape': np.array([3]), 'value': np.array([1, 2, 3])},\n 'add_2_w': {'shape': np.array([3]), 'value': np.array([1, 2, 3])},\n 'add_2_data': {'shape': np.array([1, 227, 227, 3])},\n 'mul_1': {'can_be_fused': True},\n 'mul_2': {'can_be_fused': True},\n 'add_1': {'can_be_fused': True},\n 'add_2': {'can_be_fused': True},\n 'concat_data': {}\n }, nodes_with_edges_only=True)\n\n graph.graph['layout'] = 'NHWC'\n convert_batch_norm(graph)\n graph.clean_up()\n\n (flag, resp) = compare_graphs(graph, graph_ref, 'concat_data')\n self.assertTrue(flag, resp)\n\n # 'can_be_fused': False for BatchNorm\n 
def test_bn_decomposition_2(self):\n graph = build_graph(nodes_attributes,\n [('placeholder_1', 'placeholder_1_data'),\n ('placeholder_1_data', 'bn_op'),\n ('const_bn_const', 'bn_const'),\n ('const_bn_beta', 'bn_beta'),\n ('const_bn_mean', 'bn_mean'),\n ('const_bn_var', 'bn_var'),\n ('bn_const', 'bn_op'),\n ('bn_beta', 'bn_op'),\n ('bn_mean', 'bn_op'),\n ('bn_var', 'bn_op'),\n ('bn_op', 'bn_data'),\n ('concat', 'concat_data'),\n ('bn_data', 'concat'),\n ('concat_data', 'op_output')\n ],\n {'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])},\n 'bn_op': {'eps': 1.2, 'can_be_fused': False},\n 'bn_const': {'shape': np.array([3]), 'value': np.array([1, 2, 3])},\n 'bn_beta': {'shape': np.array([3]), 'value': np.array([1, 2, 3])},\n 'bn_mean': {'shape': np.array([3]), 'value': np.array([1, 2, 3])},\n 'bn_var': {'shape': np.array([3]), 'value': np.array([1, 2, 3])},\n 'bn_data': {'shape': np.array([1, 227, 227, 3])},\n 'concat_data': {}\n })\n\n graph_ref = build_graph(nodes_attributes,\n [('placeholder_1', 'placeholder_1_data'),\n ('placeholder_1_data', 'mul_1'),\n ('const_mul_1_w', 'mul_1_w'),\n ('mul_1_w', 'mul_1'),\n ('mul_1', 'mul_1_data'),\n ('mul_1_data', 'add_1'),\n ('const_add_1_w', 'add_1_w'),\n ('add_1_w', 'add_1'),\n ('add_1', 'add_1_data'),\n ('add_1_data', 'mul_2'),\n ('const_mul_2_w', 'mul_2_w'),\n ('mul_2_w', 'mul_2'),\n ('mul_2', 'mul_2_data'),\n ('mul_2_data', 'add_2'),\n ('const_add_2_w', 'add_2_w'),\n ('add_2_w', 'add_2'),\n ('add_2', 'add_2_data'),\n ('concat', 'concat_data'),\n ('add_2_data', 'concat'),\n ('concat_data', 'op_output')\n ],\n {'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])},\n 'const_mul_1_w': {'shape': np.array([3]),\n 'value': np.array([0.67419986, 0.55901699, 0.48795004])},\n 'mul_1_w': {'shape': np.array([3]),\n 'value': np.array([0.67419986, 0.55901699, 0.48795004])},\n 'const_mul_2_w': {'shape': np.array([3]), 'value': np.array([1, 2, 3])},\n 'mul_2_w': {'shape': np.array([3]), 'value': np.array([1, 2, 3])},\n 'const_add_1_w': {'shape': np.array([3]),\n 'value': np.array([-0.67419986, -1.11803399, -1.46385011])},\n 'add_1_w': {'shape': np.array([3]),\n 'value': np.array([-0.67419986, -1.11803399, -1.46385011])},\n 'const_add_2_w': {'shape': np.array([3]), 'value': np.array([1, 2, 3])},\n 'add_2_w': {'shape': np.array([3]), 'value': np.array([1, 2, 3])},\n 'add_2_data': {'shape': np.array([1, 227, 227, 3])},\n 'mul_1': {'can_be_fused': False},\n 'mul_2': {'can_be_fused': False},\n 'add_1': {'can_be_fused': False},\n 'add_2': {'can_be_fused': False},\n 'concat_data': {}\n })\n\n graph.graph['layout'] = 'NHWC'\n convert_batch_norm(graph)\n graph.clean_up()\n\n (flag, resp) = compare_graphs(graph, graph_ref, 'concat_data')\n self.assertTrue(flag, resp)",
"\"\"\"\n Copyright (C) 2018-2020 Intel Corporation\n\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n\"\"\"\n\nimport unittest\nfrom argparse import Namespace\n\nimport numpy as np\n\nfrom extensions.back.InterpolateReshape import InterpolateReshapeWA, InterpolateConcat\nfrom mo.utils.ir_engine.compare_graphs import compare_graphs\nfrom mo.utils.unittest.graph import build_graph, result, regular_op_with_shaped_data, valued_const_with_data, connect, \\\n connect_data\n\nnodes = {\n **regular_op_with_shaped_data('placeholder', [1, 3, 30, 40], {'type': 'Parameter'}),\n **valued_const_with_data('out_shape', np.array([60, 160])),\n\n **regular_op_with_shaped_data('interpolate', [1, 3, 60, 160], {'type': 'Interpolate', 'axes': [2, 3]}),\n\n **regular_op_with_shaped_data('shape', [4], {'type': 'ShapeOf'}),\n **valued_const_with_data('indices', np.array([2, 3])),\n **valued_const_with_data('axis', np.array(0)),\n **regular_op_with_shaped_data('gather', [2], {'type': 'Gather'}),\n\n **valued_const_with_data('multiplier', np.array([2, 4])),\n **regular_op_with_shaped_data('mul', [2], {'type': 'Multiply'}),\n\n **regular_op_with_shaped_data('placeholder_1', [1, 3, 60, 160], {'type': 'Parameter'}),\n **regular_op_with_shaped_data('concat', [1, 7, 60, 160], {'type': 'Concat', 'axis': 1}),\n\n **result(),\n}\n\n\nclass TestInterpolateReshapeWA(unittest.TestCase):\n def test_interpolate_reshape_graph_comparison(self):\n graph = build_graph(nodes, [\n *connect('placeholder', '0:interpolate'),\n *connect('out_shape', '1:interpolate'),\n *connect('interpolate', 'output'),\n ], nodes_with_edges_only=True)\n InterpolateReshapeWA().find_and_replace_pattern(graph)\n graph.graph['cmd_params'] = Namespace(keep_shape_ops=True)\n graph.clean_up()\n graph_ref = build_graph(nodes, [\n *connect('placeholder', '0:interpolate'),\n *connect_data('placeholder', 'shape'),\n *connect('shape', '0:gather'),\n *connect('indices', '1:gather'),\n *connect('axis', '2:gather'),\n *connect('gather', '0:mul'),\n *connect('multiplier', '1:mul'),\n *connect('mul', '1:interpolate'),\n *connect('interpolate', 'output'),\n ], nodes_with_edges_only=True)\n (flag, resp) = compare_graphs(graph, graph_ref, 'output', check_op_attrs=True)\n self.assertTrue(flag, resp)\n\n\nclass TestInterpolateConcat(unittest.TestCase):\n def test_interpolate_concat_reshape_graph_comparison(self):\n graph = build_graph(nodes, [\n *connect('placeholder', '0:interpolate'),\n *connect('out_shape', '1:interpolate'),\n *connect('interpolate', '0:concat'),\n *connect('placeholder_1', '1:concat'),\n *connect('concat', 'output'),\n ], nodes_with_edges_only=True)\n InterpolateConcat().find_and_replace_pattern(graph)\n graph.graph['cmd_params'] = Namespace(keep_shape_ops=True)\n graph.clean_up()\n graph_ref = build_graph(nodes, [\n *connect('placeholder', '0:interpolate'),\n *connect('placeholder_1', 'shape'),\n *connect('shape', '0:gather'),\n *connect('indices', '1:gather'),\n *connect('axis', '2:gather'),\n *connect('gather', '1:interpolate'),\n *connect('interpolate', 
'0:concat'),\n *connect_data('placeholder_1', '1:concat'),\n *connect('concat', 'output'),\n ], nodes_with_edges_only=True)\n (flag, resp) = compare_graphs(graph, graph_ref, 'output', check_op_attrs=True)\n self.assertTrue(flag, resp)\n"
] | [
[
"numpy.array"
],
[
"numpy.array"
]
] |
DottD/pynger | [
"9a24b43a2170234e5059a54ed20329e036260b0a"
] | [
"pynger/fingerprint/FVC_utilities.py"
] | [
"import os\nimport re\nimport io\nimport numpy as np\nimport PIL.Image\nimport typing\nfrom pynger.types import Image, Mask, Field\nfrom pynger.fingerprint.tuning_lro import LROEstimator\nfrom pynger.fingerprint.sampling import convert_to_full, subsample\nfrom pynger.field.manipulation import polar2cart\nfrom pynger.misc import recursively_scan_dir_gen, recursively_scan_dir, random_combination\nfrom itertools import combinations, starmap\n\n\nclass Proxy:\n def write(self, path: str):\n raise NotImplementedError(\"Derived classes must reimplement this method\")\n\n def read(self, path: str):\n raise NotImplementedError(\"Derived classes must reimplement this method\")\n\nclass MaskProxy(Proxy):\n def __init__(self, *args):\n if len(args) == 1:\n if isinstance(args[0], np.ndarray):\n self.mask = args[0]\n elif isinstance(args[0], str):\n self.read(args[0])\n else:\n raise TypeError(\"Arguments not recognized\")\n else:\n self.mask = None\n\n def read(self, path: str, full: bool = True):\n \"\"\" Reads the mask, according to FVC-OnGoing specs.\n\n Args:\n path: The input file path (generally with .fg extension)\n full: Whether the full output should be returned (not implemented yet)\n\n Return:\n The boolean mask represented in the given file.\n \"\"\"\n if not os.path.exists(path):\n raise RuntimeError(\"The input file does not exist\")\n with open(path, 'r') as f:\n shape = tuple([int(n) for n in f.readline().split()])\n mask = np.empty(shape, dtype=bool)\n for row_n, line in enumerate(f):\n mask[row_n,:] = [bool(int(n)) for n in line.split()]\n self.mask = mask\n return mask\n\n def write(self, path: str):\n \"\"\" Writes the mask, according to FVC-OnGoing specs.\n\n Args:\n path: The output file path (generally with .fg extension)\n \"\"\"\n with open(path, 'w') as f:\n print(self.mask.shape, file=f)\n for line in self.mask.astype(int):\n print(line, file=f)\n\nclass FieldProxy(Proxy):\n def __init__(self, *args):\n if len(args) == 2 and isinstance(args[0], np.ndarray) and isinstance(args[1], np.ndarray):\n self.angle, self.mask = args[0].copy(), args[1].copy()\n elif len(args) == 1 and isinstance(args[0], str):\n self.read(args[0])\n else:\n self.angle, self.mask = None, None\n\n def read(self, path: str, full: bool = True):\n \"\"\" Reads the field, according to FVC-OnGoing specs.\n\n Args:\n path: The input file path (generally with .gt extension)\n full: Whether the full output should be returned\n\n Return:\n The field represented in the given file.\n \"\"\"\n if not os.path.exists(path):\n raise RuntimeError(\"The input file does not exist\")\n with open(path, 'rb') as f:\n # Read and discard the header. 
To visualize -> print(f.read(8).decode('ascii'))\n f.read(8)\n # Read the field specifications\n get_next_int = lambda: int.from_bytes(f.read(4), byteorder='little', signed=True)\n self.border_x = get_next_int()\n self.border_y = get_next_int()\n self.step_x = get_next_int()\n self.step_y = get_next_int()\n cols = get_next_int()\n rows = get_next_int()\n # Read the values\n get_next_uint8 = lambda: int.from_bytes(f.read(1), byteorder='little', signed=False)\n content = [(get_next_uint8(), get_next_uint8()) for _ in range(cols*rows)]\n angle, mask = zip(*content)\n angle = np.array(angle, dtype=float).reshape((rows, cols))\n angle *= np.pi / 255.0\n mask = np.array(mask, dtype=bool).reshape((rows, cols))\n # Optionally convert to full matrix\n if full:\n self.angle = convert_to_full(angle, border_x=self.border_x, border_y=self.border_y, step_x=self.step_x, step_y=self.step_y, mode='constant')\n self.mask = convert_to_full(mask, border_x=self.border_x, border_y=self.border_y, step_x=self.step_x, step_y=self.step_y, mode='constant')\n else:\n self.angle = angle\n self.mask = mask\n return self.angle, self.mask\n\n def write(self, path: str, **kwargs):\n \"\"\" Writes the field, according to FVC-OnGoing specs.\n\n Args:\n path: The output file path (generally with .gt extension)\n\n Keyword Args:\n border_x (int): Horizontal border used to sample the field (defaults to 14)\n border_y (int): Vertical border used to sample the field (defaults to 14)\n step_x (int): Horizontal distance between two conscutive sample points (defaults to 8)\n step_y (int): Vertical distance between two conscutive sample points (defaults to 8)\n subsample (bool): Whether the input shall be sub-sampled before saving it\n\n Note:\n The field is subsampled in the process. To avoid this behaviour, set border parameters to 0 and step parameters to 1.\n \"\"\"\n # Read parameters\n bx = kwargs.get('border_x', 14)\n by = kwargs.get('border_y', 14)\n sx = kwargs.get('step_x', 8)\n sy = kwargs.get('step_y', 8)\n needSubsample = kwargs.pop('subsample', True)\n # Sample the field\n if self.angle.shape != self.mask.shape:\n raise RuntimeError('angle and mask sizes mismatch')\n if needSubsample:\n angle = subsample(self.angle, is_field=False, smooth=False, **kwargs)\n mask = subsample(self.mask, is_field=False, smooth=False, **kwargs)\n else:\n angle = self.angle\n mask = self.mask\n with open(path, 'wb') as f:\n f.write(\"DIRIMG00\".encode('ascii'))\n # Read the field specifications\n put_int = lambda n: f.write(int(n).to_bytes(4, byteorder='little', signed=True))\n put_int(bx)\n put_int(by)\n put_int(sx)\n put_int(sy)\n rows, cols = angle.shape\n put_int(cols)\n put_int(rows)\n # Values conversion\n angle *= 255.0 / np.pi\n angle = angle.astype(int)\n mask = mask.astype(int)\n mask *= int(255 / mask.max())\n # Write the values\n put_uint8 = lambda n: f.write(int(n).to_bytes(1, byteorder='little', signed=False))\n for a, m in zip(angle.ravel(), mask.ravel()):\n put_uint8(a)\n put_uint8(m)\n\ndef loadDataset(path: str, loadGT: bool = True):\n \"\"\" Loads the FVC-TEST dataset.\n\n Args:\n path: Directory with the FVC-TEST dataset.\n loadGT: whether to load the ground truth information or not.\n\n Return:\n A generator of pairs (X, y) where X has the original image, its mask and its border specifications, and y is the corresponding orientation field ground truth.\n \"\"\"\n with open(path, 'r') as f:\n _ = int(f.readline())\n for line in f:\n name, step, bd = line.split()\n step = int(step)\n bd = int(bd)\n # Load image\n 
image_path = os.path.join(os.path.dirname(path), name)\n image = np.array(PIL.Image.open(image_path).convert('L')).astype(float)\n # Load mask\n mask_path = os.path.splitext(image_path)[0]+'.fg'\n mask = MaskProxy().read(mask_path)\n # Set specifications\n specs = [bd, bd, step, step]\n # Adjust image shape\n _mask = convert_to_full(mask, border_x=bd, border_y=bd, step_x=step, step_y=step, mode='constant')\n image = image[:_mask.shape[0], :_mask.shape[1]]\n # Load the ground truth field\n if loadGT:\n field_path = os.path.splitext(image_path)[0]+'.gt'\n lro, _ = FieldProxy().read(field_path, full=False)\n field = polar2cart(lro, 1, retField=True)\n # Serialize input data and append to X and the ground truth information\n yield (LROEstimator.serialize_Xrow(image, mask, specs), LROEstimator.serialize_yrow(field))\n else:\n yield (LROEstimator.serialize_Xrow(image, mask, specs), image_path)\n\ndef countDatasetElements(path):\n with open(path, 'r') as f:\n return int(f.readline())\n\ndef loadSegmentationDataset(sdir: str, odir: str):\n \"\"\" Loads the dataset for segmentation evaluation.\n\n Args:\n sdir: Path to the segmented images; all the images shall be direct children of this directory.\n odir: Path to the original images; this folder shall contain as direct children the folder of the databases FVC2000, FVC2002, FVC2004 (from DB1a, DB1b, to DB4a, DB4b) - e.g. the main root of the DVD shipped with Handbook of Fingerprint Recognition.\n\n Note:\n If some DB is not available a warning will be issued, but the other images will be loaded anyway.\n\n Return:\n A generator of pairs (X, y) where X is the original image, and y the corresponding ground truth segmentation image.\n \"\"\"\n pattern = re.compile('(FVC\\\\d+)_(\\\\w+)_\\\\w+_(\\\\d+)_(\\\\d+)')\n sfiles = recursively_scan_dir_gen(sdir, '.png')\n for sfile in sfiles:\n basename = os.path.basename(sfile)\n match = pattern.match(basename)\n if match:\n ofile = os.path.join(\n odir,\n match[1], # FVCxxxx\n 'Dbs',\n # converts DB1 to Db1, them appends an 'a' for the first 100 images, and a 'b' otherwise\n match[2].title() + '_' + ('a' if int(match[3])<=100 else 'b'),\n '{}_{}.tif'.format(match[3], match[4]) # append the filename\n )\n yield (ofile, sfile)\n\ndef loadMatchingDatasetFVC(path: str):\n \"\"\" Loads the FVC-TEST dataset.\n\n Args:\n path: Directory with the FVC-TEST dataset.\n\n Return:\n A dictionary whose keys are pairs of:\n - tuples containing a reference to the database and competition where the images belong, and values are lists of pairs (X, y) where X has the pair of image filenames, and y is the corresponding ground truth label, i.e. 
a 0 for reject or 1 for accept;\n - the list of all images found in the given folder.\n \"\"\"\n _, all_image_files = recursively_scan_dir(path, '.tif')\n\n _, index_files = recursively_scan_dir(path, '.MFA')\n comp_pattern = re.compile('(FVC\\\\d+)')\n \n competitions = {}\n # Loop over the four possible databases\n for db_n in range(1, 5):\n for MFA in index_files:\n # Get index for false matches\n MFR = MFA[:-1]+'R'\n # Retrieve competition\n match = comp_pattern.search(MFA)\n if match:\n competition = match[1]\n else:\n competition = 'NULL'\n # Retrieve database type (a or b)\n db_type = MFA[-5].lower()\n # Create a new key for this competition\n comp_key = (competition, db_n, db_type)\n competitions[comp_key] = []\n # Generate database name\n db_name = 'Db{}_{}'.format(db_n, db_type)\n # Take the subset of images related to this dataset\n image_files = [name for name in all_image_files if os.path.basename(os.path.dirname(name)) == db_name]\n # Load all the pairs that will be matched\n challenge_pairs = []\n for ifile, gt in zip([MFA, MFR], [0, 1]):\n dir_ = os.path.dirname(ifile)\n with open(ifile, 'r') as file_:\n for line in file_:\n file1, file2 = line.split()\n path1 = os.path.join(dir_, db_name, file1)\n path2 = os.path.join(dir_, db_name, file2)\n challenge_pairs.append( ((path1, path2), gt) )\n # Update the competition dictionary\n competitions[comp_key] = (challenge_pairs, image_files)\n return competitions\n\ndef loadMatchingDatasetNIST(path: str, ratio: float = 2.0, verbose: bool = True):\n \"\"\" Load NIST SD04 for matching.\n \n Args: \n path: Path to the folder containing the images.\n ratio: Ratio between the number of impostor and genuine matches.\n verbose: whether to print some basic information about the dataset.\n\n Return:\n A tuple (X, y, lenX) where X yields pairs of images, y generates 0 for a non-match and 1 for a match, lenX is the total number of elements.\n \"\"\"\n # Load all images\n _, image_files = recursively_scan_dir(path, ['.jpg', '.jpeg', '.png', '.bmp', '.tif', '.tiff'])\n # Split between first and second impression\n f_image_files = list(filter(lambda s: os.path.basename(s)[0]=='f', image_files))\n\n # Collect the genuine matches\n challenge_pairs = []\n for ffile in f_image_files:\n basename = os.path.basename(ffile)\n basename = 's'+basename[1:]\n sfile = os.path.join( os.path.dirname(ffile), basename )\n challenge_pairs.append( ((ffile, sfile), 1) )\n\n # Get the total number of impostor and genuine matches\n genuine_matches = len(challenge_pairs)\n impostor_matches = int(genuine_matches * ratio)\n total_matches = genuine_matches + impostor_matches\n if verbose:\n print('{} genuine matches and {} impostor matches will be selected'.format(genuine_matches, impostor_matches))\n\n # Collect the impostor matches:\n while True:\n pair = random_combination(image_files, 2)\n left_bname = os.path.basename(pair[0])\n right_bname = os.path.basename(pair[1])\n if left_bname[1:] == right_bname[1:]:\n continue # genuine or the same image\n else:\n challenge_pairs.append( (pair, 0) )\n if len(challenge_pairs) >= total_matches:\n break\n\n competitions = {\n ('NIST', 'SD04', '_'): (challenge_pairs, image_files)\n }\n return competitions\n"
] | [
[
"numpy.array",
"numpy.empty"
]
] |
rhwhite/rhwhitepackages3 | [
"91d5677ea57d7cc9a3643708cd8c82a74fb6188d"
] | [
"SSWs.py"
] | [
"# Module to search for and get data on SSWs\n# Using the definition of Charlton and Polvani (2007):\n\n# Author [email protected]\n\n# Created July 2017\n\nimport numpy as np\nimport xarray as xr\nimport math\nimport sys\n\ndef adddays(U,itime,ndays):\n # Find ndays consecutive days with easterlies\n numcons = 0\n torun = True\n while itime < len(U.time):\n if U[itime] > 0:\n numcons += 1\n else:\n numcons = 0\n if numcons >= ndays:\n return(itime,numcons,False)\n itime += 1\n return(itime,numcons,True)\n\ndef meanSE(N,in0,in1,in2,in3=0):\n # Calculate mean and standard error of number of SSWs\n # a la Charlton and Polvani (2007)\n p0 = float(in0)/float(N)\n p1 = float(in1)/float(N)\n p2 = float(in2)/float(N)\n p3 = float(in3)/float(N)\n\n calcmean = p1 + (2 * p2) + (3 * p3)\n calcSE = ((math.sqrt(((0-calcmean)**2 * p0) +\n ((1-calcmean)**2 * p1) +\n ((2-calcmean)**2 * p2) + \n ((3-calcmean)**2 * p3)))\n /math.sqrt(N))\n return calcmean,calcSE\n\ndef findyearSSWs(U,times,count,thresh,lastdate,startdate,toprint,SSWdates):\n # find all SSWs in a single year\n\n finalwarmingstart = -1\n yearcount = 0\n itime = 0\n # if U starts below 0, iterate until it isn't!\n while U[itime]<0:\n itime +=1\n while itime < len(U.time):\n if U[itime] < 0:\n central,end,itime = findend(U,itime,thresh)\n if end == -1:\n finalwarmingstart = ((times[central]+1) % 365)\n else:\n SSWdates.append(int(times[central]))\n if toprint: print ('SSW, day of year ' +\n str((times[central]) % 365))\n if lastdate < ((times[central] +1) % 365) < startdate :\n # it counts as a final warming\n finalwarmingstart = ((times[central]+1) % 365)\n else:\n count +=1\n yearcount +=1\n\n itime +=1\n return count,yearcount, finalwarmingstart, SSWdates\n\ndef findend(U,itime,thresh):\n # find final SSW\n centraltime,endtime = -1,-1\n if U[itime] < 0:\n centraltime = itime\n\n # Find end date\n while U[itime] < 0:\n itime = itime + 1\n if itime >= len(U.time): return (centraltime,-1,itime)\n endtime = itime\n\n # Check for final warming: ends after April 30th but started before July\n # Add 10 consective easterly days - must occur before April 30th for event\n # to count \n\n newtime,numcons,end = adddays(U,itime,thresh)\n\n if end:\n return(itime,-1,newtime)\n else:\n # Event counts. 
Now add 20 consecutive days\n itime,ndays,end = adddays(U,itime,20)\n return(centraltime,endtime,itime)\n\n\n\ndef findSSWs(U,thresh,Obs=False,startyr = 0):\n # Find SSWs, print the mean number, the standard error, and \n # return the dates\n # Created for WACCM daily data\n\n SSWdates = []\n toprint = False\n SSWyears = []\n startdate = 303 # beginning of November\n lastdate = 119 # end of April \n enddate = 119 # 30th April\n\n count = 0\n yearcount = 0\n singleyear = 0\n doubleyear = 0\n tripleyear = 0\n final = []\n nyears = len(U.time)//365\n times = U.time\n\n # Select first year\n if Obs:\n yearU = U.sel(time=slice(str(startyr) + '-01',str(startyr) + '-04'))\n yeartime = times.sel(time=slice(str(startyr) + '-01',\n str(startyr) +'-04'))\n yeartime = (yeartime.values - np.datetime64('1980-01-01'))/ np.timedelta64(1, 'D')\n else:\n yearU = U.isel(time=slice(0,120))\n yeartime = times[0:120].values\n\n count,yearcount,finalW,SSWdates = findyearSSWs(yearU,yeartime,count,thresh,\n lastdate,startdate,\n toprint, SSWdates)\n if yearcount == 1:\n singleyear +=1\n #if toprint: print('year 0 1 SSW \\n')\n SSWyears.append(0)\n elif yearcount ==2:\n doubleyear +=1\n #if toprint: print('year 0 2 SSWs \\n')\n SSWyears.append(0)\n elif yearcount ==3:\n tripleyear +=1\n SSWyears.append(0)\n final.append(finalW)\n\n for iyear in range(0,nyears):\n if Obs:\n yearU = U.sel(time=slice(str(startyr+iyear) +'-11',\n str(startyr+iyear+1) + '-04'))\n yeartime = times.sel(time=slice(str(startyr+iyear) + '-11',\n str(startyr+iyear+1) +'-04'))\n yeartime = ((yeartime.values - np.datetime64('1980-01-01'))/\n np.timedelta64(1, 'D'))\n\n else:\n yearU = U.isel(time=slice(startdate+(iyear*365),\n enddate + ((iyear + 1) * 365)))\n yeartime = (times[startdate+(iyear*365):\n enddate+((iyear+1)*365)].values)\n\n count,yearcount,finalW,SSWdates = findyearSSWs(\n yearU,yeartime,\n count,thresh,lastdate,startdate,\n toprint,SSWdates)\n if yearcount == 1:\n singleyear +=1\n SSWyears.append(iyear + 1)\n #if toprint: print('year ' + str(iyear +1) + ' 1 SSW \\n')\n elif yearcount ==2:\n doubleyear +=1\n #if toprint: print('year ' + str(iyear +1) + ' 2 SSWs \\n')\n SSWyears.append(iyear + 1)\n elif yearcount ==3:\n tripleyear +=1\n SSWyears.append(iyear + 1)\n final.append(finalW)\n\n if singleyear + 2 * doubleyear +3 * tripleyear != count:\n print(count)\n print(singleyear + 2 * doubleyear +3 * tripleyear)\n sys.exit(\"problem with counting, maybe a year with more than 3 SSWs?!\")\n\n mean,SE = meanSE(nyears,nyears - singleyear - doubleyear,singleyear,doubleyear)\n print ('mean: ' + str(mean) + ' ; s.e.: ' + str(SE) )\n\n return(SSWdates)\n\n"
] | [
[
"numpy.timedelta64",
"numpy.datetime64"
]
] |
haihabi/GenerativeCRB | [
"d53c01bec7214bb087fbe17dba241e12eb60858e"
] | [
"experiments/analysis/edge_bound/training_nlf/camera_nlf_training.py"
] | [
"import torch\nimport numpy as np\nfrom tqdm import tqdm\nfrom torch.utils.data import DataLoader\nfrom experiments.data_model.image_denoising.noise_dataset import NoiseDataSet\nfrom experiments.models_architecture.camera_nlf_flow import generate_nlf_flow\n\n\ndef train_step(in_noise, in_cond_vector):\n opt.zero_grad()\n loss = flow.nll_mean(in_noise, in_cond_vector)\n loss.backward()\n loss_list.append(loss.item())\n opt.step()\n\n\nif __name__ == '__main__':\n\n lr = 1e-4\n patch_size = 32\n n_epochs = 5\n batch_size = 32\n n_iter_per_epoch = 1000\n input_shape = [4, patch_size, patch_size]\n trained_alpha = True\n\n flow = generate_nlf_flow(input_shape, trained_alpha)\n opt = torch.optim.Adam(flow.parameters(), lr=lr)\n nds = NoiseDataSet(\"/data/datasets/SIDD_Medium_Raw/Data\", n_pat_per_im=5000)\n\n nds_dl = DataLoader(nds, batch_size=batch_size, shuffle=True)\n loss_best = np.inf\n for n in range(n_epochs):\n loss_list = []\n for noise, clean, cam, iso in tqdm(nds_dl):\n noise, clean, cam, iso = noise.cuda(), clean.cuda(), cam.long().cuda(), iso.cuda()\n clean = torch.permute(clean, (0, 3, 1, 2)).float()\n noise = torch.permute(noise, (0, 3, 1, 2)).float()\n cond_vector = [clean, iso, cam]\n train_step(noise, cond_vector)\n\n loss_current = sum(loss_list) / len(loss_list)\n print(loss_current)\n if loss_current < loss_best:\n flow_name = \"flow_nlf_best.pt\" if trained_alpha else \"flow_gaussian_best.pt\"\n torch.save(flow.state_dict(), f\"./{flow_name}\")\n loss_best = loss_current\n print(f\"Update Best To:{loss_current}\")\n\n flow_name = \"flow_nlf.pt\" if trained_alpha else \"flow_gaussian.pt\"\n torch.save(flow.state_dict(), f\"./{flow_name}\")\n"
] | [
[
"torch.utils.data.DataLoader",
"torch.permute"
]
] |
RelationRx/pyrelational | [
"41ededeff84158bd88b76d39006764de3388c821"
] | [
"pyrelational/models/mcdropout_model.py"
] | [
"import copy\nimport logging\nfrom abc import ABC\nfrom typing import Dict, Optional, Type, Union\n\nimport torch\nfrom pytorch_lightning import LightningModule\nfrom torch.nn.modules import Module\nfrom torch.utils.data import DataLoader\n\nfrom .generic_model import GenericModel\nfrom .lightning_model import LightningModel\n\nlogger = logging.getLogger()\n\n\nclass GenericMCDropoutModel(GenericModel, ABC):\n \"\"\"\n Generic model wrapper for mcdropout uncertainty estimator\n \"\"\"\n\n def __init__(\n self,\n model_class: Type[Module],\n model_config: Union[str, Dict],\n trainer_config: Union[str, Dict],\n n_estimators: int = 10,\n eval_dropout_prob: float = 0.2,\n ):\n super(GenericMCDropoutModel, self).__init__(model_class, model_config, trainer_config)\n _check_mc_dropout_model(model_class, model_config)\n self.n_estimators = n_estimators\n self.eval_dropout_prob = eval_dropout_prob\n\n def __call__(self, loader: DataLoader) -> torch.Tensor:\n \"\"\"\n\n :param loader: pytorch dataloader\n :return: model predictions\n \"\"\"\n if self.current_model is None:\n raise ValueError(\"No current model, call 'train(train_loader, valid_loader)' to train the model first\")\n predictions = []\n model = self.current_model\n model.eval()\n\n with torch.no_grad():\n _enable_only_dropout_layers(model, self.eval_dropout_prob)\n for _ in range(self.n_estimators):\n model_prediction = []\n for x, _ in loader:\n model_prediction.append(model(x).detach().cpu())\n predictions.append(torch.cat(model_prediction, 0))\n predictions = torch.stack(predictions)\n return predictions\n\n\nclass LightningMCDropoutModel(GenericMCDropoutModel, LightningModel):\n r\"\"\"\n Wrapper for MC Dropout estimator with pytorch lightning trainer\n\n Example:\n\n .. code-block:: python\n\n import torch\n import pytorch_lightning as pl\n\n class PyLModel(pl.LightningModule):\n def __init__(self, in_dim, out_dim):\n super(PyLModel, self).()\n self.linear = torch.nn.Linear(in_dim, out_dim)\n # need to define other train/test steps and optimizers methods required\n # by pytorch-lightning to run this example\n\n wrapper = LightningMCDropoutModel(\n PyLModel,\n model_config={\"in_dim\":10, \"out_dim\":1},\n trainer_config={\"epochs\":100},\n n_estimators=10,\n eval_dropout_prob=0.2,\n )\n wrapper.train(train_loader, valid_loader)\n predictions = wrapper(loader)\n assert predictions.size(0) == 10\n\n \"\"\"\n\n def __init__(\n self,\n model_class: Type[LightningModule],\n model_config: Union[Dict, str],\n trainer_config: Union[Dict, str],\n n_estimators: int = 10,\n eval_dropout_prob: float = 0.2,\n ):\n super(LightningMCDropoutModel, self).__init__(\n model_class,\n model_config,\n trainer_config,\n n_estimators=n_estimators,\n eval_dropout_prob=eval_dropout_prob,\n )\n\n\ndef _enable_only_dropout_layers(model: Module, p: Optional[float] = None) -> None:\n def enable_dropout_on_module(m):\n if m.__class__.__name__.startswith(\"Dropout\"):\n if isinstance(p, float) and (0 <= p <= 1):\n m.p = p\n elif isinstance(p, float) and (p < 0 or p > 1):\n logger.warning(f\"Evaluation dropout probability should be a float between 0 and 1, got {p}\")\n m.train()\n\n model.apply(enable_dropout_on_module)\n\n\ndef _check_mc_dropout_model(model_class: Type[Module], model_config: Dict) -> None:\n model = model_class(**model_config)\n\n def has_dropout_module(model):\n is_dropout = []\n for m in model.children():\n if m.__class__.__name__.startswith(\"Dropout\"):\n is_dropout.append(True)\n else:\n is_dropout += has_dropout_module(m)\n return 
is_dropout\n\n if not any(has_dropout_module(model)):\n raise ValueError(\"Model provided do not contain any torch.nn.Dropout modules, cannot apply MC Dropout\")\n"
] | [
[
"torch.stack",
"torch.no_grad",
"torch.cat"
]
] |
ravish0007/fairml | [
"bdfb707ff9554c1a789dc8de3926c1ef3cfb1fc8"
] | [
"fairml/tests/test_orthogonal_projection.py"
] | [
"from __future__ import division\n\n\nimport pytest\nimport numpy as np\nfrom random import randint\n\nfrom fairml.orthogonal_projection import audit_model\nfrom fairml.orthogonal_projection import get_orthogonal_vector\n\nfrom fairml.utils import mse\nfrom fairml.utils import accuracy\nfrom fairml.utils import detect_feature_sign\n\nfrom fairml.perturbation_strategies import constant_zero\n\n\n# let's define a black-box function\ndef black_box_function(input_data):\n if not (input_data.shape[1] == weights.shape[0]):\n raise Exception(\"problem, misaligned dimensions\")\n output = np.dot(input_data, weights)\n return output\n\n\ndef test_orthogonal_projection(number_of_tries=20, size=10000):\n \"\"\"Orthogonal projection function. \"\"\"\n for i in range(number_of_tries):\n\n a = np.random.normal(0, 1, size)\n b = np.random.normal(0, 1, size)\n c = np.random.binomial(10, 0.1, size)\n d = np.random.uniform(0, 10, size)\n\n # normal-normal check\n orth_b = get_orthogonal_vector(a, b)\n assert np.dot(orth_b, a) < 1e-8\n\n # normal- normal check\n ortho_c = get_orthogonal_vector(a, c)\n assert np.dot(ortho_c, a) < 1e-8\n\n # normal - uniform check\n ortho_d = get_orthogonal_vector(a, d)\n assert np.dot(ortho_d, a) < 1e-8\n\n\ndef test_mse():\n y_true = [3, -0.5, 2, 7]\n y_pred = [2.5, 0.0, 2, 8]\n\n test_mse = mse(y_true, y_pred)\n assert test_mse == 0.375\n\n\ndef test_accuracy():\n y_pred = [0, 2, 1, 3]\n y_true = [0, 1, 2, 3]\n\n test_acc = accuracy(y_pred, y_true)\n print(test_acc)\n assert test_acc == 0.5\n"
] | [
[
"numpy.random.normal",
"numpy.dot",
"numpy.random.uniform",
"numpy.random.binomial"
]
] |
mattboggess/pandas | [
"5551bcf9d297ea8a0aeffb70b17ae6730e8abf89"
] | [
"pandas/core/indexes/interval.py"
] | [
"\"\"\" define the IntervalIndex \"\"\"\nimport textwrap\nimport warnings\n\nimport numpy as np\n\nfrom pandas.compat import add_metaclass\nfrom pandas.core.dtypes.missing import isna\nfrom pandas.core.dtypes.cast import find_common_type, maybe_downcast_to_dtype\nfrom pandas.core.dtypes.common import (\n ensure_platform_int,\n is_list_like,\n is_datetime_or_timedelta_dtype,\n is_datetime64tz_dtype,\n is_integer_dtype,\n is_float_dtype,\n is_interval_dtype,\n is_object_dtype,\n is_scalar,\n is_float,\n is_number,\n is_integer)\nfrom pandas.core.indexes.base import (\n Index, ensure_index,\n default_pprint, _index_shared_docs)\n\nfrom pandas._libs import Timestamp, Timedelta\nfrom pandas._libs.interval import (\n Interval, IntervalMixin, IntervalTree,\n)\n\nfrom pandas.core.indexes.datetimes import date_range\nfrom pandas.core.indexes.timedeltas import timedelta_range\nfrom pandas.core.indexes.multi import MultiIndex\nimport pandas.core.common as com\nfrom pandas.util._decorators import cache_readonly, Appender\nfrom pandas.util._doctools import _WritableDoc\nfrom pandas.util._exceptions import rewrite_exception\nfrom pandas.core.config import get_option\nfrom pandas.tseries.frequencies import to_offset\nfrom pandas.tseries.offsets import DateOffset\n\nimport pandas.core.indexes.base as ibase\nfrom pandas.core.arrays.interval import (IntervalArray,\n _interval_shared_docs)\n\n_VALID_CLOSED = {'left', 'right', 'both', 'neither'}\n_index_doc_kwargs = dict(ibase._index_doc_kwargs)\n_index_doc_kwargs.update(\n dict(klass='IntervalIndex',\n target_klass='IntervalIndex or list of Intervals',\n name=textwrap.dedent(\"\"\"\\\n name : object, optional\n to be stored in the index.\n \"\"\"),\n ))\n\n\ndef _get_next_label(label):\n dtype = getattr(label, 'dtype', type(label))\n if isinstance(label, (Timestamp, Timedelta)):\n dtype = 'datetime64'\n if is_datetime_or_timedelta_dtype(dtype) or is_datetime64tz_dtype(dtype):\n return label + np.timedelta64(1, 'ns')\n elif is_integer_dtype(dtype):\n return label + 1\n elif is_float_dtype(dtype):\n return np.nextafter(label, np.infty)\n else:\n raise TypeError('cannot determine next label for type {typ!r}'\n .format(typ=type(label)))\n\n\ndef _get_prev_label(label):\n dtype = getattr(label, 'dtype', type(label))\n if isinstance(label, (Timestamp, Timedelta)):\n dtype = 'datetime64'\n if is_datetime_or_timedelta_dtype(dtype) or is_datetime64tz_dtype(dtype):\n return label - np.timedelta64(1, 'ns')\n elif is_integer_dtype(dtype):\n return label - 1\n elif is_float_dtype(dtype):\n return np.nextafter(label, -np.infty)\n else:\n raise TypeError('cannot determine next label for type {typ!r}'\n .format(typ=type(label)))\n\n\ndef _get_interval_closed_bounds(interval):\n \"\"\"\n Given an Interval or IntervalIndex, return the corresponding interval with\n closed bounds.\n \"\"\"\n left, right = interval.left, interval.right\n if interval.open_left:\n left = _get_next_label(left)\n if interval.open_right:\n right = _get_prev_label(right)\n return left, right\n\n\ndef _new_IntervalIndex(cls, d):\n \"\"\"\n This is called upon unpickling, rather than the default which doesn't have\n arguments and breaks __new__\n \"\"\"\n return cls.from_arrays(**d)\n\n\n@Appender(_interval_shared_docs['class'] % dict(\n klass=\"IntervalIndex\",\n summary=\"Immutable index of intervals that are closed on the same side.\",\n name=_index_doc_kwargs['name'],\n versionadded=\"0.20.0\",\n extra_methods=\"contains\\n\",\n examples=textwrap.dedent(\"\"\"\\\n\n Examples\n --------\n A new 
``IntervalIndex`` is typically constructed using\n :func:`interval_range`:\n\n >>> pd.interval_range(start=0, end=5)\n IntervalIndex([(0, 1], (1, 2], (2, 3], (3, 4], (4, 5]]\n closed='right', dtype='interval[int64]')\n\n It may also be constructed using one of the constructor\n methods: :meth:`IntervalIndex.from_arrays`,\n :meth:`IntervalIndex.from_breaks`, and :meth:`IntervalIndex.from_tuples`.\n\n See further examples in the doc strings of ``interval_range`` and the\n mentioned constructor methods.\n \"\"\"),\n\n))\n@add_metaclass(_WritableDoc)\nclass IntervalIndex(IntervalMixin, Index):\n _typ = 'intervalindex'\n _comparables = ['name']\n _attributes = ['name', 'closed']\n\n # we would like our indexing holder to defer to us\n _defer_to_indexing = True\n\n # Immutable, so we are able to cache computations like isna in '_mask'\n _mask = None\n\n def __new__(cls, data, closed=None, dtype=None, copy=False,\n name=None, verify_integrity=True):\n\n if name is None and hasattr(data, 'name'):\n name = data.name\n\n with rewrite_exception(\"IntervalArray\", cls.__name__):\n array = IntervalArray(data, closed=closed, copy=copy, dtype=dtype,\n verify_integrity=verify_integrity)\n\n return cls._simple_new(array, name)\n\n @classmethod\n def _simple_new(cls, array, name, closed=None):\n \"\"\"\n Construct from an IntervalArray\n\n Parameters\n ----------\n array : IntervalArray\n name : str\n Attached as result.name\n closed : Any\n Ignored.\n \"\"\"\n result = IntervalMixin.__new__(cls)\n result._data = array\n result.name = name\n result._reset_identity()\n return result\n\n @Appender(_index_shared_docs['_shallow_copy'])\n def _shallow_copy(self, left=None, right=None, **kwargs):\n result = self._data._shallow_copy(left=left, right=right)\n attributes = self._get_attributes_dict()\n attributes.update(kwargs)\n return self._simple_new(result, **attributes)\n\n @cache_readonly\n def _isnan(self):\n \"\"\"Return a mask indicating if each value is NA\"\"\"\n if self._mask is None:\n self._mask = isna(self.left)\n return self._mask\n\n @cache_readonly\n def _engine(self):\n return IntervalTree(self.left, self.right, closed=self.closed)\n\n def __contains__(self, key):\n \"\"\"\n return a boolean if this key is IN the index\n We *only* accept an Interval\n\n Parameters\n ----------\n key : Interval\n\n Returns\n -------\n boolean\n \"\"\"\n if not isinstance(key, Interval):\n return False\n\n try:\n self.get_loc(key)\n return True\n except KeyError:\n return False\n\n def contains(self, key):\n \"\"\"\n Return a boolean indicating if the key is IN the index\n\n We accept / allow keys to be not *just* actual\n objects.\n\n Parameters\n ----------\n key : int, float, Interval\n\n Returns\n -------\n boolean\n \"\"\"\n try:\n self.get_loc(key)\n return True\n except KeyError:\n return False\n\n @classmethod\n @Appender(_interval_shared_docs['from_breaks'] % _index_doc_kwargs)\n def from_breaks(cls, breaks, closed='right', name=None, copy=False,\n dtype=None):\n with rewrite_exception(\"IntervalArray\", cls.__name__):\n array = IntervalArray.from_breaks(breaks, closed=closed, copy=copy,\n dtype=dtype)\n return cls._simple_new(array, name=name)\n\n @classmethod\n @Appender(_interval_shared_docs['from_arrays'] % _index_doc_kwargs)\n def from_arrays(cls, left, right, closed='right', name=None, copy=False,\n dtype=None):\n with rewrite_exception(\"IntervalArray\", cls.__name__):\n array = IntervalArray.from_arrays(left, right, closed, copy=copy,\n dtype=dtype)\n return cls._simple_new(array, name=name)\n\n 
@classmethod\n @Appender(_interval_shared_docs['from_intervals'] % _index_doc_kwargs)\n def from_intervals(cls, data, closed=None, name=None, copy=False,\n dtype=None):\n msg = ('IntervalIndex.from_intervals is deprecated and will be '\n 'removed in a future version; Use IntervalIndex(...) instead')\n warnings.warn(msg, FutureWarning, stacklevel=2)\n with rewrite_exception(\"IntervalArray\", cls.__name__):\n array = IntervalArray(data, closed=closed, copy=copy, dtype=dtype)\n\n if name is None and isinstance(data, cls):\n name = data.name\n\n return cls._simple_new(array, name=name)\n\n @classmethod\n @Appender(_interval_shared_docs['from_tuples'] % _index_doc_kwargs)\n def from_tuples(cls, data, closed='right', name=None, copy=False,\n dtype=None):\n with rewrite_exception(\"IntervalArray\", cls.__name__):\n arr = IntervalArray.from_tuples(data, closed=closed, copy=copy,\n dtype=dtype)\n return cls._simple_new(arr, name=name)\n\n @Appender(_interval_shared_docs['to_tuples'] % dict(\n return_type=\"Index\",\n examples=\"\"\"\n Examples\n --------\n >>> idx = pd.IntervalIndex.from_arrays([0, np.nan, 2], [1, np.nan, 3])\n >>> idx.to_tuples()\n Index([(0.0, 1.0), (nan, nan), (2.0, 3.0)], dtype='object')\n >>> idx.to_tuples(na_tuple=False)\n Index([(0.0, 1.0), nan, (2.0, 3.0)], dtype='object')\"\"\",\n ))\n def to_tuples(self, na_tuple=True):\n tuples = self._data.to_tuples(na_tuple=na_tuple)\n return Index(tuples)\n\n @cache_readonly\n def _multiindex(self):\n return MultiIndex.from_arrays([self.left, self.right],\n names=['left', 'right'])\n\n @property\n def left(self):\n \"\"\"\n Return the left endpoints of each Interval in the IntervalIndex as\n an Index\n \"\"\"\n return self._data._left\n\n @property\n def right(self):\n \"\"\"\n Return the right endpoints of each Interval in the IntervalIndex as\n an Index\n \"\"\"\n return self._data._right\n\n @property\n def closed(self):\n \"\"\"\n Whether the intervals are closed on the left-side, right-side, both or\n neither\n \"\"\"\n return self._data._closed\n\n @Appender(_interval_shared_docs['set_closed'] % _index_doc_kwargs)\n def set_closed(self, closed):\n if closed not in _VALID_CLOSED:\n msg = \"invalid option for 'closed': {closed}\"\n raise ValueError(msg.format(closed=closed))\n\n # return self._shallow_copy(closed=closed)\n array = self._data.set_closed(closed)\n return self._simple_new(array, self.name)\n\n @property\n def length(self):\n \"\"\"\n Return an Index with entries denoting the length of each Interval in\n the IntervalIndex\n \"\"\"\n return self._data.length\n\n @property\n def size(self):\n # Avoid materializing ndarray[Interval]\n return self._data.size\n\n @property\n def shape(self):\n # Avoid materializing ndarray[Interval]\n return self._data.shape\n\n @property\n def itemsize(self):\n msg = ('IntervalIndex.itemsize is deprecated and will be removed in '\n 'a future version')\n warnings.warn(msg, FutureWarning, stacklevel=2)\n\n # supress the warning from the underlying left/right itemsize\n with warnings.catch_warnings():\n warnings.simplefilter('ignore')\n return self.left.itemsize + self.right.itemsize\n\n def __len__(self):\n return len(self.left)\n\n @cache_readonly\n def values(self):\n \"\"\"\n Return the IntervalIndex's data as an IntervalArray.\n \"\"\"\n return self._data\n\n @cache_readonly\n def _values(self):\n return self._data\n\n @cache_readonly\n def _ndarray_values(self):\n return np.array(self._data)\n\n def __array__(self, result=None):\n \"\"\" the array interface, return my values \"\"\"\n 
return self._ndarray_values\n\n def __array_wrap__(self, result, context=None):\n # we don't want the superclass implementation\n return result\n\n def __reduce__(self):\n d = dict(left=self.left,\n right=self.right)\n d.update(self._get_attributes_dict())\n return _new_IntervalIndex, (self.__class__, d), None\n\n @Appender(_index_shared_docs['copy'])\n def copy(self, deep=False, name=None):\n array = self._data.copy(deep=deep)\n attributes = self._get_attributes_dict()\n if name is not None:\n attributes.update(name=name)\n\n return self._simple_new(array, **attributes)\n\n @Appender(_index_shared_docs['astype'])\n def astype(self, dtype, copy=True):\n with rewrite_exception('IntervalArray', self.__class__.__name__):\n new_values = self.values.astype(dtype, copy=copy)\n if is_interval_dtype(new_values):\n return self._shallow_copy(new_values.left, new_values.right)\n return super(IntervalIndex, self).astype(dtype, copy=copy)\n\n @cache_readonly\n def dtype(self):\n \"\"\"Return the dtype object of the underlying data\"\"\"\n return self._data.dtype\n\n @property\n def inferred_type(self):\n \"\"\"Return a string of the type inferred from the values\"\"\"\n return 'interval'\n\n @Appender(Index.memory_usage.__doc__)\n def memory_usage(self, deep=False):\n # we don't use an explicit engine\n # so return the bytes here\n return (self.left.memory_usage(deep=deep) +\n self.right.memory_usage(deep=deep))\n\n @cache_readonly\n def mid(self):\n \"\"\"\n Return the midpoint of each Interval in the IntervalIndex as an Index\n \"\"\"\n return self._data.mid\n\n @cache_readonly\n def is_monotonic(self):\n \"\"\"\n Return True if the IntervalIndex is monotonic increasing (only equal or\n increasing values), else False\n \"\"\"\n return self._multiindex.is_monotonic\n\n @cache_readonly\n def is_monotonic_increasing(self):\n \"\"\"\n Return True if the IntervalIndex is monotonic increasing (only equal or\n increasing values), else False\n \"\"\"\n return self._multiindex.is_monotonic_increasing\n\n @cache_readonly\n def is_monotonic_decreasing(self):\n \"\"\"\n Return True if the IntervalIndex is monotonic decreasing (only equal or\n decreasing values), else False\n \"\"\"\n return self._multiindex.is_monotonic_decreasing\n\n @cache_readonly\n def is_unique(self):\n \"\"\"\n Return True if the IntervalIndex contains unique elements, else False\n \"\"\"\n return self._multiindex.is_unique\n\n @cache_readonly\n def is_non_overlapping_monotonic(self):\n return self._data.is_non_overlapping_monotonic\n\n @Appender(_index_shared_docs['_convert_scalar_indexer'])\n def _convert_scalar_indexer(self, key, kind=None):\n if kind == 'iloc':\n return super(IntervalIndex, self)._convert_scalar_indexer(\n key, kind=kind)\n return key\n\n def _maybe_cast_slice_bound(self, label, side, kind):\n return getattr(self, side)._maybe_cast_slice_bound(label, side, kind)\n\n @Appender(_index_shared_docs['_convert_list_indexer'])\n def _convert_list_indexer(self, keyarr, kind=None):\n \"\"\"\n we are passed a list-like indexer. 
Return the\n indexer for matching intervals.\n \"\"\"\n locs = self.get_indexer_for(keyarr)\n\n # we have missing values\n if (locs == -1).any():\n raise KeyError\n\n return locs\n\n def _maybe_cast_indexed(self, key):\n \"\"\"\n we need to cast the key, which could be a scalar\n or an array-like to the type of our subtype\n \"\"\"\n if isinstance(key, IntervalIndex):\n return key\n\n subtype = self.dtype.subtype\n if is_float_dtype(subtype):\n if is_integer(key):\n key = float(key)\n elif isinstance(key, (np.ndarray, Index)):\n key = key.astype('float64')\n elif is_integer_dtype(subtype):\n if is_integer(key):\n key = int(key)\n\n return key\n\n def _check_method(self, method):\n if method is None:\n return\n\n if method in ['bfill', 'backfill', 'pad', 'ffill', 'nearest']:\n msg = 'method {method} not yet implemented for IntervalIndex'\n raise NotImplementedError(msg.format(method=method))\n\n raise ValueError(\"Invalid fill method\")\n\n def _searchsorted_monotonic(self, label, side, exclude_label=False):\n if not self.is_non_overlapping_monotonic:\n raise KeyError('can only get slices from an IntervalIndex if '\n 'bounds are non-overlapping and all monotonic '\n 'increasing or decreasing')\n\n if isinstance(label, IntervalMixin):\n raise NotImplementedError\n\n # GH 20921: \"not is_monotonic_increasing\" for the second condition\n # instead of \"is_monotonic_decreasing\" to account for single element\n # indexes being both increasing and decreasing\n if ((side == 'left' and self.left.is_monotonic_increasing) or\n (side == 'right' and not self.left.is_monotonic_increasing)):\n sub_idx = self.right\n if self.open_right or exclude_label:\n label = _get_next_label(label)\n else:\n sub_idx = self.left\n if self.open_left or exclude_label:\n label = _get_prev_label(label)\n\n return sub_idx._searchsorted_monotonic(label, side)\n\n def _get_loc_only_exact_matches(self, key):\n if isinstance(key, Interval):\n\n if not self.is_unique:\n raise ValueError(\"cannot index with a slice Interval\"\n \" and a non-unique index\")\n\n # TODO: this expands to a tuple index, see if we can\n # do better\n return Index(self._multiindex.values).get_loc(key)\n raise KeyError\n\n def _find_non_overlapping_monotonic_bounds(self, key):\n if isinstance(key, IntervalMixin):\n start = self._searchsorted_monotonic(\n key.left, 'left', exclude_label=key.open_left)\n stop = self._searchsorted_monotonic(\n key.right, 'right', exclude_label=key.open_right)\n elif isinstance(key, slice):\n # slice\n start, stop = key.start, key.stop\n if (key.step or 1) != 1:\n raise NotImplementedError(\"cannot slice with a slice step\")\n if start is None:\n start = 0\n else:\n start = self._searchsorted_monotonic(start, 'left')\n if stop is None:\n stop = len(self)\n else:\n stop = self._searchsorted_monotonic(stop, 'right')\n else:\n # scalar or index-like\n\n start = self._searchsorted_monotonic(key, 'left')\n stop = self._searchsorted_monotonic(key, 'right')\n return start, stop\n\n def get_loc(self, key, method=None):\n \"\"\"Get integer location, slice or boolean mask for requested label.\n\n Parameters\n ----------\n key : label\n method : {None}, optional\n * default: matches where the label is within an interval only.\n\n Returns\n -------\n loc : int if unique index, slice if monotonic index, else mask\n\n Examples\n ---------\n >>> i1, i2 = pd.Interval(0, 1), pd.Interval(1, 2)\n >>> index = pd.IntervalIndex([i1, i2])\n >>> index.get_loc(1)\n 0\n\n You can also supply an interval or an location for a point inside an\n 
interval.\n\n >>> index.get_loc(pd.Interval(0, 2))\n array([0, 1], dtype=int64)\n >>> index.get_loc(1.5)\n 1\n\n If a label is in several intervals, you get the locations of all the\n relevant intervals.\n\n >>> i3 = pd.Interval(0, 2)\n >>> overlapping_index = pd.IntervalIndex([i2, i3])\n >>> overlapping_index.get_loc(1.5)\n array([0, 1], dtype=int64)\n \"\"\"\n self._check_method(method)\n\n original_key = key\n key = self._maybe_cast_indexed(key)\n\n if self.is_non_overlapping_monotonic:\n if isinstance(key, Interval):\n left = self._maybe_cast_slice_bound(key.left, 'left', None)\n right = self._maybe_cast_slice_bound(key.right, 'right', None)\n key = Interval(left, right, key.closed)\n else:\n key = self._maybe_cast_slice_bound(key, 'left', None)\n\n start, stop = self._find_non_overlapping_monotonic_bounds(key)\n\n if start is None or stop is None:\n return slice(start, stop)\n elif start + 1 == stop:\n return start\n elif start < stop:\n return slice(start, stop)\n else:\n raise KeyError(original_key)\n\n else:\n # use the interval tree\n if isinstance(key, Interval):\n left, right = _get_interval_closed_bounds(key)\n return self._engine.get_loc_interval(left, right)\n else:\n return self._engine.get_loc(key)\n\n def get_value(self, series, key):\n if com.is_bool_indexer(key):\n loc = key\n elif is_list_like(key):\n loc = self.get_indexer(key)\n elif isinstance(key, slice):\n\n if not (key.step is None or key.step == 1):\n raise ValueError(\"cannot support not-default step in a slice\")\n\n try:\n loc = self.get_loc(key)\n except TypeError:\n # we didn't find exact intervals or are non-unique\n msg = \"unable to slice with this key: {key}\".format(key=key)\n raise ValueError(msg)\n\n else:\n loc = self.get_loc(key)\n return series.iloc[loc]\n\n @Appender(_index_shared_docs['get_indexer'] % _index_doc_kwargs)\n def get_indexer(self, target, method=None, limit=None, tolerance=None):\n\n self._check_method(method)\n target = ensure_index(target)\n target = self._maybe_cast_indexed(target)\n\n if self.equals(target):\n return np.arange(len(self), dtype='intp')\n\n if self.is_non_overlapping_monotonic:\n start, stop = self._find_non_overlapping_monotonic_bounds(target)\n\n start_plus_one = start + 1\n if not ((start_plus_one < stop).any()):\n return np.where(start_plus_one == stop, start, -1)\n\n if not self.is_unique:\n raise ValueError(\"cannot handle non-unique indices\")\n\n # IntervalIndex\n if isinstance(target, IntervalIndex):\n indexer = self._get_reindexer(target)\n\n # non IntervalIndex\n else:\n indexer = np.concatenate([self.get_loc(i) for i in target])\n\n return ensure_platform_int(indexer)\n\n def _get_reindexer(self, target):\n \"\"\"\n Return an indexer for a target IntervalIndex with self\n \"\"\"\n\n # find the left and right indexers\n lindexer = self._engine.get_indexer(target.left.values)\n rindexer = self._engine.get_indexer(target.right.values)\n\n # we want to return an indexer on the intervals\n # however, our keys could provide overlapping of multiple\n # intervals, so we iterate thru the indexers and construct\n # a set of indexers\n\n indexer = []\n n = len(self)\n\n for i, (lhs, rhs) in enumerate(zip(lindexer, rindexer)):\n\n target_value = target[i]\n\n # matching on the lhs bound\n if (lhs != -1 and\n self.closed == 'right' and\n target_value.left == self[lhs].right):\n lhs += 1\n\n # matching on the lhs bound\n if (rhs != -1 and\n self.closed == 'left' and\n target_value.right == self[rhs].left):\n rhs -= 1\n\n # not found\n if lhs == -1 and rhs == -1:\n 
indexer.append(np.array([-1]))\n\n elif rhs == -1:\n\n indexer.append(np.arange(lhs, n))\n\n elif lhs == -1:\n\n # care about left/right closed here\n value = self[i]\n\n # target.closed same as self.closed\n if self.closed == target.closed:\n if target_value.left < value.left:\n indexer.append(np.array([-1]))\n continue\n\n # target.closed == 'left'\n elif self.closed == 'right':\n if target_value.left <= value.left:\n indexer.append(np.array([-1]))\n continue\n\n # target.closed == 'right'\n elif self.closed == 'left':\n if target_value.left <= value.left:\n indexer.append(np.array([-1]))\n continue\n\n indexer.append(np.arange(0, rhs + 1))\n\n else:\n indexer.append(np.arange(lhs, rhs + 1))\n\n return np.concatenate(indexer)\n\n @Appender(_index_shared_docs['get_indexer_non_unique'] % _index_doc_kwargs)\n def get_indexer_non_unique(self, target):\n target = self._maybe_cast_indexed(ensure_index(target))\n return super(IntervalIndex, self).get_indexer_non_unique(target)\n\n @Appender(_index_shared_docs['where'])\n def where(self, cond, other=None):\n if other is None:\n other = self._na_value\n values = np.where(cond, self.values, other)\n return self._shallow_copy(values)\n\n def delete(self, loc):\n \"\"\"\n Return a new IntervalIndex with passed location(-s) deleted\n\n Returns\n -------\n new_index : IntervalIndex\n \"\"\"\n new_left = self.left.delete(loc)\n new_right = self.right.delete(loc)\n return self._shallow_copy(new_left, new_right)\n\n def insert(self, loc, item):\n \"\"\"\n Return a new IntervalIndex inserting new item at location. Follows\n Python list.append semantics for negative values. Only Interval\n objects and NA can be inserted into an IntervalIndex\n\n Parameters\n ----------\n loc : int\n item : object\n\n Returns\n -------\n new_index : IntervalIndex\n \"\"\"\n if isinstance(item, Interval):\n if item.closed != self.closed:\n raise ValueError('inserted item must be closed on the same '\n 'side as the index')\n left_insert = item.left\n right_insert = item.right\n elif is_scalar(item) and isna(item):\n # GH 18295\n left_insert = right_insert = item\n else:\n raise ValueError('can only insert Interval objects and NA into '\n 'an IntervalIndex')\n\n new_left = self.left.insert(loc, left_insert)\n new_right = self.right.insert(loc, right_insert)\n return self._shallow_copy(new_left, new_right)\n\n def _as_like_interval_index(self, other):\n self._assert_can_do_setop(other)\n other = ensure_index(other)\n if not isinstance(other, IntervalIndex):\n msg = ('the other index needs to be an IntervalIndex too, but '\n 'was type {}').format(other.__class__.__name__)\n raise TypeError(msg)\n elif self.closed != other.closed:\n msg = ('can only do set operations between two IntervalIndex '\n 'objects that are closed on the same side')\n raise ValueError(msg)\n return other\n\n def _concat_same_dtype(self, to_concat, name):\n \"\"\"\n assert that we all have the same .closed\n we allow a 0-len index here as well\n \"\"\"\n if not len({i.closed for i in to_concat if len(i)}) == 1:\n msg = ('can only append two IntervalIndex objects '\n 'that are closed on the same side')\n raise ValueError(msg)\n return super(IntervalIndex, self)._concat_same_dtype(to_concat, name)\n\n @Appender(_index_shared_docs['take'] % _index_doc_kwargs)\n def take(self, indices, axis=0, allow_fill=True,\n fill_value=None, **kwargs):\n result = self._data.take(indices, axis=axis, allow_fill=allow_fill,\n fill_value=fill_value, **kwargs)\n attributes = self._get_attributes_dict()\n return 
self._simple_new(result, **attributes)\n\n def __getitem__(self, value):\n result = self._data[value]\n if isinstance(result, IntervalArray):\n return self._shallow_copy(result)\n else:\n # scalar\n return result\n\n # __repr__ associated methods are based on MultiIndex\n\n def _format_with_header(self, header, **kwargs):\n return header + list(self._format_native_types(**kwargs))\n\n def _format_native_types(self, na_rep='', quoting=None, **kwargs):\n \"\"\" actually format my specific types \"\"\"\n from pandas.io.formats.format import IntervalArrayFormatter\n return IntervalArrayFormatter(values=self,\n na_rep=na_rep,\n justify='all').get_result()\n\n def _format_data(self, name=None):\n\n # TODO: integrate with categorical and make generic\n # name argument is unused here; just for compat with base / categorical\n n = len(self)\n max_seq_items = min((get_option(\n 'display.max_seq_items') or n) // 10, 10)\n\n formatter = str\n\n if n == 0:\n summary = '[]'\n elif n == 1:\n first = formatter(self[0])\n summary = '[{first}]'.format(first=first)\n elif n == 2:\n first = formatter(self[0])\n last = formatter(self[-1])\n summary = '[{first}, {last}]'.format(first=first, last=last)\n else:\n\n if n > max_seq_items:\n n = min(max_seq_items // 2, 10)\n head = [formatter(x) for x in self[:n]]\n tail = [formatter(x) for x in self[-n:]]\n summary = '[{head} ... {tail}]'.format(\n head=', '.join(head), tail=', '.join(tail))\n else:\n tail = [formatter(x) for x in self]\n summary = '[{tail}]'.format(tail=', '.join(tail))\n\n return summary + ',' + self._format_space()\n\n def _format_attrs(self):\n attrs = [('closed', repr(self.closed))]\n if self.name is not None:\n attrs.append(('name', default_pprint(self.name)))\n attrs.append(('dtype', \"'{dtype}'\".format(dtype=self.dtype)))\n return attrs\n\n def _format_space(self):\n space = ' ' * (len(self.__class__.__name__) + 1)\n return \"\\n{space}\".format(space=space)\n\n def argsort(self, *args, **kwargs):\n return np.lexsort((self.right, self.left))\n\n def equals(self, other):\n \"\"\"\n Determines if two IntervalIndex objects contain the same elements\n \"\"\"\n if self.is_(other):\n return True\n\n # if we can coerce to an II\n # then we can compare\n if not isinstance(other, IntervalIndex):\n if not is_interval_dtype(other):\n return False\n other = Index(getattr(other, '.values', other))\n\n return (self.left.equals(other.left) and\n self.right.equals(other.right) and\n self.closed == other.closed)\n\n def _setop(op_name):\n def func(self, other):\n other = self._as_like_interval_index(other)\n\n # GH 19016: ensure set op will not return a prohibited dtype\n subtypes = [self.dtype.subtype, other.dtype.subtype]\n common_subtype = find_common_type(subtypes)\n if is_object_dtype(common_subtype):\n msg = ('can only do {op} between two IntervalIndex '\n 'objects that have compatible dtypes')\n raise TypeError(msg.format(op=op_name))\n\n result = getattr(self._multiindex, op_name)(other._multiindex)\n result_name = self.name if self.name == other.name else None\n\n # GH 19101: ensure empty results have correct dtype\n if result.empty:\n result = result.values.astype(self.dtype.subtype)\n else:\n result = result.values\n\n return type(self).from_tuples(result, closed=self.closed,\n name=result_name)\n return func\n\n union = _setop('union')\n intersection = _setop('intersection')\n difference = _setop('difference')\n symmetric_difference = _setop('symmetric_difference')\n\n # TODO: arithmetic 
operations\n\n\nIntervalIndex._add_logical_methods_disabled()\n\n\ndef _is_valid_endpoint(endpoint):\n \"\"\"helper for interval_range to check if start/end are valid types\"\"\"\n return any([is_number(endpoint),\n isinstance(endpoint, Timestamp),\n isinstance(endpoint, Timedelta),\n endpoint is None])\n\n\ndef _is_type_compatible(a, b):\n \"\"\"helper for interval_range to check type compat of start/end/freq\"\"\"\n is_ts_compat = lambda x: isinstance(x, (Timestamp, DateOffset))\n is_td_compat = lambda x: isinstance(x, (Timedelta, DateOffset))\n return ((is_number(a) and is_number(b)) or\n (is_ts_compat(a) and is_ts_compat(b)) or\n (is_td_compat(a) and is_td_compat(b)) or\n com._any_none(a, b))\n\n\ndef interval_range(start=None, end=None, periods=None, freq=None,\n name=None, closed='right'):\n \"\"\"\n Return a fixed frequency IntervalIndex\n\n Parameters\n ----------\n start : numeric or datetime-like, default None\n Left bound for generating intervals\n end : numeric or datetime-like, default None\n Right bound for generating intervals\n periods : integer, default None\n Number of periods to generate\n freq : numeric, string, or DateOffset, default None\n The length of each interval. Must be consistent with the type of start\n and end, e.g. 2 for numeric, or '5H' for datetime-like. Default is 1\n for numeric and 'D' for datetime-like.\n name : string, default None\n Name of the resulting IntervalIndex\n closed : {'left', 'right', 'both', 'neither'}, default 'right'\n Whether the intervals are closed on the left-side, right-side, both\n or neither.\n\n Notes\n -----\n Of the four parameters ``start``, ``end``, ``periods``, and ``freq``,\n exactly three must be specified. If ``freq`` is omitted, the resulting\n ``IntervalIndex`` will have ``periods`` linearly spaced elements between\n ``start`` and ``end``, inclusively.\n\n To learn more about datetime-like frequency strings, please see `this link\n <http://pandas.pydata.org/pandas-docs/stable/timeseries.html#offset-aliases>`__.\n\n Returns\n -------\n rng : IntervalIndex\n\n Examples\n --------\n Numeric ``start`` and ``end`` is supported.\n\n >>> pd.interval_range(start=0, end=5)\n IntervalIndex([(0, 1], (1, 2], (2, 3], (3, 4], (4, 5]]\n closed='right', dtype='interval[int64]')\n\n Additionally, datetime-like input is also supported.\n\n >>> pd.interval_range(start=pd.Timestamp('2017-01-01'),\n end=pd.Timestamp('2017-01-04'))\n IntervalIndex([(2017-01-01, 2017-01-02], (2017-01-02, 2017-01-03],\n (2017-01-03, 2017-01-04]]\n closed='right', dtype='interval[datetime64[ns]]')\n\n The ``freq`` parameter specifies the frequency between the left and right.\n endpoints of the individual intervals within the ``IntervalIndex``. 
For\n numeric ``start`` and ``end``, the frequency must also be numeric.\n\n >>> pd.interval_range(start=0, periods=4, freq=1.5)\n IntervalIndex([(0.0, 1.5], (1.5, 3.0], (3.0, 4.5], (4.5, 6.0]]\n closed='right', dtype='interval[float64]')\n\n Similarly, for datetime-like ``start`` and ``end``, the frequency must be\n convertible to a DateOffset.\n\n >>> pd.interval_range(start=pd.Timestamp('2017-01-01'),\n periods=3, freq='MS')\n IntervalIndex([(2017-01-01, 2017-02-01], (2017-02-01, 2017-03-01],\n (2017-03-01, 2017-04-01]]\n closed='right', dtype='interval[datetime64[ns]]')\n\n Specify ``start``, ``end``, and ``periods``; the frequency is generated\n automatically (linearly spaced).\n\n >>> pd.interval_range(start=0, end=6, periods=4)\n IntervalIndex([(0.0, 1.5], (1.5, 3.0], (3.0, 4.5], (4.5, 6.0]]\n closed='right',\n dtype='interval[float64]')\n\n The ``closed`` parameter specifies which endpoints of the individual\n intervals within the ``IntervalIndex`` are closed.\n\n >>> pd.interval_range(end=5, periods=4, closed='both')\n IntervalIndex([[1, 2], [2, 3], [3, 4], [4, 5]]\n closed='both', dtype='interval[int64]')\n\n See Also\n --------\n IntervalIndex : an Index of intervals that are all closed on the same side.\n \"\"\"\n start = com.maybe_box_datetimelike(start)\n end = com.maybe_box_datetimelike(end)\n endpoint = start if start is not None else end\n\n if freq is None and com._any_none(periods, start, end):\n freq = 1 if is_number(endpoint) else 'D'\n\n if com.count_not_none(start, end, periods, freq) != 3:\n raise ValueError('Of the four parameters: start, end, periods, and '\n 'freq, exactly three must be specified')\n\n if not _is_valid_endpoint(start):\n msg = 'start must be numeric or datetime-like, got {start}'\n raise ValueError(msg.format(start=start))\n elif not _is_valid_endpoint(end):\n msg = 'end must be numeric or datetime-like, got {end}'\n raise ValueError(msg.format(end=end))\n\n if is_float(periods):\n periods = int(periods)\n elif not is_integer(periods) and periods is not None:\n msg = 'periods must be a number, got {periods}'\n raise TypeError(msg.format(periods=periods))\n\n if freq is not None and not is_number(freq):\n try:\n freq = to_offset(freq)\n except ValueError:\n raise ValueError('freq must be numeric or convertible to '\n 'DateOffset, got {freq}'.format(freq=freq))\n\n # verify type compatibility\n if not all([_is_type_compatible(start, end),\n _is_type_compatible(start, freq),\n _is_type_compatible(end, freq)]):\n raise TypeError(\"start, end, freq need to be type compatible\")\n\n # +1 to convert interval count to breaks count (n breaks = n-1 intervals)\n if periods is not None:\n periods += 1\n\n if is_number(endpoint):\n # force consistency between start/end/freq (lower end if freq skips it)\n if com._all_not_none(start, end, freq):\n end -= (end - start) % freq\n\n # compute the period/start/end if unspecified (at most one)\n if periods is None:\n periods = int((end - start) // freq) + 1\n elif start is None:\n start = end - (periods - 1) * freq\n elif end is None:\n end = start + (periods - 1) * freq\n\n breaks = np.linspace(start, end, periods)\n if all(is_integer(x) for x in com._not_none(start, end, freq)):\n # np.linspace always produces float output\n breaks = maybe_downcast_to_dtype(breaks, 'int64')\n else:\n # delegate to the appropriate range function\n if isinstance(endpoint, Timestamp):\n range_func = date_range\n else:\n range_func = timedelta_range\n\n breaks = range_func(start=start, end=end, periods=periods, freq=freq)\n\n return 
IntervalIndex.from_breaks(breaks, name=name, closed=closed)\n"
] | [
[
"pandas.core.dtypes.common.is_number",
"pandas.core.indexes.base.default_pprint",
"pandas.core.dtypes.common.is_float_dtype",
"pandas.core.arrays.interval.IntervalArray",
"numpy.nextafter",
"pandas.core.arrays.interval.IntervalArray.from_breaks",
"pandas.core.dtypes.common.is_integer",
"pandas.core.dtypes.common.is_list_like",
"pandas.core.common._all_not_none",
"pandas.core.dtypes.common.is_float",
"pandas.core.config.get_option",
"pandas.core.arrays.interval.IntervalArray.from_arrays",
"pandas.core.common.count_not_none",
"numpy.timedelta64",
"pandas._libs.interval.IntervalTree",
"pandas.core.indexes.base.ensure_index",
"pandas.core.dtypes.cast.maybe_downcast_to_dtype",
"numpy.where",
"pandas.core.common.maybe_box_datetimelike",
"numpy.linspace",
"pandas.core.common._not_none",
"pandas.core.indexes.base.Index",
"pandas.util._exceptions.rewrite_exception",
"pandas.tseries.frequencies.to_offset",
"pandas.core.dtypes.common.is_interval_dtype",
"numpy.lexsort",
"pandas.io.formats.format.IntervalArrayFormatter",
"numpy.arange",
"pandas.core.common.is_bool_indexer",
"pandas.core.dtypes.cast.find_common_type",
"pandas._libs.interval.IntervalMixin.__new__",
"pandas.core.indexes.multi.MultiIndex.from_arrays",
"pandas.core.dtypes.common.ensure_platform_int",
"pandas.core.common._any_none",
"pandas.core.dtypes.missing.isna",
"pandas.core.dtypes.common.is_scalar",
"pandas.util._decorators.Appender",
"pandas.core.dtypes.common.is_datetime_or_timedelta_dtype",
"pandas.compat.add_metaclass",
"pandas.core.dtypes.common.is_datetime64tz_dtype",
"pandas.core.arrays.interval.IntervalArray.from_tuples",
"pandas.core.dtypes.common.is_object_dtype",
"pandas._libs.interval.Interval",
"numpy.array",
"numpy.concatenate",
"pandas.core.dtypes.common.is_integer_dtype"
]
] |
charliezjw/Neural-Signal-Decoder | [
"fb0df09ba0314724c7c90141bd47cc8fb0201b7a"
] | [
"try.py"
] | [
"import numpy as np\nimport tensorflow as tf\n\n# a = tf.placeholder(tf.int32, [None, 3])\n#\n# b = tf.convert_to_tensor(tf.argmax(tf.bincount(a[0])))\n# b = tf.stack([b, tf.argmax(tf.bincount(a[1]))], 0)\n# for i in range(2, 5):\n# max_indx = tf.argmax(tf.bincount(a[i]))\n# b = tf.concat([b, [max_indx]], 0)\n#\n# with tf.Session() as sess:\n# t1 = np.asarray([[1, 1, 0], [2, 4, 4], [6, 6, 6], [5, 5, 5], [2, 7, 7]])\n# t2, t3 = sess.run([b, max_indx], feed_dict={a: t1})\n# print(t2)\n# print(t3)\na = np.asarray(np.asarray([[1, 1, 0], [2, 4, 4], [6, 6, 6], [5, 5, 5], [2, 7, 7]]))\nb = np.zeros(a.shape[0])\nc = np.asarray([1, 4, 6, 7, 9])\n\nfor i in range(a.shape[0]):\n b[i] = np.argmax(np.bincount(a[i]))\n\nprint(np.mean(np.equal(b, c)))"
] | [
[
"numpy.equal",
"numpy.bincount",
"numpy.asarray",
"numpy.zeros"
]
] |
lcintron/WhoopClient | [
"46ccc6c3e3b98f4b6c82cf8938056d72a22bd6b6"
] | [
"WhoopClient.py"
] | [
"import requests\nimport pandas as pd\nimport numpy as np\nimport configparser\nfrom datetime import datetime\nfrom dateutil import relativedelta, parser, rrule\nfrom dateutil.rrule import WEEKLY\n\n\nclass WhoopClient:\n '''A class to allow a user to login and store their authorization code,\n then perform pulls using the code in order to access different types of data'''\n def __init__(self,\n auth_code=None,\n whoop_id=None,\n current_datetime=datetime.utcnow()):\n self.auth_code = auth_code\n self.whoop_id = whoop_id\n self.current_datetime = current_datetime\n self.start_datetime = None\n self.all_data = None\n self.all_activities = None\n self.sport_dict = None\n self.all_sleep = None\n self.all_sleep_events = None\n\n def reset(self):\n self.auth_code = None\n self.whoop_id = None\n self.current_datetime = datetime.utcnow()\n self.start_datetime = None\n self.all_data = None\n self.all_activities = None\n self.sport_dict = None\n self.all_sleep = None\n self.all_sleep_events = None\n\n def pull_api(self, url, df=False):\n auth_code = self.auth_code\n headers = {'authorization': auth_code}\n pull = requests.get(url, headers=headers)\n if pull.status_code == 200 and len(pull.content) > 1:\n if df:\n d = pd.json_normalize(pull.json())\n return d\n else:\n return pull.json()\n else:\n return \"no response\"\n\n def pull_sleep_main(self, sleep_id):\n athlete_id = self.whoop_id\n sleep = self.pull_api(\n 'https://api-7.whoop.com/users/{}/sleeps/{}'.format(\n athlete_id, sleep_id))\n main_df = pd.json_normalize(sleep)\n return main_df\n\n def pull_sleep_events(self, sleep_id):\n athlete_id = self.whoop_id\n sleep = self.pull_api(\n 'https://api-7.whoop.com/users/{}/sleeps/{}'.format(\n athlete_id, sleep_id))\n events_df = pd.json_normalize(sleep['events'])\n events_df['id'] = sleep_id\n return events_df\n\n def get_authorization(self, user_ini):\n '''\n Function to get the authorization token and user id.\n This must be completed before a user can query the api\n '''\n\n config = configparser.ConfigParser()\n config.read(user_ini)\n username = config['whoop']['username']\n password = config['whoop']['password']\n\n headers = {\n \"username\": username,\n \"password\": password,\n \"grant_type\": \"password\",\n \"issueRefresh\": False\n }\n auth = requests.post(\"https://api-7.whoop.com/oauth/token\",\n json=headers)\n\n if auth.status_code == 200:\n content = auth.json()\n user_id = content['user']['id']\n token = content['access_token']\n start_time = content['user']['profile']['createdAt']\n self.whoop_id = user_id\n self.auth_code = 'bearer ' + token\n self.start_datetime = start_time\n print(\"Whoop: Authentication successful\")\n\n else:\n print(\n \"Authentication failed - please double check your credentials\")\n\n def get_keydata_all(self):\n '''\n This function returns a dataframe of WHOOP metrics for each day of WHOOP membership.\n In the resulting dataframe, each day is a row and contains strain, recovery, and sleep information\n '''\n\n if self.start_datetime:\n if self.all_data is not None:\n ## All data already pulled\n return self.all_data\n else:\n start_date = parser.isoparse(\n self.start_datetime).replace(tzinfo=None)\n end_time = 'T23:59:59.999Z'\n start_time = 'T00:00:00.000Z'\n intervals = rrule.rrule(freq=WEEKLY,\n interval=1,\n until=self.current_datetime,\n dtstart=start_date)\n date_range = [[\n d.strftime('%Y-%m-%d') + start_time,\n (d +\n relativedelta.relativedelta(weeks=1)).strftime('%Y-%m-%d')\n + end_time\n ] for d in intervals]\n all_data = 
pd.DataFrame()\n for dates in date_range:\n cycle_url = 'https://api-7.whoop.com/users/{}/cycles?end={}&start={}'.format(\n self.whoop_id, dates[1], dates[0])\n data = self.pull_api(cycle_url, df=True)\n all_data = pd.concat([all_data, data])\n all_data.reset_index(drop=True, inplace=True)\n\n ## fixing the day column so it's not a list\n all_data['days'] = all_data['days'].map(lambda d: d[0])\n all_data.rename(columns={\"days\": 'day'}, inplace=True)\n\n ## Putting all time into minutes instead of milliseconds\n sleep_cols = [\n 'qualityDuration', 'needBreakdown.baseline',\n 'needBreakdown.debt', 'needBreakdown.naps',\n 'needBreakdown.strain', 'needBreakdown.total'\n ]\n for sleep_col in sleep_cols:\n all_data['sleep.' + sleep_col] = all_data[\n 'sleep.' + sleep_col].astype(float).apply(\n lambda x: np.nan if np.isnan(x) else x / 60000)\n\n ## Making nap variable\n all_data['nap_duration'] = all_data['sleep.naps'].apply(\n lambda x: x[0]['qualityDuration'] / 60000\n if len(x) == 1 else (sum([\n y['qualityDuration'] for y in x\n if y['qualityDuration'] is not None\n ]) / 60000 if len(x) > 1 else 0))\n all_data.drop(['sleep.naps'], axis=1, inplace=True)\n ## dropping duplicates subsetting because of list columns\n all_data.drop_duplicates(subset=['day', 'sleep.id'],\n inplace=True)\n\n self.all_data = all_data\n return all_data\n else:\n print(\"Please run the authorization function first\")\n\n def get_activities_all(self):\n '''\n Activity data is pulled through the get_keydata functions so if the data pull is present, this function\n just transforms the activity column into a dataframe of activities, where each activity is a row.\n If it has not been pulled, this function runs the key data function then returns the activity dataframe'''\n\n if self.sport_dict:\n sport_dict = self.sport_dict\n else:\n sports = self.pull_api('https://api-7.whoop.com/sports')\n sport_dict = {sport['id']: sport['name'] for sport in sports}\n self.sport_dict = self.sport_dict\n\n if self.start_datetime:\n ## process activity data\n\n if self.all_data is not None:\n ## use existing\n data = self.all_data\n else:\n ## pull all data to process activities\n data = self.get_keydata_all()\n ## now process activities data\n act_data = pd.json_normalize(\n data[data['strain.workouts'].apply(len) > 0]\n ['strain.workouts'].apply(lambda x: x[0]))\n act_data[['during.upper', 'during.lower'\n ]] = act_data[['during.upper',\n 'during.lower']].apply(pd.to_datetime)\n act_data['total_minutes'] = act_data.apply(\n lambda x:\n (x['during.upper'] - x['during.lower']).total_seconds() / 60.0,\n axis=1)\n for z in range(0, 6):\n act_data['zone{}_minutes'.format(\n z + 1)] = act_data['zones'].apply(lambda x: x[z] / 60000.)\n act_data['sport_name'] = act_data.sportId.apply(\n lambda x: sport_dict[x])\n\n act_data['day'] = act_data['during.lower'].dt.strftime('%Y-%m-%d')\n act_data.drop(['zones', 'during.bounds'], axis=1, inplace=True)\n act_data.drop_duplicates(inplace=True)\n self.all_activities = act_data\n return act_data\n else:\n print(\"Whoop: Please run the authorization function first\")\n\n def get_sleep_all(self):\n '''\n This function returns all sleep metrics in a data frame, for the duration of user's WHOOP membership.\n Each row in the data frame represents one night of sleep\n '''\n if self.auth_code:\n if self.all_data is not None:\n ## use existing\n data = self.all_data\n else:\n ## pull timeframe data\n data = self.get_keydata_all()\n\n ## getting all the sleep ids\n if self.all_sleep is not None:\n ## All sleep 
data already pulled\n return self.all_sleep\n else:\n sleep_ids = data['sleep.id'].values.tolist()\n sleep_list = [int(x) for x in sleep_ids if pd.isna(x) == False]\n all_sleep = pd.DataFrame()\n for s in sleep_list:\n m = self.pull_sleep_main(s)\n all_sleep = pd.concat([all_sleep, m])\n\n ## Cleaning sleep data\n sleep_update = [\n 'qualityDuration', 'latency', 'debtPre', 'debtPost',\n 'needFromStrain', 'sleepNeed', 'habitualSleepNeed',\n 'timeInBed', 'lightSleepDuration', 'slowWaveSleepDuration',\n 'remSleepDuration', 'wakeDuration', 'arousalTime',\n 'noDataDuration', 'creditFromNaps', 'projectedSleep'\n ]\n\n for col in sleep_update:\n all_sleep[col] = all_sleep[col].astype(float).apply(\n lambda x: np.nan if np.isnan(x) else x / 60000)\n\n all_sleep.drop(['during.bounds'], axis=1, inplace=True)\n self.all_sleep = all_sleep.copy(deep=True)\n all_sleep.drop(['events'], axis=1, inplace=True)\n return all_sleep\n else:\n print(\"Whoop: Please run the authorization function first\")\n\n def get_sleep_events_all(self):\n '''\n This function returns all sleep events in a data frame, for the duration of user's WHOOP membership.\n Each row in the data frame represents an individual sleep event within an individual night of sleep.\n Sleep events can be joined against the sleep or main datasets by sleep id.\n All sleep times are returned in minutes.\n '''\n if self.auth_code:\n if self.all_data is not None:\n ## use existing\n data = self.all_data\n else:\n ## pull timeframe data\n data = self.get_keydata_all()\n\n ## getting all the sleep ids\n if self.all_sleep_events is not None:\n ## All sleep data already pulled\n return self.all_sleep_events\n else:\n if self.all_sleep is not None:\n sleep_events = self.all_sleep[['activityId', 'events']]\n all_sleep_events = pd.concat([\n pd.concat([\n pd.json_normalize(events),\n pd.DataFrame({'id': len(events) * [sleep]})\n ],\n axis=1) for events, sleep in\n zip(sleep_events['events'], sleep_events['activityId'])\n ])\n else:\n sleep_ids = data['sleep.id'].values.tolist()\n sleep_list = [\n int(x) for x in sleep_ids if pd.isna(x) == False\n ]\n all_sleep_events = pd.DataFrame()\n for s in sleep_list:\n events = self.pull_sleep_events(s)\n all_sleep_events = pd.concat(\n [all_sleep_events, events])\n\n ## Cleaning sleep events data\n all_sleep_events['during.lower'] = pd.to_datetime(\n all_sleep_events['during.lower'])\n all_sleep_events['during.upper'] = pd.to_datetime(\n all_sleep_events['during.upper'])\n all_sleep_events.drop(['during.bounds'], axis=1, inplace=True)\n all_sleep_events['total_minutes'] = all_sleep_events.apply(\n lambda x: (x['during.upper'] - x['during.lower']\n ).total_seconds() / 60.0,\n axis=1)\n\n self.all_sleep_events = all_sleep_events\n return all_sleep_events\n else:\n print(\"Whoop: Please run the authorization function first\")\n\n #returnTYpe = df, json\n def get_hr_all(self, returnType=None):\n '''\n This function will pull every heart rate measurement recorded for the life of WHOOP membership.\n The default return for this function is a list of lists, where each \"row\" contains the date, time, and hr value.\n The measurements are spaced out every ~6 seconds on average.\n\n To return a dataframe, set df=True. This will take a bit longer, but will return a data frame.\n\n NOTE: This api pull takes about 6 seconds per week of data ... 
or 1 minutes for 10 weeks of data,\n so be careful when you pull, it may take a while.\n '''\n if self.start_datetime:\n athlete_id = self.whoop_id\n start_date = parser.isoparse(\n self.start_datetime).replace(tzinfo=None)\n end_time = 'T23:59:59.999Z'\n start_time = 'T00:00:00.000Z'\n intervals = rrule.rrule(freq=WEEKLY,\n interval=1,\n until=self.current_datetime,\n dtstart=start_date)\n date_range = [[\n d.strftime('%Y-%m-%d') + start_time,\n (d + relativedelta.relativedelta(weeks=1)).strftime('%Y-%m-%d')\n + end_time\n ] for d in intervals]\n\n hr_list = []\n for dates in date_range:\n start = dates[0]\n end = dates[1]\n ul = '''https://api-7.whoop.com/users/{}/metrics/heart_rate?end={}&order=t&start={}&step=6'''.format(\n athlete_id, end, start)\n hr_vals = self.pull_api(ul)['values']\n hr_values = [[\n datetime.utcfromtimestamp(h['time'] / 1e3).date(),\n datetime.utcfromtimestamp(h['time'] / 1e3).time(),\n h['data']\n ] for h in hr_vals]\n hr_list.extend(hr_values)\n if returnType == \"df\":\n hr_df = pd.DataFrame(hr_list)\n hr_df.columns = ['date', 'time', 'hr']\n return hr_df\n\n elif returnType == \"json\":\n hr_json = [{\n 'datetime': str(h[0]) + 'T' + str(h[1]),\n 'hr': h[2]\n } for h in hr_list]\n return hr_json\n else:\n return hr_list\n else:\n print(\"Please run the authorization function first\")\n\n def get_keydata_timeframe(self,\n start,\n end=datetime.strftime(datetime.utcnow(),\n \"%Y-%m-%d\")):\n '''\n This function returns a dataframe of WHOOP metrics for each day in a specified time period.\n To use this function, provide a start and end date in string format as follows \"YYYY-MM-DD\".\n\n If no end date is specified, it will default to today's date.\n\n In the resulting dataframe, each day is a row and contains strain, recovery, and sleep information\n '''\n\n st = datetime.strptime(start, '%Y-%m-%d')\n e = datetime.strptime(end, '%Y-%m-%d')\n if st > e:\n if e > datetime.today():\n print(\"Please enter an end date earlier than tomorrow\")\n else:\n print(\n \"Please enter a start date that is earlier than your end date\"\n )\n else:\n if self.auth_code:\n end_time = 'T23:59:59.999Z'\n start_time = 'T00:00:00.000Z'\n intervals = rrule.rrule(freq=WEEKLY,\n interval=1,\n until=e,\n dtstart=st)\n date_range = [[\n d.strftime('%Y-%m-%d') + start_time,\n (d +\n relativedelta.relativedelta(weeks=1)).strftime('%Y-%m-%d')\n + end_time\n ] for d in intervals if d <= e]\n time_data = pd.DataFrame()\n for dates in date_range:\n cycle_url = 'https://api-7.whoop.com/users/{}/cycles?end={}&start={}'.format(\n self.whoop_id, dates[1], dates[0])\n data = self.pull_api(cycle_url, df=True)\n time_data = pd.concat([time_data, data])\n time_data.reset_index(drop=True, inplace=True)\n\n ## fixing the day column so it's not a list\n time_data['days'] = time_data['days'].map(lambda d: d[0])\n time_data.rename(columns={\"days\": 'day'}, inplace=True)\n\n ## Putting all time into minutes instead of milliseconds\n sleep_cols = [\n 'qualityDuration', 'needBreakdown.baseline',\n 'needBreakdown.debt', 'needBreakdown.naps',\n 'needBreakdown.strain', 'needBreakdown.total'\n ]\n for sleep_col in sleep_cols:\n time_data['sleep.' + sleep_col] = time_data[\n 'sleep.' 
+ sleep_col].astype(float).apply(\n lambda x: np.nan if np.isnan(x) else x / 60000)\n\n ## Making nap variable\n time_data['nap_duration'] = time_data['sleep.naps'].apply(\n lambda x: x[0]['qualityDuration'] / 60000\n if len(x) == 1 else (sum([\n y['qualityDuration'] for y in x\n if y['qualityDuration'] is not None\n ]) / 60000 if len(x) > 1 else 0))\n time_data.drop(['sleep.naps'], axis=1, inplace=True)\n\n ## removing duplicates\n time_data.drop_duplicates(subset=['day', 'sleep.id'],\n inplace=True)\n\n return time_data\n else:\n print(\"Whoop: Please run the authorization function first\")\n\n def get_activities_timeframe(self,\n start,\n end=datetime.strftime(datetime.utcnow(),\n \"%Y-%m-%d\")):\n '''\n Activity data is pulled through the get_keydata functions so if the data pull is present, this function\n just transforms the activity column into a dataframe of activities, where each activity is a row.\n If it has not been pulled, this function runs the key data function then returns the activity dataframe\n\n If no end date is specified, it will default to today's date.\n '''\n\n st = datetime.strptime(start, '%Y-%m-%d')\n e = datetime.strptime(end, '%Y-%m-%d')\n if st > e:\n if e > datetime.today():\n print(\"Please enter an end date earlier than tomorrow\")\n else:\n print(\n \"Please enter a start date that is earlier than your end date\"\n )\n else:\n\n if self.auth_code:\n\n if self.sport_dict:\n sport_dict = self.sport_dict\n else:\n sports = self.pull_api('https://api-7.whoop.com/sports')\n sport_dict = {\n sport['id']: sport['name']\n for sport in sports\n }\n self.sport_dict = self.sport_dict\n\n ## process activity data\n if self.all_data is not None:\n ## use existing\n data = self.all_data\n data = data[(data.day >= start)\n & (data.day <= end)].copy(deep=True)\n else:\n ## pull timeframe data\n data = self.get_keydata_timeframe(start, end)\n ## now process activities data\n act_data = pd.json_normalize(\n data[data['strain.workouts'].apply(len) > 0]\n ['strain.workouts'].apply(lambda x: x[0]))\n act_data[['during.upper', 'during.lower'\n ]] = act_data[['during.upper',\n 'during.lower']].apply(pd.to_datetime)\n act_data['total_minutes'] = act_data.apply(\n lambda x: (x['during.upper'] - x['during.lower']\n ).total_seconds() / 60.0,\n axis=1)\n for z in range(0, 6):\n act_data['zone{}_minutes'.format(\n z +\n 1)] = act_data['zones'].apply(lambda x: x[z] / 60000.)\n act_data['sport_name'] = act_data.sportId.apply(\n lambda x: sport_dict[x])\n\n act_data['day'] = act_data['during.lower'].dt.strftime(\n '%Y-%m-%d')\n act_data.drop(['zones', 'during.bounds'], axis=1, inplace=True)\n act_data.drop_duplicates(inplace=True)\n self.all_activities = act_data\n return act_data\n else:\n print(\"Whoop: Please run the authorization function first\")\n\n def get_sleep_timeframe(self,\n start,\n end=datetime.strftime(datetime.utcnow(),\n \"%Y-%m-%d\")):\n '''\n This function returns sleep metrics in a data frame, for timeframe specified by the user.\n Each row in the data frame represents one night of sleep.\n\n If no end date is specified, it will default to today's date.\n\n All sleep times are returned in minutes.\n '''\n\n st = datetime.strptime(start, '%Y-%m-%d')\n e = datetime.strptime(end, '%Y-%m-%d')\n if st > e:\n if e > datetime.today():\n print(\"Whoop: Please enter an end date earlier than tomorrow\")\n else:\n print(\n \"Whoop: Please enter a start date that is earlier than your end date\"\n )\n else:\n if self.auth_code:\n if self.all_data is not None:\n ## use existing\n 
data = self.all_data\n data = data[(data.day >= start)\n & (data.day <= end)].copy(deep=True)\n else:\n ## pull timeframe data\n data = self.get_keydata_timeframe(start, end)\n\n ## getting all the sleep ids\n sleep_ids = data['sleep.id'].values.tolist()\n sleep_list = [int(x) for x in sleep_ids if pd.isna(x) == False]\n if self.all_sleep is not None:\n ## All sleep data already pulled so just filter\n all_sleep = self.all_sleep\n time_sleep = all_sleep[all_sleep.activityId.isin(\n sleep_list)]\n return time_sleep\n\n else:\n time_sleep = pd.DataFrame()\n for s in sleep_list:\n m = self.pull_sleep_main(s)\n time_sleep = pd.concat([time_sleep, m])\n\n ## Cleaning sleep data\n sleep_update = [\n 'qualityDuration', 'latency', 'debtPre', 'debtPost',\n 'needFromStrain', 'sleepNeed', 'habitualSleepNeed',\n 'timeInBed', 'lightSleepDuration',\n 'slowWaveSleepDuration', 'remSleepDuration',\n 'wakeDuration', 'arousalTime', 'noDataDuration',\n 'creditFromNaps', 'projectedSleep'\n ]\n\n for col in sleep_update:\n time_sleep[col] = time_sleep[col].astype(float).apply(\n lambda x: np.nan if np.isnan(x) else x / 60000)\n\n time_sleep.drop(['during.bounds', 'events'],\n axis=1,\n inplace=True)\n\n return time_sleep\n else:\n print(\"Whoop: Please run the authorization function first\")\n\n def get_sleep_events_timeframe(self,\n start,\n end=datetime.strftime(\n datetime.utcnow(), \"%Y-%m-%d\")):\n '''\n This function returns sleep events in a data frame, for the time frame specified by the user.\n Each row in the data frame represents an individual sleep event within an individual night of sleep.\n Sleep events can be joined against the sleep or main datasets by sleep id.\n\n If no end date is specified, it will default to today's date.\n '''\n\n st = datetime.strptime(start, '%Y-%m-%d')\n e = datetime.strptime(end, '%Y-%m-%d')\n if st > e:\n if e > datetime.today():\n print(\"Whoop: Please enter an end date earlier than tomorrow\")\n else:\n print(\n \"Whoop: Please enter a start date that is earlier than your end date\"\n )\n else:\n\n if self.auth_code:\n if self.all_data is not None:\n ## use existing\n data = self.all_data\n data = data[(data.day >= start)\n & (data.day <= end)].copy(deep=True)\n else:\n ## pull timeframe data\n data = self.get_keydata_timeframe(start, end)\n\n ## getting all the sleep ids\n sleep_ids = data['sleep.id'].values.tolist()\n sleep_list = [int(x) for x in sleep_ids if pd.isna(x) == False]\n if self.all_sleep_events is not None:\n ## All sleep data already pulled so just filter\n all_sleep_events = self.all_sleep_events\n time_sleep_events = all_sleep_events[\n all_sleep_events.id.isin(sleep_list)]\n return time_sleep_events\n\n else:\n if self.all_sleep is not None:\n sleep_events = self.all_sleep[['activityId', 'events']]\n time_sleep = sleep_events[sleep_events.id.isin(\n sleep_list)]\n time_sleep_events = pd.concat([\n pd.concat([\n pd.json_normalize(events),\n pd.DataFrame({'id': len(events) * [sleep]})\n ],\n axis=1) for events, sleep in\n zip(time_sleep['events'], time_sleep['activityId'])\n ])\n else:\n time_sleep_events = pd.DataFrame()\n for s in sleep_list:\n events = self.pull_sleep_events(s)\n time_sleep_events = pd.concat(\n [time_sleep_events, events])\n\n ## Cleaning sleep events data\n time_sleep_events['during.lower'] = pd.to_datetime(\n time_sleep_events['during.lower'])\n time_sleep_events['during.upper'] = pd.to_datetime(\n time_sleep_events['during.upper'])\n time_sleep_events.drop(['during.bounds'],\n axis=1,\n inplace=True)\n time_sleep_events[\n 
'total_minutes'] = time_sleep_events.apply(\n lambda x: (x['during.upper'] - x['during.lower']\n ).total_seconds() / 60.0,\n axis=1)\n\n return time_sleep_events\n else:\n print(\"Whoop: Please run the authorization function first\")\n\n def get_hr_timeframe(self,\n start,\n end=datetime.strftime(datetime.utcnow(), \"%Y-%m-%d\"),\n returnType=None):\n '''\n This function will pull every heart rate measurement recorded, for the time frame specified by the user.\n The default return for this function is a list of lists, where each \"row\" contains the date, time, and hr value.\n The measurements are spaced out every ~6 seconds on average.\n\n To return a dataframe, set df=True. This will take a bit longer, but will return a data frame.\n\n If no end date is specified, it will default to today's date.\n\n NOTE: This api pull takes about 6 seconds per week of data ... or 1 minutes for 10 weeks of data,\n so be careful when you pull, it may take a while.\n '''\n\n st = datetime.strptime(start, '%Y-%m-%d')\n e = datetime.strptime(end, '%Y-%m-%d')\n if st > e:\n if e > datetime.today():\n print(\"Whoop: Please enter an end date earlier than tomorrow\")\n else:\n print(\n \"Whoop: Please enter a start date that is earlier than your end date\"\n )\n else:\n\n if self.start_datetime:\n athlete_id = self.whoop_id\n start_date = parser.isoparse(\n self.start_datetime).replace(tzinfo=None)\n end_time = 'T23:59:59.999Z'\n start_time = 'T00:00:00.000Z'\n ## using the st and e since it needs the datetime formatted date\n intervals = rrule.rrule(freq=WEEKLY,\n interval=1,\n until=e,\n dtstart=st)\n date_range = [[\n d.strftime('%Y-%m-%d') + start_time,\n (d +\n relativedelta.relativedelta(weeks=1)).strftime('%Y-%m-%d')\n + end_time\n ] for d in intervals]\n\n hr_list = []\n for dates in date_range:\n start = dates[0]\n end = dates[1]\n ul = '''https://api-7.whoop.com/users/{}/metrics/heart_rate?end={}&order=t&start={}&step=6'''.format(\n athlete_id, end, start)\n hr_vals = self.pull_api(ul)['values']\n hr_values = [[\n str(datetime.utcfromtimestamp(h['time'] / 1e3).date()),\n str(datetime.utcfromtimestamp(h['time'] / 1e3).time()),\n h['data']\n ] for h in hr_vals]\n hr_list.extend(hr_values)\n if returnType == \"df\":\n hr_df = pd.DataFrame(hr_list)\n hr_df.columns = ['date', 'time', 'hr']\n return hr_df\n elif returnType == \"json\":\n hr_json = [{\n 'datetime': str(h[0]) + 'T' + str(h[1]),\n 'hr': h[2]\n } for h in hr_list]\n return hr_json\n else:\n return hr_list\n else:\n print(\"Whoop: Please run the authorization function first\")\n"
] | [
[
"pandas.DataFrame",
"pandas.json_normalize",
"pandas.to_datetime",
"pandas.concat",
"numpy.isnan",
"pandas.isna"
]
] |
swidi/poemo-generation | [
"3a349ac3a6fc3e82b24410013bced60a24c2d8bf"
] | [
"third_party/texar-0.2.0/examples/bert/utils/data_utils.py"
] | [
"\"\"\"\nThis is the Data Loading Pipeline for Sentence Classifier Task from\nhttps://github.com/google-research/bert/blob/master/run_classifier.py\n\"\"\"\n# coding=utf-8\n# Copyright 2018 The Google AI Language Team Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\nimport csv\nimport collections\nimport sys\nsys.path.append(os.path.dirname(__file__))\nimport tokenization\nimport tensorflow as tf\n\nclass InputExample():\n \"\"\"A single training/test example for simple sequence classification.\"\"\"\n\n def __init__(self, guid, text_a, text_b=None, label=None):\n \"\"\"Constructs a InputExample.\n Args:\n guid: Unique id for the example.\n text_a: string. The untokenized text of the first sequence.\n For single sequence tasks, only this sequence must be specified.\n text_b: (Optional) string. The untokenized text of the second\n sequence. Only must be specified for sequence pair tasks.\n label: (Optional) string. The label of the example. This should be\n specified for train and dev examples, but not for test examples.\n \"\"\"\n self.guid = guid\n self.text_a = text_a\n self.text_b = text_b\n self.label = label\n\n\nclass InputFeatures():\n \"\"\"A single set of features of data.\"\"\"\n\n def __init__(self, input_ids, input_mask, segment_ids, label_id):\n self.input_ids = input_ids\n self.input_mask = input_mask\n self.segment_ids = segment_ids\n self.label_id = label_id\n\n\nclass DataProcessor(object):\n \"\"\"Base class for data converters for sequence classification data sets.\"\"\"\n\n def get_train_examples(self, data_dir):\n \"\"\"Gets a collection of `InputExample`s for the train set.\"\"\"\n raise NotImplementedError()\n\n def get_dev_examples(self, data_dir):\n \"\"\"Gets a collection of `InputExample`s for the dev set.\"\"\"\n raise NotImplementedError()\n\n def get_test_examples(self, data_dir):\n \"\"\"Gets a collection of `InputExample`s for prediction.\"\"\"\n raise NotImplementedError()\n\n def get_labels(self):\n \"\"\"Gets the list of labels for this data set.\"\"\"\n raise NotImplementedError()\n\n @classmethod\n def _read_tsv(cls, input_file, quotechar=None):\n \"\"\"Reads a tab separated value file.\"\"\"\n with tf.gfile.Open(input_file, \"r\") as f:\n reader = csv.reader(f, delimiter=\"\\t\", quotechar=quotechar)\n lines = []\n for line in reader:\n lines.append(line)\n return lines\n\nclass SSTProcessor(DataProcessor):\n \"\"\"Processor for the MRPC data set (GLUE version).\"\"\"\n\n def get_train_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\n return self._create_examples(\n self._read_tsv(os.path.join(data_dir, \"train.tsv\")), \"train\")\n\n def get_dev_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\n return self._create_examples(\n self._read_tsv(os.path.join(data_dir, \"dev.tsv\")), \"dev\")\n\n def get_test_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\n return self._create_examples(\n self._read_tsv(os.path.join(data_dir, \"test.tsv\")), \"test\")\n\n def get_labels(self):\n \"\"\"See base class.\"\"\"\n 
return [\"0\", \"1\"]\n\n def _create_examples(self, lines, set_type):\n \"\"\"Creates examples for the training and dev sets.\"\"\"\n examples = []\n if set_type == 'train' or set_type == 'dev':\n for (i, line) in enumerate(lines):\n if i == 0:\n continue\n guid = \"%s-%s\" % (set_type, i)\n text_a = tokenization.convert_to_unicode(line[0])\n # Single sentence classification, text_b doesn't exist\n text_b = None\n label = tokenization.convert_to_unicode(line[1])\n examples.append(InputExample(guid=guid, text_a=text_a,\n text_b=text_b, label=label))\n if set_type == 'test':\n for (i, line) in enumerate(lines):\n if i == 0:\n continue\n guid = \"%s-%s\" % (set_type, i)\n text_a = tokenization.convert_to_unicode(line[1])\n # Single sentence classification, text_b doesn't exist\n text_b = None\n label = '0' # arbitrary set as 0\n examples.append(InputExample(guid=guid, text_a=text_a,\n text_b=text_b, label=label))\n return examples\n\nclass XnliProcessor(DataProcessor):\n \"\"\"Processor for the XNLI data set.\"\"\"\n\n def __init__(self):\n self.language = \"zh\"\n\n def get_train_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\n lines = self._read_tsv(\n os.path.join(data_dir, \"multinli\",\n \"multinli.train.%s.tsv\" % self.language))\n examples = []\n for (i, line) in enumerate(lines):\n if i == 0:\n continue\n guid = \"train-%d\" % (i)\n text_a = tokenization.convert_to_unicode(line[0])\n text_b = tokenization.convert_to_unicode(line[1])\n label = tokenization.convert_to_unicode(line[2])\n if label == tokenization.convert_to_unicode(\"contradictory\"):\n label = tokenization.convert_to_unicode(\"contradiction\")\n examples.append(InputExample(guid=guid, text_a=text_a,\n text_b=text_b, label=label))\n return examples\n\n def get_dev_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\n lines = self._read_tsv(os.path.join(data_dir, \"xnli.dev.tsv\"))\n examples = []\n for (i, line) in enumerate(lines):\n if i == 0:\n continue\n guid = \"dev-%d\" % (i)\n language = tokenization.convert_to_unicode(line[0])\n if language != tokenization.convert_to_unicode(self.language):\n continue\n text_a = tokenization.convert_to_unicode(line[6])\n text_b = tokenization.convert_to_unicode(line[7])\n label = tokenization.convert_to_unicode(line[1])\n examples.append(InputExample(guid=guid, text_a=text_a,\n text_b=text_b, label=label))\n return examples\n\n def get_labels(self):\n \"\"\"See base class.\"\"\"\n return [\"contradiction\", \"entailment\", \"neutral\"]\n\nclass MnliProcessor(DataProcessor):\n \"\"\"Processor for the MultiNLI data set (GLUE version).\"\"\"\n\n def get_train_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\n return self._create_examples(\n self._read_tsv(os.path.join(data_dir, \"train.tsv\")), \"train\")\n\n def get_dev_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\n return self._create_examples(\n self._read_tsv(os.path.join(data_dir, \"dev_matched.tsv\")),\n \"dev_matched\")\n\n def get_test_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\n return self._create_examples(\n self._read_tsv(os.path.join(data_dir, \"test_matched.tsv\")),\n \"test\")\n\n def get_labels(self):\n \"\"\"See base class.\"\"\"\n return [\"contradiction\", \"entailment\", \"neutral\"]\n\n def _create_examples(self, lines, set_type):\n \"\"\"Creates examples for the training and dev sets.\"\"\"\n examples = []\n for (i, line) in enumerate(lines):\n if i == 0:\n continue\n guid = \"%s-%s\" % (set_type,\n tokenization.convert_to_unicode(line[0]))\n text_a = 
tokenization.convert_to_unicode(line[8])\n text_b = tokenization.convert_to_unicode(line[9])\n if set_type == \"test\":\n label = \"contradiction\"\n else:\n label = tokenization.convert_to_unicode(line[-1])\n examples.append(InputExample(guid=guid, text_a=text_a,\n text_b=text_b, label=label))\n return examples\n\nclass MrpcProcessor(DataProcessor):\n \"\"\"Processor for the MRPC data set (GLUE version).\"\"\"\n\n def get_train_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\n return self._create_examples(\n self._read_tsv(os.path.join(data_dir, \"train.tsv\")),\n \"train\")\n\n def get_dev_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\n return self._create_examples(\n self._read_tsv(os.path.join(data_dir, \"dev.tsv\")),\n \"dev\")\n\n def get_test_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\n return self._create_examples(\n self._read_tsv(os.path.join(data_dir, \"test.tsv\")),\n \"test\")\n\n def get_labels(self):\n \"\"\"See base class.\"\"\"\n return [\"0\", \"1\"]\n\n def _create_examples(self, lines, set_type):\n \"\"\"Creates examples for the training and dev sets.\"\"\"\n examples = []\n for (i, line) in enumerate(lines):\n if i == 0:\n continue\n guid = \"%s-%s\" % (set_type, i)\n text_a = tokenization.convert_to_unicode(line[3])\n text_b = tokenization.convert_to_unicode(line[4])\n if set_type == \"test\":\n label = \"0\"\n else:\n label = tokenization.convert_to_unicode(line[0])\n examples.append(InputExample(guid=guid, text_a=text_a,\n text_b=text_b, label=label))\n return examples\n\nclass ColaProcessor(DataProcessor):\n \"\"\"Processor for the CoLA data set (GLUE version).\"\"\"\n\n def get_train_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\n return self._create_examples(\n self._read_tsv(os.path.join(data_dir, \"train.tsv\")),\n \"train\")\n\n def get_dev_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\n return self._create_examples(\n self._read_tsv(os.path.join(data_dir, \"dev.tsv\")),\n \"dev\")\n\n def get_test_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\n return self._create_examples(\n self._read_tsv(os.path.join(data_dir, \"test.tsv\")),\n \"test\")\n\n def get_labels(self):\n \"\"\"See base class.\"\"\"\n return [\"0\", \"1\"]\n\n def _create_examples(self, lines, set_type):\n \"\"\"Creates examples for the training and dev sets.\"\"\"\n examples = []\n for (i, line) in enumerate(lines):\n # Only the test set has a header\n if set_type == \"test\" and i == 0:\n continue\n guid = \"%s-%s\" % (set_type, i)\n if set_type == \"test\":\n text_a = tokenization.convert_to_unicode(line[1])\n label = \"0\"\n else:\n text_a = tokenization.convert_to_unicode(line[3])\n label = tokenization.convert_to_unicode(line[1])\n examples.append(InputExample(guid=guid, text_a=text_a,\n text_b=None, label=label))\n return examples\n\n\ndef convert_single_example(ex_index, example, label_list, max_seq_length,\n tokenizer):\n \"\"\"Converts a single `InputExample` into a single `InputFeatures`.\"\"\"\n label_map = {}\n for (i, label) in enumerate(label_list):\n label_map[label] = i\n\n tokens_a = tokenizer.tokenize(example.text_a)\n tokens_b = None\n if example.text_b:\n tokens_b = tokenizer.tokenize(example.text_b)\n\n if tokens_b:\n # Modifies `tokens_a` and `tokens_b` in place so that the total\n # length is less than the specified length.\n # Account for [CLS], [SEP], [SEP] with \"- 3\"\n _truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3)\n else:\n # Account for [CLS] and [SEP] with \"- 2\"\n if len(tokens_a) > 
max_seq_length - 2:\n tokens_a = tokens_a[0:(max_seq_length - 2)]\n\n # The convention rule is:\n # (a) For sequence pairs:\n # tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]\n # segment_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1\n # (b) For single sequences:\n # tokens: [CLS] the dog is hairy . [SEP]\n # sigment_ids: 0 0 0 0 0 0 0\n #\n # Where \"segment_ids\" are used to indicate whether this is the first\n # sequence or the second sequence. The embedding vectors for `type=0` and\n # `type=1` were learned during pre-training and are added to the wordpiece\n # embedding vector (and position vector). This is not *strictly* necessary\n # since the [SEP] token unambiguously separates the sequences, but it makes\n # it easier for the model to learn the concept of sequences.\n #\n # For classification tasks, the first vector (corresponding to [CLS]) is\n # used as as the \"sentence vector\". Note that this only makes sense because\n # the entire model is fine-tuned.\n tokens = []\n segment_ids = []\n tokens.append(\"[CLS]\")\n segment_ids.append(0)\n for token in tokens_a:\n tokens.append(token)\n segment_ids.append(0)\n tokens.append(\"[SEP]\")\n segment_ids.append(0)\n\n if tokens_b:\n for token in tokens_b:\n tokens.append(token)\n segment_ids.append(1)\n tokens.append(\"[SEP]\")\n segment_ids.append(1)\n\n input_ids = tokenizer.convert_tokens_to_ids(tokens)\n\n # The mask has 1 for real tokens and 0 for padding tokens. Only real\n # tokens are attended to.\n input_mask = [1] * len(input_ids)\n\n # Zero-pad up to the sequence length.\n while len(input_ids) < max_seq_length:\n input_ids.append(0)\n input_mask.append(0)\n segment_ids.append(0)\n\n assert len(input_ids) == max_seq_length\n assert len(input_mask) == max_seq_length\n assert len(segment_ids) == max_seq_length\n\n label_id = label_map[example.label]\n\n # here we disable the verbose printing of the data\n if ex_index < 0:\n tf.logging.info(\"*** Example ***\")\n tf.logging.info(\"guid: %s\" % (example.guid))\n tf.logging.info(\"tokens: %s\" % \" \".join(\n [tokenization.printable_text(x) for x in tokens]))\n tf.logging.info(\"input_ids: %s\" % \" \".join([str(x) for x in input_ids]))\n tf.logging.info(\"input_ids length: %d\" % len(input_ids))\n tf.logging.info(\"input_mask: %s\" %\\\n \" \".join([str(x) for x in input_mask]))\n tf.logging.info(\"segment_ids: %s\" %\\\n \" \".join([str(x) for x in segment_ids]))\n tf.logging.info(\"label: %s (id = %d)\" % (example.label, label_id))\n\n feature = InputFeatures(input_ids=input_ids,\n input_mask=input_mask,\n segment_ids=segment_ids,\n label_id=label_id)\n return feature\n\n\ndef file_based_convert_examples_to_features(\n examples, label_list, max_seq_length, tokenizer, output_file):\n \"\"\"Convert a set of `InputExample`s to a TFRecord file.\"\"\"\n\n writer = tf.python_io.TFRecordWriter(output_file)\n\n for (ex_index, example) in enumerate(examples):\n\n feature = convert_single_example(ex_index, example, label_list,\n max_seq_length, tokenizer)\n\n def create_int_feature(values):\n return tf.train.Feature(\n int64_list=tf.train.Int64List(value=list(values)))\n\n features = collections.OrderedDict()\n features[\"input_ids\"] = create_int_feature(feature.input_ids)\n features[\"input_mask\"] = create_int_feature(feature.input_mask)\n features[\"segment_ids\"] = create_int_feature(feature.segment_ids)\n features[\"label_ids\"] = create_int_feature([feature.label_id])\n\n tf_example = tf.train.Example(\n features=tf.train.Features(feature=features))\n 
writer.write(tf_example.SerializeToString())\n\ndef _truncate_seq_pair(tokens_a, tokens_b, max_length):\n \"\"\"Truncates a sequence pair in place to the maximum length.\"\"\"\n\n # This is a simple heuristic which will always truncate the longer sequence\n # one token at a time. This makes more sense than truncating an equal\n # percent of tokens from each, since if one sequence is very short then\n # each token that's truncated likely contains more information than a\n # longer sequence.\n while True:\n total_length = len(tokens_a) + len(tokens_b)\n if total_length <= max_length:\n break\n if len(tokens_a) > len(tokens_b):\n tokens_a.pop()\n else:\n tokens_b.pop()\n\n\ndef prepare_TFRecord_data(processor, tokenizer,\n data_dir, max_seq_length, output_dir):\n \"\"\"\n Args:\n processor: Data Preprocessor, which must have get_lables,\n get_train/dev/test/examples methods defined.\n tokenizer: The Sentence Tokenizer. Generally should be\n SentencePiece Model.\n data_dir: The input data directory.\n max_seq_length: Max sequence length.\n batch_size: mini-batch size.\n model: `train`, `eval` or `test`.\n output_dir: The directory to save the TFRecord in.\n \"\"\"\n label_list = processor.get_labels()\n\n train_examples = processor.get_train_examples(data_dir)\n train_file = os.path.join(output_dir, \"train.tf_record\")\n file_based_convert_examples_to_features(\n train_examples, label_list, max_seq_length,\n tokenizer, train_file)\n\n eval_examples = processor.get_dev_examples(data_dir)\n eval_file = os.path.join(output_dir, \"eval.tf_record\")\n file_based_convert_examples_to_features(\n eval_examples, label_list,\n max_seq_length, tokenizer, eval_file)\n\n test_examples = processor.get_test_examples(data_dir)\n test_file = os.path.join(output_dir, \"predict.tf_record\")\n file_based_convert_examples_to_features(\n test_examples, label_list,\n max_seq_length, tokenizer, test_file)\n"
] | [
[
"tensorflow.train.Features",
"tensorflow.gfile.Open",
"tensorflow.logging.info",
"tensorflow.python_io.TFRecordWriter"
]
] |
simonlevine/x-transformer-icd | [
"17d0a84f8b8e1f69623a82c0afab26830c7a1eb8"
] | [
"app/lib/models.py"
] | [
"\"\"\"deserialize auto-icd models and provide a consistent interface\"\"\"\n\nimport typing as t\nimport json\nimport pickle\nfrom pathlib import Path\nimport numpy as np\nimport onnxruntime as rt\n\nAPP_ROOT = Path(\"./app\")\nASSETS_DIR = APP_ROOT/\"assets\"\n\n\nclass AutoICDModel:\n\n def __init__(self, onnx_model_fp):\n assert onnx_model_fp.exists()\n self.sess = rt.InferenceSession(str(onnx_model_fp.resolve()))\n\n def ___call__(self, free_text: str) -> t.Set[str]:\n raise NotImplementedError(\"Subclasses just provide model interaction logic!\")\n\n\n# class KissModel(AutoICDModel):\n\n# def __init__(self, onnx_model_fp, icd9_codes: t.List[str]):\n# \"\"\"because we are only loading a few codes,\n# we need to know which ones in otder to decode\n# decode the model output, which is a 1x|icd9_codes| matrix\"\"\"\n# super().__init__(onnx_model_fp)\n# self.icd9_codes = icd9_codes\n\n# def ___call__(self, free_text: str) -> t.Set[str]:\n# X = np.array([[free_text]])\n# predictions, predictions_proba \\\n# = sess.run(None, {\"free_text_input\": X})[0]\n# codes_predicted = [\n# code for prediction, code in zip(predictions, self.icd9_codes)\n# if prediction == 1 # i.e., if the code is predicted to be present\n# ]\n# codes2predicted_proba = {\n# code: proba for code, proba in zip(self.icd9_codes, predictions_proba)\n# }\n# return codes_predicted, codes2predicted_proba\n\n\n# def get_kiss_model():\n# onnx_model_fp = ASSETS_DIR/\"kiss_model.onnx\"\n# with open(ASSETS_DIR/\"kiss_model.onnx.metadata.json\") as f:\n# icd9_codes = json.load(f)[\"icd9_codes_relevant\"]\n# model = KissModel(onnx_model_fp, icd9_codes)\n# return model\n\n\nclass KissModel:\n \"\"\"Kiss Model using pickle for persistence\"\"\"\n\n def __init__(self):\n with open(ASSETS_DIR/\"kiss_model.pkl.metadata.json\") as f_meta:\n self.icd9_codes = json.load(f_meta)[\"icd9_codes_relevant\"]\n with open(ASSETS_DIR/\"kiss_model.pkl\", \"rb\") as f:\n self.model = pickle.loads(f.read())\n \n def __call__(self, free_text: str):\n X = np.array([free_text])\n predicted_codes_proba = self.model.predict_proba(X)\n return np.array([proba.tolist() for proba in predicted_codes_proba])[:,0,1]"
] | [
[
"numpy.array"
]
] |
kayaei/pands-problem-set | [
"a7c48059e3024955794c67d9e6f969a42f4e3a6d"
] | [
"plotfunction.py"
] | [
"# Etem Kaya 16-Mar-2019\n\n# Solution to Problem-10.\n# File name: \"plotfunction.py\".\n\n# Problem-10: Write a program that displays a plot of the functions x, x2 & 2x\n# in the range [0, 4].\n\n#Import matplotlib and numpy packages \nimport matplotlib.pyplot as plt\nimport numpy as np\n\n# setup the lenght and scale of the x axis\n# plt.axis([0, 4, 0, 15])\nx = np.arange(0.0, 4.0, 0.5)\n\n# define the functions y1, y2 and y3 \ny1 = x # f(x) function\ny2 = x**2 # f(x**2) function \ny3 = 2**x # f(2**x) function\n\n## plot the y1, y2 and y3 functions\nplt.plot(x, y1)\nplt.plot(x, y2)\nplt.plot(x, y3)\n\n# pionts where the y1, y2 and y3 functions intersect and_\n# mark the point where they intersect with orange and blue colours\nplt.plot(1, 1, 'or')\nplt.plot(2, 4, 'bo')\n\n## Config the graph\nplt.title('Plotting Graph for functions f(x), f(x^2) and f(2^x)')\nplt.xlabel('X - Axis')\nplt.ylabel('Y - Axis')\n\n# turnon grid lines visibility\nplt.grid(True)\n\n# setup plot legends for each line and their locations for display\nplt.legend(['y1 = x', 'y2 = x^2', 'y3 = 2^x'], loc='upper left')\n\n## plot the y1, y2 and y3 functions on the graph\nplt.show()\n"
] | [
[
"matplotlib.pyplot.legend",
"matplotlib.pyplot.grid",
"numpy.arange",
"matplotlib.pyplot.title",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.xlabel"
]
] |
srio/shadow3-scripts | [
"10712641333c29ca9854e9cc60d86cb321f3762b"
] | [
"ID09/run_wofry_polychromatic_partial_coherence.py"
] | [
"\n\n\n#\n# Import section\n#\nimport numpy\n\nfrom syned.beamline.beamline_element import BeamlineElement\nfrom syned.beamline.element_coordinates import ElementCoordinates\nfrom wofry.propagator.propagator import PropagationManager, PropagationElements, PropagationParameters\n\nfrom wofry.propagator.wavefront1D.generic_wavefront import GenericWavefront1D\n\nfrom wofryimpl.propagator.propagators1D.fresnel import Fresnel1D\nfrom wofryimpl.propagator.propagators1D.fresnel_convolution import FresnelConvolution1D\nfrom wofryimpl.propagator.propagators1D.fraunhofer import Fraunhofer1D\nfrom wofryimpl.propagator.propagators1D.integral import Integral1D\nfrom wofryimpl.propagator.propagators1D.fresnel_zoom import FresnelZoom1D\nfrom wofryimpl.propagator.propagators1D.fresnel_zoom_scaling_theorem import FresnelZoomScaling1D\n\n\n#\n# SOURCE========================\n#\n\n\n# def run_source(my_mode_index=0):\ndef run_source(my_mode_index=0,energy=20016.1):\n\n global coherent_mode_decomposition\n try:\n if my_mode_index == 0: raise Exception()\n tmp = coherent_mode_decomposition\n except:\n\n ########## SOURCE ##########\n\n #\n # create output_wavefront\n #\n #\n from wofryimpl.propagator.util.undulator_coherent_mode_decomposition_1d import \\\n UndulatorCoherentModeDecomposition1D\n coherent_mode_decomposition = UndulatorCoherentModeDecomposition1D(\n electron_energy=6,\n electron_current=0.2,\n undulator_period=0.017,\n undulator_nperiods=117.647,\n K=0.09683,\n photon_energy= energy,\n abscissas_interval=0.0001,\n number_of_points=2500,\n distance_to_screen=100,\n scan_direction='V',\n sigmaxx=3.63641e-06,\n sigmaxpxp=1.37498e-06,\n useGSMapproximation=False, )\n # make calculation\n coherent_mode_decomposition_results = coherent_mode_decomposition.calculate()\n\n mode_index = 0\n output_wavefront = coherent_mode_decomposition.get_eigenvector_wavefront(mode_index)\n output_wavefront = coherent_mode_decomposition.get_eigenvector_wavefront(my_mode_index)\n return output_wavefront\n\n\n#\n# BEAMLINE========================\n#\n\n\ndef run_beamline(output_wavefront):\n ########## OPTICAL SYSTEM ##########\n\n ########## OPTICAL ELEMENT NUMBER 1 ##########\n\n input_wavefront = output_wavefront.duplicate()\n from wofryimpl.beamline.optical_elements.ideal_elements.screen import WOScreen1D\n\n optical_element = WOScreen1D()\n\n # drift_before 27.066 m\n #\n # propagating\n #\n #\n propagation_elements = PropagationElements()\n beamline_element = BeamlineElement(optical_element=optical_element,\n coordinates=ElementCoordinates(p=27.066000, q=0.000000,\n angle_radial=numpy.radians(0.000000),\n angle_azimuthal=numpy.radians(0.000000)))\n propagation_elements.add_beamline_element(beamline_element)\n propagation_parameters = PropagationParameters(wavefront=input_wavefront, propagation_elements=propagation_elements)\n # self.set_additional_parameters(propagation_parameters)\n #\n propagation_parameters.set_additional_parameters('magnification_x', 20.0)\n propagation_parameters.set_additional_parameters('magnification_N', 1.0)\n #\n propagator = PropagationManager.Instance()\n try:\n propagator.add_propagator(Integral1D())\n except:\n pass\n output_wavefront = propagator.do_propagation(propagation_parameters=propagation_parameters,\n handler_name='INTEGRAL_1D')\n\n ########## OPTICAL ELEMENT NUMBER 2 ##########\n\n input_wavefront = output_wavefront.duplicate()\n from syned.beamline.shape import Rectangle\n boundary_shape = Rectangle(-0.0005, 0.0005, -0.0005, 0.0005)\n from 
wofryimpl.beamline.optical_elements.absorbers.slit import WOSlit1D\n optical_element = WOSlit1D(boundary_shape=boundary_shape)\n\n # no drift in this element\n output_wavefront = optical_element.applyOpticalElement(input_wavefront)\n\n ########## OPTICAL ELEMENT NUMBER 3 ##########\n\n input_wavefront = output_wavefront.duplicate()\n\n from orangecontrib.esrf.wofry.util.mirror import WOMirror1D\n\n optical_element = WOMirror1D.create_from_keywords(\n name='',\n shape=0,\n p_focus=44.54,\n q_focus=45.4695,\n grazing_angle_in=0.0025,\n p_distance=17.474,\n q_distance=11.3,\n zoom_factor=2,\n error_flag=1,\n error_file='/home/srio/Oasys/dabam_profile_140461924578000.dat',\n error_file_oversampling_factor=30,\n mirror_length=0,\n mirror_points=0,\n write_profile=0)\n\n # no drift in this element\n output_wavefront = optical_element.applyOpticalElement(input_wavefront)\n return output_wavefront\n\n\n#\n# MAIN FUNCTION========================\n#\n\n\n# def main():\ndef main(energy=20016.064):\n from srxraylib.plot.gol import plot, plot_image\n from orangecontrib.esrf.wofry.util.tally import TallyCoherentModes\n\n tally = TallyCoherentModes()\n for my_mode_index in range(10):\n output_wavefront = run_source(my_mode_index=my_mode_index,energy=energy)\n output_wavefront = run_beamline(output_wavefront)\n tally.append(output_wavefront)\n\n # tally.plot_cross_spectral_density(show=1, filename=\"\")\n # tally.plot_spectral_density(show=1, filename=\"\")\n # tally.plot_occupation(show=1, filename=\"\")\n\n tally.save_spectral_density(filename=\"id09_3mrad_spectral_density.dat\")\n tally.save_occupation(filename=\"id09_3mrad_occupation.dat\")\n\n\n#\n# MAIN========================\n#\n\n\nmain()\n\n#\n# MAIN========================\n#\n\nimport os\n# Energy = numpy.linspace(18000,22000,50)\nEnergy = numpy.linspace(18500,20500,100)\nfor energy in Energy:\n main(energy)\n command = \"mv id09_3mrad_spectral_density.dat results/id09_3mrad_spectral_density_%4d.dat\" % energy\n print(command)\n os.system(command)\n command = \"mv id09_3mrad_occupation.dat results/occupation_%4d.dat\" % energy\n print(command)\n os.system(command)"
] | [
[
"numpy.linspace",
"numpy.radians"
]
] |
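The entry above ends with a photon-energy scan: `numpy.linspace` builds the energy grid and `numpy.radians` converts the element angles inside `ElementCoordinates`. A minimal sketch of just that scan pattern, with a hypothetical `run_simulation` standing in for the row's `main(energy)`; the energies and filename format below are illustrative only.

```python
import numpy as np

# Hypothetical stand-in for the wofry main(energy) entry point in the row above;
# it only exercises the two NumPy calls recorded in the "apis" column.
def run_simulation(energy_ev, angle_deg=0.0):
    angle_rad = np.radians(angle_deg)   # numpy.radians, as in ElementCoordinates(...)
    return "spectral_density_%d_rad%.3f.dat" % (energy_ev, angle_rad)

# numpy.linspace builds the photon-energy scan grid, as in the final loop above.
for energy in np.linspace(18500, 20500, 5):
    print(run_simulation(energy))
```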
brianzhang01/tskit | [
"e4d80810e19034cffa77bb14bc0b8d77537103ad"
] | [
"python/tests/test_metadata.py"
] | [
"# -*- coding: utf-8 -*-\n# MIT License\n#\n# Copyright (c) 2018-2019 Tskit Developers\n# Copyright (c) 2017 University of Oxford\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\"\"\"\nTests for metadata handling.\n\"\"\"\nimport io\nimport json\nimport os\nimport tempfile\nimport unittest\nimport pickle\n\nimport numpy as np\nimport python_jsonschema_objects as pjs\nimport msprime\n\nimport tskit\n\n\nclass TestMetadataHdf5RoundTrip(unittest.TestCase):\n \"\"\"\n Tests that we can encode metadata under various formats and this will\n successfully round-trip through the HDF5 format.\n \"\"\"\n def setUp(self):\n fd, self.temp_file = tempfile.mkstemp(prefix=\"msp_hdf5meta_test_\")\n os.close(fd)\n\n def tearDown(self):\n os.unlink(self.temp_file)\n\n def test_json(self):\n ts = msprime.simulate(10, random_seed=1)\n tables = ts.dump_tables()\n nodes = tables.nodes\n # For each node, we create some Python metadata that can be JSON encoded.\n metadata = [\n {\"one\": j, \"two\": 2 * j, \"three\": list(range(j))} for j in range(len(nodes))]\n encoded, offset = tskit.pack_strings(map(json.dumps, metadata))\n nodes.set_columns(\n flags=nodes.flags, time=nodes.time, population=nodes.population,\n metadata_offset=offset, metadata=encoded)\n self.assertTrue(np.array_equal(nodes.metadata_offset, offset))\n self.assertTrue(np.array_equal(nodes.metadata, encoded))\n ts1 = tables.tree_sequence()\n for j, node in enumerate(ts1.nodes()):\n decoded_metadata = json.loads(node.metadata.decode())\n self.assertEqual(decoded_metadata, metadata[j])\n ts1.dump(self.temp_file)\n ts2 = tskit.load(self.temp_file)\n self.assertEqual(ts1.tables.nodes, ts2.tables.nodes)\n\n def test_pickle(self):\n ts = msprime.simulate(10, random_seed=1)\n tables = ts.dump_tables()\n # For each node, we create some Python metadata that can be pickled\n metadata = [\n {\"one\": j, \"two\": 2 * j, \"three\": list(range(j))}\n for j in range(ts.num_nodes)]\n encoded, offset = tskit.pack_bytes(list(map(pickle.dumps, metadata)))\n tables.nodes.set_columns(\n flags=tables.nodes.flags, time=tables.nodes.time,\n population=tables.nodes.population,\n metadata_offset=offset, metadata=encoded)\n self.assertTrue(np.array_equal(tables.nodes.metadata_offset, offset))\n self.assertTrue(np.array_equal(tables.nodes.metadata, encoded))\n ts1 = tables.tree_sequence()\n for j, node in enumerate(ts1.nodes()):\n decoded_metadata = pickle.loads(node.metadata)\n self.assertEqual(decoded_metadata, metadata[j])\n ts1.dump(self.temp_file)\n ts2 = 
tskit.load(self.temp_file)\n self.assertEqual(ts1.tables.nodes, ts2.tables.nodes)\n\n\nclass ExampleMetadata(object):\n \"\"\"\n Simple class that we can pickle/unpickle in metadata.\n \"\"\"\n def __init__(self, one=None, two=None):\n self.one = one\n self.two = two\n\n\nclass TestMetadataPickleDecoding(unittest.TestCase):\n \"\"\"\n Tests in which use pickle.pickle to decode metadata in nodes, sites and mutations.\n \"\"\"\n\n def test_nodes(self):\n tables = tskit.TableCollection(sequence_length=1)\n metadata = ExampleMetadata(one=\"node1\", two=\"node2\")\n pickled = pickle.dumps(metadata)\n tables.nodes.add_row(time=0.125, metadata=pickled)\n ts = tables.tree_sequence()\n node = ts.node(0)\n self.assertEqual(node.time, 0.125)\n self.assertEqual(node.metadata, pickled)\n unpickled = pickle.loads(node.metadata)\n self.assertEqual(unpickled.one, metadata.one)\n self.assertEqual(unpickled.two, metadata.two)\n\n def test_sites(self):\n tables = tskit.TableCollection(sequence_length=1)\n metadata = ExampleMetadata(one=\"node1\", two=\"node2\")\n pickled = pickle.dumps(metadata)\n tables.sites.add_row(position=0.1, ancestral_state=\"A\", metadata=pickled)\n ts = tables.tree_sequence()\n site = ts.site(0)\n self.assertEqual(site.position, 0.1)\n self.assertEqual(site.ancestral_state, \"A\")\n self.assertEqual(site.metadata, pickled)\n unpickled = pickle.loads(site.metadata)\n self.assertEqual(unpickled.one, metadata.one)\n self.assertEqual(unpickled.two, metadata.two)\n\n def test_mutations(self):\n tables = tskit.TableCollection(sequence_length=1)\n metadata = ExampleMetadata(one=\"node1\", two=\"node2\")\n pickled = pickle.dumps(metadata)\n tables.nodes.add_row(time=0)\n tables.sites.add_row(position=0.1, ancestral_state=\"A\")\n tables.mutations.add_row(site=0, node=0, derived_state=\"T\", metadata=pickled)\n ts = tables.tree_sequence()\n mutation = ts.site(0).mutations[0]\n self.assertEqual(mutation.site, 0)\n self.assertEqual(mutation.node, 0)\n self.assertEqual(mutation.derived_state, \"T\")\n self.assertEqual(mutation.metadata, pickled)\n unpickled = pickle.loads(mutation.metadata)\n self.assertEqual(unpickled.one, metadata.one)\n self.assertEqual(unpickled.two, metadata.two)\n\n\nclass TestJsonSchemaDecoding(unittest.TestCase):\n \"\"\"\n Tests in which use json-schema to decode the metadata.\n \"\"\"\n schema = \"\"\"{\n \"title\": \"Example Metadata\",\n \"type\": \"object\",\n \"properties\": {\n \"one\": {\"type\": \"string\"},\n \"two\": {\"type\": \"string\"}\n },\n \"required\": [\"one\", \"two\"]\n }\"\"\"\n\n def test_nodes(self):\n tables = tskit.TableCollection(sequence_length=1)\n builder = pjs.ObjectBuilder(json.loads(self.schema))\n ns = builder.build_classes()\n metadata = ns.ExampleMetadata(one=\"node1\", two=\"node2\")\n encoded = json.dumps(metadata.as_dict()).encode()\n tables.nodes.add_row(time=0.125, metadata=encoded)\n ts = tables.tree_sequence()\n node = ts.node(0)\n self.assertEqual(node.time, 0.125)\n self.assertEqual(node.metadata, encoded)\n decoded = ns.ExampleMetadata.from_json(node.metadata.decode())\n self.assertEqual(decoded.one, metadata.one)\n self.assertEqual(decoded.two, metadata.two)\n\n\nclass TestLoadTextMetadata(unittest.TestCase):\n \"\"\"\n Tests that use the load_text interface.\n \"\"\"\n\n def test_individuals(self):\n individuals = io.StringIO(\"\"\"\\\n id flags location metadata\n 0 1 0.0,1.0,0.0 abc\n 1 1 1.0,2.0 XYZ+\n 2 0 2.0,3.0,0.0 !@#$%^&*()\n \"\"\")\n i = tskit.parse_individuals(\n individuals, strict=False, encoding='utf8', 
base64_metadata=False)\n expected = [(1, [0.0, 1.0, 0.0], 'abc'),\n (1, [1.0, 2.0], 'XYZ+'),\n (0, [2.0, 3.0, 0.0], '!@#$%^&*()')]\n for a, b in zip(expected, i):\n self.assertEqual(a[0], b.flags)\n self.assertEqual(len(a[1]), len(b.location))\n for x, y in zip(a[1], b.location):\n self.assertEqual(x, y)\n self.assertEqual(a[2].encode('utf8'),\n b.metadata)\n\n def test_nodes(self):\n nodes = io.StringIO(\"\"\"\\\n id is_sample time metadata\n 0 1 0 abc\n 1 1 0 XYZ+\n 2 0 1 !@#$%^&*()\n \"\"\")\n n = tskit.parse_nodes(\n nodes, strict=False, encoding='utf8', base64_metadata=False)\n expected = ['abc', 'XYZ+', '!@#$%^&*()']\n for a, b in zip(expected, n):\n self.assertEqual(a.encode('utf8'),\n b.metadata)\n\n def test_sites(self):\n sites = io.StringIO(\"\"\"\\\n position ancestral_state metadata\n 0.1 A abc\n 0.5 C XYZ+\n 0.8 G !@#$%^&*()\n \"\"\")\n s = tskit.parse_sites(\n sites, strict=False, encoding='utf8', base64_metadata=False)\n expected = ['abc', 'XYZ+', '!@#$%^&*()']\n for a, b in zip(expected, s):\n self.assertEqual(a.encode('utf8'),\n b.metadata)\n\n def test_mutations(self):\n mutations = io.StringIO(\"\"\"\\\n site node derived_state metadata\n 0 2 C mno\n 0 3 G )(*&^%$#@!\n \"\"\")\n m = tskit.parse_mutations(\n mutations, strict=False, encoding='utf8', base64_metadata=False)\n expected = ['mno', ')(*&^%$#@!']\n for a, b in zip(expected, m):\n self.assertEqual(a.encode('utf8'),\n b.metadata)\n\n def test_populations(self):\n populations = io.StringIO(\"\"\"\\\n id metadata\n 0 mno\n 1 )(*&^%$#@!\n \"\"\")\n p = tskit.parse_populations(\n populations, strict=False, encoding='utf8', base64_metadata=False)\n expected = ['mno', ')(*&^%$#@!']\n for a, b in zip(expected, p):\n self.assertEqual(a.encode('utf8'),\n b.metadata)\n"
] | [
[
"numpy.array_equal"
]
] |
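The test file above repeatedly checks metadata round-trips with `numpy.array_equal` after packing strings or bytes via `tskit.pack_strings`/`tskit.pack_bytes`. A tskit-free sketch of that offset-encoding check, assuming a simplified `pack_strings` helper that is not the library's implementation:

```python
import json
import numpy as np

def pack_strings(strings):
    """Flatten strings into one byte buffer plus row offsets (a simplified
    stand-in for tskit.pack_strings, not the library's implementation)."""
    encoded = [s.encode() for s in strings]
    offsets = np.cumsum([0] + [len(e) for e in encoded]).astype(np.uint64)
    data = np.frombuffer(b"".join(encoded), dtype=np.int8)
    return data, offsets

metadata = [{"one": j, "two": 2 * j, "three": list(range(j))} for j in range(3)]
packed_a, offsets_a = pack_strings([json.dumps(m) for m in metadata])
packed_b, offsets_b = pack_strings([json.dumps(m) for m in metadata])

# numpy.array_equal is the round-trip check used throughout the tests above.
assert np.array_equal(packed_a, packed_b)
assert np.array_equal(offsets_a, offsets_b)
```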
mahnooranjum/Python_Programming | [
"ba251e0e855842112efeb968d06458c60eaf1bd3"
] | [
"Misc/d3_heatmap.py"
] | [
"'''\n Mahnoor Anjum\n Python:\n Trivariate Analysis\n'''\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport numpy as np\nimport math\nimport random \nfrom mpl_toolkits.mplot3d import Axes3D\n# sns.set()\n\n\npath = 'data/private/savepath/'\nfilename = 'v3_1'\ngenpath = 'data/private/gen/'\ngenname = 'g3_1'\n\ndata = pd.read_csv(path + filename+'.csv')\ngen = pd.read_csv(genpath + genname + '.csv')\n\nk = 50\ndata = data.sample(k)\n\nx = data['x1']\ny = data['x2']\nz = data['x3']\n\nfig = plt.figure(figsize=(20,20))\n\ndata = pd.DataFrame({'X': x, 'Y': y, 'Z': z})\ndata_pivoted = data.pivot(\"X\", \"Y\", \"Z\")\nax = sns.heatmap(data_pivoted)\nax.set_xlabel('x1')\nax.set_ylabel('x2')\nax.set_xticks([])\nax.set_yticks([])\nax.set_title(str(k)+\"_samples\")\n\n"
] | [
[
"pandas.read_csv",
"matplotlib.pyplot.figure",
"pandas.DataFrame"
]
] |
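The script above pivots three sampled columns into a Seaborn heatmap. Below is a self-contained variant using synthetic data instead of the private CSV paths; it uses the keyword form of `DataFrame.pivot`, since recent pandas releases no longer accept the positional `pivot("X", "Y", "Z")` call used in the row.

```python
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns

# Synthetic stand-in for the x1/x2/x3 columns read from the private CSV above.
x = np.repeat(np.arange(5), 5)
y = np.tile(np.arange(5), 5)
z = np.random.default_rng(0).random(25)

data = pd.DataFrame({"X": x, "Y": y, "Z": z})
pivoted = data.pivot(index="X", columns="Y", values="Z")   # keyword form

fig = plt.figure(figsize=(6, 5))
ax = sns.heatmap(pivoted)
ax.set_xlabel("x1")
ax.set_ylabel("x2")
ax.set_title("25 samples (synthetic)")
plt.show()
```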
mtzgroup/aimsprop | [
"464d88ad7a817da73027fd2ab7b12476bf59f83d"
] | [
"aimsprop/pes.py"
] | [
"import numpy as np\n\nfrom .bundle import Bundle\n\n\ndef compute_pes(\n bundle: Bundle,\n carrier_frequency: float,\n alpha: float,\n eKT: np.ndarray,\n) -> Bundle:\n\n \"\"\"Compute the simple photoelectron spectroscopy, with Guassian blurring\n\n User is responsible for calculating and assigning properties to the bundle frames:\n Dyson Orbitals\n Ionization Potential (IP)\n\n Params:\n bundle: the Bundle object to compute the property for (modified in\n place)\n carrier_frequency: experimental probe pulse carrier frequency (hbar*omega)\n alpha: the Guassian blurring exponent\n eKT: electron energies\n\n Return:\n bundle: reference to the input Bundle object. The property\n key \"pes\" is set to computed PES property.\n \"\"\"\n\n for frame in bundle.frames:\n IPs = frame.properties[\"IP\"]\n dyson_norms = frame.properties[\"dyson_norms\"]\n pes = np.zeros_like(eKT)\n for ind, (state, IP) in enumerate(IPs):\n dyson_norm = dyson_norms[np.where(dyson_norms[:, 0] == state), 1][0]\n pes += (\n dyson_norm\n * np.sqrt(alpha / np.pi)\n * np.exp(-alpha * (carrier_frequency - IP - eKT) ** 2)\n )\n frame.properties[\"pes\"] = pes\n\n return bundle\n"
] | [
[
"numpy.sqrt",
"numpy.zeros_like",
"numpy.where",
"numpy.exp"
]
] |
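`compute_pes` above sums one Gaussian per ionization channel, weighted by that channel's Dyson norm. A standalone numerical sketch of the same sum follows; the carrier frequency, IPs, Dyson norms, and energy grid are made-up example values, and the `np.where` lookup is written as an explicit row index for clarity.

```python
import numpy as np

# Standalone sketch of the Gaussian-blurred PES sum in compute_pes above.
carrier_frequency = 10.0                       # probe photon energy (eV), assumed
alpha = 20.0                                   # Gaussian blurring exponent, assumed
eKT = np.linspace(0.0, 5.0, 200)               # electron kinetic-energy grid

IPs = np.array([[0, 8.1], [1, 8.9]])           # (state, ionization potential)
dyson_norms = np.array([[0, 0.7], [1, 0.3]])   # (state, Dyson norm)

pes = np.zeros_like(eKT)
for state, ip in IPs:
    row = np.where(dyson_norms[:, 0] == state)[0][0]   # locate this state's norm
    pes += (dyson_norms[row, 1]
            * np.sqrt(alpha / np.pi)
            * np.exp(-alpha * (carrier_frequency - ip - eKT) ** 2))

print(eKT[np.argmax(pes)])   # peak sits near carrier_frequency - IP, as expected
```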
frssp/pymatgen | [
"5cc42912a12a265a603df7e34c856561f76edc1f"
] | [
"dev_scripts/chemenv/equivalent_indices.py"
] | [
"# coding: utf-8\n# Copyright (c) Pymatgen Development Team.\n# Distributed under the terms of the MIT License.\n\nfrom __future__ import division, unicode_literals\n\n\"\"\"\nDevelopment script of the ChemEnv utility to get the equivalent indices of the model coordination environments\n\"\"\"\n\n__author__ = \"David Waroquiers\"\n__copyright__ = \"Copyright 2012, The Materials Project\"\n__version__ = \"2.0\"\n__maintainer__ = \"David Waroquiers\"\n__email__ = \"[email protected]\"\n__date__ = \"Feb 20, 2016\"\n\nimport numpy as np\n\n\nif __name__ == '__main__':\n\n cg_symbol = 'O:6'\n equiv_list = []\n\n # O:6\n if cg_symbol == 'O:6':\n opposite_points = {0: 1,\n 1: 0,\n 2: 3,\n 3: 2,\n 4: 5,\n 5: 4}\n perp_plane = {0: [2, 3, 4, 5],\n 1: [2, 3, 4, 5],\n 2: [0, 1, 4, 5],\n 3: [0, 1, 4, 5],\n 4: [0, 1, 2, 3],\n 5: [0, 1, 2, 3]}\n # 0. any point\n for i0 in range(6):\n # 1. point opposite to point 0.\n i1 = opposite_points[i0]\n # 2. one of the 4 points in the perpendicular plane\n for i2 in perp_plane[i0]:\n # 3. point opposite to point 2.\n i3 = opposite_points[i2]\n remaining = range(6)\n remaining.remove(i0)\n remaining.remove(i1)\n remaining.remove(i2)\n remaining.remove(i3)\n # 4. one of the 2 remaining points\n for i4 in remaining:\n # 5. point opposite to point 4.\n i5 = opposite_points[i4]\n equiv_list.append([i0, i1, i2, i3, i4, i5])\n\n # PB:7\n if cg_symbol == 'PB:7':\n for i0 in range(5):\n for turn in [1, -1]:\n i1 = np.mod(i0+turn, 5)\n i2 = np.mod(i1+turn, 5)\n i3 = np.mod(i2+turn, 5)\n i4 = np.mod(i3+turn, 5)\n for i5 in [5, 6]:\n i6 = 5 if i5 == 6 else 6\n equiv_list.append([i0, i1, i2, i3, i4, i5, i6])\n\n # HB:8\n if cg_symbol == 'HB:8':\n for i0 in range(6):\n for turn in [1, -1]:\n i1 = np.mod(i0 + turn, 6)\n i2 = np.mod(i1 + turn, 6)\n i3 = np.mod(i2 + turn, 6)\n i4 = np.mod(i3 + turn, 6)\n i5 = np.mod(i4 + turn, 6)\n for i6 in [6, 7]:\n i7 = 6 if i6 == 7 else 7\n equiv_list.append([i0, i1, i2, i3, i4, i5, i6, i7])\n\n # SBT:8\n if cg_symbol == 'SBT:8':\n #0. any point on the square face without cap\n for i0 in [0, 1, 3, 4]:\n #1. point in this square face but also in the triangular plane of point 0\n #2. last point in the triangular plane of point 0\n if i0 < 3:\n i1 = 0 if i0 == 1 else 1\n i2 = 2\n else:\n i1 = 3 if i0 == 4 else 4\n i2 = 5\n #3.4.5. corresponding points in the opposite triangular plane to the one of points 0.1.2.\n i3 = np.mod(i0 + 3, 6)\n i4 = np.mod(i1 + 3, 6)\n i5 = np.mod(i2 + 3, 6)\n #6. cap point opposite to the first point\n i6 = 7 if i0 in [1, 4] else 6\n #7. last cap point\n i7 = 6 if i0 in [1, 4] else 7\n equiv_list.append([i0, i1, i2, i3, i4, i5, i6, i7])\n\n # SA:8\n if cg_symbol == 'SA:8':\n sf1 = [0, 2, 1, 3]\n sf2 = [4, 5, 7, 6]\n # 0. any point\n for i0 in range(8):\n # 1. point opposite to point 0. in the square face\n if i0 in [0, 2]:\n i1 = i0 + 1\n elif i0 in [1, 3]:\n i1 = i0 - 1\n elif i0 == 4:\n i1 = 7\n elif i0 == 5:\n i1 = 6\n elif i0 == 6:\n i1 = 5\n elif i0 == 7:\n i1 = 4\n # 2. one of the two last points in the square face\n sfleft = list(sf1) if i0 in sf1 else list(sf2)\n sfleft.remove(i0)\n sfleft.remove(i1)\n for i2 in sfleft:\n sfleft2 = list(sfleft)\n sfleft2.remove(i2)\n # 3. last point in the square face\n i3 = sfleft2[0]\n # 4. point opposite to point 3. and closest to point 0.\n i4 = 0\n\n # 3.4.5. corresponding points in the opposite triangular plane to the one of points 0.1.2.\n i3 = np.mod(i0 + 3, 6)\n i4 = np.mod(i1 + 3, 6)\n i5 = np.mod(i2 + 3, 6)\n # 6. 
cap point opposite to the first point\n i6 = 7 if i0 in [1, 4] else 6\n # 7. last cap point\n i7 = 6 if i0 in [1, 4] else 7\n equiv_list.append([i0, i1, i2, i3, i4, i5, i6, i7])\n\n print('Equivalent indices ({:d}) for {} : '.format(len(equiv_list), cg_symbol))\n print(equiv_list)"
] | [
[
"numpy.mod"
]
] |
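The `PB:7` and `HB:8` branches above generate equivalent vertex orderings by walking a ring of points in both directions with `numpy.mod`. A compact sketch of that pattern for the pentagonal ring is shown below; note that the `O:6` branch's `remaining = range(6)` would need `list(range(6))` under Python 3 before calling `.remove()`, which is why the sketch builds lists explicitly.

```python
import numpy as np

# Sketch of the ring-walking pattern used for PB:7 above: start anywhere on the
# pentagonal ring, walk it in either direction with numpy.mod, then order the
# two axial (cap) points both ways.
equiv_list = []
for i0 in range(5):
    for turn in (1, -1):
        ring = [int(np.mod(i0 + k * turn, 5)) for k in range(5)]
        for caps in ([5, 6], [6, 5]):
            equiv_list.append(ring + caps)

print(len(equiv_list))   # 5 starts * 2 directions * 2 cap orders = 20 orderings
```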
MostaSchoolOfAI/crab | [
"1c1fc21e902e4ee422ab367d691df16978972f8c"
] | [
"scikits/crab/recommenders/knn/classes.py"
] | [
"\"\"\"\nGeneralized Recommender models.\n\nThis module contains basic memory recommender interfaces used throughout\nthe whole scikit-crab package.\n\nThe interfaces are realized as abstract base classes (ie., some optional\nfunctionality is provided in the interface itself, so that the interfaces\ncan be subclassed).\n\n\"\"\"\n\n# Author: Marcel Caraciolo <[email protected]>\n#\n# License: BSD Style.\nfrom sklearn.base import BaseEstimator\nfrom .base import ItemRecommender, UserRecommender\nfrom .item_strategies import ItemsNeighborhoodStrategy\nfrom .neighborhood_strategies import NearestNeighborsStrategy\nimport numpy as np\n\n\nclass ItemBasedRecommender(ItemRecommender):\n \"\"\"\n Item Based Collaborative Filtering Recommender.\n\n\n Parameters\n -----------\n data_model: The data model instance that will be data source\n for the recommender.\n\n similarity: The Item Similarity instance that will be used to\n score the items that will be recommended.\n\n items_selection_strategy: The item candidates strategy that you\n can choose for selecting the possible items to recommend.\n default = ItemsNeighborhoodStrategy\n\n capper: bool (default=True)\n Cap the preferences with maximum and minimum preferences\n in the model.\n with_preference: bool (default=False)\n Return the recommendations with the estimated preferences if True.\n\n Attributes\n -----------\n `model`: The data model instance that will be data source\n for the recommender.\n\n `similarity`: The Item Similarity instance that will be used to\n score the items that will be recommended.\n\n `items_selection_strategy`: The item candidates strategy that you\n can choose for selecting the possible items to recommend.\n default = ItemsNeighborhoodStrategy\n\n `capper`: bool (default=True)\n Cap the preferences with maximum and minimum preferences\n in the model.\n `with_preference`: bool (default=False)\n Return the recommendations with the estimated preferences if True.\n\n Examples\n -----------\n >>> from scikits.crab.models.classes import MatrixPreferenceDataModel\n >>> from scikits.crab.recommenders.knn.classes import ItemBasedRecommender\n >>> from scikits.crab.similarities.basic_similarities import ItemSimilarity\n >>> from scikits.crab.recommenders.knn.item_strategies import ItemsNeighborhoodStrategy\n >>> from scikits.crab.metrics.pairwise import euclidean_distances\n >>> movies = {'Marcel Caraciolo': {'Lady in the Water': 2.5, \\\n 'Snakes on a Plane': 3.5, \\\n 'Just My Luck': 3.0, 'Superman Returns': 3.5, 'You, Me and Dupree': 2.5, \\\n 'The Night Listener': 3.0}, \\\n 'Paola Pow': {'Lady in the Water': 3.0, 'Snakes on a Plane': 3.5, \\\n 'Just My Luck': 1.5, 'Superman Returns': 5.0, 'The Night Listener': 3.0, \\\n 'You, Me and Dupree': 3.5}, \\\n 'Leopoldo Pires': {'Lady in the Water': 2.5, 'Snakes on a Plane': 3.0, \\\n 'Superman Returns': 3.5, 'The Night Listener': 4.0}, \\\n 'Lorena Abreu': {'Snakes on a Plane': 3.5, 'Just My Luck': 3.0, \\\n 'The Night Listener': 4.5, 'Superman Returns': 4.0, \\\n 'You, Me and Dupree': 2.5}, \\\n 'Steve Gates': {'Lady in the Water': 3.0, 'Snakes on a Plane': 4.0, \\\n 'Just My Luck': 2.0, 'Superman Returns': 3.0, 'The Night Listener': 3.0, \\\n 'You, Me and Dupree': 2.0}, \\\n 'Sheldom': {'Lady in the Water': 3.0, 'Snakes on a Plane': 4.0, \\\n 'The Night Listener': 3.0, 'Superman Returns': 5.0, \\\n 'You, Me and Dupree': 3.5}, \\\n 'Penny Frewman': {'Snakes on a Plane':4.5,'You, Me and Dupree':1.0, \\\n 'Superman Returns':4.0}, \\\n 'Maria Gabriela': {}}\n >>> model = 
MatrixPreferenceDataModel(movies)\n >>> items_strategy = ItemsNeighborhoodStrategy()\n >>> similarity = ItemSimilarity(model, euclidean_distances)\n >>> recsys = ItemBasedRecommender(model, similarity, items_strategy)\n >>> #Return the recommendations for the given user.\n >>> recsys.recommend('Leopoldo Pires')\n ['Just My Luck', 'You, Me and Dupree']\n >>> #Return the 2 explanations for the given recommendation.\n >>> recsys.recommended_because('Leopoldo Pires', 'Just My Luck',2)\n ['The Night Listener', 'Superman Returns']\n\n Notes\n -----------\n This ItemBasedRecommender does not yet provide\n suppot for rescorer functions.\n\n References\n -----------\n Item-based collaborative filtering recommendation algorithms by Sarwar\n http://portal.acm.org/citation.cfm?id=372071\n\n \"\"\"\n\n def __init__(self, model, similarity, items_selection_strategy=None,\n capper=True, with_preference=False):\n ItemRecommender.__init__(self, model, with_preference)\n self.similarity = similarity\n self.capper = capper\n if items_selection_strategy is None:\n self.items_selection_strategy = ItemsNeighborhoodStrategy()\n else:\n self.items_selection_strategy = items_selection_strategy\n\n def recommend(self, user_id, how_many=None, **params):\n '''\n Return a list of recommended items, ordered from most strongly\n recommend to least.\n\n Parameters\n ----------\n user_id: int or string\n User for which recommendations are to be computed.\n how_many: int\n Desired number of recommendations (default=None ALL)\n\n '''\n self._set_params(**params)\n\n candidate_items = self.all_other_items(user_id)\n\n recommendable_items = self._top_matches(user_id, \\\n candidate_items, how_many)\n\n return recommendable_items\n\n def estimate_preference(self, user_id, item_id, **params):\n '''\n Parameters\n ----------\n user_id: int or string\n User for which recommendations are to be computed.\n\n item_id: int or string\n ID of item for which wants to find the estimated preference.\n\n Returns\n -------\n Return an estimated preference if the user has not expressed a\n preference for the item, or else the user's actual preference for the\n item. If a preference cannot be estimated, returns None.\n '''\n preference = self.model.preference_value(user_id, item_id)\n\n if not np.isnan(preference):\n return preference\n\n #TODO: It needs optimization\n prefs = self.model.preferences_from_user(user_id)\n\n if not self.model.has_preference_values():\n prefs = [(pref, 1.0) for pref in prefs]\n\n similarities = \\\n np.array([self.similarity.get_similarity(item_id, to_item_id) \\\n for to_item_id, pref in prefs if to_item_id != item_id]).flatten()\n\n prefs = np.array([pref for it, pref in prefs])\n prefs_sim = np.sum(prefs[~np.isnan(similarities)] *\n similarities[~np.isnan(similarities)])\n total_similarity = np.sum(similarities)\n\n #Throw out the estimate if it was based on no data points,\n #of course, but also if based on\n #just one. 
This is a bit of a band-aid on the 'stock'\n #item-based algorithm for the moment.\n #The reason is that in this case the estimate is, simply,\n #the user's rating for one item\n #that happened to have a defined similarity.\n #The similarity score doesn't matter, and that\n #seems like a bad situation.\n if total_similarity == 0.0 or \\\n not similarities[~np.isnan(similarities)].size:\n return np.nan\n\n estimated = prefs_sim / total_similarity\n\n if self.capper:\n max_p = self.model.maximum_preference_value()\n min_p = self.model.minimum_preference_value()\n estimated = max_p if estimated > max_p else min_p \\\n if estimated < min_p else estimated\n return estimated\n\n def all_other_items(self, user_id, **params):\n '''\n Parameters\n ----------\n user_id: int or string\n User for which recommendations are to be computed.\n\n Returns\n ---------\n Return items in the `model` for which the user has not expressed\n the preference and could possibly be recommended to the user.\n\n '''\n return self.items_selection_strategy.candidate_items(user_id, \\\n self.model)\n\n def _top_matches(self, source_id, target_ids, how_many=None, **params):\n '''\n Parameters\n ----------\n target_ids: array of shape [n_target_ids]\n\n source_id: int or string\n item id to compare against.\n\n how_many: int\n Desired number of most top items to recommend (default=None ALL)\n\n Returns\n --------\n Return the top N matches\n It can be user_ids or item_ids.\n '''\n #Empty target_ids\n if target_ids.size == 0:\n return np.array([])\n\n estimate_preferences = np.vectorize(self.estimate_preference)\n\n preferences = estimate_preferences(source_id, target_ids)\n\n preference_values = preferences[~np.isnan(preferences)]\n target_ids = target_ids[~np.isnan(preferences)]\n\n sorted_preferences = np.lexsort((preference_values,))[::-1]\n\n sorted_preferences = sorted_preferences[0:how_many] \\\n if how_many and sorted_preferences.size > how_many \\\n else sorted_preferences\n\n if self.with_preference:\n top_n_recs = [(target_ids[ind], \\\n preferences[ind]) for ind in sorted_preferences]\n else:\n top_n_recs = [target_ids[ind]\n for ind in sorted_preferences]\n\n return top_n_recs\n\n def most_similar_items(self, item_id, how_many=None):\n '''\n Return the most similar items to the given item, ordered\n from most similar to least.\n\n Parameters\n -----------\n item_id: int or string\n ID of item for which to find most similar other items\n\n how_many: int\n Desired number of most similar items to find (default=None ALL)\n '''\n old_how_many = self.similarity.num_best\n #+1 since it returns the identity.\n self.similarity.num_best = how_many + 1 \\\n if how_many is not None else None\n similarities = self.similarity[item_id]\n self.similarity.num_best = old_how_many\n\n return np.array([item for item, pref in similarities \\\n if item != item_id and not np.isnan(pref)])\n\n def recommended_because(self, user_id, item_id, how_many=None, **params):\n '''\n Returns the items that were most influential in recommending a\n given item to a given user. 
In most implementations, this\n method will return items that the user prefers and that\n are similar to the given item.\n\n Parameters\n -----------\n user_id : int or string\n ID of the user who was recommended the item\n\n item_id: int or string\n ID of item that was recommended\n\n how_many: int\n Maximum number of items to return (default=None ALL)\n\n Returns\n ----------\n The list of items ordered from most influential in\n recommended the given item to least\n '''\n preferences = self.model.preferences_from_user(user_id)\n\n if self.model.has_preference_values():\n similarities = \\\n np.array([self.similarity.get_similarity(item_id, to_item_id) \\\n for to_item_id, pref in preferences\n if to_item_id != item_id]).flatten()\n prefs = np.array([pref for it, pref in preferences])\n item_ids = np.array([it for it, pref in preferences])\n else:\n similarities = \\\n np.array([self.similarity.get_similarity(item_id, to_item_id) \\\n for to_item_id in preferences\n if to_item_id != item_id]).flatten()\n prefs = np.array([1.0 for it in preferences])\n item_ids = np.array(preferences)\n\n scores = prefs[~np.isnan(similarities)] * \\\n (1.0 + similarities[~np.isnan(similarities)])\n\n sorted_preferences = np.lexsort((scores,))[::-1]\n\n sorted_preferences = sorted_preferences[0:how_many] \\\n if how_many and sorted_preferences.size > how_many \\\n else sorted_preferences\n\n if self.with_preference:\n top_n_recs = [(item_ids[ind], \\\n prefs[ind]) for ind in sorted_preferences]\n else:\n top_n_recs = [item_ids[ind]\n for ind in sorted_preferences]\n\n return top_n_recs\n\n\n#=====================\n#User Based Recommender\n\nclass UserBasedRecommender(UserRecommender):\n \"\"\"\n User Based Collaborative Filtering Recommender.\n\n\n Parameters\n -----------\n data_model: The data model instance that will be data source\n for the recommender.\n\n similarity: The User Similarity instance that will be used to\n score the users that are the most similar to the user.\n\n neighborhood_strategy: The user neighborhood strategy that you\n can choose for selecting the most similar users to find\n the items to recommend.\n default = NearestNeighborsStrategy\n\n capper: bool (default=True)\n Cap the preferences with maximum and minimum preferences\n in the model.\n with_preference: bool (default=False)\n Return the recommendations with the estimated preferences if True.\n\n Attributes\n -----------\n `model`: The data model instance that will be data source\n for the recommender.\n\n `similarity`: The User Similarity instance that will be used to\n score the users that are the most similar to the user.\n\n `neighborhood_strategy`: The user neighborhood strategy that you\n can choose for selecting the most similar users to find\n the items to recommend.\n default = NearestNeighborsStrategy\n\n `capper`: bool (default=True)\n Cap the preferences with maximum and minimum preferences\n in the model.\n `with_preference`: bool (default=False)\n Return the recommendations with the estimated preferences if True.\n\n Examples\n -----------\n >>> from scikits.crab.models.classes import MatrixPreferenceDataModel\n >>> from scikits.crab.recommenders.knn.classes import UserBasedRecommender\n >>> from scikits.crab.similarities.basic_similarities import UserSimilarity\n >>> from scikits.crab.recommenders.knn.neighborhood_strategies import NearestNeighborsStrategy\n >>> from scikits.crab.metrics.pairwise import euclidean_distances\n >>> movies = {'Marcel Caraciolo': {'Lady in the Water': 2.5, \\\n 'Snakes on a Plane': 
3.5, \\\n 'Just My Luck': 3.0, 'Superman Returns': 3.5, 'You, Me and Dupree': 2.5, \\\n 'The Night Listener': 3.0}, \\\n 'Paola Pow': {'Lady in the Water': 3.0, 'Snakes on a Plane': 3.5, \\\n 'Just My Luck': 1.5, 'Superman Returns': 5.0, 'The Night Listener': 3.0, \\\n 'You, Me and Dupree': 3.5}, \\\n 'Leopoldo Pires': {'Lady in the Water': 2.5, 'Snakes on a Plane': 3.0, \\\n 'Superman Returns': 3.5, 'The Night Listener': 4.0}, \\\n 'Lorena Abreu': {'Snakes on a Plane': 3.5, 'Just My Luck': 3.0, \\\n 'The Night Listener': 4.5, 'Superman Returns': 4.0, \\\n 'You, Me and Dupree': 2.5}, \\\n 'Steve Gates': {'Lady in the Water': 3.0, 'Snakes on a Plane': 4.0, \\\n 'Just My Luck': 2.0, 'Superman Returns': 3.0, 'The Night Listener': 3.0, \\\n 'You, Me and Dupree': 2.0}, \\\n 'Sheldom': {'Lady in the Water': 3.0, 'Snakes on a Plane': 4.0, \\\n 'The Night Listener': 3.0, 'Superman Returns': 5.0, \\\n 'You, Me and Dupree': 3.5}, \\\n 'Penny Frewman': {'Snakes on a Plane':4.5,'You, Me and Dupree':1.0, \\\n 'Superman Returns':4.0}, \\\n 'Maria Gabriela': {}}\n >>> model = MatrixPreferenceDataModel(movies)\n >>> nhood_strategy = NearestNeighborsStrategy()\n >>> similarity = UserSimilarity(model, euclidean_distances)\n >>> recsys = UserBasedRecommender(model, similarity, nhood_strategy)\n >>> #Return the recommendations for the given user.\n >>> recsys.recommend('Leopoldo Pires')\n ['Just My Luck', 'You, Me and Dupree']\n >>> #Return the 2 explanations for the given recommendation.\n >>> recsys.recommended_because('Leopoldo Pires', 'Just My Luck',2)\n ['Lorena Abreu', 'Marcel Caraciolo']\n\n Notes\n -----------\n This UserBasedRecommender does not yet provide\n suppot for rescorer functions.\n\n References\n -----------\n User-based collaborative filtering recommendation algorithms by\n\n \"\"\"\n\n def __init__(self, model, similarity, neighborhood_strategy=None,\n capper=True, with_preference=False):\n UserRecommender.__init__(self, model, with_preference)\n self.similarity = similarity\n self.capper = capper\n if neighborhood_strategy is None:\n self.neighborhood_strategy = NearestNeighborsStrategy()\n else:\n self.neighborhood_strategy = neighborhood_strategy\n\n def all_other_items(self, user_id, **params):\n '''\n Parameters\n ----------\n user_id: int or string\n User for which recommendations are to be computed. 
(default= 'user_similarity')\n\n Optional Parameters\n --------------------\n n_similarity: string\n The similarity used in the neighborhood strategy\n\n distance: the metrics.pairwise function to set.\n The pairwise function to compute the similarity (default = euclidean_distances)\n\n nhood_size: int\n The neighborhood size (default=None ALL)\n\n minimal_similarity: float\n minimal similarity required for neighbors (default = 0.0)\n\n sampling_rate: int\n percentage of users to consider when building neighborhood\n (default = 1)\n\n Returns\n ---------\n Return items in the `model` for which the user has not expressed\n the preference and could possibly be recommended to the user.\n\n '''\n n_similarity = params.pop('n_similarity', 'user_similarity')\n distance = params.pop('distance', self.similarity.distance)\n nhood_size = params.pop('nhood_size', None)\n\n nearest_neighbors = self.neighborhood_strategy.user_neighborhood(user_id,\n self.model, n_similarity, distance, nhood_size, **params)\n\n items_from_user_id = self.model.items_from_user(user_id)\n possible_items = []\n for to_user_id in nearest_neighbors:\n possible_items.extend(self.model.items_from_user(to_user_id))\n\n possible_items = np.unique(np.array(possible_items).flatten())\n\n return np.setdiff1d(possible_items, items_from_user_id)\n\n def estimate_preference(self, user_id, item_id, **params):\n '''\n Parameters\n ----------\n user_id: int or string\n User for which recommendations are to be computed.\n\n item_id: int or string\n ID of item for which wants to find the estimated preference.\n\n Returns\n -------\n Return an estimated preference if the user has not expressed a\n preference for the item, or else the user's actual preference for the\n item. If a preference cannot be estimated, returns None.\n '''\n\n preference = self.model.preference_value(user_id, item_id)\n if not np.isnan(preference):\n return preference\n\n n_similarity = params.pop('n_similarity', 'user_similarity')\n distance = params.pop('distance', self.similarity.distance)\n nhood_size = params.pop('nhood_size', None)\n\n nearest_neighbors = self.neighborhood_strategy.user_neighborhood(user_id,\n self.model, n_similarity, distance, nhood_size, **params)\n\n preference = 0.0\n total_similarity = 0.0\n\n similarities = np.array([self.similarity.get_similarity(user_id, to_user_id)\n for to_user_id in nearest_neighbors]).flatten()\n\n prefs = np.array([self.model.preference_value(to_user_id, item_id)\n for to_user_id in nearest_neighbors])\n\n \n # prefs = prefs[~np.isnan(prefs)]\n # similarities = similarities[~np.isnan(prefs)]\n\n prefs_sim = np.sum(prefs[~np.isnan(similarities)] *\n similarities[~np.isnan(similarities)])\n total_similarity = np.sum(similarities)\n\n #Throw out the estimate if it was based on no data points,\n #of course, but also if based on just one. This is a bit\n #of a band-aid on the 'stock' item-based algorithm for\n #the moment. The reason is that in this case the estimate\n #is, simply, the user's rating for one item that happened\n #to have a defined similarity. 
The similarity score doesn't\n #matter, and that seems like a bad situation.\n if total_similarity == 0.0 or \\\n not similarities[~np.isnan(similarities)].size:\n return np.nan\n\n estimated = prefs_sim / total_similarity\n\n if self.capper:\n max_p = self.model.maximum_preference_value()\n min_p = self.model.minimum_preference_value()\n estimated = max_p if estimated > max_p else min_p \\\n if estimated < min_p else estimated\n\n return estimated\n\n def most_similar_users(self, user_id, how_many=None):\n '''\n Return the most similar users to the given user, ordered\n from most similar to least.\n\n Parameters\n -----------\n user_id: int or string\n ID of user for which to find most similar other users\n\n how_many: int\n Desired number of most similar users to find (default=None ALL)\n '''\n old_how_many = self.similarity.num_best\n #+1 since it returns the identity.\n self.similarity.num_best = how_many + 1 \\\n if how_many is not None else None\n similarities = self.similarity[user_id]\n self.similarity.num_best = old_how_many\n return np.array([to_user_id for to_user_id, pref in similarities \\\n if user_id != to_user_id and not np.isnan(pref)])\n\n def recommend(self, user_id, how_many=None, **params):\n '''\n Return a list of recommended items, ordered from most strongly\n recommend to least.\n\n Parameters\n ----------\n user_id: int or string\n User for which recommendations are to be computed.\n how_many: int\n Desired number of recommendations (default=None ALL)\n\n '''\n\n self.set_params(**params)\n\n candidate_items = self.all_other_items(user_id, **params)\n\n recommendable_items = self._top_matches(user_id, \\\n candidate_items, how_many)\n\n return recommendable_items\n\n def _top_matches(self, source_id, target_ids, how_many=None, **params):\n '''\n Parameters\n ----------\n target_ids: array of shape [n_target_ids]\n\n source_id: int or string\n item id to compare against.\n\n how_many: int\n Desired number of most top items to recommend (default=None ALL)\n\n Returns\n --------\n Return the top N matches\n It can be user_ids or item_ids.\n '''\n #Empty target_ids\n if target_ids.size == 0:\n return np.array([])\n\n estimate_preferences = np.vectorize(self.estimate_preference)\n\n preferences = estimate_preferences(source_id, target_ids)\n\n preference_values = preferences[~np.isnan(preferences)]\n target_ids = target_ids[~np.isnan(preferences)]\n\n sorted_preferences = np.lexsort((preference_values,))[::-1]\n\n sorted_preferences = sorted_preferences[0:how_many] \\\n if how_many and sorted_preferences.size > how_many \\\n else sorted_preferences\n\n if self.with_preference:\n top_n_recs = [(target_ids[ind], \\\n preferences[ind]) for ind in sorted_preferences]\n else:\n top_n_recs = [target_ids[ind]\n for ind in sorted_preferences]\n\n return top_n_recs\n\n def recommended_because(self, user_id, item_id, how_many=None, **params):\n '''\n Returns the users that were most influential in recommending a\n given item to a given user. 
In most implementations, this\n method will return users that prefers the recommended item and that\n are similar to the given user.\n\n Parameters\n -----------\n user_id : int or string\n ID of the user who was recommended the item\n\n item_id: int or string\n ID of item that was recommended\n\n how_many: int\n Maximum number of items to return (default=None ALL)\n\n Returns\n ----------\n The list of items ordered from most influential in\n recommended the given item to least\n '''\n preferences = self.model.preferences_for_item(item_id)\n\n if self.model.has_preference_values():\n similarities = \\\n np.array([self.similarity.get_similarity(user_id, to_user_id) \\\n for to_user_id, pref in preferences\n if to_user_id != user_id]).flatten()\n prefs = np.array([pref for it, pref in preferences])\n user_ids = np.array([usr for usr, pref in preferences])\n else:\n similarities = \\\n np.array([self.similarity.get_similarity(user_id, to_user_id) \\\n for to_user_id in preferences\n if to_user_id != user_id]).flatten()\n prefs = np.array([1.0 for it in preferences])\n user_ids = np.array(preferences)\n\n scores = prefs[~np.isnan(similarities)] * \\\n (1.0 + similarities[~np.isnan(similarities)])\n\n sorted_preferences = np.lexsort((scores,))[::-1]\n\n sorted_preferences = sorted_preferences[0:how_many] \\\n if how_many and sorted_preferences.size > how_many \\\n else sorted_preferences\n\n if self.with_preference:\n top_n_recs = [(user_ids[ind], \\\n prefs[ind]) for ind in sorted_preferences]\n else:\n top_n_recs = [user_ids[ind]\n for ind in sorted_preferences]\n\n return top_n_recs\n"
] | [
[
"numpy.sum",
"numpy.vectorize",
"numpy.setdiff1d",
"numpy.lexsort",
"numpy.isnan",
"numpy.array"
]
] |
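Both recommenders above share the same core arithmetic: mask out NaN similarities, form a similarity-weighted preference estimate, then rank candidates with `numpy.lexsort`. A minimal sketch of that pattern with invented similarity and preference values, plus the `numpy.setdiff1d` step used to find not-yet-rated items:

```python
import numpy as np

# Minimal sketch of the arithmetic shared by estimate_preference/_top_matches
# above; all numbers and item ids are invented.
similarities = np.array([0.9, np.nan, 0.4, 0.7])
prefs        = np.array([4.0, 3.0, 5.0, 2.0])

mask = ~np.isnan(similarities)                       # drop undefined similarities
estimate = np.sum(prefs[mask] * similarities[mask]) / np.sum(similarities[mask])

# Candidate items are everything the neighbours rated that the user has not:
all_items  = np.array(["a", "b", "c", "d", "e"])
seen_items = np.array(["b", "d"])
candidates = np.setdiff1d(all_items, seen_items)     # numpy.setdiff1d, as above

# Rank candidates: lexsort is ascending, so reverse the order for "best first".
scores = np.array([0.2, 0.8, 0.5])
order = np.lexsort((scores,))[::-1]
print(estimate, candidates[order][:2])
```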
Dipeshtamboli/domain-shift | [
"3f29577df6ab7269ad69a5fc651b63ed78708f0b"
] | [
"data_statistics.py"
] | [
"import pdb\r\nimport numpy as np\r\nimport os\r\nimport glob\r\nimport torch\r\nimport torch.nn as nn\r\nimport torchvision.models as models\r\nimport torchvision.transforms as transforms\r\nfrom torch.autograd import Variable\r\nfrom PIL import Image\r\nfrom tqdm import tqdm\r\n\r\nrelative_path = 'datasets/resnet_features_subset_office31/'\r\n# relative_path = 'datasets/office-31_10_class_subset/'\r\n\r\nall_npys = glob.glob(os.path.dirname(os.path.realpath(__file__))+'/'+relative_path+\"**/*.npy\" , recursive=True)\r\n\r\nnum_plot_classes = 31\r\nall_features = np.zeros((num_plot_classes*3*5,1000))\r\nall_feat = {\r\n \"amazon\": np.zeros((num_plot_classes*5,1000)),\r\n \"dslr\": np.zeros((num_plot_classes*5,1000)),\r\n \"webcam\": np.zeros((num_plot_classes*5,1000)),\r\n}\r\ndomain_names =[]\r\nclass_names = []\r\ncounter = 0\r\nfor i, npy_loc in enumerate(all_npys):\r\n unique_labels, unique_counts = np.unique(class_names, return_counts=True)\r\n domain = npy_loc.split('/')[-3]\r\n class_name = npy_loc.split('/')[-2]\r\n\r\n if len(np.unique(class_names)) < num_plot_classes or class_name in class_names:\r\n all_features[counter] = np.load(npy_loc)\r\n counter += 1\r\n domain_names.append(domain)\r\n class_names.append(class_name)"
] | [
[
"numpy.load",
"numpy.unique",
"numpy.zeros"
]
] |
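The script above preallocates a feature matrix with `numpy.zeros`, fills it from `.npy` files via `numpy.load`, and counts classes with `numpy.unique`. A self-contained sketch of that loop which writes a few temporary `.npy` files instead of reading the office-31 feature dumps; all file names, class names, and shapes are illustrative.

```python
import os
import tempfile
import numpy as np

# Self-contained sketch of the feature-collection loop above: a few .npy files
# are written to a temp directory instead of the resnet_features_subset paths.
tmpdir = tempfile.mkdtemp()
class_of_file = {"mug_0.npy": "mug", "mug_1.npy": "mug", "kbd_0.npy": "keyboard"}
for fname in class_of_file:
    np.save(os.path.join(tmpdir, fname), np.random.rand(1000))

all_features = np.zeros((len(class_of_file), 1000))   # preallocate, as above
class_names = []
for i, fname in enumerate(sorted(class_of_file)):
    all_features[i] = np.load(os.path.join(tmpdir, fname))   # numpy.load per file
    class_names.append(class_of_file[fname])

labels, counts = np.unique(class_names, return_counts=True)
print(labels, counts)    # e.g. ['keyboard' 'mug'] [1 2]
```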
hephaex/probability | [
"740d0db0bf2b1e1a04cfd0b55481c44380b3cb05"
] | [
"tensorflow_probability/python/distributions/poisson_lognormal.py"
] | [
"# Copyright 2018 The TensorFlow Probability Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"The PoissonLogNormalQuadratureCompound distribution class.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\n# Dependency imports\nimport numpy as np\nimport tensorflow as tf\n\nfrom tensorflow_probability.python.bijectors import exp as exp_bijector\nfrom tensorflow_probability.python.distributions import categorical\nfrom tensorflow_probability.python.distributions import distribution\nfrom tensorflow_probability.python.distributions import normal\nfrom tensorflow_probability.python.distributions import poisson\nfrom tensorflow_probability.python.distributions import seed_stream\nfrom tensorflow_probability.python.distributions import transformed_distribution\nfrom tensorflow_probability.python.internal import distribution_util\nfrom tensorflow_probability.python.internal import dtype_util\nfrom tensorflow_probability.python.internal import reparameterization\n\n\n__all__ = [\n \"PoissonLogNormalQuadratureCompound\",\n \"quadrature_scheme_lognormal_gauss_hermite\",\n \"quadrature_scheme_lognormal_quantiles\",\n]\n\n\ndef quadrature_scheme_lognormal_gauss_hermite(\n loc, scale, quadrature_size,\n validate_args=False, name=None): # pylint: disable=unused-argument\n \"\"\"Use Gauss-Hermite quadrature to form quadrature on positive-reals.\n\n Note: for a given `quadrature_size`, this method is generally less accurate\n than `quadrature_scheme_lognormal_quantiles`.\n\n Args:\n loc: `float`-like (batch of) scalar `Tensor`; the location parameter of\n the LogNormal prior.\n scale: `float`-like (batch of) scalar `Tensor`; the scale parameter of\n the LogNormal prior.\n quadrature_size: Python `int` scalar representing the number of quadrature\n points.\n validate_args: Python `bool`, default `False`. When `True` distribution\n parameters are checked for validity despite possibly degrading runtime\n performance. 
When `False` invalid inputs may silently render incorrect\n outputs.\n name: Python `str` name prefixed to Ops created by this class.\n\n Returns:\n grid: (Batch of) length-`quadrature_size` vectors representing the\n `log_rate` parameters of a `Poisson`.\n probs: (Batch of) length-`quadrature_size` vectors representing the\n weight associate with each `grid` value.\n \"\"\"\n with tf.name_scope(name, \"vector_diffeomixture_quadrature_gauss_hermite\",\n [loc, scale]):\n grid, probs = np.polynomial.hermite.hermgauss(deg=quadrature_size)\n grid = grid.astype(loc.dtype.as_numpy_dtype)\n probs = probs.astype(loc.dtype.as_numpy_dtype)\n probs /= np.linalg.norm(probs, ord=1, keepdims=True)\n probs = tf.convert_to_tensor(value=probs, name=\"probs\", dtype=loc.dtype)\n # The following maps the broadcast of `loc` and `scale` to each grid\n # point, i.e., we are creating several log-rates that correspond to the\n # different Gauss-Hermite quadrature points and (possible) batches of\n # `loc` and `scale`.\n grid = (loc[..., tf.newaxis] + np.sqrt(2.) * scale[..., tf.newaxis] * grid)\n return grid, probs\n\n\ndef quadrature_scheme_lognormal_quantiles(\n loc, scale, quadrature_size,\n validate_args=False, name=None):\n \"\"\"Use LogNormal quantiles to form quadrature on positive-reals.\n\n Args:\n loc: `float`-like (batch of) scalar `Tensor`; the location parameter of\n the LogNormal prior.\n scale: `float`-like (batch of) scalar `Tensor`; the scale parameter of\n the LogNormal prior.\n quadrature_size: Python `int` scalar representing the number of quadrature\n points.\n validate_args: Python `bool`, default `False`. When `True` distribution\n parameters are checked for validity despite possibly degrading runtime\n performance. When `False` invalid inputs may silently render incorrect\n outputs.\n name: Python `str` name prefixed to Ops created by this class.\n\n Returns:\n grid: (Batch of) length-`quadrature_size` vectors representing the\n `log_rate` parameters of a `Poisson`.\n probs: (Batch of) length-`quadrature_size` vectors representing the\n weight associate with each `grid` value.\n \"\"\"\n with tf.name_scope(name, \"quadrature_scheme_lognormal_quantiles\",\n [loc, scale]):\n # Create a LogNormal distribution.\n dist = transformed_distribution.TransformedDistribution(\n distribution=normal.Normal(loc=loc, scale=scale),\n bijector=exp_bijector.Exp(),\n validate_args=validate_args)\n batch_ndims = dist.batch_shape.ndims\n if batch_ndims is None:\n batch_ndims = tf.shape(input=dist.batch_shape_tensor())[0]\n\n def _compute_quantiles():\n \"\"\"Helper to build quantiles.\"\"\"\n # Omit {0, 1} since they might lead to Inf/NaN.\n zero = tf.zeros([], dtype=dist.dtype)\n edges = tf.linspace(zero, 1., quadrature_size + 3)[1:-1]\n # Expand edges so its broadcast across batch dims.\n edges = tf.reshape(\n edges,\n shape=tf.concat(\n [[-1], tf.ones([batch_ndims], dtype=tf.int32)], axis=0))\n quantiles = dist.quantile(edges)\n # Cyclically permute left by one.\n perm = tf.concat([tf.range(1, 1 + batch_ndims), [0]], axis=0)\n quantiles = tf.transpose(a=quantiles, perm=perm)\n return quantiles\n quantiles = _compute_quantiles()\n\n # Compute grid as quantile midpoints.\n grid = (quantiles[..., :-1] + quantiles[..., 1:]) / 2.\n # Set shape hints.\n grid.set_shape(dist.batch_shape.concatenate([quadrature_size]))\n\n # By construction probs is constant, i.e., `1 / quadrature_size`. 
This is\n # important, because non-constant probs leads to non-reparameterizable\n # samples.\n probs = tf.fill(\n dims=[quadrature_size], value=1. / tf.cast(quadrature_size, dist.dtype))\n\n return grid, probs\n\n\nclass PoissonLogNormalQuadratureCompound(distribution.Distribution):\n \"\"\"`PoissonLogNormalQuadratureCompound` distribution.\n\n The `PoissonLogNormalQuadratureCompound` is an approximation to a\n Poisson-LogNormal [compound distribution](\n https://en.wikipedia.org/wiki/Compound_probability_distribution), i.e.,\n\n ```none\n p(k|loc, scale)\n = int_{R_+} dl LogNormal(l | loc, scale) Poisson(k | l)\n approx= sum{ prob[d] Poisson(k | lambda(grid[d])) : d=0, ..., deg-1 }\n ```\n\n By default, the `grid` is chosen as quantiles of the `LogNormal` distribution\n parameterized by `loc`, `scale` and the `prob` vector is\n `[1. / quadrature_size]*quadrature_size`.\n\n In the non-approximation case, a draw from the LogNormal prior represents the\n Poisson rate parameter. Unfortunately, the non-approximate distribution lacks\n an analytical probability density function (pdf). Therefore the\n `PoissonLogNormalQuadratureCompound` class implements an approximation based\n on [quadrature](https://en.wikipedia.org/wiki/Numerical_integration).\n\n Note: although the `PoissonLogNormalQuadratureCompound` is approximately the\n Poisson-LogNormal compound distribution, it is itself a valid distribution.\n Viz., it possesses a `sample`, `log_prob`, `mean`, `variance`, etc. which are\n all mutually consistent.\n\n #### Mathematical Details\n\n The `PoissonLogNormalQuadratureCompound` approximates a Poisson-LogNormal\n [compound distribution](\n https://en.wikipedia.org/wiki/Compound_probability_distribution). Using\n variable-substitution and [numerical quadrature](\n https://en.wikipedia.org/wiki/Numerical_integration) (default:\n based on `LogNormal` quantiles) we can redefine the distribution to be a\n parameter-less convex combination of `deg` different Poisson samples.\n\n That is, defined over positive integers, this distribution is parameterized\n by a (batch of) `loc` and `scale` scalars.\n\n The probability density function (pdf) is,\n\n ```none\n pdf(k | loc, scale, deg)\n = sum{ prob[d] Poisson(k | lambda=exp(grid[d]))\n : d=0, ..., deg-1 }\n ```\n\n #### Examples\n\n ```python\n tfd = tfp.distributions\n\n # Create two batches of PoissonLogNormalQuadratureCompounds, one with\n # prior `loc = 0.` and another with `loc = 1.` In both cases `scale = 1.`\n pln = tfd.PoissonLogNormalQuadratureCompound(\n loc=[0., -0.5],\n scale=1.,\n quadrature_size=10,\n validate_args=True)\n \"\"\"\n\n def __init__(self,\n loc,\n scale,\n quadrature_size=8,\n quadrature_fn=quadrature_scheme_lognormal_quantiles,\n validate_args=False,\n allow_nan_stats=True,\n name=\"PoissonLogNormalQuadratureCompound\"):\n \"\"\"Constructs the PoissonLogNormalQuadratureCompound`.\n\n Note: `probs` returned by (optional) `quadrature_fn` are presumed to be\n either a length-`quadrature_size` vector or a batch of vectors in 1-to-1\n correspondence with the returned `grid`. 
(I.e., broadcasting is only\n partially supported.)\n\n Args:\n loc: `float`-like (batch of) scalar `Tensor`; the location parameter of\n the LogNormal prior.\n scale: `float`-like (batch of) scalar `Tensor`; the scale parameter of\n the LogNormal prior.\n quadrature_size: Python `int` scalar representing the number of quadrature\n points.\n quadrature_fn: Python callable taking `loc`, `scale`,\n `quadrature_size`, `validate_args` and returning `tuple(grid, probs)`\n representing the LogNormal grid and corresponding normalized weight.\n normalized) weight.\n Default value: `quadrature_scheme_lognormal_quantiles`.\n validate_args: Python `bool`, default `False`. When `True` distribution\n parameters are checked for validity despite possibly degrading runtime\n performance. When `False` invalid inputs may silently render incorrect\n outputs.\n allow_nan_stats: Python `bool`, default `True`. When `True`,\n statistics (e.g., mean, mode, variance) use the value \"`NaN`\" to\n indicate the result is undefined. When `False`, an exception is raised\n if one or more of the statistic's batch members are undefined.\n name: Python `str` name prefixed to Ops created by this class.\n\n Raises:\n TypeError: if `quadrature_grid` and `quadrature_probs` have different base\n `dtype`.\n \"\"\"\n parameters = dict(locals())\n with tf.name_scope(name, values=[loc, scale]) as name:\n dtype = dtype_util.common_dtype([loc, scale], tf.float32)\n if loc is not None:\n loc = tf.convert_to_tensor(value=loc, name=\"loc\", dtype=dtype)\n if scale is not None:\n scale = tf.convert_to_tensor(value=scale, dtype=dtype, name=\"scale\")\n self._quadrature_grid, self._quadrature_probs = tuple(quadrature_fn(\n loc, scale, quadrature_size, validate_args))\n\n dt = self._quadrature_grid.dtype\n if dt.base_dtype != self._quadrature_probs.dtype.base_dtype:\n raise TypeError(\"Quadrature grid dtype ({}) does not match quadrature \"\n \"probs dtype ({}).\".format(\n dt.name, self._quadrature_probs.dtype.name))\n\n self._distribution = poisson.Poisson(\n log_rate=self._quadrature_grid,\n validate_args=validate_args,\n allow_nan_stats=allow_nan_stats)\n\n self._mixture_distribution = categorical.Categorical(\n logits=tf.math.log(self._quadrature_probs),\n validate_args=validate_args,\n allow_nan_stats=allow_nan_stats)\n\n self._loc = loc\n self._scale = scale\n self._quadrature_size = quadrature_size\n\n super(PoissonLogNormalQuadratureCompound, self).__init__(\n dtype=dt,\n reparameterization_type=reparameterization.NOT_REPARAMETERIZED,\n validate_args=validate_args,\n allow_nan_stats=allow_nan_stats,\n parameters=parameters,\n graph_parents=[loc, scale],\n name=name)\n\n @property\n def mixture_distribution(self):\n \"\"\"Distribution which randomly selects a Poisson with quadrature param.\"\"\"\n return self._mixture_distribution\n\n @property\n def distribution(self):\n \"\"\"Base Poisson parameterized by a quadrature grid.\"\"\"\n return self._distribution\n\n @property\n def loc(self):\n \"\"\"Location parameter of the LogNormal prior.\"\"\"\n return self._loc\n\n @property\n def scale(self):\n \"\"\"Scale parameter of the LogNormal prior.\"\"\"\n return self._scale\n\n @property\n def quadrature_size(self):\n return self._quadrature_size\n\n def _batch_shape_tensor(self):\n return tf.broadcast_dynamic_shape(\n self.distribution.batch_shape_tensor(),\n tf.shape(input=self.mixture_distribution.logits))[:-1]\n\n def _batch_shape(self):\n return tf.broadcast_static_shape(\n self.distribution.batch_shape,\n 
self.mixture_distribution.logits.shape)[:-1]\n\n def _event_shape(self):\n return tf.TensorShape([])\n\n def _sample_n(self, n, seed=None):\n # Get ids as a [n, batch_size]-shaped matrix, unless batch_shape=[] then get\n # ids as a [n]-shaped vector.\n batch_size = self.batch_shape.num_elements()\n if batch_size is None:\n batch_size = tf.reduce_prod(input_tensor=self.batch_shape_tensor())\n # We need to \"sample extra\" from the mixture distribution if it doesn't\n # already specify a probs vector for each batch coordinate.\n # We only support this kind of reduced broadcasting, i.e., there is exactly\n # one probs vector for all batch dims or one for each.\n stream = seed_stream.SeedStream(\n seed, salt=\"PoissonLogNormalQuadratureCompound\")\n ids = self._mixture_distribution.sample(\n sample_shape=concat_vectors(\n [n],\n distribution_util.pick_vector(\n self.mixture_distribution.is_scalar_batch(),\n [batch_size],\n np.int32([]))),\n seed=stream())\n # We need to flatten batch dims in case mixture_distribution has its own\n # batch dims.\n ids = tf.reshape(\n ids,\n shape=concat_vectors([n],\n distribution_util.pick_vector(\n self.is_scalar_batch(), np.int32([]),\n np.int32([-1]))))\n\n # Stride `quadrature_size` for `batch_size` number of times.\n offset = tf.range(\n start=0,\n limit=batch_size * self._quadrature_size,\n delta=self._quadrature_size,\n dtype=ids.dtype)\n ids += offset\n rate = tf.gather(tf.reshape(self.distribution.rate, shape=[-1]), ids)\n rate = tf.reshape(\n rate, shape=concat_vectors([n], self.batch_shape_tensor()))\n return tf.random.poisson(lam=rate, shape=[], dtype=self.dtype, seed=seed)\n\n def _log_prob(self, x):\n return tf.reduce_logsumexp(\n input_tensor=(self.mixture_distribution.logits +\n self.distribution.log_prob(x[..., tf.newaxis])),\n axis=-1)\n\n def _mean(self):\n return tf.exp(\n tf.reduce_logsumexp(\n input_tensor=self.mixture_distribution.logits +\n self.distribution.log_rate,\n axis=-1))\n\n def _variance(self):\n return tf.exp(self._log_variance())\n\n def _stddev(self):\n return tf.exp(0.5 * self._log_variance())\n\n def _log_variance(self):\n # Following calculation is based on law of total variance:\n #\n # Var[Z] = E[Var[Z | V]] + Var[E[Z | V]]\n #\n # where,\n #\n # Z|v ~ interpolate_affine[v](distribution)\n # V ~ mixture_distribution\n #\n # thus,\n #\n # E[Var[Z | V]] = sum{ prob[d] Var[d] : d=0, ..., deg-1 }\n # Var[E[Z | V]] = sum{ prob[d] (Mean[d] - Mean)**2 : d=0, ..., deg-1 }\n v = tf.stack(\n [\n # log(self.distribution.variance()) = log(Var[d]) = log(rate[d])\n self.distribution.log_rate,\n # log((Mean[d] - Mean)**2)\n 2. * tf.math.log(\n tf.abs(self.distribution.mean() -\n self._mean()[..., tf.newaxis])),\n ],\n axis=-1)\n return tf.reduce_logsumexp(\n input_tensor=self.mixture_distribution.logits[..., tf.newaxis] + v,\n axis=[-2, -1])\n\n\ndef concat_vectors(*args):\n \"\"\"Concatenates input vectors, statically if possible.\"\"\"\n args_ = [tf.get_static_value(x) for x in args]\n if any(vec is None for vec in args_):\n return tf.concat(args, axis=0)\n return [val for vec in args_ for val in vec]\n"
] | [
[
"tensorflow.random.poisson",
"tensorflow.reduce_logsumexp",
"tensorflow.reshape",
"tensorflow.ones",
"tensorflow.linspace",
"tensorflow.name_scope",
"tensorflow.convert_to_tensor",
"tensorflow.concat",
"tensorflow.math.log",
"tensorflow.transpose",
"numpy.polynomial.hermite.hermgauss",
"tensorflow.shape",
"numpy.int32",
"tensorflow.cast",
"tensorflow.TensorShape",
"numpy.linalg.norm",
"tensorflow.zeros",
"tensorflow.broadcast_static_shape",
"tensorflow.range",
"numpy.sqrt",
"tensorflow.get_static_value"
]
] |
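The NumPy portion of `quadrature_scheme_lognormal_gauss_hermite` above is the classical Gauss-Hermite change of variables. A TensorFlow-free sketch of just that step, checked against the closed form E[exp(X)] = exp(scale^2 / 2) for a standard normal X:

```python
import numpy as np

# TensorFlow-free sketch of the NumPy part of
# quadrature_scheme_lognormal_gauss_hermite above.
def lognormal_gauss_hermite(loc, scale, quadrature_size):
    grid, probs = np.polynomial.hermite.hermgauss(deg=quadrature_size)
    probs = probs / np.linalg.norm(probs, ord=1)      # weights normalized to sum to 1
    # Change of variables: quadrature nodes become log-rates loc + sqrt(2)*scale*x_i.
    log_rates = loc + np.sqrt(2.0) * scale * grid
    return log_rates, probs

log_rates, probs = lognormal_gauss_hermite(loc=0.0, scale=1.0, quadrature_size=8)

# Sanity check: quadrature estimate of E[exp(X)] versus the exact value exp(0.5).
print(np.sum(probs * np.exp(log_rates)), np.exp(0.5))
```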
endymecy/NDIToolbox | [
"f7a0a642b4a778d9d0c131871f4bfb9822ecb3da"
] | [
"models/tests/test_dataio.py"
] | [
"\"\"\"test_dataio.py - tests the dataio module\n\nChris R. Coughlin (TRI/Austin, Inc.)\n\"\"\"\n\n__author__ = 'Chris R. Coughlin'\n\nimport unittest\nfrom models import dataio\nfrom controllers import pathfinder\nfrom utils.skiptest import skipIfModuleNotInstalled\nimport h5py\nimport numpy as np\nimport numpy.testing\nimport scipy.misc\nimport os\nimport random\n\n\nclass TestDataIO(unittest.TestCase):\n \"\"\"Tests Data IO functions\"\"\"\n\n def setUp(self):\n self.sample_data = np.array(self.random_data())\n self.sample_data_basename = \"sample.dat\"\n self.sample_data_file = os.path.join(os.path.dirname(__file__),\n self.sample_data_basename)\n with h5py.File(self.sample_data_file, 'w') as fidout:\n fidout.create_dataset(self.sample_data_basename, data=self.sample_data)\n\n def random_data(self):\n \"\"\"Returns a list of random data\"\"\"\n return [random.uniform(-100, 100) for i in range(25)]\n\n def test_save_data(self):\n \"\"\"Verify save_data function saves NumPy array to disk\"\"\"\n sample_filename = \"test_savedata.dat\"\n sample_path = os.path.join(os.path.dirname(__file__), sample_filename)\n dataio.save_data(sample_path, self.sample_data)\n self.assertTrue(os.path.exists(sample_path + \".hdf5\"))\n with h5py.File(sample_path + \".hdf5\", \"r\") as fidin:\n froot, ext = os.path.splitext(os.path.basename(sample_filename))\n for key in fidin.keys():\n if key.startswith(froot):\n read_data = fidin[key][...]\n self.assertTrue(np.array_equal(self.sample_data, read_data))\n if os.path.exists(sample_path + \".hdf5\"):\n os.remove(sample_path + \".hdf5\")\n\n def test_get_data(self):\n \"\"\"Verify get_data function returns a NumPy array\"\"\"\n read_data = dataio.get_data(self.sample_data_file)\n self.assertTrue(np.array_equal(self.sample_data, read_data))\n\n def test_get_data_slice(self):\n \"\"\"Verify get_data function returns a slice if specified\"\"\"\n slice_idx = np.s_[5:15]\n read_hyperslab = dataio.get_data(self.sample_data_file, slice_idx)\n self.assertTrue(np.array_equal(self.sample_data[slice_idx], read_hyperslab))\n\n def test_get_txt_data(self):\n \"\"\"Verify retrieval of ASCII delimited data\"\"\"\n sample_data_file = os.path.join(os.path.dirname(__file__), 'support_files',\n '1.25 from hole Single Column.asc')\n assert(os.path.exists(sample_data_file))\n import_params = {'delimiter': None}\n expected_data = np.loadtxt(sample_data_file, delimiter=import_params['delimiter'])\n retrieved_data = dataio.get_txt_data(sample_data_file, **import_params)\n self.assertTrue(np.array_equal(expected_data, retrieved_data))\n\n def test_import_txt(self):\n \"\"\"Verify import of ASCII delimited data files\"\"\"\n sample_data_file = os.path.join(os.path.dirname(__file__), 'support_files',\n '1.25 from hole Single Column.asc')\n assert(os.path.exists(sample_data_file))\n import_params = {'delimiter': None}\n expected_data = np.loadtxt(sample_data_file, delimiter=import_params['delimiter'])\n dataio.import_txt(sample_data_file, **import_params)\n dest_file = os.path.join(pathfinder.data_path(),\n os.path.basename(sample_data_file) + \".hdf5\")\n self.assertTrue(os.path.exists(dest_file))\n with h5py.File(dest_file, \"r\") as fidin:\n root, ext = os.path.splitext(os.path.basename(dest_file))\n for key in fidin.keys():\n if key.startswith(root):\n read_data = fidin[key][...]\n self.assertTrue(np.array_equal(expected_data, read_data))\n try:\n if os.path.exists(dest_file):\n os.remove(dest_file)\n except WindowsError: # file in use\n pass\n\n def test_export_txt(self):\n 
\"\"\"Verify export of data to delimited ASCII\"\"\"\n # Use integer data to avoid the floating point conversion to/from files\n sample_data = self.sample_data.astype(np.int64)\n sample_data_file = os.path.join(os.path.dirname(__file__), 'support_files',\n 'sample.hdf5')\n dest_file = os.path.join(os.path.dirname(__file__), 'support_files',\n 'sample.txt')\n with h5py.File(sample_data_file, \"w\") as fidout:\n fidout.create_dataset(os.path.basename(sample_data_file), data=sample_data)\n export_params = {'delimiter': ','}\n dataio.export_txt(dest_file, sample_data_file, **export_params)\n retrieved_data = np.genfromtxt(dest_file, delimiter=export_params['delimiter'])\n self.assertTrue(np.array_equal(sample_data, retrieved_data))\n try:\n if os.path.exists(sample_data_file):\n os.remove(sample_data_file)\n if os.path.exists(dest_file):\n os.remove(dest_file)\n except WindowsError: # file in use\n pass\n\n def test_export3D_txt(self):\n \"\"\"Verify export of 3D data to delimited ASCII\"\"\"\n x_size = 5\n y_size = 4\n z_size = 6\n sample_data = np.empty((y_size, x_size, z_size))\n for xidx in range(x_size):\n for yidx in range(y_size):\n for zidx in range(z_size):\n sample_data[yidx, xidx, zidx] = int(random.uniform(-100, 100))\n sample_data_file = os.path.join(os.path.dirname(__file__), 'support_files', 'sample3d.hdf5')\n dest_file = os.path.join(os.path.dirname(__file__), 'support_files', 'sample3d.txt')\n with h5py.File(sample_data_file, \"w\") as fidout:\n fidout.create_dataset(os.path.basename(sample_data_file), data=sample_data)\n export_params = {'delimiter': ','}\n dataio.export_txt(dest_file, sample_data_file, **export_params)\n retrieved_data = np.empty(sample_data.shape)\n with open(dest_file, \"rb\") as fidin:\n zidx = 0\n for line in fidin:\n if not line.startswith('#'):\n x, y, z = line.split(export_params['delimiter'])\n x = int(x)\n y = int(y)\n z = float(z.strip())\n retrieved_data[y, x, zidx] = z\n zidx += 1\n if zidx > sample_data.shape[2]-1:\n zidx = 0\n self.assertTrue(np.array_equal(sample_data, retrieved_data))\n try:\n if os.path.exists(sample_data_file):\n os.remove(sample_data_file)\n if os.path.exists(dest_file):\n os.remove(dest_file)\n except WindowsError: # file in use\n pass\n\n @skipIfModuleNotInstalled(\"dicom\")\n def test_get_dicom_data(self):\n \"\"\"Verify retrieval of DICOM / DICONDE data\"\"\"\n import dicom\n diconde_folder = os.path.join(os.path.dirname(__file__), 'support_files')\n for root, dirs, files in os.walk(diconde_folder):\n for fname in files:\n dicom_data_file = os.path.join(root, fname)\n basename, ext = os.path.splitext(dicom_data_file)\n # Simple check to ensure we're looking at DICOM files\n if ext.lower() == '.dcm':\n dicom_data = dicom.read_file(dicom_data_file)\n dicom_arr = dicom_data.pixel_array\n retrieved_data = dataio.get_dicom_data(dicom_data_file)\n self.assertTrue(np.array_equal(dicom_arr, retrieved_data))\n\n @skipIfModuleNotInstalled(\"dicom\")\n def test_import_dicom(self):\n \"\"\"Verify import of DICOM / DICONDE data\"\"\"\n # Load the ASTM DICONDE example files,\n # save, then ensure the resulting arrays\n # are identical\n import dicom\n\n diconde_folder = os.path.join(os.path.dirname(__file__), 'support_files')\n for root, dirs, files in os.walk(diconde_folder):\n for fname in files:\n dicom_data_file = os.path.join(root, fname)\n basename, ext = os.path.splitext(dicom_data_file)\n # Simple check to ensure we're looking at DICOM files\n if ext.lower() == '.dcm':\n dicom_data = dicom.read_file(dicom_data_file)\n 
dicom_arr = dicom_data.pixel_array\n dataio.import_dicom(dicom_data_file)\n dest_file = os.path.join(pathfinder.data_path(),\n os.path.basename(dicom_data_file) + \".hdf5\")\n self.assertTrue(os.path.exists(dest_file))\n with h5py.File(dest_file, \"r\") as fidin:\n froot, ext = os.path.splitext(os.path.basename(dest_file))\n for key in fidin.keys():\n if key.startswith(froot):\n read_data = fidin[key][...]\n self.assertTrue(np.array_equal(dicom_arr, read_data))\n try:\n if os.path.exists(dest_file):\n os.remove(dest_file)\n except WindowsError: # File in use\n pass\n\n def test_get_img_data(self):\n \"\"\"Verify retrieval of bitmap data\"\"\"\n sample_data_file = os.path.join(os.path.dirname(__file__), 'support_files',\n 'austin_sky320x240.jpg')\n assert(os.path.exists(sample_data_file))\n expected_data = scipy.misc.imread(sample_data_file, flatten=True)\n retrieved_data = dataio.get_img_data(sample_data_file, flatten=True)\n self.assertTrue(np.array_equal(expected_data, retrieved_data))\n\n def test_import_img(self):\n \"\"\"Verify import of images\"\"\"\n sample_data_file = os.path.join(os.path.dirname(__file__), 'support_files',\n 'austin_sky320x240.jpg')\n assert(os.path.exists(sample_data_file))\n expected_data = scipy.misc.imread(sample_data_file, flatten=True)\n dataio.import_img(sample_data_file, flatten=True)\n dest_file = os.path.join(pathfinder.data_path(),\n os.path.basename(sample_data_file) + \".hdf5\")\n self.assertTrue(os.path.exists(dest_file))\n with h5py.File(dest_file, \"r\") as fidin:\n root, ext = os.path.splitext(os.path.basename(dest_file))\n for key in fidin.keys():\n if key.startswith(root):\n read_data = fidin[key][...]\n self.assertTrue(np.array_equal(expected_data, read_data))\n try:\n if os.path.exists(dest_file):\n os.remove(dest_file)\n except WindowsError: # file in use\n pass\n\n def test_get_utwin_tof_data(self):\n \"\"\"Verify retrieval of UTWin Time Of Flight data through convenience function\"\"\"\n sample_data_file = os.path.join(os.path.dirname(__file__), 'support_files', 'CScanData.csc')\n tof_data_file = os.path.join(os.path.dirname(__file__), 'support_files', 'CScanData_tofdata.npy')\n tof_resolution = 0.01\n assert(os.path.exists(tof_data_file))\n expected_tof_data = np.load(tof_data_file) * tof_resolution\n returned_tof_data = dataio.get_utwin_tof_data(sample_data_file)[0]\n numpy.testing.assert_array_almost_equal(expected_tof_data, returned_tof_data, decimal=3)\n\n def test_import_utwin_tof(self):\n \"\"\"Verify import of UTWin Time Of Flight data through convenience function\"\"\"\n tof_data_file = os.path.join(os.path.dirname(__file__), 'support_files', 'CScanData_tofdata.npy')\n sample_data_file = os.path.join(os.path.dirname(__file__), 'support_files', 'CScanData.csc')\n tof_resolution = 0.01\n expected_tof_data = np.load(tof_data_file) * tof_resolution\n root, ext = os.path.splitext(os.path.basename(sample_data_file))\n dest_file = os.path.join(pathfinder.data_path(),\n os.path.basename(root) + \"_tofdata0.csc.hdf5\")\n dataio.import_utwin_tof(sample_data_file)\n self.assertTrue(os.path.exists(dest_file))\n with h5py.File(dest_file, \"r\") as fidin:\n root, ext = os.path.splitext(os.path.basename(dest_file))\n for key in fidin.keys():\n if key.startswith(root):\n read_data = fidin[key][...]\n numpy.testing.assert_array_almost_equal(expected_tof_data, read_data, decimal=3)\n try:\n if os.path.exists(dest_file):\n os.remove(dest_file)\n except WindowsError: # file in use\n pass\n\n def test_get_utwin_amp_data(self):\n \"\"\"Verify retrieval 
of UTWin amplitude data through convenience function\"\"\"\n sample_data_file = os.path.join(os.path.dirname(__file__), 'support_files', 'CScanData.csc')\n amp_data_file = os.path.join(os.path.dirname(__file__), 'support_files', 'CScanData_ampdata.npy')\n assert(os.path.exists(amp_data_file))\n expected_tof_data = np.load(amp_data_file)\n self.assertTrue(np.array_equal(expected_tof_data, dataio.get_utwin_amp_data(sample_data_file)[0]))\n\n def test_import_utwin_amp(self):\n \"\"\"Verify import of UTWin amplitude data through convenience function\"\"\"\n amp_data_file = os.path.join(os.path.dirname(__file__), 'support_files', 'CScanData_ampdata.npy')\n sample_data_file = os.path.join(os.path.dirname(__file__), 'support_files', 'CScanData.csc')\n expected_amp_data = np.load(amp_data_file)\n root, ext = os.path.splitext(os.path.basename(sample_data_file))\n dest_file = os.path.join(pathfinder.data_path(),\n os.path.basename(root) + \"_ampdata0.csc.hdf5\")\n dataio.import_utwin_amp(sample_data_file)\n self.assertTrue(os.path.exists(dest_file))\n with h5py.File(dest_file, \"r\") as fidin:\n root, ext = os.path.splitext(os.path.basename(dest_file))\n for key in fidin.keys():\n if key.startswith(root):\n read_data = fidin[key][...]\n self.assertTrue(np.array_equal(expected_amp_data, read_data))\n try:\n if os.path.exists(dest_file):\n os.remove(dest_file)\n except WindowsError: # file in use\n pass\n\n def test_get_utwin_data(self):\n \"\"\"Verify returning UTWin data\"\"\"\n sample_data_file = os.path.join(os.path.dirname(__file__), 'support_files', 'CScanData.csc')\n sample_reader = dataio.UTWinCScanDataFile(sample_data_file)\n sample_reader.read_data()\n expected_data = sample_reader.data\n returned_data = dataio.get_utwin_data(sample_data_file)\n for datatype in expected_data:\n self.assertTrue(np.array_equal(expected_data[datatype], returned_data[datatype]))\n\n def test_get_winspect_data(self):\n \"\"\"Verify retrieval of Winspect data through convenience function\"\"\"\n sample_data_file = os.path.join(os.path.dirname(__file__), 'support_files', 'sample_data.sdt')\n assert(os.path.exists(sample_data_file))\n scan_reader = dataio.WinspectReader(sample_data_file)\n expected_data_list = scan_reader.get_winspect_data()\n retrieved_data_list = dataio.get_winspect_data(sample_data_file)\n self.assertEqual(len(expected_data_list), len(retrieved_data_list))\n for data_array_idx in range(len(expected_data_list)):\n self.assertTrue(np.array_equal(expected_data_list[data_array_idx].data, retrieved_data_list[data_array_idx].data))\n\n def test_import_winspect(self):\n \"\"\"Verify import of Winspect data through convenience function\"\"\"\n sample_data_file = os.path.join(os.path.dirname(__file__), 'support_files', 'sample_data.sdt')\n assert(os.path.exists(sample_data_file))\n output_basename, ext = os.path.splitext(sample_data_file)\n amp_dest_file = os.path.join(pathfinder.data_path(),\n os.path.basename(output_basename) + \"_ampdata0\" + ext + \".hdf5\")\n waveform_dest_file = os.path.join(pathfinder.data_path(),\n os.path.basename(output_basename) + \"_waveformdata0\" + ext + \".hdf5\")\n dataio.import_winspect(sample_data_file)\n expected_data_list = dataio.get_winspect_data(sample_data_file)\n for dataset in expected_data_list:\n if \"amplitude\" in dataset.data_type:\n dest_file = amp_dest_file\n elif \"waveform\" in dataset.data_type:\n dest_file = waveform_dest_file\n with h5py.File(dest_file, \"r\") as fidin:\n root, ext = os.path.splitext(os.path.basename(dest_file))\n for key in 
fidin.keys():\n if key.startswith(root):\n read_data = fidin[key][...]\n self.assertTrue(np.array_equal(dataset.data, read_data))\n try:\n if os.path.exists(dest_file):\n os.remove(dest_file)\n except WindowsError: # file in use\n pass\n\n def tearDown(self):\n if os.path.exists(self.sample_data_file + \".hdf5\"):\n os.remove(self.sample_data_file + \".hdf5\")\n if os.path.exists(self.sample_data_file):\n os.remove(self.sample_data_file)\n\n\nclass TestUTWinCScanReader(unittest.TestCase):\n \"\"\"Tests the UTWinCScanReader class\"\"\"\n\n def setUp(self):\n self.sample_data_file = os.path.join(os.path.dirname(__file__), 'support_files', 'CScanData.csc')\n assert(os.path.exists(self.sample_data_file))\n self.cscan_reader = dataio.UTWinCscanReader()\n\n def test_basicfile_parameters(self):\n \"\"\"Verify the basic parameters of the CSC file format are correct\"\"\"\n self.assertEqual(self.cscan_reader.header_string_length, 15)\n expected_message_ids = {'CSCAN_DATA': 2300,\n 'WAVEFORM_pre240': 2016,\n 'WAVEFORM_post240': 2303,\n 'UTSAVE_UTCD0': 2010,\n 'UTSAVE_UTCD1': 2011,\n 'UTSAVE_UTCD2': 2012,\n 'UTSAVE_UTCD4': 2014,\n 'UTSAVE_UTPro0': 253,\n 'PROJECT': 301,\n 'UTSAVE_UTHead': 100,\n 'UTSAVE_UTCScan0': 750,\n 'UTSAVE_UTCD10': 2020,\n 'UTSAVE_UTCScan3': 753}\n self.assertDictEqual(expected_message_ids, self.cscan_reader.message_ids)\n\n def test_is_cscanfile(self):\n \"\"\"Verify reader correctly identifies CSC files\"\"\"\n self.assertTrue(self.cscan_reader.is_cscanfile(self.sample_data_file))\n\n def test_msg_info(self):\n \"\"\"Verify reader correctly returns message ID and length\"\"\"\n with open(self.sample_data_file, \"rb\") as fidin:\n fidin.seek(self.cscan_reader.header_string_length)\n first_message = (100, 14)\n self.assertTupleEqual(first_message, self.cscan_reader.msg_info(fidin))\n\n def test_find_message(self):\n \"\"\"Verify find_message returns the expected file positions\"\"\"\n expected_file_positions = ((2014, 38037),\n (2011, 38059),\n (2010, 38003),\n (2012, 422075),\n (2010, 38003),\n (2010, 38003))\n for message_id, expected_pos in expected_file_positions:\n self.assertEqual(self.cscan_reader.find_message(self.sample_data_file, message_id), expected_pos)\n\n def test_find_blocks(self):\n \"\"\"Verify find_blocks returns the file positions for the specified message ID\"\"\"\n # Search for UTSave_UTAD0 (Message ID 950) - contains A/D settings for each channel\n expected_filed_positions = [173, 920, 1667, 2414, 3161, 3908, 4655, 5402]\n self.assertListEqual(expected_filed_positions, self.cscan_reader.find_blocks(self.sample_data_file, 950))\n\n def test_read_field(self):\n \"\"\"Verify read_field correctly parses the specified message block\"\"\"\n start_pos = self.cscan_reader.find_message(self.sample_data_file, 950)\n self.assertTrue(start_pos != -1)\n with open(self.sample_data_file, \"rb\") as fidin:\n fidin.seek(start_pos)\n # Read a sample of A/D settings for the first channel\n expected_ad_delay = np.fromfile(fidin, self.cscan_reader.field_sizes['float'], 1)[0]\n expected_ad_width = np.fromfile(fidin, self.cscan_reader.field_sizes['float'], 1)[0]\n expected_ad_blanking_width = np.fromfile(fidin, self.cscan_reader.field_sizes['float'], 1)[0]\n expected_ad_gain = np.fromfile(fidin, self.cscan_reader.field_sizes['float'], 1)[0]\n expected_ad_offset = np.fromfile(fidin, self.cscan_reader.field_sizes['float'], 1)[0]\n expected_ad_trigger_level = np.fromfile(fidin, self.cscan_reader.field_sizes['float'], 1)[0]\n expected_ad_trigger_rate = np.fromfile(fidin, 
self.cscan_reader.field_sizes['float'], 1)[0]\n with open(self.sample_data_file, \"rb\") as fidin:\n fidin.seek(start_pos)\n ad_delay = self.cscan_reader.read_field(fidin, self.cscan_reader.field_sizes['float'])\n ad_width = self.cscan_reader.read_field(fidin, self.cscan_reader.field_sizes['float'])\n ad_blanking_width = self.cscan_reader.read_field(fidin, self.cscan_reader.field_sizes['float'])\n ad_gain = self.cscan_reader.read_field(fidin, self.cscan_reader.field_sizes['float'])\n ad_offset = self.cscan_reader.read_field(fidin, self.cscan_reader.field_sizes['float'])\n ad_trigger_level = self.cscan_reader.read_field(fidin, self.cscan_reader.field_sizes['float'])\n ad_trigger_rate = self.cscan_reader.read_field(fidin, self.cscan_reader.field_sizes['float'])\n self.assertAlmostEqual(expected_ad_delay, ad_delay)\n self.assertAlmostEqual(expected_ad_width, ad_width)\n self.assertAlmostEqual(expected_ad_blanking_width, ad_blanking_width)\n self.assertAlmostEqual(expected_ad_gain, ad_gain)\n self.assertAlmostEqual(expected_ad_offset, ad_offset)\n self.assertAlmostEqual(expected_ad_trigger_level, ad_trigger_level)\n self.assertAlmostEqual(expected_ad_trigger_rate, ad_trigger_rate)\n\n\nclass TestUTWinCScanDataFile(unittest.TestCase):\n \"\"\"Tests the UTWinCScanDataFile class.\n\n Note: the sample UTWin data files available to TRI as of May 2013 are export-controlled and can't be\n distributed, which in turn limits the tests that can be performed. The UTWinCScanDataFile class has been\n tested against real inspection data, however without additional sample files you should consider the code\n experimental. For more details, contact TRI.\n \"\"\"\n\n def setUp(self):\n self.sample_data_file = os.path.join(os.path.dirname(__file__), 'support_files', 'CScanData.csc')\n self.cscan_datafile = dataio.UTWinCScanDataFile(self.sample_data_file)\n\n def test_get_scan_version(self):\n \"\"\"Verify get_scan_version returns the correct scan version\"\"\"\n self.assertEqual(self.cscan_datafile.get_scan_version(), 117)\n\n def test_read_scan_properties(self):\n \"\"\"Verify read_scan_properties correctly compiles required scan settings\"\"\"\n # Read a sample of the most important properties, verify read\n important_scan_properties = {'n_height':320,\n 'n_width':600,\n 'rf_length':2994,\n 'channel_active':[1, 0, 0, 0, 0, 0, 0, 0]}\n for idx in important_scan_properties.keys():\n prop = important_scan_properties[idx]\n if not isinstance(prop, list):\n self.assertEqual(prop, self.cscan_datafile.scan_properties[idx])\n else:\n self.assertListEqual(prop, self.cscan_datafile.scan_properties[idx])\n\n def test_read_tof_data(self):\n \"\"\"Verify read_tof_data correctly reads Time Of Flight data\"\"\"\n # Verify one TOF dataset\n tof_data_file = os.path.join(os.path.dirname(__file__), 'support_files', 'CScanData_tofdata.npy')\n tof_resolution = 0.01\n assert(os.path.exists(tof_data_file))\n expected_tof_data = np.load(tof_data_file) * tof_resolution\n self.cscan_datafile.read_tof_data()\n numpy.testing.assert_array_almost_equal(expected_tof_data, self.cscan_datafile.data['tof'][0], decimal=3)\n\n def test_read_amplitude_data(self):\n \"\"\"Verify read_amplitude_data correctly reads amplitude data\"\"\"\n amp_data_file = os.path.join(os.path.dirname(__file__), 'support_files', 'CScanData_ampdata.npy')\n assert(os.path.exists(amp_data_file))\n expected_amp_data = np.load(amp_data_file)\n self.cscan_datafile.read_amplitude_data()\n self.assertTrue(np.array_equal(expected_amp_data, 
self.cscan_datafile.data['amplitude'][0]))\n\n def test_import_tof(self):\n \"\"\"Verify import of Time Of Flight data\"\"\"\n tof_data_file = os.path.join(os.path.dirname(__file__), 'support_files', 'CScanData_tofdata.npy')\n tof_resolution = 0.01\n csc_data_file = os.path.join(os.path.dirname(__file__), 'support_files', 'CScanData')\n assert(os.path.exists(tof_data_file))\n expected_tof_data = np.load(tof_data_file) * tof_resolution\n dest_file = os.path.join(pathfinder.data_path(),\n os.path.basename(csc_data_file) + \"_tofdata0.csc.hdf5\")\n self.cscan_datafile.import_tof_data()\n self.assertTrue(os.path.exists(dest_file))\n with h5py.File(dest_file, \"r\") as fidin:\n root, ext = os.path.splitext(os.path.basename(dest_file))\n for key in fidin.keys():\n if key.startswith(root):\n read_data = fidin[key][...]\n numpy.testing.assert_array_almost_equal(expected_tof_data, read_data, decimal=3)\n try:\n if os.path.exists(dest_file):\n os.remove(dest_file)\n except WindowsError: # file in use\n pass\n\n def test_import_amp(self):\n \"\"\"Verify import of amplitude data\"\"\"\n amp_data_file = os.path.join(os.path.dirname(__file__), 'support_files', 'CScanData_ampdata.npy')\n csc_data_file = os.path.join(os.path.dirname(__file__), 'support_files', 'CScanData')\n assert(os.path.exists(amp_data_file))\n expected_amp_data = np.load(amp_data_file)\n dest_file = os.path.join(pathfinder.data_path(),\n os.path.basename(csc_data_file) + \"_ampdata0.csc.hdf5\")\n self.cscan_datafile.import_amplitude_data()\n self.assertTrue(os.path.exists(dest_file))\n with h5py.File(dest_file, \"r\") as fidin:\n root, ext = os.path.splitext(os.path.basename(dest_file))\n for key in fidin.keys():\n if key.startswith(root):\n read_data = fidin[key][...]\n self.assertTrue(np.array_equal(expected_amp_data, read_data))\n try:\n if os.path.exists(dest_file):\n os.remove(dest_file)\n except WindowsError: # file in use\n pass\n\n\nclass TestWinspectReader(unittest.TestCase):\n \"\"\"Tests the WinspectReader class.\"\"\"\n\n def setUp(self):\n self.sample_data_file = os.path.join(os.path.dirname(__file__), 'support_files',\n 'sample_data.sdt')\n assert(os.path.exists(self.sample_data_file))\n self.scan_reader = dataio.WinspectReader(self.sample_data_file)\n\n def test_find_numbers(self):\n \"\"\"Verify find_numbers static method correctly pulls numbers from strings\"\"\"\n float_strings = {\"0.000000 mm\":0.0, \"0.775995 Usec\":0.775995}\n int_strings = {\"35 18 0 22 3 112 \":[35, 18, 0, 22, 3, 112],\n \"Number of Sample Points : 3500\":3500}\n bad_strings = {\"Ramshackle\":[], \"\":[]}\n for string in float_strings:\n self.assertAlmostEqual(float_strings[string], self.scan_reader.find_numbers(string))\n\n def test_get_winspect_data(self):\n \"\"\"Verify returning the list of arrays read from the data file\"\"\"\n data_reader = dataio.WinspectDataFile(self.sample_data_file)\n data_reader.read_data()\n expected_data_list = data_reader.datasets\n retrieved_data_list = self.scan_reader.get_winspect_data()\n self.assertEqual(len(expected_data_list), len(retrieved_data_list))\n for data_array_idx in range(len(expected_data_list)):\n self.assertTrue(np.array_equal(expected_data_list[data_array_idx].data, retrieved_data_list[data_array_idx].data))\n\n def test_import_winspect(self):\n \"\"\"Verify importing datasets\"\"\"\n output_basename, ext = os.path.splitext(self.sample_data_file)\n amp_dest_file = os.path.join(pathfinder.data_path(),\n os.path.basename(output_basename) + \"_ampdata0\" + ext + \".hdf5\")\n waveform_dest_file = 
os.path.join(pathfinder.data_path(),\n os.path.basename(output_basename) + \"_waveformdata0\" + ext + \".hdf5\")\n self.scan_reader.import_winspect()\n data_reader = dataio.WinspectDataFile(self.sample_data_file)\n data_reader.read_data()\n expected_data_list = data_reader.datasets\n for dataset in expected_data_list:\n if \"amplitude\" in dataset.data_type:\n dest_file = amp_dest_file\n elif \"waveform\" in dataset.data_type:\n dest_file = waveform_dest_file\n with h5py.File(dest_file, \"r\") as fidin:\n root, ext = os.path.splitext(os.path.basename(dest_file))\n for key in fidin.keys():\n if key.startswith(root):\n read_data = fidin[key][...]\n self.assertTrue(np.array_equal(dataset.data, read_data))\n try:\n if os.path.exists(dest_file):\n os.remove(dest_file)\n except WindowsError: # file in use\n pass\n\n\nif __name__ == \"__main__\":\n random.seed()\n unittest.main()"
] | [
[
"numpy.load",
"numpy.fromfile",
"numpy.empty",
"numpy.array_equal",
"numpy.genfromtxt",
"numpy.loadtxt"
]
] |
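The test_dataio tests in the row above repeatedly exercise one pattern: write a NumPy array to an HDF5 file with h5py, read it back by key, and compare with numpy.array_equal. A minimal standalone sketch of that round-trip, assuming an illustrative file name and dataset key that are not taken from the repo:

```python
import os
import h5py
import numpy as np

def roundtrip_equal(arr, path="roundtrip_demo.hdf5", key="payload"):
    """Write arr to an HDF5 dataset, read it back, and compare exactly."""
    with h5py.File(path, "w") as fidout:
        fidout.create_dataset(key, data=arr)    # write the array
    with h5py.File(path, "r") as fidin:
        read_back = fidin[key][...]             # read the full dataset back as an ndarray
    os.remove(path)                             # clean up, as the tests do in tearDown
    return np.array_equal(arr, read_back)

print(roundtrip_equal(np.arange(25, dtype=np.float64)))  # expected: True
```

The tests wrap the same check in setUp/tearDown bookkeeping and per-format import helpers; this strips it to the bare write/read/compare cycle.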
Kohulan/Decimer-Python | [
"17373e02faedb28ba94742f61001bb3c6b015798"
] | [
"Networks/4_layer_net_Parameter_optimization.py"
] | [
"'''\r\n * This Software is under the MIT License\r\n * Refer to LICENSE or https://opensource.org/licenses/MIT for more information\r\n * Written by Kohulan Rajan\r\n * © 2019\r\n'''\r\n#Parallelized datareading network\r\n\r\nimport tensorflow as tf\r\nimport os\r\nimport sys\r\nimport numpy as np\r\nimport matplotlib as mpl\r\nimport csv\r\nmpl.use('Agg')\r\nimport matplotlib.pyplot as plt\r\nfrom datetime import datetime\r\nfrom numpy import array\r\nimport pickle\r\nimport lz4.frame as lz\r\nimport multiprocessing\r\nnp.set_printoptions(threshold=np.nan)\r\n\r\n#Set the Desired Gpu from the cluster\r\nos.environ[\"CUDA_DEVICE_ORDER\"]=\"PCI_BUS_ID\"\r\nos.environ[\"CUDA_VISIBLE_DEVICES\"]=\"0\"\r\n\r\n#Set Hidden neurons count\r\nhidden_neurons_list_I = [2,4,8,16,32,64,128,512,1024,2048,4096]\r\nhidden_neurons_list_II = [2,4,8,16,32,64,128,512,1024,2048,4096]\r\n\r\n#Set Batch Size\r\nbatch_sizer_list = [500,1000]\r\n\r\n#Set Learning rate\r\nlearning_rate_list = [0.001,0.003,0.005,0.007,0.009,0.01]\r\n\r\n#Paramter Optimizing loops\r\nfor hidden_neurons_I in range(len(hidden_neurons_list_I)):\r\n\tfor hidden_neurons_II in range(len(hidden_neurons_list_II)):\r\n\t\tfor batch_sizer in range(len(batch_sizer_list)):\r\n\t\t\tfor learning_rate_ in range(len(learning_rate_list)):\r\n\t\t\t\tf = open(\"/Results/Batch Size_{}_learning_rate_{}_hidden_neurons_{}_x_{}.txt\".format(batch_sizer_list[batch_sizer],learning_rate_list[learning_rate_],hidden_neurons_list_I[hidden_neurons_I],hidden_neurons_list_II[hidden_neurons_II]), 'w',0)\r\n\t\t\t\tsys.stdout = f\r\n\t\t\t\tprint (datetime.now().strftime('%Y/%m/%d %H:%M:%S'),\"Network Started\")\r\n\r\n\t\t\t\t#Data input from image data\r\n\r\n\t\t\t\t#labels\r\n\t\t\t\tdef label_data(is_test=False):\r\n\t\t\t\t\tdata_path = \"train\"\r\n\t\t\t\t\tif is_test:\r\n\t\t\t\t\t\tdata_path = \"test\"\r\n\t\t\t\t\tmyFile = open('/Data/Potential'+data_path+'_labels.csv',\"r\")\r\n\t\t\t\t\tlabels = []\r\n\t\t\t\t\tfor row in myFile:\r\n\t\t\t\t\t\tx = int(row.strip().split(\",\")[1])\r\n\t\t\t\t\t\tlabels.append(x)\r\n\t\t\t\t\tmyFile.close()\r\n\t\t\t\t\treturn np.asarray(labels)\r\n\r\n\t\t\t\ty_train = label_data()\r\n\t\t\t\ty_test = label_data(is_test=True)\r\n\r\n\t\t\t\tprint (datetime.now().strftime('%Y/%m/%d %H:%M:%S'),\"Labels loaded !!\")\r\n\r\n\t\t\t\t#Image array data\r\n\t\t\t\tTrain_Images = pickle.load( open(\"/Data/train_compressed.txt\",\"rb\"))\r\n\t\t\t\tTest_Images = pickle.load( open(\"/Data/test_compressed.txt\",\"rb\"))\r\n\t\t\t\ttrain_items = Train_Images.items()\r\n\t\t\t\ttest_items = Test_Images.items()\r\n\t\t\t\tprint (datetime.now().strftime('%Y/%m/%d %H:%M:%S'),\"Loading done! Train\",len(train_items))\r\n\t\t\t\tprint (datetime.now().strftime('%Y/%m/%d %H:%M:%S'),\"Loading done! 
Test\",len(test_items))\r\n\r\n\t\t\t\t#one hot vector transformation\r\n\t\t\t\tdef one_hot(y, n_labels):\r\n\t\t\t\t\tmat = np.zeros((len(y), n_labels))\r\n\t\t\t\t\tfor i, val in enumerate(y):\r\n\t\t\t\t\t\tmat[i, val] = 1\r\n\t\t\t\t\treturn mat\r\n\r\n\t\t\t\t# Parameters\r\n\t\t\t\tlearning_rate = learning_rate_list[learning_rate_]\r\n\t\t\t\ttraining_epochs = 20\r\n\t\t\t\tbatch_size = batch_sizer_list[batch_sizer]\r\n\t\t\t\tdisplay_step = 1\r\n\t\t\t\ttestbatch_size = 1000\r\n\t\t\t\ttotaltrain_batch = len(train_items)/batch_size\r\n\t\t\t\ttotaltest_batch = len(test_items)/testbatch_size\r\n\r\n\t\t\t\t# Network Parameters\r\n\t\t\t\tn_hidden_1 = hidden_neurons_list_I[hidden_neurons_I] # 1st layer number of neurons\r\n\t\t\t\tn_hidden_2 = hidden_neurons_list_II[hidden_neurons_II] # 1st layer number of neurons\r\n\t\t\t\tn_input = 256*256 # Data input (Image shape: 1024 * 1024)\r\n\t\t\t\tn_classes = 36 # Bond_Count\r\n\r\n\t\t\t\t# tf Graph input\r\n\t\t\t\tX = tf.placeholder(\"float\", [None, n_input])\r\n\t\t\t\tY = tf.placeholder(\"float\", [None, n_classes])\r\n\r\n\t\t\t\t# Store layers weight & bias\r\n\t\t\t\tweights = {\r\n\t\t\t\t\t'h1': tf.Variable(tf.random_normal([n_input, n_hidden_1])),\r\n\t\t\t\t\t'h2': tf.Variable(tf.random_normal([n_hidden_1, n_hidden_2])),\r\n\t\t\t\t\t'out': tf.Variable(tf.random_normal([n_hidden_2, n_classes]))\r\n\t\t\t\t}\r\n\t\t\t\tbiases = {\r\n\t\t\t\t\t'b1': tf.Variable(tf.random_normal([n_hidden_1])),\r\n\t\t\t\t\t'b2': tf.Variable(tf.random_normal([n_hidden_2])),\r\n\t\t\t\t\t'out': tf.Variable(tf.random_normal([n_classes]))\r\n\t\t\t\t}\r\n\r\n\t\t\t\t# Create model\r\n\t\t\t\tdef multilayer_perceptron(x):\r\n\t\t\t\t\t# Fully Connected Hidden Layers\r\n\t\t\t\t\tlayer_1 = tf.add(tf.matmul(x, weights['h1']), biases['b1'])\r\n\t\t\t\t\tlayer_1 = tf.nn.relu(layer_1)\r\n\t\t\t\t\t\r\n\t\t\t\t\tlayer_2 = tf.add(tf.matmul(layer_1, weights['h2']), biases['b2'])\r\n\t\t\t\t\tlayer_2 = tf.nn.relu(layer_2)\r\n\r\n\t\t\t\t\t# Output fully connected layer with a neuron for each class\r\n\t\t\t\t\tout_layer = tf.matmul(layer_2, weights['out']) + biases['out']\r\n\t\t\t\t\treturn out_layer\r\n\r\n\t\t\t\t# Construct model\r\n\t\t\t\tlogits = multilayer_perceptron(X)\r\n\r\n\t\t\t\t# Define loss and optimizer\r\n\t\t\t\tloss_op = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits=logits, labels=Y))\r\n\t\t\t\toptimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)\r\n\t\t\t\ttrain_op = optimizer.minimize(loss_op)\r\n\r\n\t\t\t\t# Initializing the variables\r\n\t\t\t\tinit = tf.global_variables_initializer()\r\n\r\n\t\t\t\t# encoding labels to one_hot vectors\r\n\t\t\t\ty_data_enc = one_hot(y_train, n_classes)\r\n\t\t\t\ty_test_enc = one_hot(y_test, n_classes)\r\n\r\n\t\t\t\t# Evaluate model (with test logits, for dropout to be disabled)\r\n\t\t\t\tcorrect_prediction = tf.equal(tf.argmax(logits, 1), tf.argmax(Y, 1))\r\n\t\t\t\taccuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\r\n\r\n\t\t\t\t# Evaluate the errors, mean,median and maximum errors\r\n\t\t\t\tpred = tf.argmax(logits, 1)\r\n\t\t\t\tpred_difference = tf.subtract(tf.argmax(Y, 1),tf.argmax(logits, 1))\r\n\t\t\t\tmean_error=[]\r\n\t\t\t\tmedian_error=[]\r\n\t\t\t\tmaximum_error=[]\r\n\t\t\t\t\r\n\t\t\t\t#Initiating data for plots\r\n\t\t\t\tloss_history = []\r\n\t\t\t\tacc_history = []\r\n\t\t\t\tvalid_history = []\r\n\t\t\t\tacc_valid_history = []\r\n\t\t\t\tdifference_history = []\r\n\t\t\t\ttest_loss_history = []\r\n\t\t\t\ttest_accuracy_history = 
[]\r\n\r\n\t\t\t\tprint (\"Data decompression for test batch started!\")\r\n\r\n\t\t\t\t#-----------------------------------------------------------------------------------------------------------------\r\n\t\t\t\tprint (\"Total available threads for multiprocessing: \",multiprocessing.cpu_count())\r\n\r\n\t\t\t\t#Decompressing Lines Test\r\n\t\t\t\tdef decomp_test(k):\r\n\t\t\t\t\tstrarraytest = (lz.decompress(Test_Images.values()[k]))\r\n\t\t\t\t\tfloatarray_test = np.fromstring(strarraytest, dtype=float, sep=',')\r\n\t\t\t\t\tfloatarray32_test = np.array(floatarray_test).astype(np.float32)\r\n\t\t\t\t\tencoded_array_test=(1.0-floatarray32_test/255.0)\r\n\t\t\t\t\treturn encoded_array_test\r\n\r\n\t\t\t\tpool_test = multiprocessing.Pool()\r\n\r\n\t\t\t\tdef decomp_train(j):\r\n\t\t\t\t\tstrarray = (lz.decompress(Train_Images.values()[j]))\r\n\t\t\t\t\tfloatarray = np.fromstring(strarray, dtype=float, sep=',')\r\n\t\t\t\t\tfloatarray32 = np.array(floatarray).astype(np.float32)\r\n\t\t\t\t\tencoded_array=(1.0-floatarray32/255.0)\r\n\t\t\t\t\treturn encoded_array\r\n\t\t\t\t\r\n\t\t\t\tpool_train = multiprocessing.Pool()\r\n\t\t\t\t\r\n\t\t\t\t#Network training\r\n\t\t\t\tprint (datetime.now().strftime('%Y/%m/%d %H:%M:%S'),\"Training Started\")\r\n\t\t\t\tconfig = tf.ConfigProto(allow_soft_placement=True)\r\n\t\t\t\tconfig.gpu_options.allow_growth = True\r\n\t\t\t\tconfig.gpu_options.allocator_type = 'BFC'\r\n\r\n\t\t\t\twith tf.Session(config=config) as sess:\r\n\t\t\t\t\tsess.run(init)\r\n\r\n\t\t\t\t\t# Training cycle\r\n\t\t\t\t\tfor epoch in range(training_epochs):\r\n\t\t\t\t\t\tavg_cost = 0\r\n\t\t\t\t\t\tprint (\"total batch\",totaltrain_batch)\r\n\t\t\t\t\t\tcounter=0\r\n\t\t\t\t\t\ttotal_correct_preds = 0\r\n\t\t\t\t\t\tTrain_loss_per_batch = 0\r\n\t\t\t\t\t\t\r\n\t\t\t\t\t\t# Loop over all batches\r\n\t\t\t\t\t\tfor l in range(totaltrain_batch):\r\n\t\t\t\t\t\t\tprint (\"bathc\",l)\r\n\t\t\t\t\t\t\tprint (\"tests\",\"count\",counter,\"batchsize\",counter+batch_size)\r\n\t\t\t\t\t\t\ttrain_batchX = pool_train.map(decomp_train,range(counter,counter+batch_size))\r\n\t\t\t\t\t\t\tbatch_x=train_batchX\r\n\t\t\t\t\t\t\tbatch_y=y_data_enc[counter:(counter+len(train_batchX))]\r\n\t\t\t\t\t\t\t_, c = sess.run([train_op, loss_op], feed_dict={X: batch_x,Y: batch_y})\r\n\t\t\t\t\t\t\tTrain_loss_per_batch += c \r\n\t\t\t\t\t\t\t\r\n\t\t\t\t\r\n\t\t\t\t\t\t\t#Validation and calculating training accuracy\r\n\t\t\t\t\t\t\t_, accu_train = sess.run([loss_op, accuracy], feed_dict={X: batch_x,Y: batch_y})\r\n\t\t\t\t\t\t\tvalid_history.append(accu_train)\r\n\t\t\t\t\t\t\ttotal_correct_preds += accu_train\r\n\t\t\t\t\t\t\tprint (datetime.now().strftime('%Y/%m/%d %H:%M:%S'),\"train Accuracy:\",accu_train)\r\n\t\t\t\t\r\n\t\t\t\t\t\t\tprint (datetime.now().strftime('%Y/%m/%d %H:%M:%S'),counter,\"batch over\")\r\n\t\t\t\t\t\t\tcounter += len(train_batchX)\r\n\t\t\t\t\t\t\t\r\n\t\t\t\t\t\tvalidation_accuracy = total_correct_preds/totaltrain_batch\r\n\t\t\t\t\t\tprint (datetime.now().strftime('%Y/%m/%d %H:%M:%S'),\"Train accuracy:\",validation_accuracy)\r\n\t\t\t\t\t\tacc_valid_history.append(validation_accuracy)\r\n\t\t\t\t\t\tloss_history.append(Train_loss_per_batch/totaltrain_batch)\r\n\t\t\t\t\t\t\r\n\t\t\t\t\t\t#Testing\r\n\t\t\t\t\t\tcounter_test = 0\r\n\t\t\t\t\t\tAll_test_loss = 0\r\n\t\t\t\t\t\tAll_error = 0\r\n\t\t\t\t\t\ttest_accuracy_perbatch = 0\r\n\t\t\t\t\t\tfor test_set in range(totaltest_batch):\r\n\t\t\t\t\t\t\tX_test = 
pool_test.map(decomp_test,range(counter_test,counter_test+testbatch_size))\r\n\t\t\t\t\t\t\tY_test = y_test_enc[counter_test:(counter_test+len(X_test))]\r\n\t\t\t\t\r\n\t\t\t\t\t\t\ttest_acc = accuracy.eval({X: X_test, Y: Y_test})\r\n\t\t\t\t\t\t\tprint (datetime.now().strftime('%Y/%m/%d %H:%M:%S'),\"Accuracy:\", test_acc)\r\n\t\t\t\t\t\t\ttest_accuracy_perbatch += test_acc\r\n\t\t\t\t\t\t\ttest_loss_batch,predict,error = sess.run([loss_op,pred,pred_difference], feed_dict={X: X_test, Y: Y_test})\r\n\t\t\t\t\t\t\tAll_test_loss += test_loss_batch\r\n\t\t\t\t\t\t\tAll_error += error\r\n\t\t\t\t\t\t\t#print(predict)\r\n\t\t\t\t\t\t\tcounter_test += len(X_test)\r\n\t\t\t\t\t\t\t\t\r\n\t\t\t\t\r\n\t\t\t\t\t\t\t\r\n\t\t\t\t\t\t#Statistics\t\r\n\t\t\t\t\t\tprint (datetime.now().strftime('%Y/%m/%d %H:%M:%S'),\"Final Test Accuracy:\",test_accuracy_perbatch/totaltest_batch)\t\r\n\t\t\t\t\t\tmean_error.append(np.absolute(np.mean(All_error/totaltest_batch)))\t\r\n\t\t\t\t\t\tmedian_error.append(np.absolute(np.median(All_error/totaltest_batch)))\t\r\n\t\t\t\t\t\tmaximum_error.append(np.absolute(np.amax(All_error/totaltest_batch)))\t\r\n\t\t\t\t\t\ttest_loss_history.append(All_test_loss/totaltest_batch)\t\r\n\t\t\t\t\t\ttest_accuracy_history.append(test_accuracy_perbatch/totaltest_batch)\t\r\n\t\t\t\t\t\t\t\r\n\t\t\t\t\t\t# Display logs per epoch step\t\r\n\t\t\t\t\t\tif epoch % display_step == 0:\t\r\n\t\t\t\t\t\t\tprint (datetime.now().strftime('%Y/%m/%d %H:%M:%S'),\"Epoch:\", '%04d' % (epoch+1))\t\r\n\t\t\t\t\tprint (datetime.now().strftime('%Y/%m/%d %H:%M:%S'),\"Optimization Finished!\")\t\r\n\t\t\t\t\tprint (datetime.now().strftime('%Y/%m/%d %H:%M:%S'),\"Network completed\")\t\r\n\t\t\t\t\tf.close()\t\r\n\t\t\t\t\tpool_train.close()\t\r\n\t\t\t\t\t\t\r\n\t\t\t\t\t\r\n\t\t\t\t\t\t\r\n\t\t\t\t\t\r\n\t\t\t\t\t# Final results for various bond counts\r\n\t\t\t\t\tfile_append = open('/Results/Final_Report.txt' , 'a+')\r\n\t\t\t\t\tsys.stdout = file_append\r\n\t\t\t\t\tprint(\"\\n---------------------------------------------------------------------------------------------------------------------------------------------------------------------\\n\")\r\n\t\t\t\t\tprint(\"Batch Size_{}_learning_rate_{}_hidden_neurons_{}_x_{}.txt\".format(batch_sizer_list[batch_sizer],learning_rate_list[learning_rate_],hidden_neurons_list_I[hidden_neurons_I],hidden_neurons_list_II[hidden_neurons_II]))\r\n\t\t\t\t\tprint (datetime.now().strftime('%Y/%m/%d %H:%M:%S'),\"Final Train accuracy:\",validation_accuracy)\r\n\t\t\t\t\tprint (datetime.now().strftime('%Y/%m/%d %H:%M:%S'),\"Final Test Accuracy:\",test_accuracy_perbatch/totaltest_batch)\r\n\t\t\t\t\tcounter_test_x = 0\r\n\t\t\t\t\tprediction_difference = 0\r\n\t\t\t\t\tfor testing in range(totaltest_batch):\r\n\t\t\t\t\t\tX_test = pool_test.map(decomp_test,range(counter_test_x,counter_test_x+testbatch_size))\r\n\t\t\t\t\t\tY_test = y_test_enc[counter_test_x:(counter_test_x+len(X_test))]\r\n\t\t\t\t\t\t_, predict,prediction_difference_batch = sess.run([loss_op,pred,pred_difference], feed_dict={X: X_test, Y: Y_test})\r\n\t\t\t\t\t\tprediction_difference += prediction_difference_batch\r\n\t\t\t\t\t\tcounter_test_x += len(X_test)\r\n\t\t\t\t\t\t\r\n\t\t\t\t\tprediction_window = np.absolute(prediction_difference)\r\n\t\t\t\t\tpool_test.close()\r\n\t\t\t\t\tfor j in range(10):\r\n\t\t\t\t\t\tcount_error = 0\r\n\t\t\t\t\t\tfor i in prediction_window:\r\n\t\t\t\t\t\t\tif i<=j: \r\n\t\t\t\t\t\t\t\tcount_error+=1\r\n\t\t\t\t\t\tWindow_accuracy = 
float(count_error)/len(test_items)*100\r\n\t\t\t\t\t\tprint(\"Currectly predicted bond count with error less than\",j,\"bonds, Accuracy ={:.2f}\".format(Window_accuracy))\r\n\t\t\t\tfile_append.close()\r\n \r\n #Matplot plot depiction\r\n\t\t\t\tplt.subplot(3,1,1)\r\n\t\t\t\tplt.plot(loss_history, '-o', label='Train Loss value')\r\n\t\t\t\tplt.title('Training & Tesing Loss')\r\n\t\t\t\tplt.xlabel('Epoch x Batches')\r\n\t\t\t\tplt.ylabel('Loss Value')\r\n\t\t\t\tplt.plot(test_loss_history, '-o', label='Test Loss value')\r\n\t\t\t\tplt.xlabel('Epoch x Batches')\r\n\t\t\t\tplt.ylabel('Loss Value')\r\n\t\t\t\tplt.legend(ncol=2, loc='upper right')\r\n\t\t\t\tplt.subplot(3,1,2)\r\n\t\t\t\tplt.gca().set_ylim([0,1.0])\r\n\t\t\t\tplt.plot(acc_valid_history, '-o', label='Train Accuracy value')\r\n\t\t\t\tplt.plot(test_accuracy_history, '-o', label='Test Accuracy value')\r\n\t\t\t\t#plt.plot(difference_history, '-o', label='Train-Test Accuracy')\r\n\t\t\t\tplt.title('Train & Test Accuracy')\r\n\t\t\t\tplt.xlabel('Batches')\r\n\t\t\t\tplt.ylabel('Accuracy')\r\n\t\t\t\tplt.legend(ncol=2, loc='lower right')\r\n\t\t\t\tplt.subplot(3,1,3)\r\n\t\t\t\tplt.plot(mean_error, '-o', label='Mean of error')\r\n\t\t\t\tplt.plot(median_error, '-o', label='Median of error')\r\n\t\t\t\tplt.plot(maximum_error, '-o', label='Maximum error')\r\n\t\t\t\tplt.xlabel('Batches')\r\n\t\t\t\tplt.ylabel('Error')\r\n\t\t\t\tplt.legend(ncol=2, loc='lower right')\r\n\t\t\t\tplt.gcf().set_size_inches(15, 30)\r\n\t\t\t\tplt.savefig(\"/Results/Batch Size_{}_learning_rate_{}_hidden_neurons_{}_x_{}.png\".format(batch_sizer_list[batch_sizer],learning_rate_list[learning_rate_],hidden_neurons_list_I[hidden_neurons_I],hidden_neurons_list_II[hidden_neurons_II]))\r\n\t\t\t\tplt.close()"
] | [
[
"numpy.asarray",
"tensorflow.matmul",
"matplotlib.pyplot.ylabel",
"numpy.amax",
"matplotlib.pyplot.plot",
"tensorflow.random_normal",
"tensorflow.global_variables_initializer",
"matplotlib.pyplot.gca",
"matplotlib.pyplot.gcf",
"numpy.set_printoptions",
"matplotlib.pyplot.title",
"numpy.absolute",
"matplotlib.use",
"numpy.fromstring",
"numpy.mean",
"numpy.median",
"tensorflow.cast",
"matplotlib.pyplot.close",
"tensorflow.Session",
"tensorflow.ConfigProto",
"tensorflow.placeholder",
"matplotlib.pyplot.legend",
"tensorflow.train.AdamOptimizer",
"matplotlib.pyplot.subplot",
"tensorflow.argmax",
"tensorflow.nn.softmax_cross_entropy_with_logits_v2",
"numpy.array",
"tensorflow.nn.relu",
"matplotlib.pyplot.xlabel"
]
] |
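The final reporting loop in the parameter-optimization script above measures a windowed accuracy: the percentage of test items whose predicted bond count differs from the truth by at most j bonds. A small NumPy-only sketch of that metric, with a hypothetical helper name and made-up sample counts for illustration:

```python
import numpy as np

def window_accuracy(true_counts, predicted_counts, max_window=10):
    """Percentage of predictions within j bonds of the truth, for j = 0 .. max_window-1."""
    errors = np.absolute(np.asarray(true_counts) - np.asarray(predicted_counts))
    return [float(np.count_nonzero(errors <= j)) / len(errors) * 100
            for j in range(max_window)]

true_counts = [12, 7, 30, 18, 25]
predicted_counts = [12, 9, 28, 18, 20]
for j, acc in enumerate(window_accuracy(true_counts, predicted_counts, max_window=3)):
    print("error <= {} bonds: accuracy = {:.2f}%".format(j, acc))
```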
PyJedi/quantum | [
"3f4a3c320e048b8a8faf3a10339975d2d5366fb6"
] | [
"tensorflow_quantum/core/ops/batch_util_test.py"
] | [
"# Copyright 2020 The TensorFlow Quantum Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Test parallel Cirq simulations.\"\"\"\nimport numpy as np\nimport tensorflow as tf\nfrom absl.testing import parameterized\nfrom scipy import stats\nimport cirq\n\nfrom tensorflow_quantum.core.ops import batch_util\nfrom tensorflow_quantum.python import util\n\nBATCH_SIZE = 12\nN_QUBITS = 5\nPAULI_LENGTH = 3\nSYMBOLS = ['alpha', 'beta', 'gamma']\n\n\ndef _get_mixed_batch(qubits, symbols, size):\n circuit1, resolver1 = util.random_circuit_resolver_batch(qubits, size // 2)\n circuit2, resolver2 = util.random_symbol_circuit_resolver_batch(\n qubits, symbols, size // 2)\n return circuit1 + circuit2, resolver1 + resolver2\n\n\ndef _pad_state(sim, state, n):\n if isinstance(sim, cirq.sim.sparse_simulator.Simulator):\n state = state.final_state\n if isinstance(sim, cirq.DensityMatrixSimulator):\n state = state.final_density_matrix\n return np.pad(state, (0, (1 << n) - state.shape[-1]),\n 'constant',\n constant_values=-2)\n\n\ndef _expectation_helper(sim, circuit, params, op):\n if isinstance(sim, cirq.sim.sparse_simulator.Simulator):\n state = sim.simulate(circuit, params).final_state.astype(np.complex128)\n return [\n op.expectation_from_wavefunction(\n state,\n dict(\n zip(sorted(circuit.all_qubits()),\n (j for j in range(len(circuit.all_qubits())))))).real\n ]\n if isinstance(sim, cirq.DensityMatrixSimulator):\n state = sim.simulate(circuit, params).final_density_matrix\n return [\n sum(\n x._expectation_from_density_matrix_no_validation(\n state,\n dict(\n zip(sorted(circuit.all_qubits()), (\n j\n for j in range(len(circuit.all_qubits()))))))\n for x in op)\n ]\n\n return NotImplemented\n\n\ndef _sample_helper(sim, state, n_qubits, n_samples):\n if isinstance(sim, cirq.sim.sparse_simulator.Simulator):\n return cirq.sample_state_vector(state.final_state,\n list(range(n_qubits)),\n repetitions=n_samples)\n if isinstance(sim, cirq.DensityMatrixSimulator):\n return cirq.sample_density_matrix(state.final_density_matrix,\n list(range(n_qubits)),\n repetitions=n_samples)\n\n return NotImplemented\n\n\nclass BatchUtilTest(tf.test.TestCase, parameterized.TestCase):\n \"\"\"Test cases for BatchUtils main functions.\"\"\"\n\n @parameterized.parameters([{\n 'sim': cirq.DensityMatrixSimulator()\n }, {\n 'sim': cirq.sim.sparse_simulator.Simulator()\n }])\n def test_batch_simulate_state(self, sim):\n \"\"\"Test variable sized wavefunction output.\"\"\"\n circuit_batch, resolver_batch = _get_mixed_batch(\n cirq.GridQubit.rect(1, N_QUBITS), SYMBOLS, BATCH_SIZE)\n results = batch_util.batch_calculate_state(circuit_batch,\n resolver_batch, sim)\n\n for circuit, resolver, result in zip(circuit_batch, resolver_batch,\n results):\n r = _pad_state(sim, sim.simulate(circuit, resolver), N_QUBITS)\n self.assertAllClose(r, result, rtol=1e-5, atol=1e-5)\n\n self.assertDTypeEqual(results, np.complex64)\n\n 
@parameterized.parameters([{\n 'sim': cirq.DensityMatrixSimulator()\n }, {\n 'sim': cirq.sim.sparse_simulator.Simulator()\n }])\n def test_batch_expectation(self, sim):\n \"\"\"Test expectation.\"\"\"\n qubits = cirq.GridQubit.rect(1, N_QUBITS)\n circuit_batch, resolver_batch = _get_mixed_batch(\n qubits + [cirq.GridQubit(9, 9)], SYMBOLS, BATCH_SIZE)\n ops = util.random_pauli_sums(qubits, PAULI_LENGTH, BATCH_SIZE)\n\n results = batch_util.batch_calculate_expectation(\n circuit_batch, resolver_batch, [[x] for x in ops], sim)\n\n for circuit, resolver, result, op in zip(circuit_batch, resolver_batch,\n results, ops):\n r = _expectation_helper(sim, circuit, resolver, op)\n self.assertAllClose(r, result, rtol=1e-5, atol=1e-5)\n\n self.assertDTypeEqual(results, np.float32)\n\n @parameterized.parameters([{\n 'sim': cirq.DensityMatrixSimulator()\n }, {\n 'sim': cirq.sim.sparse_simulator.Simulator()\n }])\n def test_batch_sampled_expectation(self, sim):\n \"\"\"Test expectation.\"\"\"\n qubits = cirq.GridQubit.rect(1, N_QUBITS)\n circuit_batch, resolver_batch = _get_mixed_batch(\n qubits + [cirq.GridQubit(9, 9)], SYMBOLS, BATCH_SIZE)\n\n ops = util.random_pauli_sums(qubits, PAULI_LENGTH, BATCH_SIZE)\n n_samples = [[1000] for _ in range(len(ops))]\n\n results = batch_util.batch_calculate_sampled_expectation(\n circuit_batch, resolver_batch, [[x] for x in ops], n_samples, sim)\n\n for circuit, resolver, result, op in zip(circuit_batch, resolver_batch,\n results, ops):\n r = _expectation_helper(sim, circuit, resolver, op)\n self.assertAllClose(r, result, rtol=1.0, atol=1e-1)\n\n self.assertDTypeEqual(results, np.float32)\n\n @parameterized.parameters([{\n 'sim': cirq.DensityMatrixSimulator()\n }, {\n 'sim': cirq.sim.sparse_simulator.Simulator()\n }])\n def test_batch_sample_basic(self, sim):\n \"\"\"Test sampling.\"\"\"\n n_samples = 1\n n_qubits = 8\n qubits = cirq.GridQubit.rect(1, n_qubits)\n circuit = cirq.Circuit(*cirq.Z.on_each(*qubits[:n_qubits // 2]),\n *cirq.X.on_each(*qubits[n_qubits // 2:]))\n\n test_results = batch_util.batch_sample([circuit],\n [cirq.ParamResolver({})],\n n_samples, sim)\n\n state = sim.simulate(circuit, cirq.ParamResolver({}))\n expected_results = _sample_helper(sim, state, len(qubits), n_samples)\n\n self.assertAllEqual(expected_results, test_results[0])\n self.assertDTypeEqual(test_results, np.int32)\n\n @parameterized.parameters([{\n 'sim': cirq.DensityMatrixSimulator()\n }, {\n 'sim': cirq.sim.sparse_simulator.Simulator()\n }])\n def test_batch_sample(self, sim):\n \"\"\"Test sampling.\"\"\"\n n_samples = 2000 * (2**N_QUBITS)\n\n circuit_batch, resolver_batch = _get_mixed_batch(\n cirq.GridQubit.rect(1, N_QUBITS), SYMBOLS, BATCH_SIZE)\n\n results = batch_util.batch_sample(circuit_batch, resolver_batch,\n n_samples, sim)\n\n tfq_histograms = []\n for r in results:\n tfq_histograms.append(\n np.histogram(r.dot(1 << np.arange(r.shape[-1] - 1, -1, -1)),\n range=(0, 2**N_QUBITS),\n bins=2**N_QUBITS)[0])\n\n cirq_histograms = []\n for circuit, resolver in zip(circuit_batch, resolver_batch):\n state = sim.simulate(circuit, resolver)\n r = _sample_helper(sim, state, len(circuit.all_qubits()), n_samples)\n cirq_histograms.append(\n np.histogram(r.dot(1 << np.arange(r.shape[-1] - 1, -1, -1)),\n range=(0, 2**N_QUBITS),\n bins=2**N_QUBITS)[0])\n\n for a, b in zip(tfq_histograms, cirq_histograms):\n self.assertLess(stats.entropy(a + 1e-8, b + 1e-8), 0.005)\n\n self.assertDTypeEqual(results, np.int32)\n\n @parameterized.parameters([{\n 'sim': cirq.DensityMatrixSimulator()\n }, 
{\n 'sim': cirq.sim.sparse_simulator.Simulator()\n }])\n def test_empty_circuits(self, sim):\n \"\"\"Test functions with empty circuits.\"\"\"\n # Common preparation\n resolver_batch = [cirq.ParamResolver({}) for _ in range(BATCH_SIZE)]\n circuit_batch = [cirq.Circuit() for _ in range(BATCH_SIZE)]\n qubits = cirq.GridQubit.rect(1, N_QUBITS)\n ops = util.random_pauli_sums(qubits, PAULI_LENGTH, BATCH_SIZE)\n n_samples = [[1000] for _ in range(len(ops))]\n # If there is no op on a qubit, the expectation answer is -2.0\n true_expectation = (-2.0,)\n\n # (1) Test expectation\n results = batch_util.batch_calculate_expectation(\n circuit_batch, resolver_batch, [[x] for x in ops], sim)\n\n for _, _, result, _ in zip(circuit_batch, resolver_batch, results, ops):\n self.assertAllClose(true_expectation, result, rtol=1e-5, atol=1e-5)\n\n self.assertDTypeEqual(results, np.float32)\n\n # (2) Test sampled_expectation\n results = batch_util.batch_calculate_sampled_expectation(\n circuit_batch, resolver_batch, [[x] for x in ops], n_samples, sim)\n\n for _, _, result, _ in zip(circuit_batch, resolver_batch, results, ops):\n self.assertAllClose(true_expectation, result, rtol=1.0, atol=1e-1)\n\n self.assertDTypeEqual(results, np.float32)\n\n # (3) Test state\n results = batch_util.batch_calculate_state(circuit_batch,\n resolver_batch, sim)\n\n for circuit, resolver, result in zip(circuit_batch, resolver_batch,\n results):\n r = _pad_state(sim, sim.simulate(circuit, resolver), 0)\n self.assertAllClose(r, result, rtol=1e-5, atol=1e-5)\n\n self.assertDTypeEqual(results, np.complex64)\n\n # (4) Test sampling\n n_samples = 2000 * (2**N_QUBITS)\n results = batch_util.batch_sample(circuit_batch, resolver_batch,\n n_samples, sim)\n\n for circuit, resolver, a in zip(circuit_batch, resolver_batch, results):\n state = sim.simulate(circuit, resolver)\n r = _sample_helper(sim, state, len(circuit.all_qubits()), n_samples)\n self.assertAllClose(r, a, atol=1e-5)\n\n self.assertDTypeEqual(results, np.int32)\n\n\nif __name__ == '__main__':\n tf.test.main()\n"
] | [
[
"numpy.arange",
"numpy.pad",
"scipy.stats.entropy",
"tensorflow.test.main"
]
] |
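test_batch_sample in the file above compares two samplers by converting each bitstring sample to a basis-state integer, histogramming the integers, and requiring a small KL divergence (scipy.stats.entropy of one histogram against the other, with a 1e-8 floor to avoid empty bins). A minimal sketch of that comparison using random 0/1 samples in place of circuit output; the random data and function name are illustrative:

```python
import numpy as np
from scipy import stats

def bitstrings_to_histogram(samples, n_qubits):
    """Histogram of basis-state integers for an (n_samples, n_qubits) array of 0/1 samples."""
    weights = 1 << np.arange(n_qubits - 1, -1, -1)   # most-significant bit first
    values = samples.dot(weights)                    # bitstring -> integer state index
    return np.histogram(values, range=(0, 2**n_qubits), bins=2**n_qubits)[0]

rng = np.random.default_rng(0)
n_qubits, n_samples = 3, 20000
a = bitstrings_to_histogram(rng.integers(0, 2, size=(n_samples, n_qubits)), n_qubits)
b = bitstrings_to_histogram(rng.integers(0, 2, size=(n_samples, n_qubits)), n_qubits)

# Small KL divergence means the two empirical distributions are close.
print(stats.entropy(a + 1e-8, b + 1e-8))
```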
maxgreat/dsve-loc | [
"dd6807d02c0d5fd3e215be8e5c7a88e73102e561"
] | [
"text_features_extraction.py"
] | [
"\"\"\"\r\n****************** COPYRIGHT AND CONFIDENTIALITY INFORMATION ******************\r\nCopyright (c) 2018 [Thomson Licensing]\r\nAll Rights Reserved\r\nThis program contains proprietary information which is a trade secret/business \\\r\nsecret of [Thomson Licensing] and is protected, even if unpublished, under \\\r\napplicable Copyright laws (including French droit d'auteur) and/or may be \\\r\nsubject to one or more patent(s).\r\nRecipient is to retain this program in confidence and is not permitted to use \\\r\nor make copies thereof other than as permitted in a written agreement with \\\r\n[Thomson Licensing] unless otherwise expressly allowed by applicable laws or \\\r\nby [Thomson Licensing] under express agreement.\r\nThomson Licensing is a company of the group TECHNICOLOR\r\n*******************************************************************************\r\nThis scripts permits one to reproduce training and experiments of:\r\n Engilberge, M., Chevallier, L., Pérez, P., & Cord, M. (2018, April).\r\n Finding beans in burgers: Deep semantic-visual embedding with localization.\r\n In Proceedings of CVPR (pp. 3984-3993)\r\n\r\nAuthor: Martin Engilberge\r\n\"\"\"\r\n\r\nimport argparse\r\nimport time\r\n\r\nimport numpy as np\r\nimport torch\r\n\r\nfrom misc.dataset import TextDataset\r\nfrom misc.model import joint_embedding\r\nfrom misc.utils import save_obj, collate_fn_cap_padded\r\nfrom torch.utils.data import DataLoader\r\n\r\n\r\ndevice = torch.device(\"cuda\")\r\n# device = torch.device(\"cpu\") # uncomment to run with cpu\r\n\r\n\r\nif __name__ == '__main__':\r\n\r\n parser = argparse.ArgumentParser(description='Extract embedding representation for images')\r\n parser.add_argument(\"-p\", '--path', dest=\"model_path\", help='Path to the weights of the model to evaluate', required=True)\r\n parser.add_argument(\"-d\", '--data', dest=\"data_path\", help='path to the file containing the sentence to embed')\r\n parser.add_argument(\"-o\", '--output', dest=\"output_path\", help='path of the output file', default=\"./text_embedding\")\r\n parser.add_argument(\"-bs\", \"--batch_size\", help=\"The size of the batches\", type=int, default=1)\r\n\r\n args = parser.parse_args()\r\n\r\n print(\"Loading model from:\", args.model_path)\r\n checkpoint = torch.load(args.model_path, map_location=lambda storage, loc: storage)\r\n\r\n join_emb = joint_embedding(checkpoint['args_dict'])\r\n join_emb.load_state_dict(checkpoint[\"state_dict\"])\r\n\r\n for param in join_emb.parameters():\r\n param.requires_grad = False\r\n\r\n join_emb.to(device)\r\n join_emb.eval()\r\n\r\n dataset = TextDataset(args.data_path)\r\n print(\"Dataset size: \", len(dataset))\r\n\r\n dataset_loader = DataLoader(dataset, batch_size=args.batch_size, num_workers=3, pin_memory=True, collate_fn=collate_fn_cap_padded)\r\n\r\n caps_enc = list()\r\n\r\n print(\"### Starting sentence embedding ###\")\r\n end = time.time()\r\n for i, (caps, length) in enumerate(dataset_loader, 0):\r\n\r\n input_caps = caps.to(device)\r\n\r\n with torch.no_grad():\r\n _, output_emb = join_emb(None, input_caps, length)\r\n\r\n caps_enc.append(output_emb.cpu().data.numpy())\r\n\r\n if i % 100 == 99:\r\n print(str((i + 1) * args.batch_size) + \"/\" + str(len(dataset)) + \" captions encoded - Time per batch: \" + str((time.time() - end)) + \"s\")\r\n\r\n end = time.time()\r\n\r\n print(\"Processing done -> saving\")\r\n caps_stack = np.vstack(caps_enc)\r\n\r\n save_obj(caps_stack, args.output_path)\r\n print(\"The data has been save to \", 
args.output_path)\r\n"
] | [
[
"numpy.vstack",
"torch.utils.data.DataLoader",
"torch.load",
"torch.no_grad",
"torch.device"
]
] |
MIRCen/brukerapi-python | [
"5455800895924c69bf839fa621fa7a06d343b4ff"
] | [
"test/test_jcampdx.py"
] | [
"from brukerapi.jcampdx import JCAMPDX\nimport numpy as np\nfrom pathlib import Path\nimport pytest\n\[email protected](reason=\"in progress\")\ndef test_jcampdx(test_jcampdx_data):\n\n j = JCAMPDX(Path(test_jcampdx_data[1]) / test_jcampdx_data[0]['path'])\n for key, ref in test_jcampdx_data[0]['parameters'].items():\n parameter_test = j.get_parameter(key)\n size_test= parameter_test.size\n value_test= parameter_test.value\n type_test = value_test.__class__\n\n value_ref = ref['value']\n size_ref = ref['size']\n type_ref = ref['type']\n\n #test SIZE\n if size_ref == 'None':\n size_ref = None\n if isinstance(size_ref, list):\n size_ref = tuple(size_ref)\n elif isinstance(size_ref, int):\n size_ref = (size_ref,)\n assert size_ref == size_test\n\n #test TYPE\n assert type_ref == type_test.__name__\n\n #test VALUE\n if isinstance(value_test, np.ndarray):\n value_ref = np.array(value_ref)\n assert np.array_equal(value_ref, value_test)\n elif isinstance(value_test, list):\n assert value_test == value_ref\n else:\n assert value_ref == value_test\n\n"
] | [
[
"numpy.array",
"numpy.array_equal"
]
] |
lkelvinm/OpenAeroStruct | [
"395075d28783c1b99b4ab25ddf034000caf9cd0d"
] | [
"openaerostruct/structures/section_properties_tube.py"
] | [
"from __future__ import division, print_function\nimport numpy as np\n\nfrom openmdao.api import ExplicitComponent\n\nclass SectionPropertiesTube(ExplicitComponent):\n \"\"\"\n Compute geometric properties for a tube element.\n The thicknesses are added to the interior of the element, so the\n 'radius' value is the outer radius of the tube.\n\n parameters\n ----------\n radius : numpy array\n Outer radii for each FEM element.\n thickness : numpy array\n Tube thickness for each FEM element.\n\n Returns\n -------\n A : numpy array\n Cross-sectional area for each FEM element.\n Iy : numpy array\n Area moment of inertia around the y-axis for each FEM element.\n Iz : numpy array\n Area moment of inertia around the z-axis for each FEM element.\n J : numpy array\n Polar moment of inertia for each FEM element.\n \"\"\"\n\n def initialize(self):\n self.options.declare('surface', types=dict)\n\n def setup(self):\n self.surface = surface = self.options['surface']\n\n self.ny = surface['num_y']\n\n self.add_input('radius', val=np.ones((self.ny - 1)), units='m')\n self.add_input('thickness', val=np.ones((self.ny - 1)) * .1, units='m')\n self.add_output('A', val=np.zeros((self.ny - 1)), units='m**2')\n self.add_output('Iy', val=np.zeros((self.ny - 1)), units='m**4')\n self.add_output('Iz', val=np.zeros((self.ny - 1)), units='m**4')\n self.add_output('J', val=np.zeros((self.ny - 1)), units='m**4')\n\n a = np.arange((self.ny - 1))\n self.declare_partials('*', '*', rows=a, cols=a)\n self.set_check_partial_options(wrt='*', method='cs')\n\n def compute(self, inputs, outputs):\n pi = np.pi\n\n # Add thickness to the interior of the radius.\n # The outer radius is the inputs['radius'] amount.\n r1 = inputs['radius'] - inputs['thickness']\n r2 = inputs['radius']\n\n # Compute the area, area moments of inertia, and polar moment of inertia\n outputs['A'] = pi * (r2**2 - r1**2)\n outputs['Iy'] = pi * (r2**4 - r1**4) / 4.\n outputs['Iz'] = pi * (r2**4 - r1**4) / 4.\n outputs['J'] = pi * (r2**4 - r1**4) / 2.\n\n def compute_partials(self, inputs, partials):\n pi = np.pi\n radius = inputs['radius'].real\n t = inputs['thickness'].real\n r1 = radius - t\n r2 = radius\n\n dr1_dr = 1.\n dr2_dr = 1.\n dr1_dt = -1.\n dr2_dt = 0.\n\n r1_3 = r1**3\n r2_3 = r2**3\n\n partials['A', 'radius'] = 2 * pi * (r2 * dr2_dr - r1 * dr1_dr)\n partials['A', 'thickness'] = 2 * pi * (r2 * dr2_dt - r1 * dr1_dt)\n partials['Iy', 'radius'] = pi * (r2_3 * dr2_dr - r1_3 * dr1_dr)\n partials['Iy', 'thickness'] = pi * (r2_3 * dr2_dt - r1_3 * dr1_dt)\n partials['Iz', 'radius'] = pi * (r2_3 * dr2_dr - r1_3 * dr1_dr)\n partials['Iz', 'thickness'] = pi * (r2_3 * dr2_dt - r1_3 * dr1_dt)\n partials['J', 'radius'] = 2 * pi * (r2_3 * dr2_dr - r1_3 * dr1_dr)\n partials['J', 'thickness'] = 2 * pi * (r2_3 * dr2_dt - r1_3 * dr1_dt)\n"
] | [
[
"numpy.arange",
"numpy.ones",
"numpy.zeros"
]
] |
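SectionPropertiesTube above wraps closed-form annulus formulas in an OpenMDAO component; stripped of the framework, the same computation is a few NumPy expressions. A sketch with illustrative input values, not tied to any surface dictionary:

```python
import numpy as np

def tube_section_properties(radius, thickness):
    """Area and moments of inertia for a hollow tube; radius is the outer radius."""
    r2 = np.asarray(radius, dtype=float)            # outer radius
    r1 = r2 - np.asarray(thickness, dtype=float)    # inner radius (thickness added inward)
    A = np.pi * (r2**2 - r1**2)                     # cross-sectional area
    Iy = Iz = np.pi * (r2**4 - r1**4) / 4.0         # area moments of inertia
    J = np.pi * (r2**4 - r1**4) / 2.0               # polar moment of inertia
    return A, Iy, Iz, J

A, Iy, Iz, J = tube_section_properties(radius=[0.5, 0.4], thickness=[0.05, 0.05])
print(A, Iy, Iz, J)
```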
mo-cmyk/wbgapi | [
"a0f8658b7a74ec79256d7b66ff58cb95726e89aa"
] | [
"wbgapi/data.py"
] | [
"\n'''Access World Bank API data\n'''\n\nimport wbgapi as w\ntry:\n import numpy as np\n import pandas as pd\nexcept ImportError:\n np = None\n pd = None\n\ndef fetch(series, economy='all', time='all', mrv=None, mrnev=None, skipBlanks=False, labels=False, skipAggs=False, numericTimeKeys=False, params={}, db=None, **dimensions):\n '''Retrieve rows of data for the current database\n\n Arguments:\n series: a series identifier or list-like, e.g., SP.POP.TOTL\n\n economy: an economy identifier or list-like, e.g., 'BRA' or ['USA', 'CAN', 'MEX']\n\n time: a time identifier or list-like, e.g., 'YR2015' or range(2010,2020).\n Both element keys and values are acceptable\n\n mrv: return only the specified number of most recent values (same time period for all economies)\n\n mrnev: return only the specified number of non-empty most recent values (time period varies)\n\n skipBlanks: skip empty observations\n\n labels: include both dimension id and name (e.g., ZWE & Zimbabwe, not just ZWE)\n\n skipAggs: skip aggregates\n\n numericTimeKeys: store the time object by value (e.g., 2014) instead of key ('YR2014') if value is numeric\n\n params: extra query parameters to pass to the API\n\n dimensions: extra dimensions, database specific (e.g., version)\n\n Returns:\n A generator object\n\n Examples:\n # print name and population of all economies for all available years\n for elem in wbgapi.data.fetch('SP.POP.TOTL',labels=True):\n print(elem['economy']['value'], elem['time']['value'], elem['value'])\n\n # fetch data for Brazil for odd-numbered years\n for elem in wbgapi.data.fetch('NY.GDP.PCAP.CD', 'BRA', range(2011,2020,2)):\n print(elem['value'])\n\n # most recent poverty rates for all LAC countries\n for elem in wbgapi.data.fetch('SI.POV.NAHC', economy=wb.region.members('LAC'), mrnev=1):\n print(elem['economy'], elem['time'], elem['value'])\n\n # dict of most recent population data for economies over 100000\n popData = {i['economy']: i['value'] for i in wbgapi.data.fetch('SP.POP.TOTL', mrnev=1, skipAggs=True) if i['value'] > 100000}\n \n '''\n\n if db is None:\n db = w.db\n\n concepts = w.source.concepts(db)\n concept_keys = {v['key']: k for k,v in concepts.items()}\n params_ = {}\n params_.update(params)\n if mrv:\n params_['mrv'] = mrv\n elif mrnev:\n params_['mrnev'] = mrnev\n\n # you can thus pass series, economy, and time in the dimensions array, and those will overwrite the explicit parameters\n dimensions_ = {'series': series, 'economy': economy, 'time': time}\n dimensions_.update(dimensions)\n\n url = 'sources/{}'.format(db)\n keys = ['series', 'economy', 'time']\n values = {}\n for k,v in dimensions_.items():\n if k not in concepts:\n raise KeyError('{} is not a concept in database {}'.format(k, db))\n\n if k not in keys:\n keys.append(k)\n\n url += '/{}/{}'.format(concepts[k]['key'], '{' + k + '}')\n values[k] = w.queryParam(v, concept=k, db=db)\n\n aggs = w.economy.aggregates()\n\n for row in w.refetch(url, keys, params=params_, **values):\n if skipBlanks and row['value'] is None:\n continue\n\n skip = False\n\n x = {'value': row['value']}\n for elem in row['variable']:\n key = concept_keys[elem['concept'].lower()]\n if key == 'economy' and skipAggs and elem['id'] in aggs:\n skip = True\n break\n\n if not skip:\n if labels:\n del(elem['concept'])\n x[key] = elem\n if key == 'economy':\n x[key]['aggregate'] = elem['id'] in aggs\n elif key == 'time' and numericTimeKeys and elem['value'].isdigit():\n x[key]['id'] = int(elem['value'])\n else:\n x[key] = elem['id']\n if key == 'economy':\n 
x['aggregate'] = elem['id'] in aggs\n elif key == 'time' and numericTimeKeys and elem['value'].isdigit():\n x[key] = int(elem['value'])\n\n if not skip:\n yield x\n\ndef FlatFrame(series, economy='all', time='all', mrv=None, mrnev=None, skipBlanks=False, labels=False, skipAggs=False, params={}, db=None, **dimensions):\n '''Retrieve a flat pandas dataframe (1 row per observation)\n\n Arguments:\n series: a series identifier or list-like, e.g., SP.POP.TOTL\n\n economy: an economy identifier or list-like, e.g., 'BRA' or ['USA', 'CAN', 'MEX']\n\n time: a time identifier or list-like, e.g., 'YR2015' or range(2010,2020).\n Both element keys and values are acceptable\n\n mrv: return only the specified number of most recent values (same time period for all economies)\n\n mrnev: return only the specified number of non-empty most recent values (time period varies)\n\n skipBlanks: skip empty observations\n\n labels: return the dimension name instead of the identifier\n\n skipAggs: skip aggregates\n\n params: extra query parameters to pass to the API\n\n dimensions: extra dimensions, database specific (e.g., version)\n \n Returns:\n a pandas DataFrame\n\n Notes:\n values in the time column are numeric if possible (2015 not 'YR2015')\n '''\n\n if pd is None:\n raise ModuleNotFoundError('you must install pandas to use this feature')\n\n key = 'value' if labels else 'id'\n df = None\n\n # we set numericTimeKeys=True so that time values will always be numeric if possible\n for row in fetch(series, economy, time, mrv=mrv, mrnev=mrnev, skipBlanks=skipBlanks, labels=True, numericTimeKeys=True, skipAggs=skipAggs, params=params, db=db, **dimensions):\n if df is None:\n # this assumes that the API returns the same object structure in every row, so we can use the first as a template\n columns = row.keys()\n df = pd.DataFrame(columns=columns)\n\n df.loc[len(df)] = [row[i][key] if type(row[i]) is dict else row[i] for i in columns]\n\n return df\n\ndef DataFrame(series, economy='all', time='all', index=None, columns=None, mrv=None, mrnev=None, skipBlanks=False, labels=False, skipAggs=False, numericTimeKeys=False, timeColumns=False, params={}, db=None, **dimensions):\n '''Retrieve a 2-dimensional pandas dataframe. \n \n Arguments:\n series: a series identifier or list-like, e.g., SP.POP.TOTL\n\n economy: an economy identifier or list-like, e.g., 'BRA' or ['USA', 'CAN', 'MEX']\n\n time: a time identifier or list-like, e.g., 'YR2015' or range(2010,2020).\n Both element keys and values are acceptable\n\n index: name or list of dimensions for the DataFrame's index, e.g., 'economy'. If None then the function\n will define the index based on your request. Note: to get a dataframe with no index\n (i.e., 0-based integers) call `reset_index()` with on the return value of this function.\n\n columns: name of the dimension for the DataFrame's columns, e.g., 'series'. 
If None then the function\n will define columns based on your request.\n\n mrv: return only the specified number of most recent values (same time period for all economies)\n\n mrnev: return only the specified number of non-empty most recent values (time period varies)\n\n skipBlanks: skip empty observations\n\n labels: include the dimension name for rows\n\n skipAggs: skip aggregates\n\n numericTimeKeys: store the time object by value (e.g., 2014) instead of key ('YR2014') if value is numeric\n\n timeColumns: add extra columns to show the time dimension for each series/economy\n If 'auto' then the function will guess based on other parameters\n\n params: extra query parameters to pass to the API\n\n dimensions: extra dimensions, database specific (e.g., version)\n \n Returns:\n a pandas DataFrame\n\n Examples:\n # 5 years of population data (with economy names)\n wbgapi.data.DataFrame('SP.POP.TOTL', time=range(2010,2020),labels=True)\n\n # Most recent poverty and income data for LAC\n wbgapi.data.DataFrame(['SI.POV.NAHC', 'NY.GDP.PCAP.CD'], economy=wb.region.members('LAC'),mrnev=1,timeColumns=True)\n\n # Fetch most recent CO2 emissions for each country and merge its income group\n wbgapi.data.DataFrame('EN.ATM.CO2E.PC',mrnev=1).join(wbgapi.economy.DataFrame()['incomeLevel'])\n\n # Top 10 emitters per capita\n wbgapi.data.DataFrame('EN.ATM.CO2E.PC',mrnev=1,labels=True).sort_values('EN.ATM.CO2E.PC',ascending=False).head(10)\n\n Notes:\n timeColumns currently defaults to False so that the default column composition is consistent. This may change to 'auto'\n at some point, so that mrv behavior is more intuitive for data discovery\n '''\n\n def frame(index):\n\n if len(index) > 1:\n i = [[]] * len(index)\n return pd.DataFrame(index=pd.MultiIndex(levels=i, codes=i, names=tuple(index)))\n\n df = pd.DataFrame()\n df.index.name = index[0]\n return df\n\n def is_single(x):\n\n if type(x) is str:\n if x == 'all':\n return False\n elif x == 'mrv':\n return True\n\n # not necessary to pass db since we don't actually care about the parameters just the count of them\n return len(w.queryParam(x).split(';')) == 1\n\n if pd is None:\n raise ModuleNotFoundError('you must install pandas to use this feature')\n\n # set up the axes by looking at the index/column parameters\n concepts = ['economy','series','time']\n for k,v in w.source.concepts(db).items():\n if k not in concepts:\n concepts.insert(0, k)\n\n if type(index) is str:\n index = [index]\n\n if index is None or columns is None:\n # we need to infer at least one dimension\n\n dimensions_ = {'series': series, 'economy': economy, 'time': time}\n dimensions_.update(dimensions)\n\n axes = concepts.copy()\n\n # now we reduce axes by eliminating any dimension consisting of \n # one element not defined in the calling parameters, with a stop\n # if we reduce to 2 dimensions\n x = concepts.copy()\n x.reverse()\n for k in x:\n if len(axes) == 2:\n break\n\n if k == columns or (type(index) is list and k in index):\n continue\n\n values = dimensions_.get(k, 'all')\n if k == 'time' and (mrv == 1 or mrnev == 1 or is_single(values)):\n axes.remove(k)\n if timeColumns == 'auto' and (mrv == 1 or mrnev == 1):\n timeColumns = True\n\n elif is_single(values):\n axes.remove(k)\n\n if columns is None and index is None:\n columns = axes.pop(-1)\n index = axes\n elif columns is None:\n # try to guess a column based on what index doesn't define\n x = list(filter(lambda x: x not in index, axes))\n if len(x) > 0:\n columns = x[-1]\n elif (set(concepts) - set(list)) > 0:\n # 
index has claimed all non-singular dimensions, so set columns from the full concepts list\n x = list(filter(lambda x: x not in index, concepts))\n columns = x[-1]\n else:\n # index is the same as the concepts list. That's not allowed\n raise ValueError('one dimension must be a column')\n\n elif index is None:\n axes.remove(columns)\n index = axes\n\n # sanity checks\n if type(columns) is not str or columns not in concepts:\n raise ValueError('columns must be None or a dimension')\n\n if type(index) is not list or len(set(index) - set(concepts)) > 0:\n raise ValueError('index must be None or a dimension list')\n\n if columns in index:\n raise ValueError('columns cannot be an element in index')\n\n if columns == 'time' or 'time' in index or timeColumns == 'auto':\n timeColumns = False\n\n # for now let's see if it works to build the dataframe dynamically\n df = frame(index)\n dummy = pd.Series() # empty series - never assigned actual values\n ts_suffix = ':T'\n concepts = w.source.concepts(db)\n if labels:\n # create a separate dataframe for labels so that we can control the column position below\n df2 = frame(index)\n\n for row in fetch(series, economy, time, mrv=mrv, mrnev=mrnev, skipBlanks=skipBlanks, labels=True, skipAggs=skipAggs, numericTimeKeys=numericTimeKeys, params=params, db=db, **dimensions):\n column_key = row[columns]['id']\n if len(index) == 1:\n index_key = row[index[0]]['id']\n else:\n index_key = tuple(map(lambda x: row[x]['id'], index))\n\n # this logic only assigns values to locations that don't yet exist. First observations thus take precedent over subsequent ones\n if pd.isna(df.get(column_key, dummy).get(index_key)):\n df.loc[index_key, column_key] = np.nan if row['value'] is None else row['value']\n if timeColumns:\n df.loc[index_key, column_key + ts_suffix] = row['time']['value']\n\n if labels:\n for i in index:\n df2.loc[index_key, concepts[i]['value']] = row[i]['value']\n \n df.sort_index(axis=0,inplace=True)\n df.sort_index(axis=1,inplace=True)\n if labels:\n return df2.join(df)\n # return pd.concat([df2,df], axis=1, sort=False)\n \n return df\n \n\ndef get(series, economy, time='all', mrv=None, mrnev=None, labels=False, numericTimeKeys=False, db=None, **dimensions):\n '''Retrieve a single data point for the current database\n\n Arguments:\n series: a series identifier\n\n economy: an economy identifier\n\n time: a time identifier. Both element keys and values are acceptable\n\n mrv: return only the specified number of most recent values (same time period for all economies)\n\n mrnev: return only the specified number of non-empty most recent values (time period varies)\n\n labels: include both dimension id and name (e.g., ZWE & Zimbabwe, not just ZWE)\n\n numericTimeKeys: store the time object by value (e.g., 2014) instead of key ('YR2014') if value is numeric\n\n dimensions: extra dimensions, database specific (e.g., version)\n\n Returns:\n a data observation\n\n Notes:\n This function simply calls fetch() and returns the first result. 
Hence, you should set mrv or mrnev to 1, or set\n time to a single value to get predictable results.\n\n Example:\n # print the last population estimate for France\n print(wbgapi.data.get('SP.POP.TOTL', 'FRA', mrnev=1)['value'])\n '''\n\n for row in fetch(series, economy, time, mrv=mrv, mrnev=mrnev, labels=labels, numericTimeKeys=numericTimeKeys, params={'per_page': 1}, db=db, **dimensions):\n return row\n\ndef footnote(series, economy, time, db=None):\n '''Return the footnote for a single data point, if any\n\n Arguments:\n series: a series identifier\n\n economy: an economy identifier\n\n time: a time identifier. Both element keys and values are acceptable\n\n Returns:\n footnote text, or None\n\n Example:\n print(wbgapi.data.footnote('SP.POP.TOTL', 'FRA', 2015))\n '''\n\n if db is None:\n db = w.db\n\n # note that this only supports singular footnote references at this point, although the interface suggests otherwise\n url = 'sources/{source}/footnote/{economy}~{series}~{time}/metadata'\n try:\n for row in w.metadata(url, ['series'], source=db, series=series, economy=economy, time=w.queryParam(time, 'time', db=db)):\n return row.metadata['FootNote']\n except:\n pass # will return None then\n\n"
] | [
[
"pandas.Series",
"pandas.DataFrame"
]
] |
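The wbgapi record above documents fetch(), FlatFrame(), DataFrame(), get() and footnote() largely through their docstrings. As a hedged usage sketch (it assumes the wbgapi package is installed and the World Bank API is reachable; the series and economy codes are only illustrative), the generator and the pivoted-DataFrame entry points can be exercised like this:

import wbgapi as wb

# Stream raw observations from the generator; with labels=True each row's
# 'economy', 'series' and 'time' entries are dicts, exactly as built in fetch().
for row in wb.data.fetch('SP.POP.TOTL', economy=['BRA', 'CAN'],
                         time=range(2018, 2021), labels=True):
    print(row['economy']['value'], row['time']['value'], row['value'])

# Pivot the same request into a 2-D frame; index and columns are inferred by
# the axis-reduction logic in DataFrame() when they are not given explicitly.
df = wb.data.DataFrame('SP.POP.TOTL', economy=['BRA', 'CAN'],
                       time=range(2018, 2021), labels=True)
print(df.head())

# Single most recent non-empty value, mirroring get()'s per_page=1 call to fetch().
print(wb.data.get('SP.POP.TOTL', 'BRA', mrnev=1)['value'])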
kpoeppel/pytorch_probgraph | [
"b78595ab03bbe92595ad2f6b35f5dd8bf84d6da0"
] | [
"examples/Model_HM_RWS.py"
] | [
"\nimport site\nsite.addsitedir('..')\n\nimport torch\nfrom pytorch_probgraph import BernoulliLayer\nfrom pytorch_probgraph import InteractionLinear\nfrom pytorch_probgraph import HelmholtzMachine\nfrom itertools import chain\nfrom tqdm import tqdm\n\nclass Model_HM_RWS(torch.nn.Module):\n def __init__(self):\n super().__init__()\n layer0 = BernoulliLayer(torch.nn.Parameter(torch.zeros([1, 1, 28, 28]), requires_grad=True))\n layer1 = BernoulliLayer(torch.nn.Parameter(torch.zeros([1, 200]), requires_grad=True))\n layer2 = BernoulliLayer(torch.nn.Parameter(torch.zeros([1, 200]), requires_grad=True))\n\n interactionUp1 = InteractionLinear(layer0.bias.shape[1:], layer1.bias.shape[1:])\n interactionDown1 = InteractionLinear(layer1.bias.shape[1:], layer0.bias.shape[1:])\n interactionUp2 = InteractionLinear(layer1.bias.shape[1:], layer2.bias.shape[1:])\n interactionDown2 = InteractionLinear(layer2.bias.shape[1:], layer1.bias.shape[1:])\n\n parameters = chain(*[m.parameters() for m in [layer0, layer1, layer2, interactionUp1, interactionUp2, interactionDown1, interactionDown2]])\n opt = torch.optim.Adam(parameters)\n\n self.model = HelmholtzMachine([layer0, layer1, layer2],\n [interactionUp1, interactionUp2],\n [interactionDown1, interactionDown2],\n optimizer=opt)\n #print(interaction.weight.shape)\n\n def train(self, data, epochs=1, device=None):\n for epoch in range(epochs):\n for dat in data:\n self.model.trainReweightedWS(dat.to(device), ksamples=5)\n if isinstance(data, tqdm):\n data = tqdm(data)\n #print(torch.sum(self.model.interaction.weight))\n\n def loglikelihood(self, data):\n return self.model.loglikelihood(data, ksamples=100).cpu().detach()\n\n def generate(self, N=1):\n return self.model.sampleAll(N=N)[0][0].cpu()\n"
] | [
[
"torch.zeros",
"torch.optim.Adam"
]
] |
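The Model_HM_RWS record above wraps a three-layer Helmholtz machine behind train(), loglikelihood() and generate(). A minimal driving sketch, assuming the example file is importable from its examples/ directory and using torchvision's binarized MNIST purely as a stand-in data source (the record itself does not prescribe a dataset):

import torch
from torch.utils.data import DataLoader
from torchvision import datasets, transforms
from Model_HM_RWS import Model_HM_RWS  # class defined in the record above

# Binarized 28x28 images match the [1, 1, 28, 28] visible BernoulliLayer.
binarize = transforms.Compose([transforms.ToTensor(),
                               lambda x: (x > 0.5).float()])
mnist = datasets.MNIST('.', download=True, transform=binarize)
loader = DataLoader(mnist, batch_size=64)

model = Model_HM_RWS()
images = (img for img, _ in loader)  # train() consumes only the image tensors
model.train(images, epochs=1, device=torch.device('cpu'))  # reweighted wake-sleep, ksamples=5
print(model.generate(N=4).shape)     # sample 4 visible-layer configurations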
acmore/ray | [
"9f0f54266064e203b0bdcc9d3fa947cb4518ebc0"
] | [
"rllib/utils/torch_ops.py"
] | [
"import numpy as np\n\nfrom ray.rllib.utils import try_import_tree\nfrom ray.rllib.utils.framework import try_import_torch\n\ntorch, _ = try_import_torch()\ntree = try_import_tree()\n\n\ndef explained_variance(y, pred):\n y_var = torch.var(y, dim=[0])\n diff_var = torch.var(y - pred, dim=[0])\n min_ = torch.Tensor([-1.0])\n return torch.max(\n min_.to(device=torch.device(\"cuda\"))\n if torch.cuda.is_available() else min_,\n 1 - (diff_var / y_var))\n\n\ndef global_norm(tensors):\n \"\"\"Returns the global L2 norm over a list of tensors.\n\n output = sqrt(SUM(t ** 2 for t in tensors)),\n where SUM reduces over all tensors and over all elements in tensors.\n\n Args:\n tensors (List[torch.Tensor]): The list of tensors to calculate the\n global norm over.\n \"\"\"\n # List of single tensors' L2 norms: SQRT(SUM(xi^2)) over all xi in tensor.\n single_l2s = [\n torch.pow(torch.sum(torch.pow(t, 2.0)), 0.5) for t in tensors\n ]\n # Compute global norm from all single tensors' L2 norms.\n return torch.pow(sum(torch.pow(l2, 2.0) for l2 in single_l2s), 0.5)\n\n\ndef huber_loss(x, delta=1.0):\n \"\"\"Reference: https://en.wikipedia.org/wiki/Huber_loss\"\"\"\n return torch.where(\n torch.abs(x) < delta,\n torch.pow(x, 2.0) * 0.5, delta * (torch.abs(x) - 0.5 * delta))\n\n\ndef l2_loss(x):\n \"\"\"Computes half the L2 norm of a tensor without the sqrt.\n\n output = sum(x ** 2) / 2\n \"\"\"\n return torch.sum(torch.pow(x, 2.0)) / 2.0\n\n\ndef reduce_mean_ignore_inf(x, axis):\n \"\"\"Same as torch.mean() but ignores -inf values.\"\"\"\n mask = torch.ne(x, float(\"-inf\"))\n x_zeroed = torch.where(mask, x, torch.zeros_like(x))\n return torch.sum(x_zeroed, axis) / torch.sum(mask.float(), axis)\n\n\ndef minimize_and_clip(optimizer, clip_val=10):\n \"\"\"Clips gradients found in `optimizer.param_groups` to given value.\n\n Ensures the norm of the gradients for each variable is clipped to\n `clip_val`\n \"\"\"\n for param_group in optimizer.param_groups:\n for p in param_group[\"params\"]:\n if p.grad is not None:\n torch.nn.utils.clip_grad_norm_(p.grad, clip_val)\n\n\ndef sequence_mask(lengths, maxlen=None, dtype=None):\n \"\"\"Offers same behavior as tf.sequence_mask for torch.\n\n Thanks to Dimitris Papatheodorou\n (https://discuss.pytorch.org/t/pytorch-equivalent-for-tf-sequence-mask/\n 39036).\n \"\"\"\n if maxlen is None:\n maxlen = lengths.max()\n\n mask = ~(torch.ones((len(lengths), maxlen)).to(\n lengths.device).cumsum(dim=1).t() > lengths).t()\n mask.type(dtype or torch.bool)\n\n return mask\n\n\ndef convert_to_non_torch_type(stats):\n \"\"\"Converts values in `stats` to non-Tensor numpy or python types.\n\n Args:\n stats (any): Any (possibly nested) struct, the values in which will be\n converted and returned as a new struct with all torch tensors\n being converted to numpy types.\n\n Returns:\n Any: A new struct with the same structure as `stats`, but with all\n values converted to non-torch Tensor types.\n \"\"\"\n\n # The mapping function used to numpyize torch Tensors.\n def mapping(item):\n if isinstance(item, torch.Tensor):\n return item.cpu().item() if len(item.size()) == 0 else \\\n item.cpu().detach().numpy()\n else:\n return item\n\n return tree.map_structure(mapping, stats)\n\n\ndef convert_to_torch_tensor(stats, device=None):\n \"\"\"Converts any struct to torch.Tensors.\n\n stats (any): Any (possibly nested) struct, the values in which will be\n converted and returned as a new struct with all leaves converted\n to torch tensors.\n\n Returns:\n Any: A new struct with the same structure as 
`stats`, but with all\n values converted to torch Tensor types.\n \"\"\"\n\n def mapping(item):\n if torch.is_tensor(item):\n return item if device is None else item.to(device)\n tensor = torch.from_numpy(np.asarray(item))\n # Floatify all float64 tensors.\n if tensor.dtype == torch.double:\n tensor = tensor.float()\n return tensor if device is None else tensor.to(device)\n\n return tree.map_structure(mapping, stats)\n\n\ndef atanh(x):\n return 0.5 * torch.log((1 + x) / (1 - x))\n"
] | [
[
"numpy.asarray"
]
] |
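The torch_ops record above is a collection of free functions over plain tensors, so it can be smoke-tested without building an RLlib policy. A small sketch, assuming ray[rllib] at this commit is installed so that rllib/utils/torch_ops.py is importable as ray.rllib.utils.torch_ops:

import torch
from ray.rllib.utils.torch_ops import huber_loss, sequence_mask, global_norm

# Quadratic inside |x| < delta, linear outside, per the Huber definition above.
x = torch.tensor([-3.0, -0.5, 0.0, 0.5, 3.0])
print(huber_loss(x, delta=1.0))

# Boolean mask with the same semantics as tf.sequence_mask.
lengths = torch.tensor([1, 3, 2])
print(sequence_mask(lengths, maxlen=4))

# Global L2 norm over a list of tensors.
print(global_norm([torch.ones(2, 2), torch.full((3,), 2.0)]))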
kanekosh/openconcept | [
"7878e5725eed78a023136b58250361531c7c7654"
] | [
"openconcept/analysis/performance/solver_phases.py"
] | [
"from __future__ import division\nfrom openmdao.api import Group, ExplicitComponent, IndepVarComp, BalanceComp, ImplicitComponent\nimport openconcept.api as oc\nfrom openconcept.analysis.atmospherics.compute_atmos_props import ComputeAtmosphericProperties\nfrom openconcept.analysis.aerodynamics import Lift, StallSpeed\nfrom openconcept.utilities.math import ElementMultiplyDivideComp, AddSubtractComp\nfrom openconcept.utilities.math.integrals import Integrator\nfrom openconcept.utilities.linearinterp import LinearInterpolator\nfrom openconcept.utilities.math.integrals import Integrator\nimport numpy as np\nimport copy\n\nclass ClimbAngleComp(ExplicitComponent):\n \"\"\"\n Computes steady climb angle based on excess thrust.\n\n This is a helper function\n and shouldn't be instantiated in the top-level model directly.\n\n Inputs\n ------\n drag : float\n Aircraft drag at v2 (climb out) flight condition (scalar, N)\n weight : float\n Takeoff weight (scalar, kg)\n thrust : float\n Thrust at the v2 (climb out) flight condition (scalar, N)\n\n Outputs\n -------\n gamma : float\n Climb out flight path angle (scalar, rad)\n\n Options\n -------\n num_nodes : int\n Number of points to run\n \"\"\"\n def initialize(self):\n self.options.declare('num_nodes', default=1)\n\n def setup(self):\n nn = self.options['num_nodes']\n self.add_input('drag', units='N',shape=(nn,))\n self.add_input('weight', units='kg', shape=(nn,))\n self.add_input('thrust', units='N',shape=(nn,))\n self.add_output('gamma', units='rad',shape=(nn,))\n\n self.declare_partials(['gamma'], ['weight','thrust','drag'], cols=np.arange(0,nn), rows=np.arange(0,nn))\n\n def compute(self, inputs, outputs):\n g = 9.80665 #m/s^2\n outputs['gamma'] = np.arcsin((inputs['thrust']-inputs['drag'])/inputs['weight']/g)\n\n def compute_partials(self, inputs, J):\n g = 9.80665 #m/s^2\n interior_qty = (inputs['thrust']-inputs['drag'])/inputs['weight']/g\n d_arcsin = 1/np.sqrt(1-interior_qty**2)\n J['gamma','thrust'] = d_arcsin/inputs['weight']/g\n J['gamma','drag'] = -d_arcsin/inputs['weight']/g\n J['gamma','weight'] = -d_arcsin*(inputs['thrust']-inputs['drag'])/inputs['weight']**2/g\n\n\nclass FlipVectorComp(ExplicitComponent):\n \"\"\"\n Reverses the order of an OpenMDAO vector\n\n This is a helper function\n and shouldn't be instantiated in the top-level model directly.\n\n Inputs\n ------\n vec_in : float\n Incoming vector in forward order\n\n Outputs\n -------\n vec_out : float\n Reversed order version of vec_in\n\n Options\n -------\n num_nodes : int\n Number of points to run\n negative : boolean\n Whether to apply a negative scaler. Default False preserves vector values.\n True returns all values with negative sign.\n units : string or None\n Units for vec_in and vec_out (Default None)\n Specify as an OpenMDAO unit string (e.g. 
'kg')\n \"\"\"\n def initialize(self):\n self.options.declare('num_nodes',default=1)\n self.options.declare('negative',default=False)\n self.options.declare('units',default=None)\n\n def setup(self):\n nn = self.options['num_nodes']\n units = self.options['units']\n self.add_input('vec_in', units=units, shape=(nn,))\n self.add_output('vec_out', units=units, shape=(nn,))\n negative = self.options['negative']\n if negative:\n scaler = -1\n else:\n scaler = 1\n self.declare_partials(['vec_out'],['vec_in'],rows=np.arange(nn-1,-1,-1),cols=np.arange(0,nn,1),val=scaler*np.ones((nn,)))\n\n def compute(self, inputs, outputs):\n negative = self.options['negative']\n if negative:\n scaler = -1\n else:\n scaler = 1\n outputs['vec_out'] = scaler * np.flip(inputs['vec_in'], 0)\n\n\nclass BFLImplicitSolve(ImplicitComponent):\n \"\"\"\n Computes a residual equation so Newton solver can set v1 to analyze balanced field length\n\n This residual is equal to zero if:\n - The rejected takeoff and engine-out takeoff distances are equal, or:\n - V1 is equal to VR and the engine out takeoff distance is longer than the RTO distance\n\n Since this is a discontinous function, the partial derivatives are written in a special way\n to 'coax' the V1 value into the right setting with a Newton step. It's kind of a hack.\n\n Inputs\n ------\n distance_continue : float\n Engine-out takeoff distance (scalar, m)\n distance_abort : float\n Distance to full-stop when takeoff is rejected at V1 (scalar, m)\n takeoff|vr : float\n Rotation speed (scalar, m/s)\n\n Outputs\n -------\n takeoff|v1 : float\n Decision speed (scalar, m/s)\n\n \"\"\"\n def setup(self):\n self.add_input('distance_continue', units='m')\n self.add_input('distance_abort', units='m')\n self.add_input('takeoff|vr', units='m/s')\n self.add_output('takeoff|v1', units='m/s',val=20,lower=10,upper=150)\n self.declare_partials('takeoff|v1',['distance_continue','distance_abort','takeoff|v1','takeoff|vr'])\n\n def apply_nonlinear(self, inputs, outputs, residuals):\n speedtol = 1e-1\n disttol = 0\n #force the decision speed to zero\n if inputs['takeoff|vr'] < outputs['takeoff|v1'] + speedtol:\n residuals['takeoff|v1'] = inputs['takeoff|vr'] - outputs['takeoff|v1']\n else:\n residuals['takeoff|v1'] = inputs['distance_continue'] - inputs['distance_abort']\n\n #if you are within vtol on the correct side but the stopping distance bigger, use the regular mode\n if inputs['takeoff|vr'] >= outputs['takeoff|v1'] and inputs['takeoff|vr'] - outputs['takeoff|v1'] < speedtol and (inputs['distance_abort'] - inputs['distance_continue']) > disttol:\n residuals['takeoff|v1'] = inputs['distance_continue'] - inputs['distance_abort']\n\n\n def linearize(self, inputs, outputs, partials):\n speedtol = 1e-1\n disttol = 0\n\n if inputs['takeoff|vr'] < outputs['takeoff|v1'] + speedtol:\n partials['takeoff|v1','distance_continue'] = 0\n partials['takeoff|v1','distance_abort'] = 0\n partials['takeoff|v1','takeoff|vr'] = 1\n partials['takeoff|v1','takeoff|v1'] = -1\n else:\n partials['takeoff|v1','distance_continue'] = 1\n partials['takeoff|v1','distance_abort'] = -1\n partials['takeoff|v1','takeoff|vr'] = 0\n partials['takeoff|v1','takeoff|v1'] = 0\n\n if inputs['takeoff|vr'] >= outputs['takeoff|v1'] and inputs['takeoff|vr'] - outputs['takeoff|v1'] < speedtol and (inputs['distance_abort'] - inputs['distance_continue']) > disttol:\n partials['takeoff|v1','distance_continue'] = 1\n partials['takeoff|v1','distance_abort'] = -1\n partials['takeoff|v1','takeoff|vr'] = 0\n 
partials['takeoff|v1','takeoff|v1'] = 0\n\nclass Groundspeeds(ExplicitComponent):\n \"\"\"\n Computes groundspeed for vectorial true airspeed and true vertical speed.\n\n This is a helper function for the main mission analysis routines\n and shouldn't be instantiated directly.\n\n Inputs\n ------\n fltcond|vs : float\n Vertical speed for all mission phases (vector, m/s)\n fltcond|Utrue : float\n True airspeed for all mission phases (vector, m/s)\n\n Outputs\n -------\n fltcond|groundspeed : float\n True groundspeed for all mission phases (vector, m/s)\n fltcond|cosgamma : float\n Cosine of the flght path angle for all mission phases (vector, dimensionless)\n fltcond|singamma : float\n Sine of the flight path angle for all mission phases (vector, dimensionless)\n\n Options\n -------\n num_nodes : int\n Number of points to run\n \"\"\"\n def initialize(self):\n\n self.options.declare('num_nodes',default=1,desc=\"Number of Simpson intervals to use per seg (eg. climb, cruise, descend). Number of analysis points is 2N+1\")\n\n def setup(self):\n nn = self.options['num_nodes']\n self.add_input('fltcond|vs', units='m/s',shape=(nn,))\n self.add_input('fltcond|Utrue', units='m/s',shape=(nn,))\n self.add_output('fltcond|groundspeed', units='m/s',shape=(nn,))\n self.add_output('fltcond|cosgamma', shape=(nn,), desc='Cosine of the flight path angle')\n self.add_output('fltcond|singamma', shape=(nn,), desc='sin of the flight path angle' )\n self.declare_partials(['fltcond|groundspeed','fltcond|cosgamma','fltcond|singamma'], ['fltcond|vs','fltcond|Utrue'], rows=range(nn), cols=range(nn))\n\n def compute(self, inputs, outputs):\n\n nn = self.options['num_nodes']\n #compute the groundspeed on climb and desc\n inside = inputs['fltcond|Utrue']**2-inputs['fltcond|vs']**2\n groundspeed = np.sqrt(inside)\n groundspeed_fixed = np.sqrt(np.where(np.less(inside, 0.0), 0.01, inside))\n #groundspeed = np.sqrt(inputs['fltcond|Utrue']**2-inputs['fltcond|vs']**2)\n #groundspeed_fixed= np.where(np.isnan(groundspeed),0,groundspeed)\n outputs['fltcond|groundspeed'] = groundspeed_fixed\n outputs['fltcond|singamma'] = np.where(np.isnan(groundspeed),1,inputs['fltcond|vs'] / inputs['fltcond|Utrue'])\n outputs['fltcond|cosgamma'] = groundspeed_fixed / inputs['fltcond|Utrue']\n\n def compute_partials(self, inputs, J):\n inside = inputs['fltcond|Utrue']**2-inputs['fltcond|vs']**2\n groundspeed = np.sqrt(inside)\n groundspeed_fixed = np.sqrt(np.where(np.less(inside, 0.0), 0.01, inside))\n J['fltcond|groundspeed','fltcond|vs'] = np.where(np.isnan(groundspeed),0,(1/2) / groundspeed_fixed * (-2) * inputs['fltcond|vs'])\n J['fltcond|groundspeed','fltcond|Utrue'] = np.where(np.isnan(groundspeed),0, (1/2) / groundspeed_fixed * 2 * inputs['fltcond|Utrue'])\n J['fltcond|singamma','fltcond|vs'] = np.where(np.isnan(groundspeed), 0, 1 / inputs['fltcond|Utrue'])\n J['fltcond|singamma','fltcond|Utrue'] = np.where(np.isnan(groundspeed), 0, - inputs['fltcond|vs'] / inputs['fltcond|Utrue'] ** 2)\n J['fltcond|cosgamma','fltcond|vs'] = J['fltcond|groundspeed','fltcond|vs'] / inputs['fltcond|Utrue']\n J['fltcond|cosgamma','fltcond|Utrue'] = (J['fltcond|groundspeed','fltcond|Utrue'] * inputs['fltcond|Utrue'] - groundspeed_fixed) / inputs['fltcond|Utrue']**2\n\nclass HorizontalAcceleration(ExplicitComponent):\n \"\"\"\n Computes acceleration during takeoff run and effectively forms the T-D residual.\n\n Inputs\n ------\n weight : float\n Aircraft weight (scalar, kg)\n drag : float\n Aircraft drag at each analysis point (vector, N)\n lift : float\n 
Aircraft lift at each analysis point (vector, N)\n thrust : float\n Thrust at each TO analysis point (vector, N)\n fltcond|singamma : float\n The sine of the flight path angle gamma (vector, dimensionless)\n braking : float\n Effective rolling friction multiplier at each point (vector, dimensionless)\n\n Outputs\n -------\n accel_horiz : float\n Aircraft horizontal acceleration (vector, m/s**2)\n\n Options\n -------\n num_nodes : int\n Number of analysis points to run\n \"\"\"\n def initialize(self):\n self.options.declare('num_nodes',default=1)\n\n def setup(self):\n nn = self.options['num_nodes']\n g = 9.80665 #m/s^2\n self.add_input('weight', units='kg', shape=(nn,))\n self.add_input('drag', units='N',shape=(nn,))\n self.add_input('lift', units='N',shape=(nn,))\n self.add_input('thrust', units='N',shape=(nn,))\n self.add_input('fltcond|singamma',shape=(nn,))\n self.add_input('braking',shape=(nn,))\n\n self.add_output('accel_horiz', units='m/s**2', shape=(nn,))\n arange=np.arange(nn)\n self.declare_partials(['accel_horiz'], ['weight','drag','lift','thrust','braking'], rows=arange, cols=arange)\n self.declare_partials(['accel_horiz'], ['fltcond|singamma'], rows=arange, cols=arange, val=-g*np.ones((nn,)))\n\n\n def compute(self, inputs, outputs):\n nn = self.options['num_nodes']\n g = 9.80665 #m/s^2\n m = inputs['weight']\n floor_vec = np.where(np.less((g-inputs['lift']/m),0.0),0.0,1.0)\n accel = inputs['thrust']/m - inputs['drag']/m - floor_vec*inputs['braking']*(g-inputs['lift']/m) - g*inputs['fltcond|singamma']\n outputs['accel_horiz'] = accel\n\n def compute_partials(self, inputs, J):\n g = 9.80665 #m/s^2\n m = inputs['weight']\n floor_vec = np.where(np.less((g-inputs['lift']/m),0.0),0.0,1.0)\n J['accel_horiz','thrust'] = 1/m\n J['accel_horiz','drag'] = -1/m\n J['accel_horiz','braking'] = -floor_vec*(g-inputs['lift']/m)\n J['accel_horiz','lift'] = floor_vec*inputs['braking']/m\n J['accel_horiz','weight'] = (inputs['drag']-inputs['thrust']-floor_vec*inputs['braking']*inputs['lift'])/m**2\n\nclass VerticalAcceleration(ExplicitComponent):\n \"\"\"\n Computes acceleration during takeoff run in the vertical plane.\n Only used during full unsteady takeoff performance analysis due to stability issues\n\n Inputs\n ------\n weight : float\n Aircraft weight (scalar, kg)\n drag : float\n Aircraft drag at each analysis point (vector, N)\n lift : float\n Aircraft lift at each analysis point (vector, N)\n thrust : float\n Thrust at each TO analysis point (vector, N)\n fltcond|singamma : float\n The sine of the flight path angle gamma (vector, dimensionless)\n fltcond|cosgamma : float\n The sine of the flight path angle gamma (vector, dimensionless)\n\n Outputs\n -------\n accel_vert : float\n Aircraft horizontal acceleration (vector, m/s**2)\n\n Options\n -------\n num_nodes : int\n Number of analysis points to run\n \"\"\"\n def initialize(self):\n self.options.declare('num_nodes',default=1)\n\n def setup(self):\n nn = self.options['num_nodes']\n g = 9.80665 #m/s^2\n self.add_input('weight', units='kg', shape=(nn,))\n self.add_input('drag', units='N',shape=(nn,))\n self.add_input('lift', units='N',shape=(nn,))\n self.add_input('thrust', units='N',shape=(nn,))\n self.add_input('fltcond|singamma',shape=(nn,))\n self.add_input('fltcond|cosgamma',shape=(nn,))\n\n self.add_output('accel_vert', units='m/s**2', shape=(nn,),upper=2.5*g,lower=-1*g)\n arange=np.arange(nn)\n self.declare_partials(['accel_vert'], ['weight','drag','lift','thrust','fltcond|singamma','fltcond|cosgamma'], rows=arange, 
cols=arange)\n\n\n def compute(self, inputs, outputs):\n nn = self.options['num_nodes']\n g = 9.80665 #m/s^2\n cosg = inputs['fltcond|cosgamma']\n sing = inputs['fltcond|singamma']\n accel = (inputs['lift']*cosg + (inputs['thrust']-inputs['drag'])*sing - g*inputs['weight'])/inputs['weight']\n accel = np.clip(accel, -g, 2.5*g)\n outputs['accel_vert'] = accel\n\n def compute_partials(self, inputs, J):\n g = 9.80665 #m/s^2\n m = inputs['weight']\n cosg = inputs['fltcond|cosgamma']\n sing = inputs['fltcond|singamma']\n\n J['accel_vert','thrust'] = sing / m\n J['accel_vert','drag'] = -sing / m\n J['accel_vert','lift'] = cosg / m\n J['accel_vert','fltcond|singamma'] = (inputs['thrust']-inputs['drag']) / m\n J['accel_vert','fltcond|cosgamma'] = inputs['lift'] / m\n J['accel_vert','weight'] = -(inputs['lift']*cosg + (inputs['thrust']-inputs['drag'])*sing)/m**2\n\nclass SteadyFlightCL(ExplicitComponent):\n \"\"\"\n Computes lift coefficient at each analysis point\n\n This is a helper function for the main mission analysis routine\n and shouldn't be instantiated directly.\n\n Inputs\n ------\n weight : float\n Aircraft weight at each analysis point (vector, kg)\n fltcond|q : float\n Dynamic pressure at each analysis point (vector, Pascal)\n ac|geom|wing|S_ref : float\n Reference wing area (scalar, m**2)\n fltcond|cosgamma : float\n Cosine of the flght path angle for all mission phases (vector, dimensionless)\n\n Outputs\n -------\n fltcond|CL : float\n Lift coefficient (vector, dimensionless)\n\n Options\n -------\n num_nodes : int\n Number of analysis nodes to run\n mission_segments : list\n The list of mission segments to track\n \"\"\"\n def initialize(self):\n\n self.options.declare('num_nodes',default=5,desc=\"Number of Simpson intervals to use per seg (eg. climb, cruise, descend). 
Number of analysis points is 2N+1\")\n self.options.declare('mission_segments',default=['climb','cruise','descent'])\n def setup(self):\n nn = self.options['num_nodes']\n arange = np.arange(nn)\n self.add_input('weight', units='kg', shape=(nn,))\n self.add_input('fltcond|q', units='N * m**-2', shape=(nn,))\n self.add_input('ac|geom|wing|S_ref', units='m **2')\n self.add_input('fltcond|cosgamma', val=1.0, shape=(nn,))\n self.add_output('fltcond|CL',shape=(nn,))\n self.declare_partials(['fltcond|CL'], ['weight','fltcond|q',\"fltcond|cosgamma\"], rows=arange, cols=arange)\n self.declare_partials(['fltcond|CL'], ['ac|geom|wing|S_ref'], rows=arange, cols=np.zeros(nn))\n\n def compute(self, inputs, outputs):\n g = 9.80665 #m/s^2\n outputs['fltcond|CL'] = inputs['fltcond|cosgamma']*g*inputs['weight']/inputs['fltcond|q']/inputs['ac|geom|wing|S_ref']\n\n def compute_partials(self, inputs, J):\n g = 9.80665 #m/s^2\n J['fltcond|CL','weight'] = inputs['fltcond|cosgamma']*g/inputs['fltcond|q']/inputs['ac|geom|wing|S_ref']\n J['fltcond|CL','fltcond|q'] = - inputs['fltcond|cosgamma']*g*inputs['weight'] / inputs['fltcond|q']**2 / inputs['ac|geom|wing|S_ref']\n J['fltcond|CL','ac|geom|wing|S_ref'] = - inputs['fltcond|cosgamma']*g*inputs['weight'] / inputs['fltcond|q'] / inputs['ac|geom|wing|S_ref']**2\n J['fltcond|CL','fltcond|cosgamma'] = g*inputs['weight']/inputs['fltcond|q']/inputs['ac|geom|wing|S_ref']\n\nclass GroundRollPhase(oc.PhaseGroup):\n \"\"\"\n This component group models the ground roll phase of a takeoff (acceleration before flight)\n User-settable parameters include:\n throttle (default 100 percent)\n rolling friction coeff (default 0.03 for accelerating segments and 0.4 for braking)\n propulsor_active (default 1 for v0 to v1, 0 for v1 to vr and braking) to model engine failure\n altitude (fltcond|h)\n\n The BaseAircraftGroup object is passed in.\n The BaseAircraftGroup should be built to accept the following inputs\n and return the following outputs.\n The outputs should be promoted to the top level in the component.\n\n Inputs\n ------\n range : float\n Total distance travelled (vector, m)\n fltcond|h : float\n Altitude (vector, m)\n fltcond|vs : float\n Vertical speed (vector, m/s)\n fltcond|Ueas : float\n Equivalent airspeed (vector, m/s)\n fltcond|Utrue : float\n True airspeed (vector, m/s)\n fltcond|p : float\n Pressure (vector, Pa)\n fltcond|rho : float\n Density (vector, kg/m3)\n fltcond|T : float\n Temperature (vector, K)\n fltcond|q : float\n Dynamic pressure (vector, Pa)\n fltcond|CL : float\n Lift coefficient (vector, dimensionless)\n throttle : float\n Motor / propeller throttle setting scaled from 0 to 1 or slightly more (vector, dimensionless)\n propulsor_active : float\n If a multi-propulsor airplane, a failure condition should be modeled in the propulsion model by multiplying throttle by propulsor_active.\n It will generally be 1.0 unless a failure condition is being modeled, in which case it will be 0 (vector, dimensionless)\n braking : float\n Brake friction coefficient (default 0.4 for dry runway braking, 0.03 for resistance unbraked)\n Should not be applied in the air or nonphysical effects will result (vector, dimensionless)\n lift : float\n Lift force (vector, N)\n\n Outputs\n -------\n thrust : float\n Total thrust force produced by all propulsors (vector, N)\n drag : float\n Total drag force in the airplane axis produced by all sources of drag (vector, N)\n weight : float\n Weight (mass, really) of the airplane at each point in time. 
(vector, kg)\n ac|geom|wing|S_ref\n Wing reference area (scalar, m**2)\n ac|aero|CLmax_TO\n CLmax with flaps in max takeoff position (scalar, dimensionless)\n ac|weights|MTOW\n Maximum takeoff weight (scalar, kg)\n \"\"\"\n\n def initialize(self):\n self.options.declare('num_nodes',default=1)\n self.options.declare('flight_phase',default=None,desc='Phase of flight e.g. v0v1, cruise')\n self.options.declare('aircraft_model',default=None)\n\n def setup(self):\n nn = self.options['num_nodes']\n ivcomp = self.add_subsystem('const_settings', IndepVarComp(), promotes_outputs=[\"*\"])\n # set CL = 0.1 for the ground roll per Raymer's book\n ivcomp.add_output('fltcond|CL', val=np.ones((nn,))*0.1)\n ivcomp.add_output('vr_vstall_mult',val=1.1)\n ivcomp.add_output('fltcond|h',val=np.zeros((nn,)),units='m')\n ivcomp.add_output('fltcond|vs',val=np.zeros((nn,)),units='m/s')\n ivcomp.add_output('zero_speed',val=2,units='m/s')\n\n\n flight_phase = self.options['flight_phase']\n if flight_phase == 'v0v1':\n ivcomp.add_output('braking',val=np.ones((nn,))*0.03)\n ivcomp.add_output('propulsor_active',val=np.ones((nn,)))\n ivcomp.add_output('throttle',val=np.ones((nn,)))\n zero_start = True\n elif flight_phase == 'v1vr':\n ivcomp.add_output('braking',val=np.ones((nn,))*0.03)\n ivcomp.add_output('propulsor_active',val=np.zeros((nn,)))\n ivcomp.add_output('throttle',val=np.ones((nn,)))\n zero_start = False\n\n elif flight_phase == 'v1v0':\n ivcomp.add_output('braking',val=0.4*np.ones((nn,)))\n ivcomp.add_output('propulsor_active',val=np.zeros((nn,)))\n ivcomp.add_output('throttle',val=np.zeros((nn,)))\n zero_start=False\n\n self.add_subsystem('atmos', ComputeAtmosphericProperties(num_nodes=nn, true_airspeed_in=True), promotes_inputs=['*'], promotes_outputs=['*'])\n self.add_subsystem('gs',Groundspeeds(num_nodes=nn),promotes_inputs=['*'],promotes_outputs=['*'])\n # add the user-defined aircraft model\n self.add_subsystem('acmodel',self.options['aircraft_model'](num_nodes=nn,flight_phase=self.options['flight_phase']),promotes_inputs=['*'],promotes_outputs=['*'])\n\n self.add_subsystem('lift',Lift(num_nodes=nn), promotes_inputs=['*'],promotes_outputs=['*'])\n self.add_subsystem('stall',StallSpeed(),promotes_inputs=[('CLmax','ac|aero|CLmax_TO'),('weight','ac|weights|MTOW'),'ac|geom|wing|S_ref'],promotes_outputs=['*'])\n self.add_subsystem('vrspeed',ElementMultiplyDivideComp(output_name='takeoff|vr',input_names=['Vstall_eas','vr_vstall_mult'],input_units=['m/s',None]),promotes_inputs=['*'],promotes_outputs=['*'])\n\n\n self.add_subsystem('haccel',HorizontalAcceleration(num_nodes=nn), promotes_inputs=['*'],promotes_outputs=['*'])\n if flight_phase == 'v1v0':\n #unfortunately need to shoot backwards to avoid negative airspeeds\n #reverse the order of the accelerations so the last one is first (and make them negative)\n self.add_subsystem('flipaccel', FlipVectorComp(num_nodes=nn, units='m/s**2', negative=True), promotes_inputs=[('vec_in','accel_horiz')])\n #integrate the timesteps in reverse from near zero speed.\n ode_integ = self.add_subsystem('ode_integ', Integrator(num_nodes=nn, method='simpson', diff_units='s',time_setup='duration'), promotes_inputs=['*'], promotes_outputs=['*'])\n ode_integ.add_integrand('vel_q', units='m/s', rate_name='vel_dqdt', start_name='zero_speed', end_name='fltcond|Utrue_initial', lower=1.5) \n self.connect('flipaccel.vec_out','vel_dqdt')\n #flip the result of the reverse integration again so the flight condition is forward and consistent with everythign else\n 
self.add_subsystem('flipvel', FlipVectorComp(num_nodes=nn, units='m/s', negative=False), promotes_outputs=[('vec_out','fltcond|Utrue')])\n self.connect('vel_q','flipvel.vec_in')\n # now set the time step so that backwards shooting results in the correct 'initial' segment airspeed\n self.add_subsystem('v0constraint',BalanceComp(name='duration',units='s',eq_units='m/s',rhs_name='fltcond|Utrue_initial',lhs_name='takeoff|v1',val=10.,upper=100.,lower=1.),\n promotes_inputs=['*'],promotes_outputs=['duration'])\n else:\n # forward shooting for these acceleration segmentes\n ode_integ = self.add_subsystem('ode_integ', Integrator(num_nodes=nn, method='simpson', diff_units='s',time_setup='duration'), promotes_inputs=['*'], promotes_outputs=['*'])\n ode_integ.add_integrand('fltcond|Utrue', units='m/s', rate_name='accel_horiz', start_name='fltcond|Utrue_initial', end_name='fltcond|Utrue_final', lower=1.5)\n if flight_phase == 'v0v1':\n self.connect('zero_speed','fltcond|Utrue_initial')\n self.add_subsystem('v1constraint',BalanceComp(name='duration',units='s',eq_units='m/s',rhs_name='fltcond|Utrue_final',lhs_name='takeoff|v1',val=10.,upper=100.,lower=1.),\n promotes_inputs=['*'],promotes_outputs=['duration'])\n elif flight_phase == 'v1vr':\n self.add_subsystem('vrconstraint',BalanceComp(name='duration',units='s',eq_units='m/s',rhs_name='fltcond|Utrue_final',lhs_name='takeoff|vr',val=5.,upper=12.,lower=0.0),\n promotes_inputs=['*'],promotes_outputs=['duration'])\n\n if zero_start:\n ode_integ.add_integrand('range', rate_name='fltcond|groundspeed', units='m', zero_start=True)\n else:\n ode_integ.add_integrand('range', rate_name='fltcond|groundspeed', units='m')\n\nclass RotationPhase(oc.PhaseGroup):\n \"\"\"\n This group models the transition from ground roll to climb out during a takeoff\n using force balance in the vertical and horizontal directions.\n\n User-settable parameters include:\n throttle (default 100 percent)\n rolling friction coeff (default 0.03 for accelerating segments and 0.4 for braking)\n propulsor_active (default 1 for v0 to v1, 0 for v1 to vr and braking) to model engine failure\n altitude (fltcond|h)\n obstacle clearance hight (h_obs) default 35 feet per FAR 25\n Rotation CL/CLmax ratio (default 0.83)\n\n The BaseAircraftGroup object is passed in.\n The BaseAircraftGroup should be built to accept the following inputs\n and return the following outputs.\n The outputs should be promoted to the top level in the component.\n\n Inputs\n ------\n range : float\n Total distance travelled (vector, m)\n fltcond|h : float\n Altitude (vector, m)\n fltcond|vs : float\n Vertical speed (vector, m/s)\n fltcond|Ueas : float\n Equivalent airspeed (vector, m/s)\n fltcond|Utrue : float\n True airspeed (vector, m/s)\n fltcond|p : float\n Pressure (vector, Pa)\n fltcond|rho : float\n Density (vector, kg/m3)\n fltcond|T : float\n Temperature (vector, K)\n fltcond|q : float\n Dynamic pressure (vector, Pa)\n fltcond|CL : float\n Lift coefficient (vector, dimensionless)\n throttle : float\n Motor / propeller throttle setting scaled from 0 to 1 or slightly more (vector, dimensionless)\n propulsor_active : float\n If a multi-propulsor airplane, a failure condition should be modeled in the propulsion model by multiplying throttle by propulsor_active.\n It will generally be 1.0 unless a failure condition is being modeled, in which case it will be 0 (vector, dimensionless)\n braking : float\n Percentage brakes applied, from 0 to 1. 
Should not be applied in the air or nonphysical effects will result (vector, dimensionless)\n lift : float\n Lift force (vector, N)\n\n Outputs\n -------\n thrust : float\n Total thrust force produced by all propulsors (vector, N)\n drag : float\n Total drag force in the airplane axis produced by all sources of drag (vector, N)\n weight : float\n Weight (mass, really) of the airplane at each point in time. Generally will need to be integrated by Dymos as a state with a rate source (vector, kg)\n ac|geom|wing|S_ref\n Wing reference area (scalar, m**2)\n ac|aero|CLmax_TO\n CLmax with flaps in max takeoff position (scalar, dimensionless)\n ac|weights|MTOW\n Maximum takeoff weight (scalar, kg)\n \"\"\"\n\n def initialize(self):\n self.options.declare('num_nodes',default=1)\n self.options.declare('flight_phase',default=None)\n self.options.declare('aircraft_model',default=None)\n\n def setup(self):\n nn = self.options['num_nodes']\n ivcomp = self.add_subsystem('const_settings', IndepVarComp(), promotes_outputs=[\"*\"])\n ivcomp.add_output('CL_rotate_mult', val=np.ones((nn,))*0.83)\n ivcomp.add_output('h_obs', val=35, units='ft')\n flight_phase = self.options['flight_phase']\n if flight_phase == 'rotate':\n ivcomp.add_output('braking',val=np.zeros((nn,)))\n ivcomp.add_output('propulsor_active',val=np.zeros((nn,)))\n ivcomp.add_output('throttle',val=np.ones((nn,)))\n\n self.add_subsystem('atmos', ComputeAtmosphericProperties(num_nodes=nn, true_airspeed_in=True), promotes_inputs=['*'], promotes_outputs=['*'])\n self.add_subsystem('gs',Groundspeeds(num_nodes=nn),promotes_inputs=['*'],promotes_outputs=['*'])\n clcomp = self.add_subsystem('clcomp',ElementMultiplyDivideComp(output_name='fltcond|CL', input_names=['CL_rotate_mult','ac|aero|CLmax_TO'],\n vec_size=[nn,1], length=1),\n promotes_inputs=['*'], promotes_outputs=['*'])\n self.add_subsystem('acmodel',self.options['aircraft_model'](num_nodes=nn,flight_phase=self.options['flight_phase']),promotes_inputs=['*'],promotes_outputs=['*'])\n\n\n self.add_subsystem('lift',Lift(num_nodes=nn), promotes_inputs=['*'],promotes_outputs=['*'])\n self.add_subsystem('haccel',HorizontalAcceleration(num_nodes=nn), promotes_inputs=['*'],promotes_outputs=['*'])\n self.add_subsystem('vaccel',VerticalAcceleration(num_nodes=nn), promotes_inputs=['*'],promotes_outputs=['*'])\n \n # TODO always starts from zero altitude\n self.add_subsystem('clear_obstacle',BalanceComp(name='duration',units='s',val=1,eq_units='m',rhs_name='fltcond|h_final',lhs_name='h_obs',lower=0.1,upper=15),\n promotes_inputs=['*'],promotes_outputs=['duration'])\n int1 = self.add_subsystem('intvelocity', Integrator(num_nodes=nn, method='simpson',diff_units='s',time_setup='duration'), promotes_outputs=['*'], promotes_inputs=['*'])\n int1.add_integrand('fltcond|Utrue', rate_name='accel_horiz', units='m/s', lower=0.1)\n int2 = self.add_subsystem('intrange', Integrator(num_nodes=nn, method='simpson',diff_units='s',time_setup='duration'), promotes_outputs=['*'], promotes_inputs=['*'])\n int2.add_integrand('range', rate_name='fltcond|groundspeed', units='m') \n int3 = self.add_subsystem('intvs', Integrator(num_nodes=nn, method='simpson',diff_units='s',time_setup='duration'), promotes_outputs=['*'], promotes_inputs=['*'])\n int3.add_integrand('fltcond|vs', rate_name='accel_vert', units='m/s', zero_start=True) \n int4 = self.add_subsystem('inth', Integrator(num_nodes=nn, method='simpson',diff_units='s',time_setup='duration'), promotes_outputs=['*'], promotes_inputs=['*'])\n int4.add_integrand('fltcond|h', 
rate_name='fltcond|vs', units='m', zero_start=True) \n\nclass SteadyFlightPhase(oc.PhaseGroup):\n \"\"\"\n This component group models steady flight conditions.\n Settable mission parameters include:\n Airspeed (fltcond|Ueas)\n Vertical speed (fltcond|vs)\n Duration of the segment (duration)\n\n Throttle is set automatically to ensure steady flight\n\n The BaseAircraftGroup object is passed in.\n The BaseAircraftGroup should be built to accept the following inputs\n and return the following outputs.\n The outputs should be promoted to the top level in the component.\n\n Inputs\n ------\n range : float\n Total distance travelled (vector, m)\n fltcond|h : float\n Altitude (vector, m)\n fltcond|vs : float\n Vertical speed (vector, m/s)\n fltcond|Ueas : float\n Equivalent airspeed (vector, m/s)\n fltcond|Utrue : float\n True airspeed (vector, m/s)\n fltcond|p : float\n Pressure (vector, Pa)\n fltcond|rho : float\n Density (vector, kg/m3)\n fltcond|T : float\n Temperature (vector, K)\n fltcond|q : float\n Dynamic pressure (vector, Pa)\n fltcond|CL : float\n Lift coefficient (vector, dimensionless)\n throttle : float\n Motor / propeller throttle setting scaled from 0 to 1 or slightly more (vector, dimensionless)\n propulsor_active : float\n If a multi-propulsor airplane, a failure condition should be modeled in the propulsion model by multiplying throttle by propulsor_active.\n It will generally be 1.0 unless a failure condition is being modeled, in which case it will be 0 (vector, dimensionless)\n braking : float\n Brake friction coefficient (default 0.4 for dry runway braking, 0.03 for resistance unbraked)\n Should not be applied in the air or nonphysical effects will result (vector, dimensionless)\n lift : float\n Lift force (vector, N)\n\n Outputs\n -------\n thrust : float\n Total thrust force produced by all propulsors (vector, N)\n drag : float\n Total drag force in the airplane axis produced by all sources of drag (vector, N)\n weight : float\n Weight (mass, really) of the airplane at each point in time. (vector, kg)\n ac|geom|wing|S_ref\n Wing reference area (scalar, m**2)\n ac|aero|CLmax_TO\n CLmax with flaps in max takeoff position (scalar, dimensionless)\n ac|weights|MTOW\n Maximum takeoff weight (scalar, kg)\n \"\"\"\n def initialize(self):\n self.options.declare('num_nodes',default=1)\n self.options.declare('flight_phase',default=None,desc='Phase of flight e.g. 
v0v1, cruise')\n self.options.declare('aircraft_model',default=None)\n\n def setup(self):\n nn = self.options['num_nodes']\n ivcomp = self.add_subsystem('const_settings', IndepVarComp(), promotes_outputs=[\"*\"])\n ivcomp.add_output('propulsor_active', val=np.ones(nn))\n ivcomp.add_output('braking', val=np.zeros(nn))\n ivcomp.add_output('fltcond|Ueas',val=np.ones((nn,))*90, units='m/s')\n ivcomp.add_output('fltcond|vs',val=np.ones((nn,))*1, units='m/s')\n ivcomp.add_output('zero_accel',val=np.zeros((nn,)),units='m/s**2')\n \n integ = self.add_subsystem('ode_integ', Integrator(num_nodes=nn, diff_units='s', time_setup='duration', method='simpson'), promotes_inputs=['fltcond|vs', 'fltcond|groundspeed'], promotes_outputs=['fltcond|h', 'range'])\n integ.add_integrand('fltcond|h', rate_name='fltcond|vs', val=1.0, units='m')\n self.add_subsystem('atmos', ComputeAtmosphericProperties(num_nodes=nn, true_airspeed_in=False), promotes_inputs=['*'], promotes_outputs=['*'])\n self.add_subsystem('gs',Groundspeeds(num_nodes=nn),promotes_inputs=['*'],promotes_outputs=['*'])\n # add the user-defined aircraft model\n self.add_subsystem('acmodel',self.options['aircraft_model'](num_nodes=nn, flight_phase=self.options['flight_phase']),promotes_inputs=['*'],promotes_outputs=['*'])\n self.add_subsystem('clcomp',SteadyFlightCL(num_nodes=nn), promotes_inputs=['*'],promotes_outputs=['*'])\n self.add_subsystem('lift',Lift(num_nodes=nn), promotes_inputs=['*'],promotes_outputs=['*'])\n self.add_subsystem('haccel',HorizontalAcceleration(num_nodes=nn), promotes_inputs=['*'],promotes_outputs=['*'])\n integ.add_integrand('range', rate_name='fltcond|groundspeed', val=1.0, units='m')\n self.add_subsystem('steadyflt',BalanceComp(name='throttle',val=np.ones((nn,))*0.5,lower=0.01,upper=2.0,units=None,normalize=False,eq_units='m/s**2',rhs_name='accel_horiz',lhs_name='zero_accel',rhs_val=np.zeros((nn,))),\n promotes_inputs=['accel_horiz','zero_accel'],promotes_outputs=['throttle'])\n\n# class OldSteadyFlightPhase(Group):\n# \"\"\"\n# This component group models steady flight conditions.\n# Settable mission parameters include:\n# Airspeed (fltcond|Ueas)\n# Vertical speed (fltcond|vs)\n# Duration of the segment (duration)\n\n# Throttle is set automatically to ensure steady flight\n\n# The BaseAircraftGroup object is passed in.\n# The BaseAircraftGroup should be built to accept the following inputs\n# and return the following outputs.\n# The outputs should be promoted to the top level in the component.\n\n# Inputs\n# ------\n# range : float\n# Total distance travelled (vector, m)\n# fltcond|h : float\n# Altitude (vector, m)\n# fltcond|vs : float\n# Vertical speed (vector, m/s)\n# fltcond|Ueas : float\n# Equivalent airspeed (vector, m/s)\n# fltcond|Utrue : float\n# True airspeed (vector, m/s)\n# fltcond|p : float\n# Pressure (vector, Pa)\n# fltcond|rho : float\n# Density (vector, kg/m3)\n# fltcond|T : float\n# Temperature (vector, K)\n# fltcond|q : float\n# Dynamic pressure (vector, Pa)\n# fltcond|CL : float\n# Lift coefficient (vector, dimensionless)\n# throttle : float\n# Motor / propeller throttle setting scaled from 0 to 1 or slightly more (vector, dimensionless)\n# propulsor_active : float\n# If a multi-propulsor airplane, a failure condition should be modeled in the propulsion model by multiplying throttle by propulsor_active.\n# It will generally be 1.0 unless a failure condition is being modeled, in which case it will be 0 (vector, dimensionless)\n# braking : float\n# Brake friction coefficient (default 0.4 for dry runway 
braking, 0.03 for resistance unbraked)\n# Should not be applied in the air or nonphysical effects will result (vector, dimensionless)\n# lift : float\n# Lift force (vector, N)\n\n# Outputs\n# -------\n# thrust : float\n# Total thrust force produced by all propulsors (vector, N)\n# drag : float\n# Total drag force in the airplane axis produced by all sources of drag (vector, N)\n# weight : float\n# Weight (mass, really) of the airplane at each point in time. (vector, kg)\n# ac|geom|wing|S_ref\n# Wing reference area (scalar, m**2)\n# ac|aero|CLmax_TO\n# CLmax with flaps in max takeoff position (scalar, dimensionless)\n# ac|weights|MTOW\n# Maximum takeoff weight (scalar, kg)\n# \"\"\"\n# def initialize(self):\n# self.options.declare('num_nodes',default=1)\n# self.options.declare('flight_phase',default=None,desc='Phase of flight e.g. v0v1, cruise')\n# self.options.declare('aircraft_model',default=None)\n\n# def setup(self):\n# nn = self.options['num_nodes']\n# ivcomp = self.add_subsystem('const_settings', IndepVarComp(), promotes_outputs=[\"*\"])\n# ivcomp.add_output('propulsor_active', val=np.ones(nn))\n# ivcomp.add_output('braking', val=np.zeros(nn))\n# ivcomp.add_output('fltcond|Ueas',val=np.ones((nn,))*90, units='m/s')\n# ivcomp.add_output('fltcond|vs',val=np.ones((nn,))*1, units='m/s')\n# ivcomp.add_output('zero_accel',val=np.zeros((nn,)),units='m/s**2')\n \n# self.add_subsystem('inth',Integrator(num_nodes=nn, method='simpson', quantity_units='m', diff_units='s', time_setup='duration'),\n# promotes_inputs=[('dqdt','fltcond|vs'),'duration',('q_initial','fltcond|h_initial')],promotes_outputs=[('q','fltcond|h'),('q_final','fltcond|h_final')])\n# self.add_subsystem('atmos', ComputeAtmosphericProperties(num_nodes=nn, true_airspeed_in=False), promotes_inputs=['*'], promotes_outputs=['*'])\n# self.add_subsystem('gs',Groundspeeds(num_nodes=nn),promotes_inputs=['*'],promotes_outputs=['*'])\n# # add the user-defined aircraft model\n# self.add_subsystem('acmodel',self.options['aircraft_model'](num_nodes=nn, flight_phase=self.options['flight_phase']),promotes_inputs=['*'],promotes_outputs=['*'])\n# self.add_subsystem('clcomp',SteadyFlightCL(num_nodes=nn), promotes_inputs=['*'],promotes_outputs=['*'])\n# self.add_subsystem('lift',Lift(num_nodes=nn), promotes_inputs=['*'],promotes_outputs=['*'])\n# self.add_subsystem('haccel',HorizontalAcceleration(num_nodes=nn), promotes_inputs=['*'],promotes_outputs=['*'])\n\n# self.add_subsystem('intrange',Integrator(num_nodes=nn, method='simpson', quantity_units='m', diff_units='s', time_setup='duration'),\n# promotes_inputs=[('dqdt','fltcond|groundspeed'),'duration',('q_initial','range_initial')],promotes_outputs=[('q','range'),('q_final','range_final')])\n\n\n# self.add_subsystem('steadyflt',BalanceComp(name='throttle',val=np.ones((nn,))*0.5,lower=0.01,upper=2.0,units=None,normalize=False,eq_units='m/s**2',rhs_name='accel_horiz',lhs_name='zero_accel',rhs_val=np.zeros((nn,))),\n# promotes_inputs=['accel_horiz','zero_accel'],promotes_outputs=['throttle'])\n\nclass ClimbAnglePhase(Group):\n \"\"\"\n This component checks the climb angle for a\n single flight condition at the V2 speed. 
No integration is performed.\n\n User settable parameter includes the V2/Vstall multiple (default 1.2)\n\n Useful for ensuring all-engine climb gradients in optimization.\n Choose flight_phase = AllEngineClimbAngle or EngineOutClimbAngle\n to set the propulsor_active property correctly.\n\n Inputs\n ------\n range : float\n Total distance travelled (vector, m)\n fltcond|h : float\n Altitude (vector, m)\n fltcond|vs : float\n Vertical speed (vector, m/s)\n fltcond|Ueas : float\n Equivalent airspeed (vector, m/s)\n fltcond|Utrue : float\n True airspeed (vector, m/s)\n fltcond|p : float\n Pressure (vector, Pa)\n fltcond|rho : float\n Density (vector, kg/m3)\n fltcond|T : float\n Temperature (vector, K)\n fltcond|q : float\n Dynamic pressure (vector, Pa)\n fltcond|CL : float\n Lift coefficient (vector, dimensionless)\n throttle : float\n Motor / propeller throttle setting scaled from 0 to 1 or slightly more (vector, dimensionless)\n propulsor_active : float\n If a multi-propulsor airplane, a failure condition should be modeled in the propulsion model by multiplying throttle by propulsor_active.\n It will generally be 1.0 unless a failure condition is being modeled, in which case it will be 0 (vector, dimensionless)\n lift : float\n Lift force (vector, N)\n\n Outputs\n -------\n thrust : float\n Total thrust force produced by all propulsors (vector, N)\n drag : float\n Total drag force in the airplane axis produced by all sources of drag (vector, N)\n weight : float\n Weight (mass, really) of the airplane at each point in time. Generally will need to be integrated by Dymos as a state with a rate source (vector, kg)\n ac|geom|wing|S_ref\n Wing reference area (scalar, m**2)\n ac|aero|CLmax_TO\n CLmax with flaps in max takeoff position (scalar, dimensionless)\n ac|weights|MTOW\n Maximum takeoff weight (scalar, kg)\n \"\"\"\n\n def initialize(self):\n self.options.declare('num_nodes',default=1)\n self.options.declare('flight_phase',default=None,desc='Phase of flight e.g. 
v0v1, cruise')\n self.options.declare('aircraft_model',default=None)\n\n def setup(self):\n nn = self.options['num_nodes']\n ivcomp = self.add_subsystem('const_settings', IndepVarComp(), promotes_outputs=[\"*\"])\n ivcomp.add_output('v2_vstall_mult',val=1.2)\n ivcomp.add_output('fltcond|h',val=np.zeros((nn,)),units='m')\n ivcomp.add_output('fltcond|cosgamma', val=np.ones((nn,)))\n\n flight_phase = self.options['flight_phase']\n if flight_phase == 'AllEngineClimbAngle':\n ivcomp.add_output('propulsor_active',val=np.ones((nn,)))\n ivcomp.add_output('throttle',val=np.ones((nn,)))\n elif flight_phase == 'EngineOutClimbAngle':\n ivcomp.add_output('propulsor_active',val=np.zeros((nn,)))\n ivcomp.add_output('throttle',val=np.ones((nn,)))\n self.add_subsystem('stall',StallSpeed(),promotes_inputs=[('CLmax','ac|aero|CLmax_TO'),('weight','ac|weights|MTOW'),'ac|geom|wing|S_ref'],promotes_outputs=['*'])\n self.add_subsystem('vrspeed',ElementMultiplyDivideComp(output_name='takeoff|v2',input_names=['Vstall_eas','v2_vstall_mult'],input_units=['m/s',None]),promotes_inputs=['*'],promotes_outputs=['*'])\n self.add_subsystem('atmos', ComputeAtmosphericProperties(num_nodes=nn, true_airspeed_in=False), promotes_inputs=['*'], promotes_outputs=['*'])\n self.add_subsystem('clcomp',SteadyFlightCL(num_nodes=nn), promotes_inputs=[('weight','ac|weights|MTOW'),'fltcond|*','ac|*'],promotes_outputs=['*'])\n self.connect('takeoff|v2','fltcond|Ueas')\n # the aircraft model needs to provide thrust and drag\n self.add_subsystem('acmodel',self.options['aircraft_model'](num_nodes=nn,flight_phase=self.options['flight_phase']),promotes_inputs=['*'],promotes_outputs=['*'])\n self.add_subsystem('climbangle',ClimbAngleComp(num_nodes=nn),promotes_inputs=['drag',('weight','ac|weights|MTOW'),'thrust'],promotes_outputs=['gamma'])\n\nclass TakeoffTransition(ExplicitComponent):\n \"\"\"\n Computes distance and altitude at end of circular transition.\n\n Based on TO distance analysis method in Raymer book.\n Obstacle clearance height set for GA / Part 23 aircraft\n Override for analyzing Part 25 aircraft\n\n Inputs\n ------\n fltcond|Utrue\n Transition true airspeed (generally avg of vr and v2) (scalar, m/s)\n gamma : float\n Climb out flight path angle (scalar, rad)\n\n Outputs\n -------\n s_transition : float\n Horizontal distance during transition to v2 climb out (scalar, m)\n h_transition : float\n Altitude at transition point (scalar, m)\n t_transition : float\n Elapsed time in transition (scalar, s)\n\n Options\n -------\n h_obstacle : float\n Obstacle height to clear (in **meters**) (default 10.66, equiv. 
35 ft)\n load_factor : float\n Load factor during rotation and transition (default 1.2 from Raymer book)\n \"\"\"\n\n def initialize(self):\n self.options.declare('h_obstacle',default=10.66,desc='Obstacle clearance height in m')\n self.options.declare('load_factor', default=1.2, desc='Load factor during circular arc transition')\n def setup(self):\n self.add_input('fltcond|Utrue', units='m/s', src_indices=0)\n self.add_input('gamma', units='rad', src_indices=0)\n self.add_output('s_transition', units='m')\n self.add_output('h_transition', units='m')\n self.add_output('t_transition',units='s')\n self.declare_partials(['s_transition','h_transition','t_transition'], ['fltcond|Utrue','gamma'])\n\n def compute(self, inputs, outputs):\n hobs = self.options['h_obstacle']\n nfactor = self.options['load_factor'] - 1\n g = 9.80665 #m/s^2\n gam = inputs['gamma']\n ut = inputs['fltcond|Utrue']\n\n R = ut**2/nfactor/g\n st = R*np.sin(gam)\n ht = R*(1-np.cos(gam))\n #alternate formula if the obstacle is cleared during transition\n if ht > hobs:\n st = np.sqrt(R**2-(R-hobs)**2)\n ht = hobs\n outputs['s_transition'] = st\n outputs['h_transition'] = ht\n outputs['t_transition'] = st / ut\n\n def compute_partials(self, inputs, J):\n hobs = self.options['h_obstacle']\n nfactor = self.options['load_factor'] - 1\n g = 9.80665 #m/s^2\n gam = inputs['gamma']\n ut = inputs['fltcond|Utrue']\n R = ut**2/nfactor/g\n dRdut = 2*ut/nfactor/g\n st = R*np.sin(gam)\n ht = R*(1-np.cos(gam))\n #alternate formula if the obstacle is cleared during transition\n if ht > hobs:\n st = np.sqrt(R**2-(R-hobs)**2)\n dstdut = 1/2/np.sqrt(R**2-(R-hobs)**2) * (2*R*dRdut - 2*(R-hobs)*dRdut)\n dstdgam = 0\n dhtdut = 0\n dhtdgam = 0\n else:\n dhtdut = dRdut*(1-np.cos(gam))\n dhtdgam = R*np.sin(gam)\n dstdut = dRdut*np.sin(gam)\n dstdgam = R*np.cos(gam)\n J['s_transition','gamma'] = dstdgam\n J['s_transition','fltcond|Utrue'] = dstdut\n J['h_transition','gamma'] = dhtdgam\n J['h_transition','fltcond|Utrue'] = dhtdut\n J['t_transition','gamma'] = dstdgam / ut\n J['t_transition','fltcond|Utrue'] = (dstdut * ut - st) / ut ** 2\n\nclass TakeoffClimb(ExplicitComponent):\n \"\"\"\n Computes ground distance from end of transition until obstacle is cleared.\n\n Analysis based on Raymer book.\n\n Inputs\n ------\n gamma : float\n Climb out flight path angle (scalar, rad)\n h_transition : float\n Altitude at transition point (scalar, m)\n\n Outputs\n -------\n s_climb : float\n Horizontal distance from end of transition until obstacle is cleared (scalar, m)\n\n Options\n -------\n h_obstacle : float\n Obstacle height to clear (in **meters**) (default 10.66, equiv. 
35 ft)\n \"\"\"\n\n def initialize(self):\n self.options.declare('h_obstacle',default=10.66,desc='Obstacle clearance height in m')\n def setup(self):\n self.add_input('h_transition', units='m')\n self.add_input('gamma', units='rad',src_indices=-1)\n self.add_input('fltcond|Utrue', units='m/s',src_indices=-1)\n\n self.add_output('s_climb', units='m')\n self.add_output('t_climb', units='s')\n self.declare_partials(['s_climb'], ['h_transition','gamma'])\n self.declare_partials(['t_climb'], ['h_transition','gamma','fltcond|Utrue'])\n\n def compute(self, inputs, outputs):\n hobs = self.options['h_obstacle']\n gam = inputs['gamma']\n ht = inputs['h_transition']\n ut = inputs['fltcond|Utrue']\n sc = (hobs-ht)/np.tan(gam)\n outputs['s_climb'] = sc\n outputs['t_climb'] = sc / ut\n\n def compute_partials(self, inputs, J):\n hobs = self.options['h_obstacle']\n gam = inputs['gamma']\n ht = inputs['h_transition']\n ut = inputs['fltcond|Utrue']\n sc = (hobs-ht)/np.tan(gam)\n J['s_climb','gamma'] = -(hobs-ht)/np.tan(gam)**2 * (1/np.cos(gam))**2\n J['s_climb','h_transition'] = -1/np.tan(gam)\n J['t_climb','gamma'] = J['s_climb','gamma'] / ut\n J['t_climb','h_transition'] = J['s_climb','h_transition'] / ut\n J['t_climb','fltcond|Utrue'] = - sc / ut ** 2\n\n\nclass RobustRotationPhase(oc.PhaseGroup):\n \"\"\"\n This adds general mission analysis capabilities to an existing airplane model.\n The BaseAircraftGroup object is passed in. It should be built to accept the following inputs and return the following outputs.\n The outputs should be promoted to the top level in the component.\n\n Inputs\n ------\n range : float\n Total distance travelled (vector, m)\n fltcond|h : float\n Altitude (vector, m)\n fltcond|vs : float\n Vertical speed (vector, m/s)\n fltcond|Ueas : float\n Equivalent airspeed (vector, m/s)\n fltcond|Utrue : float\n True airspeed (vector, m/s)\n fltcond|p : float\n Pressure (vector, Pa)\n fltcond|rho : float\n Density (vector, kg/m3)\n fltcond|T : float\n Temperature (vector, K)\n fltcond|q : float\n Dynamic pressure (vector, Pa)\n fltcond|CL : float\n Lift coefficient (vector, dimensionless)\n throttle : float\n Motor / propeller throttle setting scaled from 0 to 1 or slightly more (vector, dimensionless)\n propulsor_active : float\n If a multi-propulsor airplane, a failure condition should be modeled in the propulsion model by multiplying throttle by propulsor_active.\n It will generally be 1.0 unless a failure condition is being modeled, in which case it will be 0 (vector, dimensionless)\n braking : float\n Percentage brakes applied, from 0 to 1. Should not be applied in the air or nonphysical effects will result (vector, dimensionless)\n lift : float\n Lift force (vector, N)\n\n Outputs\n -------\n thrust : float\n Total thrust force produced by all propulsors (vector, N)\n drag : float\n Total drag force in the airplane axis produced by all sources of drag (vector, N)\n weight : float\n Weight (mass, really) of the airplane at each point in time. Generally will need to be integrated by Dymos as a state with a rate source (vector, kg)\n ac|geom|wing|S_ref\n Wing reference area (scalar, m**2)\n ac|aero|CLmax_TO\n CLmax with flaps in max takeoff position (scalar, dimensionless)\n ac|weights|MTOW\n Maximum takeoff weight (scalar, kg)\n \"\"\"\n\n def initialize(self):\n self.options.declare('num_nodes',default=1)\n self.options.declare('flight_phase',default=None,desc='Phase of flight e.g. 
v0v1, cruise')\n self.options.declare('aircraft_model',default=None)\n self.options.declare('h_obstacle',default=10.66, )\n\n def setup(self):\n nn = self.options['num_nodes']\n ivcomp = self.add_subsystem('const_settings', IndepVarComp(), promotes_outputs=[\"*\"])\n flight_phase = self.options['flight_phase']\n if flight_phase == 'rotate':\n ivcomp.add_output('braking',val=np.zeros((nn,)))\n ivcomp.add_output('propulsor_active',val=np.zeros((nn,)))\n ivcomp.add_output('throttle',val=np.ones((nn,)))\n # flight conditions are sea level takeoff, transition speed\n # split off a single node to compute climb angle\n # compute the transition distance and add it to range_initial\n # compute the transition time as a function of the groundspeed\n # provide transition time as duration\n ivcomp.add_output('v2_vstall_mult',val=1.2)\n ivcomp.add_output('vr_vstall_mult',val=1.1)\n ivcomp.add_output('fltcond|vs', val=np.zeros((nn,)),units='m/s')\n ivcomp.add_output('fltcond|cosgamma', val=np.ones((nn,)),units=None)\n\n\n\n ivcomp.add_output('h_obstacle',val=35,units='ft')\n\n self.add_subsystem('altitudes',LinearInterpolator(num_nodes=nn, units='m'),promotes_inputs=[('start_val','h_initial')],promotes_outputs=[('vec','fltcond|h')])\n self.connect('h_obstacle','altitudes.end_val')\n\n self.add_subsystem('stall',StallSpeed(),promotes_inputs=[('CLmax','ac|aero|CLmax_TO'),('weight','ac|weights|MTOW'),'ac|geom|wing|S_ref'],promotes_outputs=['*'])\n self.add_subsystem('vrspeed',ElementMultiplyDivideComp(output_name='takeoff|vr',input_names=['Vstall_eas','vr_vstall_mult'],input_units=['m/s',None]),promotes_inputs=['*'],promotes_outputs=['*'])\n self.add_subsystem('v2speed',ElementMultiplyDivideComp(output_name='takeoff|v2',input_names=['Vstall_eas','v2_vstall_mult'],input_units=['m/s',None]),promotes_inputs=['*'],promotes_outputs=['*'])\n self.add_subsystem('speeds',LinearInterpolator(num_nodes=nn,units='kn'),promotes_inputs=[('start_val','takeoff|vr'),('end_val','takeoff|v2')],promotes_outputs=[('vec','fltcond|Ueas')])\n self.add_subsystem('atmos', ComputeAtmosphericProperties(num_nodes=nn, true_airspeed_in=False), promotes_inputs=['*'], promotes_outputs=['*'])\n # pretty confident there's a simpler closed form multiple for CL at v2\n self.add_subsystem('clcomp',SteadyFlightCL(num_nodes=nn), promotes_inputs=['weight','fltcond|*','ac|*'],promotes_outputs=['*'])\n # the aircraft model needs to provide thrust and drag\n self.add_subsystem('acmodel',self.options['aircraft_model'](num_nodes=nn,flight_phase=self.options['flight_phase']),promotes_inputs=['*'],promotes_outputs=['*'])\n self.add_subsystem('climbangle',ClimbAngleComp(num_nodes=nn),promotes_inputs=['drag','weight','thrust'],promotes_outputs=['gamma'])\n self.add_subsystem('transition',TakeoffTransition(),promotes_inputs=['fltcond|Utrue','gamma'],promotes_outputs=['h_transition','s_transition','t_transition'])\n self.add_subsystem('v2climb',TakeoffClimb(),promotes_inputs=['h_transition','gamma','fltcond|Utrue'],promotes_outputs=['s_climb','t_climb'])\n self.add_subsystem('tod_final',AddSubtractComp(output_name='range_final',input_names=['range_initial','s_transition','s_climb'],units='m'),promotes_inputs=['*'],promotes_outputs=['*'])\n self.add_subsystem('duration',AddSubtractComp(output_name='duration',input_names=['t_transition','t_climb'],units='s'),promotes_inputs=['*'],promotes_outputs=['*'])\n 
self.add_subsystem('h_final',AddSubtractComp(output_name='fltcond|h_final',input_names=['h_obstacle'],units='m'),promotes_inputs=['*'],promotes_outputs=['*'])\n self.add_subsystem('ranges',LinearInterpolator(num_nodes=nn,units='m'),promotes_inputs=[('start_val','range_initial'),('end_val','range_final')],promotes_outputs=[('vec','range')])\n\n\n\n"
] | [
[
"numpy.ones",
"numpy.arcsin",
"numpy.zeros",
"numpy.less",
"numpy.cos",
"numpy.arange",
"numpy.clip",
"numpy.tan",
"numpy.flip",
"numpy.isnan",
"numpy.sqrt",
"numpy.sin"
]
] |
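The `TakeoffTransition` component in the row above implements the Raymer-style circular-arc transition: with load factor n, the arc radius is R = V²/((n−1)·g), the ground distance is s = R·sin γ and the altitude gained is h = R·(1 − cos γ), switching to s = √(R² − (R − h_obs)²) once the obstacle height is cleared mid-arc. The following is a minimal standalone sketch of only that arithmetic, outside of OpenMDAO; the function name and the sample airspeed/angle are illustrative assumptions, not values taken from the dataset.

```python
import numpy as np

def takeoff_transition(u_true, gamma, h_obstacle=10.66, load_factor=1.2, g=9.80665):
    """Raymer-style circular-arc takeoff transition (SI units).

    u_true     : transition true airspeed, m/s
    gamma      : climb-out flight path angle, rad
    h_obstacle : obstacle clearance height, m (10.66 m ~ 35 ft)
    Returns (s_transition, h_transition, t_transition).
    """
    R = u_true ** 2 / ((load_factor - 1.0) * g)   # radius of the circular arc
    s = R * np.sin(gamma)                         # horizontal distance along the arc
    h = R * (1.0 - np.cos(gamma))                 # altitude gained on the arc
    if h > h_obstacle:
        # obstacle cleared during the arc: end the transition at h_obstacle
        s = np.sqrt(R ** 2 - (R - h_obstacle) ** 2)
        h = h_obstacle
    return s, h, s / u_true

# illustrative numbers only: 55 m/s true airspeed at a 7 degree climb angle
print(takeoff_transition(55.0, np.radians(7.0)))
```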
jialeli1/From-Voxel-to-Point | [
"b4dba9c4e9cd83e04199d9224f6ec7bf06b71f93"
] | [
"pcdet/utils/loss_utils.py"
] | [
"import numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\n\nfrom . import box_utils\nfrom . import center_utils\n\ntry:\n from itertools import ifilterfalse\nexcept ImportError: # py3k\n from itertools import filterfalse as ifilterfalse\n\n\n\nclass SigmoidFocalClassificationLoss(nn.Module):\n \"\"\"\n Sigmoid focal cross entropy loss.\n \"\"\"\n\n def __init__(self, gamma: float = 2.0, alpha: float = 0.25):\n \"\"\"\n Args:\n gamma: Weighting parameter to balance loss for hard and easy examples.\n alpha: Weighting parameter to balance loss for positive and negative examples.\n \"\"\"\n super(SigmoidFocalClassificationLoss, self).__init__()\n self.alpha = alpha\n self.gamma = gamma\n\n @staticmethod\n def sigmoid_cross_entropy_with_logits(input: torch.Tensor, target: torch.Tensor):\n \"\"\" PyTorch Implementation for tf.nn.sigmoid_cross_entropy_with_logits:\n max(x, 0) - x * z + log(1 + exp(-abs(x))) in\n https://www.tensorflow.org/api_docs/python/tf/nn/sigmoid_cross_entropy_with_logits\n\n Args:\n input: (B, #anchors, #classes) float tensor.\n Predicted logits for each class\n target: (B, #anchors, #classes) float tensor.\n One-hot encoded classification targets\n\n Returns:\n loss: (B, #anchors, #classes) float tensor.\n Sigmoid cross entropy loss without reduction\n \"\"\"\n loss = torch.clamp(input, min=0) - input * target + \\\n torch.log1p(torch.exp(-torch.abs(input)))\n return loss\n\n def forward(self, input: torch.Tensor, target: torch.Tensor, weights: torch.Tensor):\n \"\"\"\n Args:\n input: (B, #anchors, #classes) float tensor.\n Predicted logits for each class\n target: (B, #anchors, #classes) float tensor.\n One-hot encoded classification targets\n weights: (B, #anchors) float tensor.\n Anchor-wise weights.\n\n Returns:\n weighted_loss: (B, #anchors, #classes) float tensor after weighting.\n \"\"\"\n pred_sigmoid = torch.sigmoid(input)\n alpha_weight = target * self.alpha + (1 - target) * (1 - self.alpha)\n pt = target * (1.0 - pred_sigmoid) + (1.0 - target) * pred_sigmoid\n focal_weight = alpha_weight * torch.pow(pt, self.gamma)\n\n bce_loss = self.sigmoid_cross_entropy_with_logits(input, target)\n\n loss = focal_weight * bce_loss\n\n if weights.shape.__len__() == 2 or \\\n (weights.shape.__len__() == 1 and target.shape.__len__() == 2):\n weights = weights.unsqueeze(-1)\n\n assert weights.shape.__len__() == loss.shape.__len__()\n\n return loss * weights\n\n\nclass WeightedSmoothL1Loss(nn.Module):\n \"\"\"\n Code-wise Weighted Smooth L1 Loss modified based on fvcore.nn.smooth_l1_loss\n https://github.com/facebookresearch/fvcore/blob/master/fvcore/nn/smooth_l1_loss.py\n | 0.5 * x ** 2 / beta if abs(x) < beta\n smoothl1(x) = |\n | abs(x) - 0.5 * beta otherwise,\n where x = input - target.\n \"\"\"\n def __init__(self, beta: float = 1.0 / 9.0, code_weights: list = None):\n \"\"\"\n Args:\n beta: Scalar float.\n L1 to L2 change point.\n For beta values < 1e-5, L1 loss is computed.\n code_weights: (#codes) float list if not None.\n Code-wise weights.\n \"\"\"\n super(WeightedSmoothL1Loss, self).__init__()\n self.beta = beta\n if code_weights is not None:\n self.code_weights = np.array(code_weights, dtype=np.float32)\n self.code_weights = torch.from_numpy(self.code_weights).cuda()\n\n @staticmethod\n def smooth_l1_loss(diff, beta):\n if beta < 1e-5:\n loss = torch.abs(diff)\n else:\n n = torch.abs(diff)\n loss = torch.where(n < beta, 0.5 * n ** 2 / beta, n - 0.5 * beta)\n\n return loss\n\n def forward(self, input: 
torch.Tensor, target: torch.Tensor, weights: torch.Tensor = None):\n \"\"\"\n Args:\n input: (B, #anchors, #codes) float tensor.\n Ecoded predicted locations of objects.\n target: (B, #anchors, #codes) float tensor.\n Regression targets.\n weights: (B, #anchors) float tensor if not None.\n\n Returns:\n loss: (B, #anchors) float tensor.\n Weighted smooth l1 loss without reduction.\n \"\"\"\n target = torch.where(torch.isnan(target), input, target) # ignore nan targets\n\n diff = input - target\n # code-wise weighting\n if self.code_weights is not None:\n diff = diff * self.code_weights.view(1, 1, -1)\n\n loss = self.smooth_l1_loss(diff, self.beta)\n\n # anchor-wise weighting\n if weights is not None:\n assert weights.shape[0] == loss.shape[0] and weights.shape[1] == loss.shape[1]\n loss = loss * weights.unsqueeze(-1)\n\n return loss\n\n\nclass WeightedL1Loss(nn.Module):\n def __init__(self, code_weights: list = None):\n \"\"\"\n Args:\n code_weights: (#codes) float list if not None.\n Code-wise weights.\n \"\"\"\n super(WeightedL1Loss, self).__init__()\n if code_weights is not None:\n self.code_weights = np.array(code_weights, dtype=np.float32)\n self.code_weights = torch.from_numpy(self.code_weights).cuda()\n\n def forward(self, input: torch.Tensor, target: torch.Tensor, weights: torch.Tensor = None):\n \"\"\"\n Args:\n input: (B, #anchors, #codes) float tensor.\n Ecoded predicted locations of objects.\n target: (B, #anchors, #codes) float tensor.\n Regression targets.\n weights: (B, #anchors) float tensor if not None.\n\n Returns:\n loss: (B, #anchors) float tensor.\n Weighted smooth l1 loss without reduction.\n \"\"\"\n target = torch.where(torch.isnan(target), input, target) # ignore nan targets\n\n diff = input - target\n # code-wise weighting\n if self.code_weights is not None:\n diff = diff * self.code_weights.view(1, 1, -1)\n\n loss = torch.abs(diff)\n\n # anchor-wise weighting\n if weights is not None:\n assert weights.shape[0] == loss.shape[0] and weights.shape[1] == loss.shape[1]\n loss = loss * weights.unsqueeze(-1)\n\n return loss\n\n\nclass WeightedCrossEntropyLoss(nn.Module):\n \"\"\"\n Transform input to fit the fomation of PyTorch offical cross entropy loss\n with anchor-wise weighting.\n \"\"\"\n def __init__(self):\n super(WeightedCrossEntropyLoss, self).__init__()\n\n def forward(self, input: torch.Tensor, target: torch.Tensor, weights: torch.Tensor):\n \"\"\"\n Args:\n input: (B, #anchors, #classes) float tensor.\n Predited logits for each class.\n target: (B, #anchors, #classes) float tensor.\n One-hot classification targets.\n weights: (B, #anchors) float tensor.\n Anchor-wise weights.\n\n Returns:\n loss: (B, #anchors) float tensor.\n Weighted cross entropy loss without reduction\n \"\"\"\n input = input.permute(0, 2, 1)\n target = target.argmax(dim=-1)\n loss = F.cross_entropy(input, target, reduction='none') * weights\n return loss\n\n\ndef get_corner_loss_lidar(pred_bbox3d: torch.Tensor, gt_bbox3d: torch.Tensor):\n \"\"\"\n Args:\n pred_bbox3d: (N, 7) float Tensor.\n gt_bbox3d: (N, 7) float Tensor.\n\n Returns:\n corner_loss: (N) float Tensor.\n \"\"\"\n assert pred_bbox3d.shape[0] == gt_bbox3d.shape[0]\n\n pred_box_corners = box_utils.boxes_to_corners_3d(pred_bbox3d)\n gt_box_corners = box_utils.boxes_to_corners_3d(gt_bbox3d)\n\n # 这里flip的目的应该是忽略朝向,但实际上呢把朝向也纳入整体更好还是说它会造成不稳定呢?\n gt_bbox3d_flip = gt_bbox3d.clone()\n gt_bbox3d_flip[:, 6] += np.pi\n gt_box_corners_flip = box_utils.boxes_to_corners_3d(gt_bbox3d_flip)\n # (N, 8)\n corner_dist = 
torch.min(torch.norm(pred_box_corners - gt_box_corners, dim=2),\n torch.norm(pred_box_corners - gt_box_corners_flip, dim=2))\n # (N, 8)\n corner_loss = WeightedSmoothL1Loss.smooth_l1_loss(corner_dist, beta=1.0)\n\n return corner_loss.mean(dim=1)\n\n\n\n\ndef get_corner_loss_mse(pred_bbox3d: torch.Tensor, gt_bbox3d: torch.Tensor):\n \"\"\"\n Args:\n pred_bbox3d: (N, 7) float Tensor.\n gt_bbox3d: (N, 7) float Tensor.\n\n Returns:\n corner_loss: (1,) float scaler\n \"\"\"\n assert pred_bbox3d.shape[0] == gt_bbox3d.shape[0]\n\n # (N, 8, 3)\n pred_box_corners = box_utils.boxes_to_corners_3d(pred_bbox3d)\n gt_box_corners = box_utils.boxes_to_corners_3d(gt_bbox3d)\n # print('==> pred_box_corners[0, :, :]')\n # print(pred_box_corners[0,:,:])\n # print('==> gt_box_corners[0, :, :]')\n # print(gt_box_corners[0,:,:])\n # print('==> pred_box_corners[10, :, :]')\n # print(pred_box_corners[10,:,:])\n # print('==> gt_box_corners[10, :, :]')\n # print(gt_box_corners[10,:,:])\n # print('==> pred_box_corners[100, :, :]')\n # print(pred_box_corners[100,:,:])\n # print('==> gt_box_corners[100, :, :]')\n # print(gt_box_corners[100,:,:])\n\n # for each box, mean by 8 corners.\n corner_loss_x = F.mse_loss(input=pred_box_corners[:,:,0], target=gt_box_corners[:,:,0]) # (N, 8) -> (N)\n corner_loss_y = F.mse_loss(input=pred_box_corners[:,:,1], target=gt_box_corners[:,:,1]) # (N, 8) -> (N)\n corner_loss_z = F.mse_loss(input=pred_box_corners[:,:,2], target=gt_box_corners[:,:,2]) # (N, 8) -> (N)\n\n # xyz之间求和\n corner_loss = corner_loss_x + corner_loss_y + corner_loss_z\n\n return corner_loss \n\n\ndef get_iouscore_loss_bce(iou_preds, iou_gts, iou_fg_thresh=0.75, iou_bg_thresh=0.25):\n \"\"\"\n Args:\n iou_preds: (N,)\n iou_gts: (N, )\n Returns:\n loss_iouscore:\n \"\"\"\n # prepare the labels\n # now only for car class, 08132020\n\n # iou_preds = iou_preds.view(-1)\n # iou_gts = iou_gts.view(-1)\n\n # print('==> iou_preds.size()')\n # print(iou_preds.size())\n # print(torch.sigmoid(iou_preds))\n # print('==> iou_gts.size()')\n # print(iou_gts.size())\n # print(iou_gts)\n\n # CLS_FG_THRESH: 0.75\n # CLS_BG_THRESH: 0.25\n # iou_bg_thresh = self.roi_sampler_cfg.CLS_BG_THRESH\n # iou_fg_thresh = self.roi_sampler_cfg.CLS_FG_THRESH\n # iou_bg_thresh = 0.25\n # iou_fg_thresh = 0.75\n\n fg_mask = iou_gts > iou_fg_thresh\n bg_mask = iou_gts < iou_bg_thresh\n interval_mask = (fg_mask == 0) & (bg_mask == 0)\n \n iou_cls_labels = (fg_mask > 0).float()\n iou_cls_labels[interval_mask] = \\\n (iou_gts[interval_mask] - iou_bg_thresh) / (iou_fg_thresh - iou_bg_thresh)\n\n # print('==> iou_cls_labels')\n # print(iou_cls_labels.size())\n # print(iou_cls_labels[:50])\n \n # 这里CE是计算的整个范围的iou,但是最后求和的时候只计算了iou>=0这部分的。\n # 条件 iou_cls_labels >= 0 选出来了那些iou >= 0 的候选框。\n loss_ioucls = F.binary_cross_entropy(torch.sigmoid(iou_preds), iou_cls_labels.float(), reduction='none')\n cls_valid_mask = (iou_cls_labels >= 0).float()\n loss_iouscore = (loss_ioucls * cls_valid_mask).sum() / torch.clamp(cls_valid_mask.sum(), min=1.0)\n\n return loss_iouscore\n\n\n\ndef get_rot_binres_loss(pred_reg, reg_label, num_head_bin, get_ry_fine=False):\n \"\"\"\n Bin-based 3D bounding boxes regression loss. 
See https://arxiv.org/abs/1812.04244 for more details.\n \n :param pred_reg: (N, C)\n :param reg_label: (N, 1), ry\n :param num_head_bin: constant\n :param get_ry_fine: False\n :return:\n \"\"\"\n # print('==> pred_reg.size()')\n # print(pred_reg.size()) # should be (N, 24)\n\n reg_loss_dict = {}\n # angle loss\n start_offset = 0\n ry_bin_l, ry_bin_r = start_offset, start_offset + num_head_bin\n ry_res_l, ry_res_r = ry_bin_r, ry_bin_r + num_head_bin\n start_offset = ry_res_r\n ry_label = reg_label.squeeze(dim=-1)\n # print('==> reg_label[] in encode')\n # print(reg_label.size()) # should be (N, C)\n # print(reg_label[100:150])\n # print('==> ry_label[] in encode')\n # print(ry_label.size()) # should be (N,)\n # print(ry_label[100:150])\n if get_ry_fine:\n assert False, \"one-stage should not get_ry_fine.\"\n\n # divide pi/2 into several bins\n angle_per_class = (np.pi / 2) / num_head_bin\n\n ry_label = ry_label % (2 * np.pi) # 0 ~ 2pi\n opposite_flag = (ry_label > np.pi * 0.5) & (ry_label < np.pi * 1.5)\n ry_label[opposite_flag] = (ry_label[opposite_flag] + np.pi) % (2 * np.pi) # (0 ~ pi/2, 3pi/2 ~ 2pi)\n shift_angle = (ry_label + np.pi * 0.5) % (2 * np.pi) # (0 ~ pi)\n\n shift_angle = torch.clamp(shift_angle - np.pi * 0.25, min=1e-3, max=np.pi * 0.5 - 1e-3) # (0, pi/2)\n\n # bin center is (5, 10, 15, ..., 85)\n ry_bin_label = (shift_angle / angle_per_class).floor().long()\n ry_res_label = shift_angle - (ry_bin_label.float() * angle_per_class + angle_per_class / 2)\n ry_res_norm_label = ry_res_label / (angle_per_class / 2)\n\n else:\n # divide 2pi into several bins\n angle_per_class = (2 * np.pi) / num_head_bin\n heading_angle = ry_label % (2 * np.pi) # 0 ~ 2pi\n # print('==> heading_angle[] in encode')\n # print(heading_angle.size())\n # print(heading_angle[100:150])\n\n shift_angle = (heading_angle + angle_per_class / 2) % (2 * np.pi)\n ry_bin_label = (shift_angle / angle_per_class).floor().long()\n ry_res_label = shift_angle - (ry_bin_label.float() * angle_per_class + angle_per_class / 2)\n ry_res_norm_label = ry_res_label / (angle_per_class / 2)\n # print('==> ry_bin_label in encode')\n # print(ry_bin_label.size())\n # print(ry_bin_label[100:150])\n\n\n ry_bin_onehot = torch.cuda.FloatTensor(ry_bin_label.size(0), num_head_bin).zero_()\n ry_bin_onehot.scatter_(1, ry_bin_label.view(-1, 1).long(), 1)\n loss_ry_bin = F.cross_entropy(pred_reg[:, ry_bin_l:ry_bin_r], ry_bin_label)\n loss_ry_res = F.smooth_l1_loss((pred_reg[:, ry_res_l: ry_res_r] * ry_bin_onehot).sum(dim=1), ry_res_norm_label)\n\n reg_loss_dict['loss_ry_bin'] = loss_ry_bin.item()\n reg_loss_dict['loss_ry_res'] = loss_ry_res.item()\n angle_loss = loss_ry_bin + loss_ry_res\n # Total regression loss\n reg_loss_dict['loss_angle'] = angle_loss\n\n return angle_loss, reg_loss_dict\n\n\n\nclass CenterNetFocalLoss(nn.Module):\n '''nn.Module warpper for focal loss'''\n def __init__(self, gamma=4, alpha=2):\n super(CenterNetFocalLoss, self).__init__()\n # self.neg_loss = _neg_loss\n self.gamma = gamma\n self.alpha = alpha\n\n def _sigmoid(self, x):\n # y = torch.clamp(x.sigmoid_(), min=1e-4, max=1 - 1e-4)\n # dnnt use the replace version!\n y = torch.clamp(torch.sigmoid(x), min=1e-4, max=1 - 1e-4)\n\n # too small will cause loss nan.\n # y = torch.clamp(x.sigmoid_(), min=1e-12, max=1 - 1e-12)\n return y\n\n def _neg_loss(self, pred, gt):\n ''' Modified focal loss. Exactly the same as CornerNet.\n Runs faster and costs a little bit more memory\n Arguments:\n pred: (batch x c x h x w), do some clamp or not?. 
should be clampped already.\n gt: (batch x c x h x w)\n '''\n pos_inds = gt.eq(1).float()\n neg_inds = gt.lt(1).float()\n\n # neg_weights = torch.pow(1 - gt, 4)\n neg_weights = torch.pow(1 - gt, self.gamma)\n\n loss = 0\n\n # pos_loss = torch.log(pred) * torch.pow(1 - pred, 2) * pos_inds\n # neg_loss = torch.log(1 - pred) * torch.pow(pred, 2) * neg_weights * neg_inds\n pos_loss = torch.log(pred) * torch.pow(1 - pred, self.alpha) * pos_inds\n neg_loss = torch.log(1 - pred) * torch.pow(pred, self.alpha) * neg_weights * neg_inds\n\n num_pos = pos_inds.float().sum()\n\n pos_loss = pos_loss.sum()\n neg_loss = neg_loss.sum()\n\n if num_pos == 0:\n loss = loss - neg_loss\n else:\n loss = loss - (pos_loss + neg_loss) / num_pos\n return loss\n\n\n def forward(self, out, target):\n out_norm = self._sigmoid(out)\n\n return self._neg_loss(out_norm, target)\n\n\nclass CenterNetResLoss(nn.Module):\n def __init__(self, cfg):\n super(CenterNetResLoss, self).__init__()\n self.res_func_type = cfg['res_func']\n\n def forward(self, output, mask, ind, target):\n \"\"\"\n Args:\n output: torch.Size([B, C, 152, 152])\n mask: torch.Size([B, max_objs])\n ind: torch.Size([B, max_objs])\n target: torch.Size([B, max_objs, C])\n Returns:\n reduced and weighted loss term.\n \"\"\"\n pred = center_utils._transpose_and_gather_feat(output, ind) # (B, max_objs, C)\n\n # print('==> (ind != 0).float().sum(): ', (ind != 0).float().sum() )\n # print('==> mask.sum(): ', mask.sum() )\n\n if mask.sum():\n # 1. flatten.\n pred_flat = pred.view(-1, pred.shape[-1]) #(B*max_objs, C)\n target_flat = target.view(-1, target.shape[-1]) #(B*max_objs, C)\n mask_flat = mask.view(-1).bool() #(B*max_objs)\n # 2. valid select\n pred_valid = pred_flat[mask_flat] #(num_valid, C)\n target_valid = target_flat[mask_flat] #(num_valid, C)\n # 3. un-reduced loss term\n if self.res_func_type == 'smooth-l1':\n loss = F.smooth_l1_loss(pred_valid, target_valid, reduction='none')\n elif self.res_func_type == 'l1':\n loss = F.l1_loss(pred_valid, target_valid, reduction='none') \n elif self.res_func_type == 'balanced_l1':\n loss = get_balanced_l1_loss(pred_valid, target_valid)\n else:\n raise NotImplementedError \n\n # mean for num_obj_dims, sum for channel_dims\n # (num_valid, C) -> (C) -> ()\n loss = loss.mean(dim=0).sum() \n else:\n loss = 0.\n\n return loss\n\nclass CenterNetRotBinResLoss(nn.Module):\n def __init__(self, cfg):\n super(CenterNetRotBinResLoss, self).__init__()\n\n self.num_head_bin = cfg['num_bins']\n\n def forward(self, output, mask, ind, target):\n \"\"\"\n Args:\n output: torch.Size([B, C, 152, 152])\n mask: torch.Size([B, max_objs])\n ind: torch.Size([B, max_objs])\n target: torch.Size([B, max_objs, C])\n Returns:\n reduced and weighted loss term.\n \"\"\"\n pred = center_utils._transpose_and_gather_feat(output, ind) # torch.Size([1, 500, 2])\n\n if mask.sum():\n # 1. flatten\n pred_flat = pred.view(-1, pred.shape[-1]) # (B*max_objs, C)\n target_flat = target.view(-1, target.shape[-1]) # (B*max_objs, 1)\n mask_flat = mask.view(-1).bool() # (B*max_objs)\n # 2. valid select\n pred_valid = pred_flat[mask_flat] # (num_valid, C)\n target_valid = target_flat[mask_flat] # (num_valid, 1)\n\n # 3. 
return the reduced rot loss term.\n loss, _ = get_rot_binres_loss(pred_valid, target_valid, num_head_bin=self.num_head_bin)\n \n else:\n loss = 0.\n\n # print('==> loss in rot')\n # print(loss)\n return loss\n\n\n\n\ndef lovasz_softmax(probas, labels, classes='present', per_image=False, ignore=None):\n \"\"\"\n Multi-class Lovasz-Softmax loss\n NOTE probas should be applied with softmax.\n probas: [B, C, H, W] Variable, class probabilities at each prediction (between 0 and 1).\n Interpreted as binary (sigmoid) output with outputs of size [B, H, W].\n labels: [B, H, W] Tensor, ground truth labels (between 0 and C - 1)\n classes: 'all' for all, 'present' for classes present in labels, or a list of classes to average.\n per_image: compute the loss per image instead of per batch\n ignore: void class labels\n \"\"\"\n # print('==> lovasz_softmax, classes: ', classes)\n # print('==> lovasz_softmax, per_image: ', per_image)\n # print('==> lovasz_softmax, ignore: ', ignore)\n\n if per_image:\n loss = mean(lovasz_softmax_flat(*flatten_probas(prob.unsqueeze(0), lab.unsqueeze(0), ignore), classes=classes)\n for prob, lab in zip(probas, labels))\n else:\n loss = lovasz_softmax_flat(*flatten_probas(probas, labels, ignore), classes=classes)\n return loss\n\n\n\ndef lovasz_softmax_flat(probas, labels, classes='present'):\n \"\"\"\n Multi-class Lovasz-Softmax loss\n probas: [P, C] Variable, class probabilities at each prediction (between 0 and 1)\n labels: [P] Tensor, ground truth labels (between 0 and C - 1)\n classes: 'all' for all, 'present' for classes present in labels, or a list of classes to average.\n \"\"\"\n if probas.numel() == 0:\n # only void pixels, the gradients should be 0\n return probas * 0.\n C = probas.size(1)\n losses = []\n class_to_sum = list(range(C)) if classes in ['all', 'present'] else classes\n for c in class_to_sum:\n fg = (labels == c).float() # foreground for class c\n if (classes is 'present' and fg.sum() == 0):\n continue\n if C == 1:\n if len(classes) > 1:\n raise ValueError('Sigmoid output possible only with 1 class')\n class_pred = probas[:, 0]\n else:\n class_pred = probas[:, c]\n errors = (Variable(fg) - class_pred).abs()\n errors_sorted, perm = torch.sort(errors, 0, descending=True)\n perm = perm.data\n fg_sorted = fg[perm]\n losses.append(torch.dot(errors_sorted, Variable(lovasz_grad(fg_sorted))))\n return mean(losses)\n\n\ndef lovasz_grad(gt_sorted):\n \"\"\"\n Computes gradient of the Lovasz extension w.r.t sorted errors\n See Alg. 1 in paper\n \"\"\"\n p = len(gt_sorted)\n gts = gt_sorted.sum()\n intersection = gts - gt_sorted.float().cumsum(0)\n union = gts + (1 - gt_sorted).float().cumsum(0)\n jaccard = 1. 
- intersection / union\n if p > 1: # cover 1-pixel case\n jaccard[1:p] = jaccard[1:p] - jaccard[0:-1]\n return jaccard\n\n\ndef flatten_probas(probas, labels, ignore=None):\n \"\"\"\n Flattens predictions in the batch\n \"\"\"\n if probas.dim() == 2: \n # do nothing, 3D segmentation for sparse tensor\n pass\n elif probas.dim() == 3:\n # assumes output of a sigmoid layer\n B, H, W = probas.size()\n probas = probas.view(B, 1, H, W)\n probas = probas.permute(0, 2, 3, 1).contiguous().view(-1, C) # B * H * W, C = P, C\n elif probas.dim() == 5:\n # 3D segmentation for dense tensor\n B, C, L, H, W = probas.size()\n probas = probas.contiguous().view(B, C, L, H*W)\n probas = probas.permute(0, 2, 3, 1).contiguous().view(-1, C) # B * H * W, C = P, C\n\n\n labels = labels.view(-1)\n if ignore is not None:\n valid = (labels != ignore)\n # vprobas = probas[valid.nonzero().squeeze()]\n # for newer pytorch\n vprobas = probas[torch.nonzero(valid, as_tuple=False).squeeze()]\n vlabels = labels[valid]\n return vprobas, vlabels\n else: \n return probas, labels\n\n\n# --------------------------- HELPER FUNCTIONS ---------------------------\ndef isnan(x):\n return x != x\n \n \ndef mean(l, ignore_nan=False, empty=0):\n \"\"\"\n nanmean compatible with generators.\n \"\"\"\n l = iter(l)\n if ignore_nan:\n l = ifilterfalse(isnan, l)\n try:\n n = 1\n acc = next(l)\n except StopIteration:\n if empty == 'raise':\n raise ValueError('Empty mean')\n return empty\n for n, v in enumerate(l, 2):\n acc += v\n if n == 1:\n return acc\n return acc / n\n\n"
] | [
[
"torch.nn.functional.mse_loss",
"torch.nonzero",
"torch.pow",
"torch.sort",
"torch.nn.functional.l1_loss",
"torch.autograd.Variable",
"torch.nn.functional.smooth_l1_loss",
"torch.norm",
"torch.where",
"torch.log",
"torch.from_numpy",
"torch.abs",
"torch.nn.functional.cross_entropy",
"numpy.array",
"torch.isnan",
"torch.sigmoid",
"torch.clamp"
]
] |
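`SigmoidFocalClassificationLoss` in the row above combines the numerically stable sigmoid cross-entropy, max(x, 0) − x·z + log(1 + exp(−|x|)), with the focal weight α_t·p_t^γ, where p_t is the probability assigned to the wrong class. Below is a small self-contained check of the same per-element formula, assuming tiny random tensors instead of the detector's (B, #anchors, #classes) shapes; the helper name is invented for illustration.

```python
import torch

def sigmoid_focal_loss(logits, targets, alpha=0.25, gamma=2.0):
    # stable BCE-with-logits: max(x, 0) - x*z + log(1 + exp(-|x|))
    bce = torch.clamp(logits, min=0) - logits * targets + torch.log1p(torch.exp(-torch.abs(logits)))
    p = torch.sigmoid(logits)
    p_wrong = targets * (1.0 - p) + (1.0 - targets) * p       # probability of the wrong class
    alpha_t = targets * alpha + (1.0 - targets) * (1.0 - alpha)
    return alpha_t * p_wrong.pow(gamma) * bce                 # element-wise, no reduction

logits = torch.randn(4, 3)
targets = torch.randint(0, 2, (4, 3)).float()

# the hand-written stable BCE should match PyTorch's built-in version element-wise
assert torch.allclose(
    torch.clamp(logits, min=0) - logits * targets + torch.log1p(torch.exp(-torch.abs(logits))),
    torch.nn.functional.binary_cross_entropy_with_logits(logits, targets, reduction="none"),
    atol=1e-6,
)
print(sigmoid_focal_loss(logits, targets).shape)  # torch.Size([4, 3])
```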
Bharat-Runwal/path2vec | [
"f99188b882752ff9aa2c87334979b75483940ae0"
] | [
"wsd/graph_wsd_test_v1.py"
] | [
"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon May 7 17:13:25 2018\n\n@author: dorgham\n\"\"\"\n\nimport networkx as nx\nfrom nltk.corpus import wordnet as wn\nfrom nltk.corpus import wordnet_ic\nfrom nltk.stem import WordNetLemmatizer\nimport matplotlib.pyplot as plt\nimport xml.etree.ElementTree as ET\nfrom collections import OrderedDict\nimport codecs\nimport string\nfrom nltk.corpus import stopwords\nfrom sklearn.metrics import f1_score, precision_score, recall_score\n\n#algorithm parameters\nUSE_POS_INFO = True\nUSE_LESK = False\nUSE_PAGERANK = True\nAVG_METHOD = 'micro'\nMAX_DEPTH = 3\nLESK_NORM_FACTOR = 20 #this value is emperical\nsenseval_fpath = 'WSD_Unified_Evaluation_Datasets/senseval2/senseval2.data.xml'\ngold_tags_fpath = 'WSD_Unified_Evaluation_Datasets/senseval2/senseval2.gold.key.txt'\n\ninfo_content = wordnet_ic.ic('ic-semcor.dat')\nwnlemmatizer = WordNetLemmatizer()\npywsd_stopwords = [u\"'s\", u\"``\", u\"`\"]\nSTOPWORDS = set(stopwords.words('english') + list(string.punctuation) + pywsd_stopwords)\n\n\ndef lch_similarity(synset1, synset2):\n return wn.lch_similarity(synset1, synset2)\n \ndef jcn_similarity(synset1, synset2):\n return wn.jcn_similarity(synset1, synset2, info_content)\n \ndef lesk_similarity(synset1, synset2):\n str1 = str(synset1.definition()).translate(str.maketrans('','',string.punctuation))\n for example in synset1.examples():\n str1 += ' ' + str(example).translate(str.maketrans('','',string.punctuation))\n lemmatized_str1=''\n for word in set(str1.split()):\n lemmatized_str1 += wnlemmatizer.lemmatize(word) + ' '\n for lemma in synset1.lemma_names():\n lemmatized_str1 += ' ' + lemma\n hyper_hypo = set(synset1.hyponyms() + synset1.hypernyms() + synset1.instance_hyponyms() + synset1.instance_hypernyms())\n for hh in hyper_hypo:\n for lemma in hh.lemma_names():\n lemmatized_str1 += ' ' + lemma\n current_set = set(lemmatized_str1.split())\n current_set = set(cs.lower() for cs in current_set)\n current_set = current_set.difference(STOPWORDS)\n #print (current_set)\n str2 = str(synset2.definition()).translate(str.maketrans('','',string.punctuation))\n for example in synset2.examples():\n str2 += ' ' + str(example).translate(str.maketrans('','',string.punctuation))\n lemmatized_str2=''\n for word in set(str2.split()):\n lemmatized_str2 += wnlemmatizer.lemmatize(word) + ' '\n for lemma in synset2.lemma_names():\n lemmatized_str2 += ' ' + lemma\n hyper_hypo = set(synset2.hyponyms() + synset2.hypernyms() + synset2.instance_hyponyms() + synset2.instance_hypernyms())\n for hh in hyper_hypo:\n for lemma in hh.lemma_names():\n lemmatized_str2 += ' ' + lemma\n neighbor_set = set(lemmatized_str2.split())\n neighbor_set = set(ns.lower() for ns in neighbor_set)\n neighbor_set = neighbor_set.difference(STOPWORDS)\n #print (neighbor_set)\n return len(current_set.intersection(neighbor_set))\n\ndef convert_to_wordnet_pos(senseval_pos):\n if senseval_pos == 'VERB':\n return wn.VERB\n elif senseval_pos == 'NOUN':\n return wn.NOUN\n elif senseval_pos == 'ADV':\n return wn.ADV\n elif senseval_pos == 'ADJ':\n return wn.ADJ\n else:\n return None\n\ndef sentence_wsd(sentences, poses):\n counter=0\n output_dict = dict()\n for sentence in sentences:\n G=nx.Graph()\n sent_len = len(sentence.keys())\n G_pos = dict() #used for aligning the nodes when drawing the graph\n pos_idx=1\n token_nodeNames_map = dict()\n pos_dict = poses[counter]\n \n #construct the nodes of the graph\n for i, _id in enumerate(sentence.keys()):\n if USE_POS_INFO: #restrict the retrieved snysets from wordnet 
to the target pos\n wn_pos = convert_to_wordnet_pos(pos_dict[_id])\n else:\n wn_pos = None\n \n synsets_list = list(wn.synsets(sentence[_id], pos=wn_pos))\n if len(synsets_list) > 0:\n node_names = []\n for synset in synsets_list:\n node_name = str(i) + ' ' + synset.name()\n #adding the index to the node name is important in the case of \n #having a word that is repeated in the sentence but with \n #different sense each time, so we want unique node for each one.\n G.add_node(node_name)\n node_names.append(node_name)\n token_nodeNames_map[_id] = node_names\n G_pos.update( (label, (pos_idx, j)) for j, label in enumerate(node_names) ) \n pos_idx+=1\n \n #compute word similarity\n ids_list = list(sentence.keys())\n lch_sim_dict = dict()\n jcn_sim_dict = dict()\n lesk_sim_dict = dict()\n #print sentence.values()\n for idx, key in enumerate(ids_list):\n if USE_POS_INFO:\n wn_pos = convert_to_wordnet_pos(pos_dict[ids_list[idx]])\n else:\n wn_pos = None\n synsets_list = list(wn.synsets(sentence[ids_list[idx]], pos=wn_pos))\n if len(synsets_list) > 0:\n i = 1\n while i<=MAX_DEPTH and idx+i<sent_len:\n if USE_POS_INFO:\n wn_pos = convert_to_wordnet_pos(pos_dict[ids_list[idx+i]])\n else:\n wn_pos = None\n \n next_synsets_list = list(wn.synsets(sentence[ids_list[idx+i]], pos=wn_pos))\n if len(next_synsets_list) > 0:\n for current_synset in synsets_list:\n for neighbor_synset in next_synsets_list:\n nodes = str(idx) + ' ' + current_synset.name() + ';'\n nodes += str(idx+i) + ' ' + neighbor_synset.name()\n if current_synset.pos() == 'v' and neighbor_synset.pos() == 'v':\n sim_weight = lch_similarity(current_synset, neighbor_synset)\n lch_sim_dict[nodes] = sim_weight\n elif current_synset.pos() == 'n' and neighbor_synset.pos() == 'n':\n sim_weight = jcn_similarity(current_synset, neighbor_synset)\n jcn_sim_dict[nodes] = sim_weight\n elif USE_LESK:\n sim_weight = lesk_similarity(current_synset, neighbor_synset)\n lesk_sim_dict[nodes] = sim_weight\n i+=1\n \n #normalize the similarity weights and build edges\n if lch_sim_dict:\n max_lch_score = max(lch_sim_dict.values())\n for key in lch_sim_dict:\n nodeIds = key.split(';')\n G.add_edge(nodeIds[0],nodeIds[1], weight=(lch_sim_dict[key]/max_lch_score))\n if jcn_sim_dict:\n max_jcn_score = max(jcn_sim_dict.values())\n for key in jcn_sim_dict:\n nodeIds = key.split(';')\n G.add_edge(nodeIds[0],nodeIds[1], weight=(jcn_sim_dict[key]/max_jcn_score))\n if USE_LESK:\n if lesk_sim_dict:\n max_lesk_score = max(lesk_sim_dict.values())\n if max_lesk_score > 0:\n for key in lesk_sim_dict:\n nodeIds = key.split(';')\n G.add_edge(nodeIds[0],nodeIds[1], weight=(lesk_sim_dict[key]/LESK_NORM_FACTOR))\n \n \n #compute graph centrality\n node_scores = dict()\n if USE_PAGERANK:\n node_scores = nx.pagerank(G)\n else:\n node_scores = G.degree(G.nodes(), \"weight\")\n \n for token_id in ids_list:\n nodeNames = token_nodeNames_map.get(token_id)\n scores = []\n max_label = \"\"\n wordnet_key = \"\"\n if nodeNames:\n for nodeName in nodeNames:\n scores.append(node_scores[nodeName])\n if scores:\n max_index = max(range(len(scores)), key=scores.__getitem__)\n max_label = nodeNames[max_index]\n if max_label:\n i = max_label.find(' ')\n lemmas = wn.synset(max_label[i+1:]).lemmas()\n for lemma in lemmas:\n wordnet_key += lemma.key()+';'\n wordnet_key = wordnet_key[0:-1]\n output_dict[token_id] = wordnet_key\n \n #add the weight as attribute to the nodes of the graph\n #for node in node_scores.keys():\n # G.node[node]['weight']=node_scores[node]\n \n counter += 1\n if counter==1: #draw the 
graph of the first sentence\n plt.close()\n nx.draw(G, pos=G_pos, with_labels = True)\n plt.show()\n G.clear()\n \n return output_dict\n\n\ndef load_senseval_data(file_path):\n tokens_dict = OrderedDict()\n pos_dict = OrderedDict()\n sentences = []\n pos_list = []\n tree = ET.parse(file_path)\n root = tree.getroot()\n for text in root:\n for sentence in text:\n for word in sentence:\n if word.tag == 'instance' and word.attrib['id']: #only include words with the <instance> tag\n tokens_dict[word.attrib['id']] = word.text\n pos_dict[word.attrib['id']] = word.attrib['pos']\n if tokens_dict:\n sentences.append(tokens_dict)\n pos_list.append(pos_dict)\n tokens_dict = dict()\n pos_dict = dict()\n \n return sentences, pos_list\n\n\n\nif __name__ == \"__main__\":\n sents, poses = load_senseval_data(senseval_fpath)\n output_dict = sentence_wsd(sents, poses)\n #load the gold results\n with codecs.open(gold_tags_fpath, 'r', 'utf-8') as f:\n lines = f.readlines()\n wsd_output = []\n gold_output = []\n for line in lines:\n id_key_pair = line.split()\n predicted_keys = output_dict[id_key_pair[0]].split(';')\n gold_keys_set = set(id_key_pair[1:])\n predected_keys_set = set(predicted_keys)\n if len(predected_keys_set.intersection(gold_keys_set)) > 0:\n wsd_output.append(predicted_keys[0])\n gold_output.append(predicted_keys[0])\n else:\n wsd_output.append(predicted_keys[0])\n gold_output.append(id_key_pair[1])\n \n assert len(wsd_output) == len(gold_output)\n\n f1 = f1_score(gold_output, wsd_output, average=AVG_METHOD)\n precision = precision_score(gold_output, wsd_output, average=AVG_METHOD)\n recall = recall_score(gold_output, wsd_output, average=AVG_METHOD)\n \n print ('F-score: %1.4f' % f1, ' Precision: %1.4f' % precision, ' Recall: %1.4f' % recall)\n \n \n \n"
] | [
[
"sklearn.metrics.f1_score",
"matplotlib.pyplot.show",
"sklearn.metrics.precision_score",
"matplotlib.pyplot.close",
"sklearn.metrics.recall_score"
]
] |
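The WSD script above builds a graph whose nodes are candidate WordNet synsets for each target word, weights edges between senses of different words by similarity (lch, jcn, or Lesk overlap), and picks the highest-centrality sense per word, using PageRank when USE_PAGERANK is set. The sketch below strips that loop down to a single word list, assuming NLTK's WordNet data is installed and substituting `path_similarity` for the script's mixed similarity measures; the function name is an assumption for illustration only.

```python
import networkx as nx
from nltk.corpus import wordnet as wn  # requires nltk.download('wordnet')

def disambiguate(words):
    """Pick one synset per word via PageRank over a synset-similarity graph."""
    G = nx.Graph()
    candidates = {w: wn.synsets(w) for w in words}
    for w, syns in candidates.items():
        for s in syns:
            G.add_node((w, s.name()))
    # connect candidate senses of *different* words, weighted by similarity
    for i, w1 in enumerate(words):
        for w2 in words[i + 1:]:
            for s1 in candidates[w1]:
                for s2 in candidates[w2]:
                    sim = s1.path_similarity(s2)  # stand-in for lch/jcn/Lesk
                    if sim:
                        G.add_edge((w1, s1.name()), (w2, s2.name()), weight=sim)
    scores = nx.pagerank(G)
    return {w: max(candidates[w], key=lambda s: scores.get((w, s.name()), 0.0)).name()
            for w in words if candidates[w]}

print(disambiguate(["bank", "money", "river"]))
```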
oshapoval/WarpX | [
"84d687da21ee93db67fdc43efec8a9cc80d0e6f9"
] | [
"Examples/Tests/PythonWrappers/PICMI_inputs_2d.py"
] | [
"import numpy as np\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.axes_grid1.axes_divider import make_axes_locatable\nfrom pywarpx import picmi\n\n# Number of time steps\nmax_steps = 100\n\n# Grid\nnx = 128\nnz = 128\n\n# Domain\nxmin = 0.e-6\nzmin = 0.e-6\nxmax = 50.e-6\nzmax = 50.e-6\n\n# Cell size\ndx = (xmax - xmin) / nx\ndz = (zmax - zmin) / nz\n\n# Domain decomposition\nmax_grid_size_x = 64\nmax_grid_size_z = 64\n\n# PML\nnxpml = 10\nnzpml = 10\nfield_boundary = ['open', 'open']\n\n# Spectral order\nnox = 8\nnoz = 8\n\n# Guard cells\nnxg = 8\nnzg = 8\n\n# Initialize grid\ngrid = picmi.Cartesian2DGrid(number_of_cells = [nx,nz],\n lower_bound = [xmin,zmin],\n upper_bound = [xmax,zmax],\n lower_boundary_conditions = field_boundary,\n upper_boundary_conditions = field_boundary,\n guard_cells = [nxg,nzg],\n moving_window_velocity = [0.,0.,0],\n warpx_max_grid_size_x = max_grid_size_x,\n warpx_max_grid_size_y = max_grid_size_z)\n\n# Initialize field solver\nsolver = picmi.ElectromagneticSolver(grid=grid, cfl=0.95, method='PSATD',\n stencil_order = [nox,noz],\n divE_cleaning = 1,\n divB_cleaning = 1,\n pml_divE_cleaning = 1,\n pml_divB_cleaning = 1,\n warpx_psatd_update_with_rho = True)\n\n# Initialize diagnostics\ndiag_field_list = [\"E\", \"B\"]\nfield_diag = picmi.FieldDiagnostic(name = 'diag1',\n grid = grid,\n period = 10,\n write_dir = '.',\n warpx_file_prefix = 'Python_wrappers_plt',\n data_list = diag_field_list)\n\n# Initialize simulation\nsim = picmi.Simulation(solver = solver,\n max_steps = max_steps,\n verbose = 1,\n particle_shape = 'cubic',\n warpx_current_deposition_algo = 'direct',\n warpx_particle_pusher_algo = 'boris',\n warpx_field_gathering_algo = 'energy-conserving',\n warpx_use_filter = 1)\n\n# Add diagnostics to simulation\nsim.add_diagnostic(field_diag)\n\n# Write input file to run with compiled version\nsim.write_input_file(file_name = 'inputs_2d')\n\n# Whether to include guard cells in data returned by Python wrappers\ninclude_ghosts = 1\n\n# Compute min and max of fields data\ndef compute_minmax(data):\n vmax = np.abs(data).max()\n vmin = -vmax\n return vmin, vmax\n\n# Plot fields data either in valid domain or in PML\ndef plot_data(data, pml, title, name):\n fig, ax = plt.subplots(nrows = 1, ncols = 1, gridspec_kw = dict(wspace = 0.5), figsize = [6,5])\n cax = make_axes_locatable(ax).append_axes('right', size='5%', pad='5%')\n lw = 0.8\n ls = '--'\n if pml:\n # Draw PMLs and ghost regions\n ax.axvline(x = 0 , linewidth = lw, linestyle = ls)\n ax.axvline(x = 0+nxg , linewidth = lw, linestyle = ls)\n ax.axvline(x = -nxpml , linewidth = lw, linestyle = ls)\n ax.axvline(x = nx , linewidth = lw, linestyle = ls)\n ax.axvline(x = nx-nxg , linewidth = lw, linestyle = ls)\n ax.axvline(x = nx+nxpml, linewidth = lw, linestyle = ls)\n ax.axhline(y = 0 , linewidth = lw, linestyle = ls)\n ax.axhline(y = 0+nzg , linewidth = lw, linestyle = ls)\n ax.axhline(y = -nzpml , linewidth = lw, linestyle = ls)\n ax.axhline(y = nz , linewidth = lw, linestyle = ls)\n ax.axhline(y = nz-nzg , linewidth = lw, linestyle = ls)\n ax.axhline(y = nz+nzpml, linewidth = lw, linestyle = ls)\n # Annotations\n ax.annotate('PML', xy = (-nxpml//2,nz//2), rotation = 'vertical', ha = 'center', va = 'center')\n ax.annotate('PML', xy = (nx+nxpml//2,nz//2), rotation = 'vertical', ha = 'center', va = 'center')\n ax.annotate('PML', xy = (nx//2,-nzpml//2), rotation = 'horizontal', ha = 'center', va = 'center')\n ax.annotate('PML', xy = (nx//2,nz+nzpml//2), rotation = 'horizontal', ha = 'center', va = 
'center')\n ax.annotate('PML ghost', xy = (nxg//2,nz//2), rotation = 'vertical', ha = 'center', va = 'center')\n ax.annotate('PML ghost', xy = (-nxpml-nxg//2,nz//2), rotation = 'vertical', ha = 'center', va = 'center')\n ax.annotate('PML ghost', xy = (nx-nxg//2,nz//2), rotation = 'vertical', ha = 'center', va = 'center')\n ax.annotate('PML ghost', xy = (nx+nxpml+nxg//2,nz//2), rotation = 'vertical', ha = 'center', va = 'center')\n ax.annotate('PML ghost', xy = (nx//2,nzg//2), rotation = 'horizontal', ha = 'center', va = 'center')\n ax.annotate('PML ghost', xy = (nx//2,-nzpml-nzg//2), rotation = 'horizontal', ha = 'center', va = 'center')\n ax.annotate('PML ghost', xy = (nx//2,nz-nzg//2), rotation = 'horizontal', ha = 'center', va = 'center')\n ax.annotate('PML ghost', xy = (nx//2,nz+nzpml+nzg//2), rotation = 'horizontal', ha = 'center', va = 'center')\n # Set extent and sliced data\n extent = np.array([-nxg-nxpml, nx+nxpml+nxg, -nzg-nzpml, nz+nzpml+nzg])\n else:\n # Draw ghost regions\n ax.axvline(x = 0 , linewidth = lw, linestyle = ls)\n ax.axvline(x = nx, linewidth = lw, linestyle = ls)\n ax.axhline(y = 0 , linewidth = lw, linestyle = ls)\n ax.axhline(y = nz, linewidth = lw, linestyle = ls)\n # Annotations\n ax.annotate('ghost', xy = (-nxg//2,nz//2), rotation = 'vertical', ha = 'center', va = 'center')\n ax.annotate('ghost', xy = (nx+nxg//2,nz//2), rotation = 'vertical', ha = 'center', va = 'center')\n ax.annotate('ghost', xy = (nx//2,-nzg//2), rotation = 'horizontal', ha = 'center', va = 'center')\n ax.annotate('ghost', xy = (nx//2,nz+nzg//2), rotation = 'horizontal', ha = 'center', va = 'center')\n # Set extent and sliced data\n extent = np.array([-nxg, nx+nxg, -nzg, nz+nzg])\n X = data[:,:].transpose()\n # Min and max for colorbar\n vmin, vmax = compute_minmax(X)\n # Display data as image\n im = ax.imshow(X = X, origin = 'lower', extent = extent, vmin = vmin, vmax = vmax, cmap = 'seismic')\n # Add colorbar to plot\n fig.colorbar(im, cax = cax)\n # Set label for x- and y-axis, set title\n ax.set_xlabel('x')\n ax.set_ylabel('z')\n ax.set_title(title)\n # Set plot title\n suptitle = 'PML in (x,z), 4 grids 64 x 64'\n plt.suptitle(suptitle)\n # Save figure\n figname = 'figure_' + name + '.png'\n fig.savefig(figname, dpi = 100)\n\n# Initialize fields data (unit pulse) and apply smoothing\ndef init_data(data):\n impulse_1d = np.array([1./4., 1./2., 1./4.])\n impulse = np.outer(impulse_1d, impulse_1d)\n data[nx//2-1:nx//2+2,nz//2-1:nz//2+2] = impulse\n\n# Initialize inputs and WarpX instance\nsim.initialize_inputs()\nsim.initialize_warpx()\n\n# Get fields data using Python wrappers\nimport pywarpx.fields as pwxf\nEx = pwxf.ExFPWrapper(include_ghosts = include_ghosts)\nEy = pwxf.EyFPWrapper(include_ghosts = include_ghosts)\nEz = pwxf.EzFPWrapper(include_ghosts = include_ghosts)\nBx = pwxf.BxFPWrapper(include_ghosts = include_ghosts)\nBy = pwxf.ByFPWrapper(include_ghosts = include_ghosts)\nBz = pwxf.BzFPWrapper(include_ghosts = include_ghosts)\nF = pwxf.FFPWrapper(include_ghosts = include_ghosts)\nG = pwxf.GFPWrapper(include_ghosts = include_ghosts)\nExpml = pwxf.ExFPPMLWrapper(include_ghosts = include_ghosts)\nEypml = pwxf.EyFPPMLWrapper(include_ghosts = include_ghosts)\nEzpml = pwxf.EzFPPMLWrapper(include_ghosts = include_ghosts)\nBxpml = pwxf.BxFPPMLWrapper(include_ghosts = include_ghosts)\nBypml = pwxf.ByFPPMLWrapper(include_ghosts = include_ghosts)\nBzpml = pwxf.BzFPPMLWrapper(include_ghosts = include_ghosts)\nFpml = pwxf.FFPPMLWrapper(include_ghosts = include_ghosts)\nGpml = 
pwxf.GFPPMLWrapper(include_ghosts = include_ghosts)\n\n# Initialize fields data in valid domain\ninit_data(Ex)\ninit_data(Ey)\ninit_data(Ez)\ninit_data(Bx)\ninit_data(By)\ninit_data(Bz)\ninit_data(F)\ninit_data(G)\n\n# Advance simulation until last time step\nsim.step(max_steps)\n\n# Plot E\nplot_data(Ex, pml = False, title = 'Ex', name = 'Ex')\nplot_data(Ey, pml = False, title = 'Ey', name = 'Ey')\nplot_data(Ez, pml = False, title = 'Ez', name = 'Ez')\n\n# Plot B\nplot_data(Bx, pml = False, title = 'Bx', name = 'Bx')\nplot_data(By, pml = False, title = 'By', name = 'By')\nplot_data(Bz, pml = False, title = 'Bz', name = 'Bz')\n\n# F and G\nplot_data(F, pml = False, title = 'F', name = 'F')\nplot_data(G, pml = False, title = 'G', name = 'G')\n\n# Plot E in PML\nplot_data(Expml[:,:,0], pml = True, title = 'Exy in PML', name = 'Exy')\nplot_data(Expml[:,:,1], pml = True, title = 'Exz in PML', name = 'Exz')\nplot_data(Expml[:,:,2], pml = True, title = 'Exx in PML', name = 'Exx')\nplot_data(Eypml[:,:,0], pml = True, title = 'Eyz in PML', name = 'Eyz')\nplot_data(Eypml[:,:,1], pml = True, title = 'Eyx in PML', name = 'Eyx')\nplot_data(Eypml[:,:,2], pml = True, title = 'Eyy in PML', name = 'Eyy') # zero\nplot_data(Ezpml[:,:,0], pml = True, title = 'Ezx in PML', name = 'Ezx')\nplot_data(Ezpml[:,:,1], pml = True, title = 'Ezy in PML', name = 'Ezy') # zero\nplot_data(Ezpml[:,:,2], pml = True, title = 'Ezz in PML', name = 'Ezz')\n\n# Plot B in PML\nplot_data(Bxpml[:,:,0], pml = True, title = 'Bxy in PML', name = 'Bxy')\nplot_data(Bxpml[:,:,1], pml = True, title = 'Bxz in PML', name = 'Bxz')\nplot_data(Bxpml[:,:,2], pml = True, title = 'Bxx in PML', name = 'Bxx')\nplot_data(Bypml[:,:,0], pml = True, title = 'Byz in PML', name = 'Byz')\nplot_data(Bypml[:,:,1], pml = True, title = 'Byx in PML', name = 'Byx')\nplot_data(Bypml[:,:,2], pml = True, title = 'Byy in PML', name = 'Byy') # zero\nplot_data(Bzpml[:,:,0], pml = True, title = 'Bzx in PML', name = 'Bzx')\nplot_data(Bzpml[:,:,1], pml = True, title = 'Bzy in PML', name = 'Bzy') # zero\nplot_data(Bzpml[:,:,2], pml = True, title = 'Bzz in PML', name = 'Bzz')\n\n# Plot F and G in PML\nplot_data(Fpml[:,:,0], pml = True, title = 'Fx in PML', name = 'Fx')\nplot_data(Fpml[:,:,1], pml = True, title = 'Fy in PML', name = 'Fy')\nplot_data(Fpml[:,:,2], pml = True, title = 'Fz in PML', name = 'Fz')\nplot_data(Gpml[:,:,0], pml = True, title = 'Gx in PML', name = 'Gx')\nplot_data(Gpml[:,:,1], pml = True, title = 'Gy in PML', name = 'Gy')\nplot_data(Gpml[:,:,2], pml = True, title = 'Gz in PML', name = 'Gz')\n\n# Check values with benchmarks (precomputed from the same Python arrays)\ndef check_values(benchmark, data, rtol, atol):\n passed = np.allclose(benchmark, np.sum(np.abs(data[:,:])), rtol = rtol, atol = atol)\n assert(passed)\n\nrtol = 1e-09\natol = 1e-12\n\n# E\ncheck_values(1013263608.6369569, Ex[:,:], rtol, atol)\ncheck_values(717278253.4505507 , Ey[:,:], rtol, atol)\ncheck_values(717866566.5718911 , Ez[:,:], rtol, atol)\n# B\ncheck_values(3.0214509313437636, Bx[:,:], rtol, atol)\ncheck_values(3.0242765102729985, By[:,:], rtol, atol)\ncheck_values(3.0214509326970465, Bz[:,:], rtol, atol)\n# F and G\ncheck_values(3.0188584528062377, F[:,:], rtol, atol)\ncheck_values(1013672631.8764204, G[:,:], rtol, atol)\n# E in PML\ncheck_values(364287936.1526477 , Expml[:,:,0], rtol, atol)\ncheck_values(183582351.3212558 , Expml[:,:,1], rtol, atol)\ncheck_values(190065766.41491824, Expml[:,:,2], rtol, atol)\ncheck_values(440581905.9336025 , Eypml[:,:,0], rtol, 
atol)\ncheck_values(178117293.6629357 , Eypml[:,:,1], rtol, atol)\ncheck_values(0.0 , Eypml[:,:,2], rtol, atol)\ncheck_values(430277101.26568377, Ezpml[:,:,0], rtol, atol)\ncheck_values(0.0 , Ezpml[:,:,1], rtol, atol)\ncheck_values(190919663.2167449 , Ezpml[:,:,2], rtol, atol)\n# B in PML\ncheck_values(1.0565189315366146 , Bxpml[:,:,0], rtol, atol)\ncheck_values(0.4618191395098556 , Bxpml[:,:,1], rtol, atol)\ncheck_values(0.6849858273929585 , Bxpml[:,:,2], rtol, atol)\ncheck_values(1.7228584190213505 , Bypml[:,:,0], rtol, atol)\ncheck_values(0.47697331996765685, Bypml[:,:,1], rtol, atol)\ncheck_values(0.0 , Bypml[:,:,2], rtol, atol)\ncheck_values(1.5183380774611628 , Bzpml[:,:,0], rtol, atol)\ncheck_values(0.0 , Bzpml[:,:,1], rtol, atol)\ncheck_values(0.6849858291863835 , Bzpml[:,:,2], rtol, atol)\n# F and G in PML\ncheck_values(1.7808748509425263, Fpml[:,:,0], rtol, atol)\ncheck_values(0.0 , Fpml[:,:,1], rtol, atol)\ncheck_values(0.4307845604625681, Fpml[:,:,2], rtol, atol)\ncheck_values(536552745.42701197, Gpml[:,:,0], rtol, atol)\ncheck_values(0.0 , Gpml[:,:,1], rtol, atol)\ncheck_values(196016270.97767758, Gpml[:,:,2], rtol, atol)\n"
] | [
[
"numpy.array",
"numpy.abs",
"numpy.outer",
"matplotlib.pyplot.suptitle"
]
] |
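The WarpX PICMI script in the row above seeds every field with a smoothed unit pulse (the outer product of the binomial kernel [1/4, 1/2, 1/4] placed at the domain center) and then validates each array by comparing the sum of its absolute values against a precomputed benchmark. The sketch below isolates those two helpers on a plain NumPy array with a benchmark that holds by construction (the kernel sums to 1.0); it does not run WarpX, and the standalone setup is an assumption for illustration.

```python
import numpy as np

def init_data(data, nx, nz):
    """Place a 3x3 smoothed unit pulse at the center of the array."""
    impulse_1d = np.array([1.0 / 4.0, 1.0 / 2.0, 1.0 / 4.0])
    data[nx // 2 - 1:nx // 2 + 2, nz // 2 - 1:nz // 2 + 2] = np.outer(impulse_1d, impulse_1d)

def check_values(benchmark, data, rtol=1e-9, atol=1e-12):
    """Compare the sum of absolute values of the field against a stored benchmark."""
    assert np.allclose(benchmark, np.sum(np.abs(data)), rtol=rtol, atol=atol)

nx = nz = 128
field = np.zeros((nx, nz))
init_data(field, nx, nz)
check_values(1.0, field)       # the smoothing kernel sums to exactly 1.0
print(np.sum(np.abs(field)))   # 1.0
```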
hlebars/YoutubeDataAnalysis | [
"0845effcdfdf6ab3281adc25840ed090e47498c8"
] | [
"Script/test.py"
] | [
"import pandas as pd\nimport datetime\nimport numpy as np\nimport os\nimport re\nimport matplotlib.pyplot as plot\n\nimport pytz\n# @timeit (repeat=3,number=10)\n\ndef EclatedSubPlot(SerieAfterGrpBy,ActivatePlotting,ListOfDateAndTime,Abbreviation):\n\n\n DicoDayOfWeek={\n \"00\":('Mon','Monday'), \"01\":('Tue','Tuesday'), \"02\":('Wed','Wednesday'), \"03\":('Thu','Thursday'),\n \"04\":('Fri','Friday'), \"05\":('Sat','Saturday'), \"06\":('Sun','Sunday')\n }\n \n DicoMonthOfTheYear = {\n \"01\":(\"Jan\", \"January\"),\"02\":(\"Feb\",\"February\"),\"03\":(\"Mar\",\"March\"),\"04\":(\"Apr\",\"April\"),\"05\":(\"May\",\"May\"),\n \"06\":(\"Jun\",\"June\"),\"07\":(\"Jul\",\"July\"),\"08\":(\"Aug\",\"August\"),\"09\":(\"Sep\",\"September\"),\"10\":(\"Oct\",\"October\"),\n \"11\":(\"Nov\",\"November\"),\"12\":(\"Dec\",\"December\")\n }\n\n df_unstack=SerieAfterGrpBy.unstack(level=0)\n\n nblevels = df_unstack.index.nlevels \n \n \n if nblevels!=1:\n for ColumnsName in ListOfDateAndTime:\n\n ListMultiIndexName=df_unstack.index.names\n\n if ColumnsName in ListMultiIndexName:\n level_index=ListMultiIndexName.index(ColumnsName)\n \n if Abbreviation==True:\n if ColumnsName==\"WeekDay\":\n df_unstack.index = df_unstack.index.set_levels(df_unstack.index.levels[level_index].map(lambda x : DicoDayOfWeek[x][0],DicoDayOfWeek), level=level_index)\n elif ColumnsName==\"M\":\n df_unstack.index = df_unstack.index.set_levels(df_unstack.index.levels[level_index].map(lambda x : DicoMonthOfTheYear[x][0],DicoDayOfWeek), level=level_index)\n elif Abbreviation==False:\n if ColumnsName==\"WeekDay\":\n df_unstack.index = df_unstack.index.set_levels(df_unstack.index.levels[level_index].map(lambda x : DicoDayOfWeek[x][1],DicoDayOfWeek), level=level_index)\n elif ColumnsName==\"M\":\n df_unstack.index = df_unstack.index.set_levels(df_unstack.index.levels[level_index].map(lambda x : DicoMonthOfTheYear[x][1],DicoDayOfWeek), level=level_index)\n else:\n\n if Abbreviation==True:\n if ColumnsName==\"WeekDay\":\n df_unstack.columns = df_unstack.columns.map(lambda x : DicoDayOfWeek[x][0],DicoDayOfWeek)\n elif ColumnsName==\"M\":\n df_unstack.columns = df_unstack.columns.map(lambda x : DicoMonthOfTheYear[x][0],DicoMonthOfTheYear)\n elif Abbreviation==False:\n if ColumnsName==\"WeekDay\":\n df_unstack.columns = df_unstack.columns.map(lambda x : DicoDayOfWeek[x][1],DicoDayOfWeek)\n elif ColumnsName==\"M\":\n df_unstack.columns = df_unstack.columns.map(lambda x : DicoMonthOfTheYear[x][1],DicoMonthOfTheYear)\n\n else:\n\n if \"WeekDay\" in ListOfDateAndTime and \"WeekDay\"==ListOfDateAndTime[0]:\n if Abbreviation==True:\n df_unstack.columns = df_unstack.columns.map(lambda x : DicoDayOfWeek[x][0],DicoDayOfWeek)\n else:\n df_unstack.columns = df_unstack.columns.map(lambda x : DicoDayOfWeek[x][1],DicoDayOfWeek)\n\n if \"M\" in ListOfDateAndTime and \"M\"==ListOfDateAndTime[0]:\n if Abbreviation==True:\n df_unstack.columns = df_unstack.columns.map(lambda x : DicoMonthOfTheYear[x][0],DicoMonthOfTheYear)\n elif Abbreviation==False:\n df_unstack.columns = df_unstack.columns.map(lambda x : DicoMonthOfTheYear[x][1],DicoMonthOfTheYear)\n \n\n DicoConfigRowColumsSubPlot={\"Y\":(4,3),\"M\":(4,3),\"W\":(13,4),\"D\":(8,4),\"WeekDay\":(4,2),\"h\":(6,4),\"m\":(10,6),\"s\":(10,6)}\n fig=df_unstack.plot(subplots=True,figsize=(70, 60), layout=DicoConfigRowColumsSubPlot[ListOfDateAndTime[0]],kind=\"bar\",sharex=True,sharey=True,legend=False,)#.flatten()#.map(set_xlabel=(\"toto\"))#**kwargs)\n\n\n # Add Legend for axis in function of the dimention of the 
subplot\n for Row in range(DicoConfigRowColumsSubPlot[ListOfDateAndTime[0]][0]):\n\n FigRow=fig[Row].flatten()\n\n if DicoConfigRowColumsSubPlot[ListOfDateAndTime[0]][0]%2!=0 and Row%3==1 and Row!=DicoConfigRowColumsSubPlot[ListOfDateAndTime[0]][0]:\n FigRow[0].set_ylabel(\"Nb. Video Trending\")\n elif DicoConfigRowColumsSubPlot[ListOfDateAndTime[0]][0]%2==0 and Row%2==1 and Row!=DicoConfigRowColumsSubPlot[ListOfDateAndTime[0]][0]:\n FigRow[0].set_ylabel(\"Nb. Video Trending\") \n elif DicoConfigRowColumsSubPlot[ListOfDateAndTime[0]][0]==4:\n FigRow[0].set_ylabel(\"Nb. Video Trending\")\n \n for Column in range(len(FigRow)):\n FigRow[Column].set_xlabel(\"Time\")\n\n plot.subplots_adjust(left=None, bottom=None, right=None, top=None, wspace=0.2, hspace=0.5)\n plot.show()\n\n return df_unstack\n\n\n\n\ndef testtemps():\n print(pytz.country_timezones('JP'))\n # Hours=[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23]\n # Hours=pd.date_range('17:30:00', '21:00:00',freq='15T').strftime('%H:%M').tolist()\n # pd.to_datetime(Hours,format='%H:%M')\n # print(Hours)\n Hours=pd.date_range('00:00:00', '23:59:00',freq=str(30)+'T').time\n\n \n df_NumberHours=pd.DataFrame(0,index=Hours,columns=[\"Number\",\"Label\"])\n # df_NumberHours[\"Label\"]=HoursForLabels\n\n # print(df_NumberHours[\"Label\"].head(3))\n\n Country=\"FRA\"\n PathToInputData=os.path.join(\"Script\",\"Data\",\"Data_IN\",\"Youtube_CSV__And_JSON\",Country+\"videos.csv\")\n\n \n\n\n df=pd.read_csv(PathToInputData)#,engine=\"python\") \n\n #'video_id','title',\n\n df=df.drop(columns=['channel_title','category_id','tags','thumbnail_link','comments_disabled','ratings_disabled','video_error_or_removed','description'])\n\n #get the plublish time and put in the column publish time\n df['publish_time'] = pd.to_datetime(df['publish_time'], format='%Y-%m-%dT%H:%M:%S.%fZ')\n # print(df['publish_time'])\n\n\n\n # [\"JPN\",\n LocalTime=False\n\n if LocalTime==True:\n if Country==\"USA\":\n df['publish_time']=pd.DatetimeIndex(df['publish_time']).tz_localize('utc').tz_convert('US/Central')\n elif Country==\"MEX\":\n df['publish_time']=pd.DatetimeIndex(df['publish_time']).tz_localize('utc').tz_convert('America/Mexico_City')\n elif Country==\"FRA\":\n df['publish_time']=pd.DatetimeIndex(df['publish_time']).tz_localize('utc').tz_convert('Europe/Paris')\n elif Country==\"DEU\":\n df['publish_time']=pd.DatetimeIndex(df['publish_time']).tz_localize('utc').tz_convert('Europe/Berlin')\n elif Country==\"GBR\":\n df['publish_time']=pd.DatetimeIndex(df['publish_time']).tz_localize('utc').tz_convert('Europe/London')\n elif Country==\"IND\":\n df['publish_time']=pd.DatetimeIndex(df['publish_time']).tz_localize('utc').tz_convert('Asia/Kolkata')\n elif Country==\"CAN\":\n df['publish_time']=pd.DatetimeIndex(df['publish_time']).tz_localize('utc').tz_convert('America/Winnipeg')\n elif Country==\"KOR\":\n df['publish_time']=pd.DatetimeIndex(df['publish_time']).tz_localize('utc').tz_convert('Asia/Seoul')\n elif Country==\"RUS\":\n df['publish_time']=pd.DatetimeIndex(df['publish_time']).tz_localize('utc').tz_convert('Asia/Krasnoyarsk')\n elif Country==\"JPN\":\n df['publish_time']=pd.DatetimeIndex(df['publish_time']).tz_localize('utc').tz_convert('Asia/Tokyo')\n\n \n\n # filtertime=(df[df.index.time > datetime.time(12),] & df[df.index.time < datetime.time(13)])\n\n #Converting LOcal time to UTC time if LocalToUTCTime==True\n # df=ConvertLocalTimeToUTC(df,Country,LocalToUTCTime)\n print(df[\"video_id\"].nunique())\n df = df.drop_duplicates(subset = 'video_id', 
keep = 'first')\n print(df)\n df.set_index( df['publish_time'], inplace=True)\n # df_FiltResult=\n \n # df=df.groupby([df.index.day_name()],)[\"views\"].count()#,df.index.hour\n\n # df.plot(kind=\"bar\")\n # plot.show()\n\n df_grp=df.groupby([df.index.weekday,df.index.hour])\n ser=df_grp[\"views\"].count()\n\n # print(df_grp[\"views\"].agg([\"count\"])) \n # print(df_grp[\"views\"].agg([\"count\"]).loc[1]) \n # print(df_grp.get_group((1,0)))\n # df.unstack(level=0).plot(kind='bar', subplots=True)\n # plot.show()\n DicoDayOfWeek={\n \"00\":('Mon','Monday'), \"01\":('Tue','Tuesday'), \"02\":('Wed','Wednesday'), \"03\":('Thu','Thursday'),\n \"04\":('Fri','Friday'), \"05\":('Sat','Saturday'), \"06\":('Sun','Sunday')\n }\n # ser.index[0][0] = df.index[0][0].map(lambda x : DicoDayOfWeek[x][1],DicoDayOfWeek)\n # ser.unstack(level=0).plot(subplots=True, figsize=(70, 60), layout=(4, 2),kind=\"bar\",sharex=True,title=ser.index[0][0] )\n # plot.show()\n # for i in range(1,max(df_grp.keys[0])):\n # print(df_grp[\"views\"].agg([\"count\"]).loc[i])\n # df_grp.plot(y=df_grp[\"views\"].agg([\"count\"]).loc[i].count)\n # plot.show()\n # fig, ax = plot.subplots(figsize=(10,4))\n # # ax.plot(df_grp[\"views\"].loc[1], df_grp['views'].count(), label=df_grp[\"views\"].loc[1])\n # for key, grp in df_grp:#df.groupby(ListOfDateAndTime):\n # print(key,grp)\n # ax.plot(grp.groupby(grp.index.hour), grp['views'].count(), label=key)\n\n # ax.legend()\n # plot.show()\n\n # df.plot()\n # plot.show()\n # plot.show()\n # filt=(df.title.str.find(sub)!=-1)\n # filt=None\n # df_FiltResult=df[\"title\"].resample(\"D\")\n #juste le filtre \n # df_FiltResultsample=df[\"title\"][filt].resample(\"M\").count()\n # totalite de la periode \n \n DicoMonthOfTheYear = {\n \"01\":(\"Jan\", \"January\"),\"02\":(\"Feb\",\"February\"),\"03\":(\"Mar\",\"March\"),\"04\":(\"Apr\",\"April\"),\"05\":(\"May\",\"May\"),\n \"06\":(\"Jun\",\"June\"),\"07\":(\"Jul\",\"July\"),\"08\":(\"Aug\",\"August\"),\"09\":(\"Sep\",\"September\"),\"10\":(\"Oct\",\"October\"),\n \"11\":(\"Nov\",\"November\"),\"12\":(\"Dec\",\"December\")\n }\n\n\n # sub=\"\"\n #fictionnary of group by possibilities\n DicoGroubyPossibility={\n \"Y\":df.index.year,\n \"M\":df.index.month,\n \"W\":df.index.week,\n \"D\":df.index.day,\n \"h\":df.index.hour,\n \"m\":df.index.minute,\n \"s\":df.index.second,\n \"time\":df.index.time,\n \"date\":df.index.date,\n \"WeekDay\":df.index.weekday,\n }\n # ListOfDateAndTime=[\"M\",\"D\"]#,\"M\",\"D\"]\n ListOfDateAndTime=[\"WeekDay\"]#,\"M\",\"D\"]\n #test if the list contain more than one parameter for grouby if it is true then it will group by by the composant o the list\n if len(ListOfDateAndTime)==1:\n\n \n \n \n #Create empty list for date and time classification\n ListOfDate=[]\n ListOfTime=[]\n\n #Classify Date and time in the corresponding list in fucntion of it is in upper case or not upper=date low=time\n for i in ListOfDateAndTime:\n if i.isupper() or i==\"date\" or i==\"WeekDay\":\n ListOfDate.append(i)\n else:\n ListOfTime.append(i)\n\n #get the list of all indexes \n SegmentOfDateOrTime=DicoGroubyPossibility[i].astype(str).tolist()\n\n # and add a zero in front of the index string to have 00 h and not 0h or days etc \n for DateOrTime in range(len(SegmentOfDateOrTime)):\n if len(SegmentOfDateOrTime[DateOrTime])==1:\n SegmentOfDateOrTime[DateOrTime]=str(0)+SegmentOfDateOrTime[DateOrTime]\n \n #Place it back in the columns of the date or time correspondant like Y(Year) or h(hour) to get a series grouby with different name\n 
df.loc[:,i]=SegmentOfDateOrTime\n\n\n #grouby in function of the entry in the list of date and time \n # df_grp=df.groupby(ListOfDateAndTime)#[\"views\"].count()\n Abbreviation=True\n\n\n df_grp=df.groupby([df.index.weekday,df.index.hour])#[\"views\"].count()\n\n df=df_grp[\"views\"].count()\n EclatedSubPlot(df,True,ListOfDateAndTime,Abbreviation)\n\n \n\n # Abbreviation=False\n \n\n # # fig, (ax1, ax2) = plot.subplots(2, 1)\n \n # # df.plot(x='Weekday', y='h', ax=ax1, legend=False)\n # # df.sort_values().plot(kind='barh', ax=ax2)\n # ser=df_grp[\"views\"].count()\n \n\n \n\n # df_unstack=ser.unstack(level=0)\n\n # nblevels = df_unstack.index.nlevels \n # print(nblevels)\n \n # if nblevels!=1:\n # for ColumnsName in ListOfDateAndTime:\n\n # ListMultiIndexName=df_unstack.index.names\n\n # if ColumnsName in ListMultiIndexName:\n # level_index=ListMultiIndexName.index(ColumnsName)\n \n # if Abbreviation==True:\n # if ColumnsName==\"WeekDay\":\n # df_unstack.index = df_unstack.index.set_levels(df_unstack.index.levels[level_index].map(lambda x : DicoDayOfWeek[x][0],DicoDayOfWeek), level=level_index)\n # elif ColumnsName==\"M\":\n # df_unstack.index = df_unstack.index.set_levels(df_unstack.index.levels[level_index].map(lambda x : DicoMonthOfTheYear[x][0],DicoDayOfWeek), level=level_index)\n # elif Abbreviation==False:\n # if ColumnsName==\"WeekDay\":\n # df_unstack.index = df_unstack.index.set_levels(df_unstack.index.levels[level_index].map(lambda x : DicoDayOfWeek[x][1],DicoDayOfWeek), level=level_index)\n # elif ColumnsName==\"M\":\n # df_unstack.index = df_unstack.index.set_levels(df_unstack.index.levels[level_index].map(lambda x : DicoMonthOfTheYear[x][1],DicoDayOfWeek), level=level_index)\n # else:\n\n # if Abbreviation==True:\n # if ColumnsName==\"WeekDay\":\n # df_unstack.columns = df_unstack.columns.map(lambda x : DicoDayOfWeek[x][0],DicoDayOfWeek)\n # elif ColumnsName==\"M\":\n # df_unstack.columns = df_unstack.columns.map(lambda x : DicoMonthOfTheYear[x][0],DicoMonthOfTheYear)\n # elif Abbreviation==False:\n # if ColumnsName==\"WeekDay\":\n # df_unstack.columns = df_unstack.columns.map(lambda x : DicoDayOfWeek[x][1],DicoDayOfWeek)\n # elif ColumnsName==\"M\":\n # df_unstack.columns = df_unstack.columns.map(lambda x : DicoMonthOfTheYear[x][1],DicoMonthOfTheYear)\n\n # else:\n\n # if \"WeekDay\" in ListOfDateAndTime and \"WeekDay\"==ListOfDateAndTime[0]:\n # if Abbreviation==True:\n # df_unstack.columns = df_unstack.columns.map(lambda x : DicoDayOfWeek[x][0],DicoDayOfWeek)\n # else:\n # df_unstack.columns = df_unstack.columns.map(lambda x : DicoDayOfWeek[x][1],DicoDayOfWeek)\n # else:\n # if Abbreviation==True:\n # df_unstack.index = df_unstack.index.map(lambda x : DicoDayOfWeek[x][0],DicoDayOfWeek)\n # else:\n # df_unstack.index = df_unstack.index.map(lambda x : DicoDayOfWeek[x][1],DicoDayOfWeek)\n\n # if \"M\" in ListOfDateAndTime and \"M\"==ListOfDateAndTime[0]:\n # if Abbreviation==True:\n # df_unstack.columns = df_unstack.columns.map(lambda x : DicoMonthOfTheYear[x][0],DicoMonthOfTheYear)\n # elif Abbreviation==False:\n # df_unstack.columns = df_unstack.columns.map(lambda x : DicoMonthOfTheYear[x][1],DicoMonthOfTheYear)\n # else:\n # if Abbreviation==True:\n # df_unstack.index = df_unstack.index.map(lambda x : DicoMonthOfTheYear[x][0],DicoMonthOfTheYear)\n # elif Abbreviation==False:\n # df_unstack.index = df_unstack.index.map(lambda x : DicoMonthOfTheYear[x][1],DicoMonthOfTheYear)\n # print(df_unstack.index)\n # # fig, axes=plot.subplots(nrows=4,ncols=2,)\n # # 
axes[0][0].plot(df_unstack)\n # # plot.show()\n # # ax.plot(df_unstack)\n # # fig = plot.figure() # create a figure object\n # # axs = fig.subplots(nrows=4,ncols=2)\n # # fig\n # # for ax in axs:\n # # ax.plot(df_grp[0])\n # # create an axes object in the figure\n # # ax.plot(df_unstack)\n # # ax.set_ylabel('some numbers')\n # # plot.figure(1)\n # # df_unstack.plot()\n # # fig=plot.figure()\n # # ax1=fig.add_subplot(df_unstack)\n\n # DicoConfigRowColumsSubPlot={\"Y\":(4,3),\"M\":(4,3),\"W\":(13,4),\"D\":(8,4),\"WeekDay\":(4,2),\"h\":(6,4),\"m\":(10,6),\"s\":(10,6)}\n # fig=df_unstack.plot(subplots=True,figsize=(70, 60), layout=DicoConfigRowColumsSubPlot[ListOfDateAndTime[0]],kind=\"bar\",sharex=True,sharey=True,legend=False,).flatten()#.map(set_xlabel=(\"toto\"))#**kwargs)\n # fig=fig.flatten()\n # # fig.text(0.5, 0.04, 'common xlabel', ha='center', va='center')\n # # fig.text(0.06, 0.5, 'common ylabel', ha='center', va='center', rotation='vertical')\n # # fig.subplots_adjust(left=None, bottom=None, right=None, top=None, wspace=0.2, hspace=0.2)\n # for i in range(len(fig)):\n \n # fig[i].set_ylabel(\"Nb. Video Trending\")\n # fig[i].set_xlabel(\"Time\")\n\n # plot.subplots_adjust(left=None, bottom=None, right=None, top=None, wspace=0.2, hspace=0.5)\n # plot.show()\n \n # plot.show()\n\n\n # df_unstack[df_unstack.columns[0]].plot(ax=axes[0,0])\n # df_unstack[df_unstack.columns[1]].plot(ax=axes[0,1])\n # plot.show()\n\n # rowlength = df_grp.ngroups//2\n # fig, axs = plot.subplots()\n # df_unstack.plot(subplot=True,layout=(4, 2), figsize=(70, 60),kind=\"bar\",sharex=True,sharey=True,)\n # fig=df_unstack.plot(subplot=True,ax=ax,kind=\"bar\")\n #title of the x axis of the plot\n # ax.set_xlabel('common xlabel')\n # fig.xlabel('common xlabel')\n # fig.ylabel('common ylabel')\n # plot.xlabel(\"hours\")\n\n #title of y axis of the plot\n # plot.ylabel(\"Number Of Video Trending\")\n # plot.(xtitle=\"hours\",ytitle=\"Number Of Video Trending\")\n # plot.tight_layout()\n plot.show()\n # plot.show()\n # fig, ax = plot.subplots(figsize=(10,4))\n # for key, grp in df.groupby(ListOfDateAndTime):\n # ax.plot(grp['WeekDay'], grp['h'], label=key)\n\n # ax.legend()\n # plot.show()\n\n\n #Go from pd series to dataframe with another index\n df=df.to_frame(name = 'Number Of Video Trending').reset_index()\n\n\n \n # fig, axs = plot.subplots(2, 1, sharex=True)\n\n # # gs = df.groupby([\"WeekDay\",\"h\"], axis=1)\n # # df.set_index('WeekDay',inplace=True)\n # gs = df.groupby([\"WeekDay\"], axis=1)\n # for (_, g), ax in zip(gs, axs):\n # g.plot.bar(stacked=True, ax=ax)\n\n # plot.show()\n \n if \"WeekDay\" in ListOfDateAndTime:\n dayOfWeek={\"00\":'Monday', \"01\":'Tuesday', \"02\":'Wednesday', \"03\":'Thursday', \"04\":'Friday', \"05\":'Saturday', \"06\":'Sunday'}\n df['WeekDay'] = df['WeekDay'].map(dayOfWeek)\n\n #create the columns time in function of the date and time in listoftime\n if len(ListOfDate)>0 and len(ListOfTime)>0:\n df['Time'] = df[ListOfDate].astype(str).agg('-'.join, axis=1)+\" \"+df[ListOfTime].astype(str).agg(':'.join, axis=1)\n elif len(ListOfDate)>0 and len(ListOfTime)==0:\n df['Time'] = df[ListOfDate].astype(str).agg('-'.join, axis=1)\n elif len(ListOfDate)==0 and len(ListOfTime)>0:\n df['Time'] = df[ListOfTime].astype(str).agg(':'.join, axis=1)\n \n #Put the column Time in index\n df.set_index( df['Time'], inplace=True)\n\n #add the column Time to ListOfDateAndTime before dropping every columns of ListOfDateAndTime to have a nice dataframe with just the number\n #of videos trending and the 
time index\n ListOfDateAndTime.append('Time')\n df=df.drop(ListOfDateAndTime,axis=1)\n\n else:\n #if their is only one thing in the list\n\n\n #get the list of all indexes \n SegmentOfDateOrTime=DicoGroubyPossibility[ListOfDateAndTime[0]].astype(str).tolist()\n\n # and add a zero in front of the index string to have 00 h and not 0h or days etc \n for DateOrTime in range(len(SegmentOfDateOrTime)):\n if len(SegmentOfDateOrTime[DateOrTime])==1:\n SegmentOfDateOrTime[DateOrTime]=str(0)+SegmentOfDateOrTime[DateOrTime]\n\n #grouby in function of the entry in the list of index \n df=df.groupby(SegmentOfDateOrTime)[\"views\"].count()\n\n #Create a dataframe with the grouby serie\n df=df.to_frame(name = 'Number Of Video Trending')#.reset_index()\n\n # Rename the dataframe index in Time\n df.index=df.index.rename('Time')\n\n \n \n # df1.columns=ListOfDateAndTime.split(\"_\")\n # df1=df1.to_frame(name = 'count').reset_index()\n \n # df=df.loc[:,ListOfTime].join()\n \n\n\n\n\n # df=df.resample(\"60T\").views.count()#, df.index.minute df.index.hour\n # df=df.groupby(pd.Grouper(key='publish_time',freq='30T')).views.count()#, df.index.minute df.index.hour\n # df=df.groupby([df.index.second]).views.count()#df.index.hour,\n # df=df.groupby([df.index.hour,df.index.minute,df.index.second]).views.count()\n # df=df.groupby([df.index.year,df.index.month,df.index.day,df.index.hour,df.index.minute,df.index.second]).views.count()\n # print(df)\n df.plot(kind=\"bar\")\n\n plot.show()\n \n # df_FiltResult=df[\"views\"].resample(\"H\").count()\n # print(df_FiltResult)\n FindText=\" !\"\n filtre=\"Minute\"\n NumberOfVideoTrendingByCountry=\"Number Of Video \"+Country\n DicoResampleAndGraph={\"Year\":(\"Y\",\"%y\"),\"Month\":(\"M\",\"%y/%m\"),\"Day\":(\"D\",\"%y/%m/%d\"),\"Hour\":(\"H\",\"%y/%m/%d %H\"),\"Minute\":(\"m\",\"%y/%m/%d %H:%m\")}\n # filt=(df.index.year==2017) | (df.index.year==2018)\n filt=(df.index.month==12) | (df.index.day==25)\n df=df[filt]\n if FindText!=\"\":\n df[\"result\"]=df[\"title\"].apply(lambda x: 1 if x.find(FindText)!=-1 else 0)\n df_FiltResult=df[\"result\"].resample(DicoResampleAndGraph[filtre][0]).sum()\n \n else:\n df_FiltResult=df[\"views\"].resample(DicoResampleAndGraph[filtre][0]).count()\n df_FiltResult.columns=[\"Label\",NumberOfVideoTrendingByCountry]\n df_FiltResult.index=df_FiltResult.index.strftime(DicoResampleAndGraph[filtre][1])#-%d\n\n # df_FiltResult.index=df_FiltResult.index.strftime(\"%V\")#-%d\n # print(df_FiltResult.index)\n # filt=(df.title.str.find(sub)!=-1)\n # df_FiltResult=df[\"title\"][filt].resample(\"W\").count()\n # df_FiltResult=df[\"title\"].resample(\"W\").count()\n # df_FiltResult.index=df_FiltResult.index.strftime(\"%V\")#-%d\n print(df_FiltResult)\n \n # if df\n # df_FiltResult.loc[\"value\"]=df[\"title\"][filt].count()\n # df.index=pd.to_datetime(df.index,format='%Y-%m-%d')\n # df_FiltResultsample.plot(y=0,kind=\"bar\")\n df_FiltResult.plot(y=0,kind=\"bar\")\n plot.show()\n NumberOfVideoTrendingByCountry=\"Number Of Video \"+Country\n Months=[\"January\",\"February\",\"March\",\"April\",\"May\",\"June\",\"July\",\"August\",\"October\",\"November\",\"December\"]\n Years=[]\n for Year in range(min(df.publish_time.dt.year),max(df.publish_time.dt.year)+1):\n Years.append(Year)\n df_VideoCountForDayOfTheWeek=pd.DataFrame(0,index=Years,columns=[NumberOfVideoTrendingByCountry])\n print(min(df.publish_time.dt.year))\n print(max(df.publish_time.dt.year))\n sub=\" Noël \"\n for Year in Years:\n filtervalue=(df.publish_time.dt.year==Year) & 
(df.title.str.find(sub)!=-1)\n df_VideoCountForDayOfTheWeek.loc[Year,NumberOfVideoTrendingByCountry]=max(df[filtervalue].count())\n print(df_VideoCountForDayOfTheWeek)\n WeekDays=[\"Monday\",\"Tuesday\",\"Wednesday\",\"Thursday\",\"Friday\",\"Saturday\",\"Sunday\"]\n df_VideoCountForDayOfTheWeek=pd.DataFrame(0,index=WeekDays,columns=[\"Number Of Videos\"])\n for WeekDay in WeekDays:\n df_VideoCountForDayOfTheWeek.loc[WeekDay,\"Number Of Videos\"]=max(df[df.publish_time.dt.day_name()==WeekDay].count())\n print(df_VideoCountForDayOfTheWeek)\n\n df_VideoCountForDayOfTheWeek.plot(y=\"Number Of Videos\",kind=\"bar\")\n plot.show()\n #insert publish date in the corresponding columns\n df.insert(5, 'publish_date', df['publish_time'].dt.date)\n\n # convert them into datetime time \n df['publish_time'] = df['publish_time'].dt.time\n\n #convert the trending date string into a datetime format\n df['trending_date'] = pd.to_datetime(df['trending_date'], format='%y.%d.%m')\n\n #Put the trending date in the same format before soustracting them to \n # get the time before trending\n df[\"trending_date\"]=df[\"trending_date\"].values.astype('datetime64[D]')\n df[\"publish_date\"]=df[\"publish_date\"].values.astype('datetime64[D]')\n\n\n\n # functionning from 1 s tp 24h \n IntervalMinute=1/60\n\n if IntervalMinute==1/60:\n\n \n counttotal=0\n countindex=0\n \n HoursForLabels=pd.date_range('00:00:00', '23:59:59',freq=str(IntervalMinute)+'T').strftime('%H:%M:%S').tolist()\n\n NumberOfVideoTrendingByCountry=\"Number Of Video \"+Country\n df_NumberHours=pd.DataFrame(0,index=HoursForLabels,columns=[\"Label\",NumberOfVideoTrendingByCountry])\n df_NumberHours[\"Label\"]=HoursForLabels\n\n\n\n for index in range(len(HoursForLabels)):\n if index<(len(HoursForLabels)-1):\n df_NumberHours.loc[HoursForLabels[index],NumberOfVideoTrendingByCountry]=df[\"views\"].between_time(start_time=HoursForLabels[index],end_time=HoursForLabels[index+1],include_end=False).count()\n else:\n df_NumberHours.loc[HoursForLabels[index],NumberOfVideoTrendingByCountry]=df[\"views\"].between_time(start_time=HoursForLabels[index],end_time=\"23:59:59\",include_start=True,include_end=True).count()\n\n else:\n #insert publish date in the corresponding columns\n df.insert(5, 'publish_date', df['publish_time'].dt.date)\n\n # convert them into datetime time \n df['publish_time'] = df['publish_time'].dt.time\n\n #convert the trending date string into a datetime format\n df['trending_date'] = pd.to_datetime(df['trending_date'], format='%y.%d.%m')\n\n #Put the trending date in the same format before soustracting them to \n # get the time before trending\n df[\"trending_date\"]=df[\"trending_date\"].values.astype('datetime64[D]')\n df[\"publish_date\"]=df[\"publish_date\"].values.astype('datetime64[D]')\n\n\n #Get all time data in function of the day of the week if DayOfTheWeek==\"All\" skip this to have all day of the dataframe\n df[\"weekday_publish_date\"] = df[\"publish_date\"].dt.day_name()\n # df=GetDFFromWeekDay(df,DayOfTheWeek)\n \n\n\n\n # get the time before trending\n df[\"Time_Before_Trending\"]=df[\"trending_date\"].sub(df[\"publish_date\"],axis=0)\n\n\n\n # count the number of video publish in the same time \n df_NumberHours=df['publish_time'].value_counts()\n df_NumberHours.sort_values(0,ascending=True)\n # df_NumberHours.index=sorted(df_NumberHours.index,key=)\n df_NumberHours=df_NumberHours.sort_index()\n HoursForLabels=pd.date_range('00:00:00', '23:59:59',freq=str(IntervalMinute)+'T').strftime('%H:%M:%S').tolist()\n for time in 
HoursForLabels:\n if time not in df_NumberHours.index:\n df_NumberHours.set_value(time,0)\n df_NumberHours.index=df_NumberHours.index.time\n #Supres the last row of the df for interval and video publish in the interval \n # because it is 23:59:59 but is empty cause every thing goes to 00:00:00\n df_NumberHours.drop(df_NumberHours.tail(1).index,inplace=True)\n\n # print(df_NumberHours)\n # print(len(df))\n # print(df_NumberHours[NumberOfVideoTrendingByCountry].sum())\n\n\n # df_NumberHours.plot(y=NumberOfVideoTrendingByCountry,kind=\"bar\")\n # plot.show()\n\n ##############################################################################################################################\n # x=2\n # print(df)\n # print(df[\"views\"].between_time(start_time=\"00:00:00\",end_time=\"23:59:59\").count())\n # print(df[\"views\"].count())\n # print(len(df[\"views\"]))\n\n # df_NumberHours.loc[\"23:59\",[\"Label\",NumberOfVideoTrendingByCountry]] = \"23:59\",0\n # print(df_NumberHours)\n # for index in range(len(HoursForLabels)+1):\n # if index<(len(HoursForLabels)-1):\n # # if HoursForLabels[index]==\"23:30\":\n # # x=1\n # df_NumberHours.loc[HoursForLabels[index],NumberOfVideoTrendingByCountry]=df[\"views\"].between_time(start_time=HoursForLabels[index],end_time=HoursForLabels[index+1],include_end=False).count()\n # elif index==(len(HoursForLabels)-1):\n # df_NumberHours.loc[HoursForLabels[-1],NumberOfVideoTrendingByCountry]=df[\"views\"].between_time(start_time=HoursForLabels[index-1],end_time=HoursForLabels[-1],include_end=False).count()\n # else:\n # df_NumberHours.loc[\"23:59\",NumberOfVideoTrendingByCountry]=df[\"views\"].between_time(start_time=HoursForLabels[-1],end_time=\"23:59:59\",include_start=True,include_end=True).count()\n\n\n # df_NumberHours.set_index(\"Label\",inplace=True)\n\n\n # for index in range(len(HoursForLabels)):\n # if index<(len(HoursForLabels)-1):\n # df_NumberHours.loc[HoursForLabels[index],NumberOfVideoTrendingByCountry]=df[\"views\"].between_time(start_time=HoursForLabels[index],end_time=HoursForLabels[index+1],include_end=False).count()\n # elif index==len(HoursForLabels)-1:\n # df_NumberHours.loc[HoursForLabels[-1],NumberOfVideoTrendingByCountry]=df[\"views\"].between_time(start_time=HoursForLabels[-1],end_time=\"23:59:59\",include_end=True).count()\n # df_NumberHours.loc[\"23:59\",NumberOfVideoTrendingByCountry]=df[\"views\"].between_time(start_time=HoursForLabels[-1],end_time=\"23:59:59\",include_start=True,include_end=True).count()\n # elif index==len(HoursForLabels):\n \n # print(df_NumberHours[NumberOfVideoTrendingByCountry].sum())\n\n #0 a 03 \n\n\n\n\n\n\ndef anepasutiliser():\n \n print(df_NumberHours[NumberOfVideoTrendingByCountry].sum())\n\n print(df_NumberHours)\n\n\n\n\n df_NumberHours=pd.DataFrame(0,index=HoursForLabels,columns=[\"Label\",NumberOfVideoTrendingByCountry])\n df.insert(5, 'publish_date', df['publish_time'].dt.date)\n\n #convert them into datetime time \n # df['publish_time'] = df['publish_time'].dt.time\n # df['publish_time'] =df['publish_time'] .astype('datetime64[D]')\n df['publish_time'] = pd.DatetimeIndex(df['publish_time'])\n df['publish_time']=df['publish_time'].dt.time\n print(df['publish_time'])\n # count the number of video publish in the same time \n df[\"Count\"]=df['publish_time'].value_counts()\n df.sort_values('Count',ascending=True)\n print(df)\n pd.to_timedelta(df['publish_time'])\n\n df.set_index(pd.to_datetime(df['publish_time'],\"hh:mm:ss\"), inplace=True)\n\n print(df.index.time)\n\n\n # 
df.set_index(pd.DatetimeIndex(df['publish_time']), inplace=True)\n\n\n print(df.index)\n\n\n\n\n\n\n print(df['views'].resample('T').sum())\n\n\n\n\n df['publish_time'] = df['publish_time']\n\n\n\n #convert the trending date string into a datetime format\n df['trending_date'] = pd.to_datetime(df['trending_date'], format='%y.%d.%m')\n\n\n\n #Put the trending date in the same format before soustracting them to \n # get the time before trending\n df[\"trending_date\"]=df[\"trending_date\"].values.astype('datetime64[D]')\n df[\"publish_date\"]=df[\"publish_date\"].values.astype('datetime64[D]')\n\n\n df[\"weekday_publish_date\"] = df[\"publish_date\"].dt.day_name()\n # df=df[df.weekday_publish_date==DayOfTheWeek]\n\n\n\n print(df)\n\n # get the time before trending\n df[\"Time_Before_Trending\"]=df[\"trending_date\"].sub(df[\"publish_date\"],axis=0)\n\n\n\n # count the number of video publish in the same time \n Df_TimeAndNumberOfPublication=df['publish_time'].value_counts()\n Df_TimeAndNumberOfPublication.sort_values(0,ascending=True)\n\n # print(datetime.time(hour=,minute=-30,second=40))\n print(df_NumberHours.tail(5))\n #40562 via fonction via tableau 40723 \n #il faut que les valeur centrer entre 16:30 avec 15 min a gauche 15 min a droite soit increment/2 \n\n\n print(df_NumberHours[\"Number Of Video\"].sum())\n #et si les minutes sont egales a zero alors il faut retirer une heure\n # \n # df_NumberHours.plot(x=\"Label\",y=NumberOfVideoTrendingByCountry, kind='bar')\n\n # #title of the plot\n # plot.title(\"Number of Video Trending in \" +Country +\" by publication time\")\n\n # #title of the x axis of the plot\n # plot.xlabel('Time')\n\n # #title of y axis of the plot\n # plot.ylabel('Number of Video Trending')\n\n # #show the graph\n # plot.show()\n\ntesttemps()\n\n\n\ndef NumberOfVideoFilterByPublishTime(df,Country,IntervalMinute):\n\n if IntervalMinute!=1/60:\n df.set_index( df['publish_time'], inplace=True)\n counttotal=0\n countindex=0\n IntervalMinute=1/60\n HoursForLabels=pd.date_range('00:00:00', '23:59:59',freq=str(IntervalMinute)+'T').strftime('%H:%M:%S').tolist()\n\n NumberOfVideoTrendingByCountry=\"Number Of Video \"+Country\n df_NumberHours=pd.DataFrame(0,index=HoursForLabels,columns=[\"Label\",NumberOfVideoTrendingByCountry])\n df_NumberHours[\"Label\"]=HoursForLabels\n\n\n\n for index in range(len(HoursForLabels)):\n if index<(len(HoursForLabels)-1):\n df_NumberHours.loc[HoursForLabels[index],NumberOfVideoTrendingByCountry]=df[\"views\"].between_time(start_time=HoursForLabels[index],end_time=HoursForLabels[index+1],include_end=False).count()\n else:\n df_NumberHours.loc[HoursForLabels[index],NumberOfVideoTrendingByCountry]=df[\"views\"].between_time(start_time=HoursForLabels[index],end_time=\"23:59:59\",include_start=True,include_end=True).count()\n else:\n\n\n #insert publish date in the corresponding columns\n df.insert(5, 'publish_date', df['publish_time'].dt.date)\n\n # convert them into datetime time \n df['publish_time'] = df['publish_time'].dt.time\n\n #convert the trending date string into a datetime format\n df['trending_date'] = pd.to_datetime(df['trending_date'], format='%y.%d.%m')\n\n #Put the trending date in the same format before soustracting them to \n # get the time before trending\n df[\"trending_date\"]=df[\"trending_date\"].values.astype('datetime64[D]')\n df[\"publish_date\"]=df[\"publish_date\"].values.astype('datetime64[D]')\n\n\n #Get all time data in function of the day of the week if DayOfTheWeek==\"All\" skip this to have all day of the 
dataframe\n df[\"weekday_publish_date\"] = df[\"publish_date\"].dt.day_name()\n df=GetDFFromWeekDay(df,DayOfTheWeek)\n \n\n\n\n # get the time before trending\n df[\"Time_Before_Trending\"]=df[\"trending_date\"].sub(df[\"publish_date\"],axis=0)\n\n\n\n # count the number of video publish in the same time \n df_NumberHours=df['publish_time'].value_counts()\n # df_NumberHours.sort_values(0,ascending=True)\n\n\n \n #Supres the last row of the df for interval and video publish in the interval \n # because it is 23:59:59 but is empty cause every thing goes to 00:00:00\n df_NumberHours.drop(df_NumberHours.tail(1).index,inplace=True)\n \n\n return df_NumberHours"
] | [
[
"pandas.DatetimeIndex",
"pandas.read_csv",
"pandas.DataFrame",
"matplotlib.pyplot.subplots_adjust",
"matplotlib.pyplot.show",
"pandas.to_datetime",
"pandas.to_timedelta"
]
] |
binnietom/py21cmmc_wv-1 | [
"2d5405700c1d99bd5f22c762999aea89d1ca1c23"
] | [
"devel/test_wv.py"
] | [
"from py21cmmc_wv import morlet\nimport numpy as np\n\nbw = 50.0\nnumin = 130.0\nN = 736\nnu = np.arange(N) * bw/N + numin\nmid = (nu[0] + nu[-1])/2\n\nspectrum = np.exp(-(nu-mid)**2/ (2*4.0**2))\n\ntrnsc, fc, _ = morlet.morlet_transform_c(spectrum, nu)\ntrnsc = np.abs(trnsc)**2\n"
] | [
[
"numpy.arange",
"numpy.abs",
"numpy.exp"
]
] |
vgp314/Udacity-Arvato-Identify-Customer-Segments | [
"6be1d4f1eeac391c17c70fdf584bdc4813f80fd8"
] | [
"cluster.py"
] | [
"from sklearn.cluster import KMeans\nfrom sklearn.cluster import MiniBatchKMeans\nimport matplotlib.pyplot as plt\n\n\ndef plot_clustering(data):\n\t'''\n\t\tDefinition:\n\t\t\tThis function plot the squared error for the clustered points\n\t\targs:\n\t\t\tdata to be clusterd\n\t\treturns:\n\t\t\tNone\n\t\t\n\t'''\t\n\tcost =[] \n\tmax_clusters = 20\n\tfor i in range(2, max_clusters):\n\t print(\"Analysing \", i, \" clusters\")\n\t KM = MiniBatchKMeans(n_clusters = i,batch_size=20000) \n\t KM.fit(data) \n\t cost.append(KM.inertia_) \n\t \n\n\tplt.plot(range(2, max_clusters), cost, color ='g', linewidth ='3') \n\tplt.xlabel(\"Number of Clusters\") \n\tplt.ylabel(\"Squared Error (Cost)\") \n\tplt.show()\n\t \n\ndef do_clustering(data,number_clusters):\n\t'''\n\t\tDefinition:\n\t\t\tThis function initizalize KMeans with number_clusters and fit to data\n\t\targs:\n\t\t\tdata to be clustered, number_clusters\n\t\treturns:\n\t\t\tfitted K-Means mdel\n\t\t\n\t'''\t\n\t\n\tkmeans = KMeans(number_clusters)\n\tfitted_model_k_means = kmeans.fit(data)\n\treturn fitted_model_k_means\n\n"
] | [
[
"sklearn.cluster.KMeans",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"sklearn.cluster.MiniBatchKMeans"
]
] |
tkhe/tkdetection | [
"54e6c112ef2930e755f457e38449736f5743a9ea"
] | [
"projects/PointRend/point_rend/coarse_mask_head.py"
] | [
"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom tkdet.layers import Conv2d\nfrom tkdet.layers import ShapeSpec\nfrom tkdet.models.roi_head.mask_head import MASK_HEAD_REGISTRY\nfrom tkdet.utils import weight_init\n\n__all__ = [\"CoarseMaskHead\"]\n\n\n@MASK_HEAD_REGISTRY.register()\nclass CoarseMaskHead(nn.Module):\n def __init__(self, cfg, input_shape: ShapeSpec):\n super(CoarseMaskHead, self).__init__()\n\n self.num_classes = cfg.MODEL.NUM_CLASSES\n conv_dim = cfg.MODEL.ROI_MASK_HEAD.CONV_DIM\n self.fc_dim = cfg.MODEL.ROI_MASK_HEAD.FC_DIM\n num_fc = cfg.MODEL.ROI_MASK_HEAD.NUM_FC\n self.output_side_resolution = cfg.MODEL.ROI_MASK_HEAD.OUTPUT_SIDE_RESOLUTION\n self.input_channels = input_shape.channels\n self.input_h = input_shape.height\n self.input_w = input_shape.width\n\n self.conv_layers = []\n if self.input_channels > conv_dim:\n self.reduce_channel_dim_conv = Conv2d(\n self.input_channels,\n conv_dim,\n kernel_size=1,\n activation=\"ReLU\"\n )\n self.conv_layers.append(self.reduce_channel_dim_conv)\n\n self.reduce_spatial_dim_conv = Conv2d(\n conv_dim,\n conv_dim,\n kernel_size=2,\n stride=2,\n padding=0,\n bias=True,\n activation=\"ReLU\"\n )\n self.conv_layers.append(self.reduce_spatial_dim_conv)\n\n input_dim = conv_dim * self.input_h * self.input_w\n input_dim //= 4\n\n self.fcs = []\n for k in range(num_fc):\n fc = nn.Linear(input_dim, self.fc_dim)\n self.add_module(\"coarse_mask_fc{}\".format(k + 1), fc)\n self.fcs.append(fc)\n input_dim = self.fc_dim\n\n output_dim = self.num_classes * self.output_side_resolution * self.output_side_resolution\n\n self.prediction = nn.Linear(self.fc_dim, output_dim)\n nn.init.normal_(self.prediction.weight, std=0.001)\n nn.init.constant_(self.prediction.bias, 0)\n\n for layer in self.conv_layers:\n weight_init.c2_msra_fill(layer)\n for layer in self.fcs:\n weight_init.c2_xavier_fill(layer)\n\n def forward(self, x):\n N = x.shape[0]\n x = x.view(N, self.input_channels, self.input_h, self.input_w)\n for layer in self.conv_layers:\n x = layer(x)\n x = torch.flatten(x, start_dim=1)\n for layer in self.fcs:\n x = F.relu(layer(x))\n return self.prediction(x).view(\n N,\n self.num_classes,\n self.output_side_resolution,\n self.output_side_resolution\n )\n"
] | [
[
"torch.flatten",
"torch.nn.Linear",
"torch.nn.init.normal_",
"torch.nn.init.constant_"
]
] |
Chicoryn/dream-go | [
"6a4b71d7e1fcc28110ba859c0a2b59c10041c083"
] | [
"contrib/trainer/dream_tf/layers/policy_head.py"
] | [
"# Copyright (c) 2019 Karl Sundequist Blomdahl <[email protected]>\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\nimport numpy as np\nimport tensorflow as tf\n\nfrom .batch_norm import batch_norm_conv2d\nfrom .dense import dense\nfrom .recompute_grad import recompute_grad\n\n\ndef policy_head(x, mode, params):\n \"\"\"\n The policy head attached after the residual blocks as described by DeepMind:\n\n 1. A convolution of 8 filters of kernel size 3 × 3 with stride 1\n 2. Batch normalisation\n 3. A rectifier non-linearity\n 4. A fully connected linear layer that outputs a vector of size 19²+1 = 362\n corresponding to logit probabilities for all intersections and the pass\n move\n \"\"\"\n num_channels = params['num_channels']\n num_samples = params['num_samples']\n\n def _forward(x, is_recomputing=False):\n \"\"\" Returns the result of the forward inference pass on `x` \"\"\"\n y = batch_norm_conv2d(x, 'conv_1', (3, 3, num_channels, num_samples), mode, params, is_recomputing=is_recomputing)\n y = tf.nn.relu(y)\n\n y = tf.reshape(y, (-1, 361 * num_samples))\n y = dense(y, 'linear_1', (361 * num_samples, 362), policy_offset_op, mode, params, is_recomputing=is_recomputing)\n\n return tf.cast(y, tf.float32)\n\n return recompute_grad(_forward)(x)\n\n\ndef policy_offset_op(shape, dtype=None, partition_info=None):\n \"\"\" Initial value for the policy offset, this should roughly correspond to\n the log probability of each move being played. 
\"\"\"\n return np.array([\n -7.93991e+00, -6.91853e+00, -6.86255e+00, -6.78094e+00, -6.79361e+00, -6.75976e+00,\n -6.88288e+00, -6.90817e+00, -6.93508e+00, -6.92374e+00, -6.91856e+00, -6.91075e+00,\n -6.87607e+00, -6.75246e+00, -6.79823e+00, -6.80791e+00, -6.86863e+00, -6.89708e+00,\n -7.93729e+00, -6.95779e+00, -6.11830e+00, -5.85974e+00, -5.83566e+00, -5.81966e+00,\n -5.84875e+00, -5.90686e+00, -5.97848e+00, -5.99648e+00, -5.99342e+00, -5.99524e+00,\n -5.96306e+00, -5.88135e+00, -5.83725e+00, -5.81963e+00, -5.84671e+00, -5.85574e+00,\n -6.07402e+00, -6.89741e+00, -6.91472e+00, -5.87616e+00, -5.51456e+00, -5.48398e+00,\n -5.55522e+00, -5.49329e+00, -5.70271e+00, -5.65749e+00, -5.70621e+00, -5.68975e+00,\n -5.69774e+00, -5.66463e+00, -5.68246e+00, -5.43859e+00, -5.59398e+00, -5.44977e+00,\n -5.45890e+00, -5.81432e+00, -6.85663e+00, -6.83055e+00, -5.84429e+00, -5.40160e+00,\n -5.34049e+00, -5.66119e+00, -5.62512e+00, -5.71932e+00, -5.72455e+00, -5.70309e+00,\n -5.69903e+00, -5.70189e+00, -5.71451e+00, -5.68138e+00, -5.59716e+00, -5.64521e+00,\n -5.29867e+00, -5.42794e+00, -5.80074e+00, -6.80807e+00, -6.81930e+00, -5.82896e+00,\n -5.63177e+00, -5.67078e+00, -5.93261e+00, -5.78339e+00, -5.80250e+00, -5.78522e+00,\n -5.79703e+00, -5.79409e+00, -5.79848e+00, -5.78746e+00, -5.77879e+00, -5.76154e+00,\n -5.94899e+00, -5.67992e+00, -5.59753e+00, -5.78787e+00, -6.79474e+00, -6.79318e+00,\n -5.85460e+00, -5.47365e+00, -5.60804e+00, -5.79080e+00, -5.80699e+00, -5.80015e+00,\n -5.81436e+00, -5.81617e+00, -5.80918e+00, -5.81150e+00, -5.80510e+00, -5.77611e+00,\n -5.78804e+00, -5.76476e+00, -5.58303e+00, -5.41241e+00, -5.83056e+00, -6.78050e+00,\n -6.88840e+00, -5.91061e+00, -5.69064e+00, -5.71108e+00, -5.79579e+00, -5.80311e+00,\n -5.81472e+00, -5.81526e+00, -5.81671e+00, -5.81616e+00, -5.81570e+00, -5.80513e+00,\n -5.79622e+00, -5.77254e+00, -5.77513e+00, -5.67571e+00, -5.67228e+00, -5.89279e+00,\n -6.86025e+00, -6.91154e+00, -5.97718e+00, -5.66273e+00, -5.72542e+00, -5.78770e+00,\n -5.81699e+00, -5.81516e+00, -5.81869e+00, -5.81941e+00, -5.81940e+00, -5.81482e+00,\n -5.80754e+00, -5.79365e+00, -5.78832e+00, -5.75882e+00, -5.70202e+00, -5.63253e+00,\n -5.94600e+00, -6.88401e+00, -6.91774e+00, -5.99960e+00, -5.70958e+00, -5.70386e+00,\n -5.80010e+00, -5.81106e+00, -5.81648e+00, -5.81789e+00, -5.81997e+00, -5.81948e+00,\n -5.81279e+00, -5.80583e+00, -5.80135e+00, -5.78998e+00, -5.77203e+00, -5.68193e+00,\n -5.67815e+00, -5.96948e+00, -6.88898e+00, -6.91699e+00, -5.99684e+00, -5.69323e+00,\n -5.68440e+00, -5.79516e+00, -5.81060e+00, -5.81611e+00, -5.81406e+00, -5.81620e+00,\n -5.80901e+00, -5.81298e+00, -5.80653e+00, -5.79696e+00, -5.78196e+00, -5.76473e+00,\n -5.65428e+00, -5.66398e+00, -5.96876e+00, -6.89641e+00, -6.92151e+00, -5.99694e+00,\n -5.71110e+00, -5.71325e+00, -5.79821e+00, -5.80778e+00, -5.81212e+00, -5.81205e+00,\n -5.81020e+00, -5.81116e+00, -5.80801e+00, -5.79830e+00, -5.79276e+00, -5.78653e+00,\n -5.77101e+00, -5.68899e+00, -5.69274e+00, -5.97098e+00, -6.90131e+00, -6.89817e+00,\n -5.95772e+00, -5.64660e+00, -5.72654e+00, -5.77678e+00, -5.80212e+00, -5.80607e+00,\n -5.80127e+00, -5.80551e+00, -5.80743e+00, -5.80042e+00, -5.79346e+00, -5.79025e+00,\n -5.78733e+00, -5.75338e+00, -5.69506e+00, -5.63437e+00, -5.95747e+00, -6.88818e+00,\n -6.86408e+00, -5.86964e+00, -5.67686e+00, -5.70769e+00, -5.79369e+00, -5.78719e+00,\n -5.79913e+00, -5.80025e+00, -5.80054e+00, -5.80132e+00, -5.79529e+00, -5.78667e+00,\n -5.78821e+00, -5.76922e+00, -5.76675e+00, -5.69570e+00, -5.68074e+00, -5.90285e+00,\n 
-6.86338e+00, -6.76061e+00, -5.80263e+00, -5.41706e+00, -5.58843e+00, -5.78328e+00,\n -5.79366e+00, -5.78934e+00, -5.79841e+00, -5.79591e+00, -5.79041e+00, -5.79060e+00,\n -5.78705e+00, -5.78000e+00, -5.77674e+00, -5.75681e+00, -5.57623e+00, -5.50113e+00,\n -5.85626e+00, -6.78012e+00, -6.79139e+00, -5.80594e+00, -5.58041e+00, -5.65286e+00,\n -5.94338e+00, -5.77647e+00, -5.78968e+00, -5.77167e+00, -5.78232e+00, -5.76841e+00,\n -5.77241e+00, -5.75895e+00, -5.78530e+00, -5.76951e+00, -5.88238e+00, -5.64461e+00,\n -5.61617e+00, -5.82903e+00, -6.80791e+00, -6.81286e+00, -5.84175e+00, -5.48596e+00,\n -5.28293e+00, -5.71807e+00, -5.60505e+00, -5.71724e+00, -5.70963e+00, -5.68757e+00,\n -5.65039e+00, -5.67046e+00, -5.68983e+00, -5.69079e+00, -5.58636e+00, -5.60082e+00,\n -5.39104e+00, -5.38788e+00, -5.85818e+00, -6.81584e+00, -6.83461e+00, -5.85197e+00,\n -5.47331e+00, -5.40193e+00, -5.63715e+00, -5.47135e+00, -5.68295e+00, -5.64977e+00,\n -5.67997e+00, -5.64680e+00, -5.67367e+00, -5.61327e+00, -5.67216e+00, -5.50078e+00,\n -5.53072e+00, -5.40751e+00, -5.52960e+00, -5.87713e+00, -6.89602e+00, -6.89446e+00,\n -6.07997e+00, -5.83860e+00, -5.78284e+00, -5.77460e+00, -5.81606e+00, -5.88522e+00,\n -5.95163e+00, -5.97232e+00, -5.95954e+00, -5.96527e+00, -5.94048e+00, -5.88465e+00,\n -5.82810e+00, -5.82003e+00, -5.84255e+00, -5.88531e+00, -6.11968e+00, -6.92480e+00,\n -7.88397e+00, -6.89418e+00, -6.83908e+00, -6.78821e+00, -6.75784e+00, -6.75053e+00,\n -6.85545e+00, -6.88249e+00, -6.88945e+00, -6.88525e+00, -6.88876e+00, -6.86828e+00,\n -6.83631e+00, -6.75981e+00, -6.76317e+00, -6.74771e+00, -6.86408e+00, -6.90874e+00,\n -7.91371e+00, -6.27113e+00\n ])\n"
] | [
[
"numpy.array",
"tensorflow.cast",
"tensorflow.nn.relu",
"tensorflow.reshape"
]
] |
eblur/newdust | [
"7e843ae2604a844826606ea04c459694fdd5c178"
] | [
"newdust/graindist/composition/cmdrude.py"
] | [
"import numpy as np\nfrom newdust import constants as c\n\n__all__ = ['CmDrude']\n\nRHO_DRUDE = 3.0 # g cm^-3\nLAM_MAX = c.hc / 0.01 # maximal wavelength that we will allow for RG-Drude\n\nclass CmDrude(object):\n \"\"\"\n | **ATTRIBUTES**\n | cmtype : 'Drude'\n | rho : grain density [g cm^-3]\n | citation : A string containing citation to original work\n |\n | *functions*\n | rp(lam, unit='kev') : Returns real part (unit='kev'|'angs')\n | ip(lam, unit='kev') : Returns imaginary part (always 0.0)\n | cm(lam, unit='kev') : Complex index of refraction of dtype='complex'\n | plot(lam, unit='kev') : Plots Re(m-1)\n \"\"\"\n def __init__(self, rho=RHO_DRUDE): # Returns a CM using the Drude approximation\n self.cmtype = 'Drude'\n self.rho = rho\n self.citation = \"Using the Drude approximation.\\nBohren, C. F. & Huffman, D. R., 1983, Absorption and Scattering of Light by Small Particles (New York: Wiley)\"\n\n def rp(self, lam, unit='kev'):\n assert unit in c.ALLOWED_LAM_UNITS\n lam_cm = c._lam_cm(lam, unit)\n\n mm1 = self.rho / (2.0*c.m_p) * c.r_e/(2.0*np.pi) * np.power(lam_cm, 2)\n return mm1 + 1.0\n\n '''# Returns 1 if the wavelength supplied is too low energy (i.e. inappropriate for applying Drude)\n mm1 = np.zeros(np.size(lam_cm))\n if (np.size(lam_cm) == 1):\n if lam_cm >= LAM_MAX:\n pass\n else:\n mm1 = self.rho / (2.0*c.m_p) * c.r_e/(2.0*np.pi) * np.power(lam_cm, 2)\n else:\n ii = (lam_cm <= LAM_MAX)\n mm1[ii] = self.rho / (2.0*c.m_p) * c.r_e/(2.0*np.pi) * np.power(lam_cm[ii], 2)\n return mm1 + 1.0'''\n\n def ip(self, lam, unit='kev'):\n if np.size(lam) > 1:\n return np.zeros(np.size(lam))\n else:\n return 0.0\n\n def cm(self, lam, unit='kev'):\n return self.rp(lam, unit=unit) + 0j\n\n def plot(self, ax, lam, unit='kev', **kwargs):\n assert unit in c.ALLOWED_LAM_UNITS\n rp = self.rp(lam, unit=unit)\n ax.plot(lam, rp-1.0, **kwargs)\n ax.set_ylabel(\"m-1\")\n if unit == 'kev':\n ax.set_xlabel(\"Energy (keV)\")\n if unit == 'angs':\n ax.set_xlabel(\"Wavelength (Angstroms)\")\n"
] | [
[
"numpy.power",
"numpy.size"
]
] |
luvrpg/cleverhans | [
"1f2ee7a04cff1ec54c96dcba5294f6e2d7780d42"
] | [
"examples/nips17_adversarial_competition/dev_toolkit/sample_defenses/ens_adv_inception_resnet_v2/defense.py"
] | [
"\"\"\"Implementation of sample defense.\n\nThis defense loads inception resnet v2 checkpoint and classifies all images\nusing loaded checkpoint.\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\n\nimport numpy as np\nfrom scipy.misc import imread\n\nimport tensorflow as tf\n\nimport inception_resnet_v2\n\nslim = tf.contrib.slim\n\n\ntf.flags.DEFINE_string(\n 'master', '', 'The address of the TensorFlow master to use.')\n\ntf.flags.DEFINE_string(\n 'checkpoint_path', '', 'Path to checkpoint for inception network.')\n\ntf.flags.DEFINE_string(\n 'input_dir', '', 'Input directory with images.')\n\ntf.flags.DEFINE_string(\n 'output_file', '', 'Output file to save labels.')\n\ntf.flags.DEFINE_integer(\n 'image_width', 299, 'Width of each input images.')\n\ntf.flags.DEFINE_integer(\n 'image_height', 299, 'Height of each input images.')\n\ntf.flags.DEFINE_integer(\n 'batch_size', 16, 'How many images process at one time.')\n\nFLAGS = tf.flags.FLAGS\n\n\ndef load_images(input_dir, batch_shape):\n \"\"\"Read png images from input directory in batches.\n\n Args:\n input_dir: input directory\n batch_shape: shape of minibatch array, i.e. [batch_size, height, width, 3]\n\n Yields:\n filenames: list file names without path of each image\n Lenght of this list could be less than batch_size, in this case only\n first few images of the result are elements of the minibatch.\n images: array with all images from this batch\n \"\"\"\n images = np.zeros(batch_shape)\n filenames = []\n idx = 0\n batch_size = batch_shape[0]\n for filepath in tf.gfile.Glob(os.path.join(input_dir, '*.png')):\n with tf.gfile.Open(filepath) as f:\n image = imread(f, mode='RGB').astype(np.float) / 255.0\n # Images for inception classifier are normalized to be in [-1, 1] interval.\n images[idx, :, :, :] = image * 2.0 - 1.0\n filenames.append(os.path.basename(filepath))\n idx += 1\n if idx == batch_size:\n yield filenames, images\n filenames = []\n images = np.zeros(batch_shape)\n idx = 0\n if idx > 0:\n yield filenames, images\n\n\ndef main(_):\n batch_shape = [FLAGS.batch_size, FLAGS.image_height, FLAGS.image_width, 3]\n num_classes = 1001\n\n tf.logging.set_verbosity(tf.logging.INFO)\n\n with tf.Graph().as_default():\n # Prepare graph\n x_input = tf.placeholder(tf.float32, shape=batch_shape)\n\n with slim.arg_scope(inception_resnet_v2.inception_resnet_v2_arg_scope()):\n _, end_points = inception_resnet_v2.inception_resnet_v2(\n x_input, num_classes=num_classes, is_training=False)\n\n predicted_labels = tf.argmax(end_points['Predictions'], 1)\n\n # Run computation\n saver = tf.train.Saver(slim.get_model_variables())\n session_creator = tf.train.ChiefSessionCreator(\n scaffold=tf.train.Scaffold(saver=saver),\n checkpoint_filename_with_path=FLAGS.checkpoint_path,\n master=FLAGS.master)\n\n with tf.train.MonitoredSession(session_creator=session_creator) as sess:\n with tf.gfile.Open(FLAGS.output_file, 'w') as out_file:\n for filenames, images in load_images(FLAGS.input_dir, batch_shape):\n labels = sess.run(predicted_labels, feed_dict={x_input: images})\n for filename, label in zip(filenames, labels):\n out_file.write('{0},{1}\\n'.format(filename, label))\n\n\nif __name__ == '__main__':\n tf.app.run()\n"
] | [
[
"tensorflow.placeholder",
"tensorflow.flags.DEFINE_integer",
"numpy.zeros",
"tensorflow.app.run",
"tensorflow.logging.set_verbosity",
"tensorflow.Graph",
"tensorflow.train.Scaffold",
"tensorflow.argmax",
"tensorflow.flags.DEFINE_string",
"tensorflow.gfile.Open",
"scipy.misc.imread",
"tensorflow.train.MonitoredSession"
]
] |
YaoYao1995/mbpo | [
"b9571e469459ce3a632b19dc3fee68c9ac3857b2"
] | [
"mbpo/algorithms/meee.py"
] | [
"## adapted from https://github.com/rail-berkeley/softlearning/blob/master/softlearning/algorithms/sac.py\r\n\r\nimport os\r\nimport math\r\nimport pickle\r\nfrom collections import OrderedDict\r\nfrom numbers import Number\r\nfrom itertools import count\r\nimport gtimer as gt\r\nimport pdb\r\n\r\nimport numpy as np\r\nimport tensorflow as tf\r\nfrom tensorflow.python.training import training_util\r\n\r\nfrom softlearning.algorithms.rl_algorithm import RLAlgorithm\r\nfrom softlearning.replay_pools.simple_replay_pool import WeightedReplayPool\r\n\r\nfrom mbpo.models.constructor import construct_model, format_samples_for_training\r\nfrom mbpo.models.fake_env import FakeEnv\r\nfrom mbpo.utils.writer import Writer\r\nfrom mbpo.utils.visualization import visualize_policy\r\nfrom mbpo.utils.logging import Progress\r\nimport mbpo.utils.filesystem as filesystem\r\n\r\n\r\ndef td_target(reward, discount, next_value):\r\n return reward + discount * next_value\r\n\r\n\r\nclass MEEE(RLAlgorithm):\r\n \"\"\" Model-Ensemble Policy Optimization (MEEE)\r\n \"\"\"\r\n\r\n def __init__(\r\n self,\r\n training_environment,\r\n evaluation_environment,\r\n policy,\r\n Qs,\r\n pool,\r\n static_fns,\r\n plotter=None,\r\n tf_summaries=False,\r\n\r\n lr=3e-4,\r\n reward_scale=1.0,\r\n target_entropy='auto',\r\n discount=0.99,\r\n tau=5e-3,\r\n target_update_interval=1,\r\n action_prior='uniform',\r\n reparameterize=False,\r\n store_extra_policy_info=False,\r\n\r\n deterministic=False,\r\n model_train_freq=250,\r\n num_networks=7,\r\n num_elites=5,\r\n model_retain_epochs=20,\r\n rollout_batch_size=100e3,\r\n real_ratio=0.1,\r\n rollout_schedule=[20,100,1,1],\r\n hidden_dim=200,\r\n max_model_t=None,\r\n **kwargs,\r\n ):\r\n \"\"\"\r\n Args:\r\n env (`SoftlearningEnv`): Environment used for training.\r\n policy: A policy function approximator.\r\n initial_exploration_policy: ('Policy'): A policy that we use\r\n for initial exploration which is not trained by the algorithm.\r\n Qs: Q-function approximators. The min of these\r\n approximators will be used. Usage of at least two Q-functions\r\n improves performance by reducing overestimation bias.\r\n pool (`PoolBase`): Replay pool to add gathered samples to.\r\n plotter (`QFPolicyPlotter`): Plotter instance to be used for\r\n visualizing Q-function during training.\r\n lr (`float`): Learning rate used for the function approximators.\r\n discount (`float`): Discount factor for Q-function updates.\r\n tau (`float`): Soft value function target update weight.\r\n target_update_interval ('int'): Frequency at which target network\r\n updates occur in iterations.\r\n reparameterize ('bool'): If True, we use a gradient estimator for\r\n the policy derived using the reparameterization trick. 
We use\r\n a likelihood ratio based estimator otherwise.\r\n \"\"\"\r\n\r\n super(MEEE, self).__init__(**kwargs)\r\n\r\n obs_dim = np.prod(training_environment.observation_space.shape)\r\n act_dim = np.prod(training_environment.action_space.shape)\r\n self._model = construct_model(obs_dim=obs_dim, act_dim=act_dim, hidden_dim=hidden_dim, num_networks=num_networks, num_elites=num_elites)\r\n self._static_fns = static_fns\r\n self.fake_env = FakeEnv(self._model, self._static_fns)\r\n\r\n self._rollout_schedule = rollout_schedule\r\n self._max_model_t = max_model_t\r\n\r\n # self._model_pool_size = model_pool_size\r\n # print('[ MBPO ] Model pool size: {:.2E}'.format(self._model_pool_size))\r\n # self._model_pool = WeightedReplayPool(pool._observation_space, pool._action_space, self._model_pool_size)\r\n\r\n self._model_retain_epochs = model_retain_epochs\r\n\r\n self._model_train_freq = model_train_freq\r\n self._rollout_batch_size = int(rollout_batch_size)\r\n self._deterministic = deterministic\r\n self._real_ratio = real_ratio\r\n\r\n self._log_dir = os.getcwd()\r\n self._writer = Writer(self._log_dir)\r\n\r\n self._training_environment = training_environment\r\n self._evaluation_environment = evaluation_environment\r\n self._policy = policy\r\n\r\n self._Qs = Qs\r\n self._Q_targets = tuple(tf.keras.models.clone_model(Q) for Q in Qs)\r\n\r\n self._pool = pool\r\n self._plotter = plotter\r\n self._tf_summaries = tf_summaries\r\n\r\n self._policy_lr = lr\r\n self._Q_lr = lr\r\n\r\n self._reward_scale = reward_scale\r\n self._target_entropy = (\r\n -np.prod(self._training_environment.action_space.shape)\r\n if target_entropy == 'auto'\r\n else target_entropy)\r\n print('[ MEEE ] Target entropy: {}'.format(self._target_entropy))\r\n\r\n self._discount = discount\r\n self._tau = tau\r\n self._target_update_interval = target_update_interval\r\n self._action_prior = action_prior\r\n\r\n self._reparameterize = reparameterize\r\n self._store_extra_policy_info = store_extra_policy_info\r\n\r\n observation_shape = self._training_environment.active_observation_shape\r\n action_shape = self._training_environment.action_space.shape\r\n\r\n assert len(observation_shape) == 1, observation_shape\r\n self._observation_shape = observation_shape\r\n assert len(action_shape) == 1, action_shape\r\n self._action_shape = action_shape\r\n\r\n self._build()\r\n\r\n def _build(self):\r\n self._training_ops = {}\r\n\r\n self._init_global_step()\r\n self._init_placeholders()\r\n self._init_actor_update()\r\n self._init_critic_update()\r\n\r\n def _train(self):\r\n \r\n \"\"\"Return a generator that performs RL training.\r\n\r\n Args:\r\n env (`SoftlearningEnv`): Environment used for training.\r\n policy (`Policy`): Policy used for training\r\n initial_exploration_policy ('Policy'): Policy used for exploration\r\n If None, then all exploration is done using policy\r\n pool (`PoolBase`): Sample pool to add samples to\r\n \"\"\"\r\n training_environment = self._training_environment\r\n evaluation_environment = self._evaluation_environment\r\n policy = self._policy\r\n pool = self._pool\r\n model_metrics = {}\r\n\r\n if not self._training_started:\r\n self._init_training()\r\n\r\n self._initial_exploration_hook(\r\n training_environment, self._initial_exploration_policy, pool)\r\n\r\n self.sampler.initialize(training_environment, policy, pool)\r\n\r\n gt.reset_root()\r\n gt.rename_root('RLAlgorithm')\r\n gt.set_def_unique(False)\r\n\r\n self._training_before_hook()\r\n\r\n for self._epoch in 
gt.timed_for(range(self._epoch, self._n_epochs)):\r\n\r\n self._epoch_before_hook()\r\n gt.stamp('epoch_before_hook')\r\n\r\n self._training_progress = Progress(self._epoch_length * self._n_train_repeat)\r\n start_samples = self.sampler._total_samples\r\n for i in count():\r\n samples_now = self.sampler._total_samples\r\n self._timestep = samples_now - start_samples\r\n\r\n if (samples_now >= start_samples + self._epoch_length\r\n and self.ready_to_train):\r\n break\r\n\r\n self._timestep_before_hook()\r\n gt.stamp('timestep_before_hook')\r\n\r\n if self._timestep % self._model_train_freq == 0 and self._real_ratio < 1.0:\r\n self._training_progress.pause()\r\n print('[ MEEE ] log_dir: {} | ratio: {}'.format(self._log_dir, self._real_ratio))\r\n print('[ MEEE ] Training model at epoch {} | freq {} | timestep {} (total: {}) | epoch train steps: {} (total: {})'.format(\r\n self._epoch, self._model_train_freq, self._timestep, self._total_timestep, self._train_steps_this_epoch, self._num_train_steps)\r\n )\r\n\r\n model_train_metrics = self._train_model(batch_size=256, max_epochs=None, holdout_ratio=0.2, max_t=self._max_model_t)\r\n model_metrics.update(model_train_metrics)\r\n gt.stamp('epoch_train_model')\r\n \r\n self._set_rollout_length()\r\n self._reallocate_model_pool()\r\n model_rollout_metrics = self._rollout_model(rollout_batch_size=self._rollout_batch_size, deterministic=self._deterministic)\r\n model_metrics.update(model_rollout_metrics)\r\n \r\n\r\n gt.stamp('epoch_rollout_model')\r\n # self._visualize_model(self._evaluation_environment, self._total_timestep)\r\n self._training_progress.resume()\r\n\r\n # No UCB exploration\r\n #self._do_sampling(timestep=self._total_timestep)\r\n \r\n self._do_sampling(timestep=self._total_timestep, disturb=True, fake_env=self.fake_env, Qs = self._Qs)\r\n #print(\"**exploration**\")\r\n gt.stamp('sample')\r\n\r\n if self.ready_to_train:\r\n self._do_training_repeats(timestep=self._total_timestep)\r\n gt.stamp('train')\r\n\r\n self._timestep_after_hook()\r\n gt.stamp('timestep_after_hook')\r\n\r\n training_paths = self.sampler.get_last_n_paths(\r\n math.ceil(self._epoch_length / self.sampler._max_path_length))\r\n gt.stamp('training_paths')\r\n evaluation_paths = self._evaluation_paths(\r\n policy, evaluation_environment)\r\n gt.stamp('evaluation_paths')\r\n\r\n training_metrics = self._evaluate_rollouts(\r\n training_paths, training_environment)\r\n gt.stamp('training_metrics')\r\n if evaluation_paths:\r\n evaluation_metrics = self._evaluate_rollouts(\r\n evaluation_paths, evaluation_environment)\r\n gt.stamp('evaluation_metrics')\r\n else:\r\n evaluation_metrics = {}\r\n\r\n self._epoch_after_hook(training_paths)\r\n gt.stamp('epoch_after_hook')\r\n\r\n sampler_diagnostics = self.sampler.get_diagnostics()\r\n\r\n diagnostics = self.get_diagnostics(\r\n iteration=self._total_timestep,\r\n batch=self._evaluation_batch(),\r\n training_paths=training_paths,\r\n evaluation_paths=evaluation_paths)\r\n\r\n time_diagnostics = gt.get_times().stamps.itrs\r\n\r\n diagnostics.update(OrderedDict((\r\n *(\r\n (f'evaluation/{key}', evaluation_metrics[key])\r\n for key in sorted(evaluation_metrics.keys())\r\n ),\r\n *(\r\n (f'training/{key}', training_metrics[key])\r\n for key in sorted(training_metrics.keys())\r\n ),\r\n *(\r\n (f'times/{key}', time_diagnostics[key][-1])\r\n for key in sorted(time_diagnostics.keys())\r\n ),\r\n *(\r\n (f'sampler/{key}', sampler_diagnostics[key])\r\n for key in sorted(sampler_diagnostics.keys())\r\n ),\r\n *(\r\n (f'model/{key}', 
model_metrics[key])\r\n for key in sorted(model_metrics.keys())\r\n ),\r\n ('epoch', self._epoch),\r\n ('timestep', self._timestep),\r\n ('timesteps_total', self._total_timestep),\r\n ('train-steps', self._num_train_steps),\r\n )))\r\n\r\n if self._eval_render_mode is not None and hasattr(\r\n evaluation_environment, 'render_rollouts'):\r\n training_environment.render_rollouts(evaluation_paths)\r\n\r\n yield diagnostics\r\n\r\n self.sampler.terminate()\r\n\r\n self._training_after_hook()\r\n\r\n self._training_progress.close()\r\n\r\n yield {'done': True, **diagnostics}\r\n\r\n def train(self, *args, **kwargs):\r\n return self._train(*args, **kwargs)\r\n\r\n def _log_policy(self):\r\n save_path = os.path.join(self._log_dir, 'models')\r\n filesystem.mkdir(save_path)\r\n weights = self._policy.get_weights()\r\n data = {'policy_weights': weights}\r\n full_path = os.path.join(save_path, 'policy_{}.pkl'.format(self._total_timestep))\r\n print('Saving policy to: {}'.format(full_path))\r\n pickle.dump(data, open(full_path, 'wb'))\r\n\r\n def _log_model(self):\r\n save_path = os.path.join(self._log_dir, 'models')\r\n filesystem.mkdir(save_path)\r\n print('Saving model to: {}'.format(save_path))\r\n self._model.save(save_path, self._total_timestep)\r\n\r\n def _set_rollout_length(self):\r\n min_epoch, max_epoch, min_length, max_length = self._rollout_schedule\r\n if self._epoch <= min_epoch:\r\n y = min_length\r\n else:\r\n dx = (self._epoch - min_epoch) / (max_epoch - min_epoch)\r\n dx = min(dx, 1)\r\n y = dx * (max_length - min_length) + min_length\r\n\r\n self._rollout_length = int(y)\r\n print('[ Model Length ] Epoch: {} (min: {}, max: {}) | Length: {} (min: {} , max: {})'.format(\r\n self._epoch, min_epoch, max_epoch, self._rollout_length, min_length, max_length\r\n ))\r\n\r\n def _reallocate_model_pool(self):\r\n obs_space = self._pool._observation_space\r\n act_space = self._pool._action_space\r\n\r\n rollouts_per_epoch = self._rollout_batch_size * self._epoch_length / self._model_train_freq\r\n model_steps_per_epoch = int(self._rollout_length * rollouts_per_epoch)\r\n new_pool_size = self._model_retain_epochs * model_steps_per_epoch\r\n\r\n if not hasattr(self, '_model_pool'):\r\n print('[ MEEE ] Initializing new model pool with size {:.2e}'.format(\r\n new_pool_size\r\n ))\r\n self._model_pool = WeightedReplayPool(obs_space, act_space, new_pool_size)\r\n \r\n elif self._model_pool._max_size != new_pool_size:\r\n print('[ MEEE ] Updating model pool | {:.2e} --> {:.2e}'.format(\r\n self._model_pool._max_size, new_pool_size\r\n ))\r\n samples = self._model_pool.return_all_samples()\r\n new_pool = WeightedReplayPool(obs_space, act_space, new_pool_size)\r\n new_pool.add_samples(samples)\r\n assert self._model_pool.size == new_pool.size\r\n self._model_pool = new_pool\r\n\r\n def _train_model(self, **kwargs):\r\n env_samples = self._pool.return_all_samples()\r\n train_inputs, train_outputs = format_samples_for_training(env_samples)\r\n model_metrics = self._model.train(train_inputs, train_outputs, **kwargs)\r\n return model_metrics\r\n\r\n def _rollout_model(self, rollout_batch_size, **kwargs):\r\n print('[ Model Rollout ] Starting | Epoch: {} | Rollout length: {} | Batch size: {}'.format(\r\n self._epoch, self._rollout_length, rollout_batch_size\r\n ))\r\n batch = self.sampler.random_batch(rollout_batch_size)\r\n obs = batch['observations']\r\n steps_added = []\r\n for i in range(self._rollout_length):\r\n act = self._policy.actions_np(obs)\r\n \r\n next_obs, rew, term, info = 
self.fake_env.step(obs, act, **kwargs)\r\n steps_added.append(len(obs))\r\n\r\n samples = {'observations': obs, 'actions': act, 'next_observations': next_obs, 'rewards': rew, 'terminals': term, 'stds': info['dev'][:,None]}\r\n self._model_pool.add_samples(samples)\r\n\r\n nonterm_mask = ~term.squeeze(-1)\r\n if nonterm_mask.sum() == 0:\r\n print('[ Model Rollout ] Breaking early: {} | {} / {}'.format(i, nonterm_mask.sum(), nonterm_mask.shape))\r\n break\r\n\r\n obs = next_obs[nonterm_mask]\r\n\r\n mean_rollout_length = sum(steps_added) / rollout_batch_size\r\n rollout_stats = {'mean_rollout_length': mean_rollout_length}\r\n print('[ Model Rollout ] Added: {:.1e} | Model pool: {:.1e} (max {:.1e}) | Length: {} | Train rep: {}'.format(\r\n sum(steps_added), self._model_pool.size, self._model_pool._max_size, mean_rollout_length, self._n_train_repeat\r\n ))\r\n return rollout_stats\r\n\r\n def _visualize_model(self, env, timestep):\r\n ## save env state\r\n state = env.unwrapped.state_vector()\r\n qpos_dim = len(env.unwrapped.sim.data.qpos)\r\n qpos = state[:qpos_dim]\r\n qvel = state[qpos_dim:]\r\n\r\n print('[ Visualization ] Starting | Epoch {} | Log dir: {}\\n'.format(self._epoch, self._log_dir))\r\n visualize_policy(env, self.fake_env, self._policy, self._writer, timestep)\r\n print('[ Visualization ] Done')\r\n ## set env state\r\n env.unwrapped.set_state(qpos, qvel)\r\n\r\n def _training_batch(self, batch_size=None):\r\n batch_size = batch_size or self.sampler._batch_size\r\n env_batch_size = int(batch_size*self._real_ratio)\r\n model_batch_size = batch_size - env_batch_size\r\n\r\n ## can sample from the env pool even if env_batch_size == 0\r\n env_batch = self._pool.random_batch(env_batch_size)\r\n\r\n if model_batch_size > 0:\r\n model_batch = self._model_pool.random_batch(model_batch_size)\r\n\r\n keys = env_batch.keys()\r\n batch = {k: np.concatenate((env_batch[k], model_batch[k]), axis=0) for k in keys}\r\n else:\r\n ## if real_ratio == 1.0, no model pool was ever allocated,\r\n ## so skip the model pool sampling\r\n batch = env_batch\r\n return batch\r\n\r\n def _init_global_step(self):\r\n self.global_step = training_util.get_or_create_global_step()\r\n self._training_ops.update({\r\n 'increment_global_step': training_util._increment_global_step(1)\r\n })\r\n\r\n def _init_placeholders(self):\r\n \"\"\"Create input placeholders for the SAC algorithm.\r\n\r\n Creates `tf.placeholder`s for:\r\n - observation\r\n - next observation\r\n - action\r\n - reward\r\n - terminals\r\n - stds\r\n \"\"\"\r\n self._iteration_ph = tf.placeholder(\r\n tf.int64, shape=None, name='iteration')\r\n\r\n self._observations_ph = tf.placeholder(\r\n tf.float32,\r\n shape=(None, *self._observation_shape),\r\n name='observation',\r\n )\r\n\r\n self._next_observations_ph = tf.placeholder(\r\n tf.float32,\r\n shape=(None, *self._observation_shape),\r\n name='next_observation',\r\n )\r\n\r\n self._actions_ph = tf.placeholder(\r\n tf.float32,\r\n shape=(None, *self._action_shape),\r\n name='actions',\r\n )\r\n\r\n self._rewards_ph = tf.placeholder(\r\n tf.float32,\r\n shape=(None, 1),\r\n name='rewards',\r\n )\r\n\r\n self._stds_ph = tf.placeholder(\r\n tf.float32,\r\n shape=(None, 1),\r\n name='stds',\r\n )\r\n\r\n self._terminals_ph = tf.placeholder(\r\n tf.float32,\r\n shape=(None, 1),\r\n name='terminals',\r\n )\r\n\r\n if self._store_extra_policy_info:\r\n self._log_pis_ph = tf.placeholder(\r\n tf.float32,\r\n shape=(None, 1),\r\n name='log_pis',\r\n )\r\n self._raw_actions_ph = tf.placeholder(\r\n 
tf.float32,\r\n shape=(None, *self._action_shape),\r\n name='raw_actions',\r\n )\r\n\r\n def _get_Q_target(self):\r\n next_actions = self._policy.actions([self._next_observations_ph])\r\n next_log_pis = self._policy.log_pis(\r\n [self._next_observations_ph], next_actions)\r\n\r\n next_Qs_values = tuple(\r\n Q([self._next_observations_ph, next_actions])\r\n for Q in self._Q_targets)\r\n\r\n min_next_Q = tf.reduce_min(next_Qs_values, axis=0)\r\n next_value = min_next_Q - self._alpha * next_log_pis\r\n\r\n Q_target = td_target(\r\n reward=self._reward_scale * self._rewards_ph,\r\n discount=self._discount,\r\n next_value=(1 - self._terminals_ph) * next_value)\r\n\r\n return Q_target\r\n\r\n def _init_critic_update(self):\r\n \"\"\"Create minimization operation for critic Q-function.\r\n\r\n Creates a `tf.optimizer.minimize` operation for updating\r\n critic Q-function with gradient descent, and appends it to\r\n `self._training_ops` attribute.\r\n \"\"\"\r\n Q_target = tf.stop_gradient(self._get_Q_target())\r\n\r\n assert Q_target.shape.as_list() == [None, 1]\r\n # weighted critic loss\r\n temperature_critic = 5.0\r\n weight_target_Q = tf.stop_gradient(tf.sigmoid(-self._stds_ph * temperature_critic))\r\n Q_values = self._Q_values = tuple(\r\n Q([self._observations_ph, self._actions_ph])\r\n for Q in self._Qs)\r\n\r\n Q_losses = self._Q_losses = tuple(\r\n tf.losses.mean_squared_error(\r\n labels=Q_target, predictions=Q_value, weights=weight_target_Q)\r\n for Q_value in Q_values)\r\n\r\n self._Q_optimizers = tuple(\r\n tf.train.AdamOptimizer(\r\n learning_rate=self._Q_lr,\r\n name='{}_{}_optimizer'.format(Q._name, i)\r\n ) for i, Q in enumerate(self._Qs))\r\n Q_training_ops = tuple(\r\n tf.contrib.layers.optimize_loss(\r\n Q_loss,\r\n self.global_step,\r\n learning_rate=self._Q_lr,\r\n optimizer=Q_optimizer,\r\n variables=Q.trainable_variables,\r\n increment_global_step=False,\r\n summaries=((\r\n \"loss\", \"gradients\", \"gradient_norm\", \"global_gradient_norm\"\r\n ) if self._tf_summaries else ()))\r\n for i, (Q, Q_loss, Q_optimizer)\r\n in enumerate(zip(self._Qs, Q_losses, self._Q_optimizers)))\r\n\r\n self._training_ops.update({'Q': tf.group(Q_training_ops)})\r\n\r\n def _init_actor_update(self):\r\n \"\"\"Create minimization operations for policy and entropy.\r\n\r\n Creates a `tf.optimizer.minimize` operations for updating\r\n policy and entropy with gradient descent, and adds them to\r\n `self._training_ops` attribute.\r\n \"\"\"\r\n\r\n actions = self._policy.actions([self._observations_ph])\r\n log_pis = self._policy.log_pis([self._observations_ph], actions)\r\n\r\n assert log_pis.shape.as_list() == [None, 1]\r\n\r\n log_alpha = self._log_alpha = tf.get_variable(\r\n 'log_alpha',\r\n dtype=tf.float32,\r\n initializer=0.0)\r\n alpha = tf.exp(log_alpha)\r\n\r\n if isinstance(self._target_entropy, Number):\r\n alpha_loss = -tf.reduce_mean(\r\n log_alpha * tf.stop_gradient(log_pis + self._target_entropy))\r\n\r\n self._alpha_optimizer = tf.train.AdamOptimizer(\r\n self._policy_lr, name='alpha_optimizer')\r\n self._alpha_train_op = self._alpha_optimizer.minimize(\r\n loss=alpha_loss, var_list=[log_alpha])\r\n\r\n self._training_ops.update({\r\n 'temperature_alpha': self._alpha_train_op\r\n })\r\n\r\n self._alpha = alpha\r\n\r\n if self._action_prior == 'normal':\r\n policy_prior = tf.contrib.distributions.MultivariateNormalDiag(\r\n loc=tf.zeros(self._action_shape),\r\n scale_diag=tf.ones(self._action_shape))\r\n policy_prior_log_probs = policy_prior.log_prob(actions)\r\n elif 
self._action_prior == 'uniform':\r\n policy_prior_log_probs = 0.0\r\n\r\n Q_log_targets = tuple(\r\n Q([self._observations_ph, actions])\r\n for Q in self._Qs)\r\n min_Q_log_target = tf.reduce_min(Q_log_targets, axis=0)\r\n\r\n # weighted actor loss\r\n temperature_act = 5.0\r\n weight_actor_Q = tf.stop_gradient(tf.sigmoid(-self._stds_ph * temperature_act) + 0.5)\r\n if self._reparameterize:\r\n policy_kl_losses = (\r\n alpha * log_pis\r\n - min_Q_log_target\r\n - policy_prior_log_probs) * weight_actor_Q\r\n else:\r\n raise NotImplementedError\r\n\r\n assert policy_kl_losses.shape.as_list() == [None, 1]\r\n\r\n policy_loss = tf.reduce_mean(policy_kl_losses)\r\n\r\n self._policy_optimizer = tf.train.AdamOptimizer(\r\n learning_rate=self._policy_lr,\r\n name=\"policy_optimizer\")\r\n policy_train_op = tf.contrib.layers.optimize_loss(\r\n policy_loss,\r\n self.global_step,\r\n learning_rate=self._policy_lr,\r\n optimizer=self._policy_optimizer,\r\n variables=self._policy.trainable_variables,\r\n increment_global_step=False,\r\n summaries=(\r\n \"loss\", \"gradients\", \"gradient_norm\", \"global_gradient_norm\"\r\n ) if self._tf_summaries else ())\r\n\r\n self._training_ops.update({'policy_train_op': policy_train_op})\r\n\r\n def _init_training(self):\r\n self._update_target(tau=1.0)\r\n\r\n def _update_target(self, tau=None):\r\n tau = tau or self._tau\r\n\r\n for Q, Q_target in zip(self._Qs, self._Q_targets):\r\n source_params = Q.get_weights()\r\n target_params = Q_target.get_weights()\r\n Q_target.set_weights([\r\n tau * source + (1.0 - tau) * target\r\n for source, target in zip(source_params, target_params)\r\n ])\r\n\r\n def _do_training(self, iteration, batch):\r\n \"\"\"Runs the operations for updating training and target ops.\"\"\"\r\n\r\n self._training_progress.update()\r\n self._training_progress.set_description()\r\n\r\n feed_dict = self._get_feed_dict(iteration, batch)\r\n\r\n self._session.run(self._training_ops, feed_dict)\r\n\r\n if iteration % self._target_update_interval == 0:\r\n # Run target ops here.\r\n self._update_target()\r\n\r\n def _get_feed_dict(self, iteration, batch):\r\n \"\"\"Construct TensorFlow feed_dict from sample batch.\"\"\"\r\n\r\n feed_dict = {\r\n self._observations_ph: batch['observations'],\r\n self._actions_ph: batch['actions'],\r\n self._next_observations_ph: batch['next_observations'],\r\n self._rewards_ph: batch['rewards'],\r\n self._terminals_ph: batch['terminals'],\r\n self._stds_ph: batch['stds'],\r\n }\r\n\r\n if self._store_extra_policy_info:\r\n feed_dict[self._log_pis_ph] = batch['log_pis']\r\n feed_dict[self._raw_actions_ph] = batch['raw_actions']\r\n\r\n if iteration is not None:\r\n feed_dict[self._iteration_ph] = iteration\r\n\r\n return feed_dict\r\n\r\n def get_diagnostics(self,\r\n iteration,\r\n batch,\r\n training_paths,\r\n evaluation_paths):\r\n \"\"\"Return diagnostic information as ordered dictionary.\r\n\r\n Records mean and standard deviation of Q-function and state\r\n value function, and TD-loss (mean squared Bellman error)\r\n for the sample batch.\r\n\r\n Also calls the `draw` method of the plotter, if plotter defined.\r\n \"\"\"\r\n\r\n feed_dict = self._get_feed_dict(iteration, batch)\r\n\r\n (Q_values, Q_losses, alpha, global_step) = self._session.run(\r\n (self._Q_values,\r\n self._Q_losses,\r\n self._alpha,\r\n self.global_step),\r\n feed_dict)\r\n\r\n diagnostics = OrderedDict({\r\n 'Q-avg': np.mean(Q_values),\r\n 'Q-std': np.std(Q_values),\r\n 'Q_loss': np.mean(Q_losses),\r\n 'alpha': alpha,\r\n })\r\n\r\n 
policy_diagnostics = self._policy.get_diagnostics(\r\n batch['observations'])\r\n diagnostics.update({\r\n f'policy/{key}': value\r\n for key, value in policy_diagnostics.items()\r\n })\r\n\r\n if self._plotter:\r\n self._plotter.draw()\r\n\r\n return diagnostics\r\n\r\n @property\r\n def tf_saveables(self):\r\n saveables = {\r\n '_policy_optimizer': self._policy_optimizer,\r\n **{\r\n f'Q_optimizer_{i}': optimizer\r\n for i, optimizer in enumerate(self._Q_optimizers)\r\n },\r\n '_log_alpha': self._log_alpha,\r\n }\r\n\r\n if hasattr(self, '_alpha_optimizer'):\r\n saveables['_alpha_optimizer'] = self._alpha_optimizer\r\n\r\n return saveables\r\n"
] | [
[
"tensorflow.sigmoid",
"tensorflow.ones",
"tensorflow.contrib.layers.optimize_loss",
"tensorflow.reduce_min",
"numpy.mean",
"tensorflow.losses.mean_squared_error",
"numpy.prod",
"numpy.std",
"tensorflow.group",
"tensorflow.placeholder",
"tensorflow.zeros",
"tensorflow.python.training.training_util._increment_global_step",
"tensorflow.train.AdamOptimizer",
"tensorflow.reduce_mean",
"tensorflow.python.training.training_util.get_or_create_global_step",
"tensorflow.stop_gradient",
"tensorflow.exp",
"tensorflow.keras.models.clone_model",
"numpy.concatenate",
"tensorflow.get_variable"
]
] |
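Note on the record above: the stored trainer reweights both the critic and actor losses by the learned model's predictive standard deviation (the `stds` placeholder), using `sigmoid(-std * temperature)` with a temperature of 5.0 (the actor variant adds 0.5), and it grows the model rollout length linearly with the training epoch in `_set_rollout_length`. The following is a minimal standalone sketch of those two pieces for illustration only; it is not part of the stored file, the helper names are invented here, and the schedule values in the usage lines are hypothetical.

import numpy as np

def uncertainty_weights(stds, temperature=5.0, actor_offset=0.0):
    # sigmoid(-std * temperature): samples with larger model std get
    # smaller weight; the actor-style weight in the record adds 0.5.
    return 1.0 / (1.0 + np.exp(stds * temperature)) + actor_offset

def rollout_length(epoch, schedule):
    # Linear schedule (min_epoch, max_epoch, min_length, max_length),
    # mirroring the _set_rollout_length logic in the record above.
    min_epoch, max_epoch, min_len, max_len = schedule
    if epoch <= min_epoch:
        return int(min_len)
    frac = min((epoch - min_epoch) / (max_epoch - min_epoch), 1.0)
    return int(min_len + frac * (max_len - min_len))

if __name__ == '__main__':
    stds = np.array([0.01, 0.1, 1.0])
    print(uncertainty_weights(stds))                    # critic-style weights
    print(uncertainty_weights(stds, actor_offset=0.5))  # actor-style weights
    # Hypothetical schedule: lengths 1..15 ramped between epochs 20 and 100.
    print([rollout_length(e, (20, 100, 1, 15)) for e in (10, 60, 200)])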
RyoTTa/geopm | [
"74246c8ce70ee47f53bc5629638f51c2c391027b"
] | [
"test_integration/geopm_test_integration.py"
] | [
"#!/usr/bin/env python\n#\n# Copyright (c) 2015, 2016, 2017, 2018, 2019, Intel Corporation\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions\n# are met:\n#\n# * Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n#\n# * Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the following disclaimer in\n# the documentation and/or other materials provided with the\n# distribution.\n#\n# * Neither the name of Intel Corporation nor the names of its\n# contributors may be used to endorse or promote products derived\n# from this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n# \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\n# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY LOG OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n#\n\nfrom __future__ import absolute_import\n\nfrom __future__ import division\nfrom future import standard_library\nstandard_library.install_aliases()\nfrom builtins import str\nimport os\nimport sys\nimport unittest\nimport subprocess\nimport time\nimport pandas\nimport collections\nimport socket\nimport shlex\nimport json\n\nsys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))\nfrom test_integration import util\nfrom test_integration import geopm_test_launcher\nimport geopmpy.io\nimport geopmpy.launcher\n\ndef create_frequency_map_policy(min_freq, max_freq, frequency_map, use_env=False):\n \"\"\"Create a frequency map to be consumed by the frequency map agent.\n\n Arguments:\n min_freq: Floor frequency for the agent\n max_freq: Ceiling frequency for the agent\n frequency_map: Dictionary mapping region names to frequencies\n use_env: If true, apply the map to an environment variable, and return\n the policy needed when the environment variable is in use.\n Otherwise, clear the environment variable and return the policy\n needed when the variable is not in use.\n \"\"\"\n policy = {'frequency_min': min_freq, 'frequency_max': max_freq}\n known_hashes = {\n 'dgemm': 0x00000000a74bbf35,\n 'all2all': 0x000000003ddc81bf,\n 'stream': 0x00000000d691da00,\n 'sleep': 0x00000000536c798f,\n 'MPI_Barrier': 0x000000007b561f45,\n 'model-init': 0x00000000644f9787,\n 'unmarked-region': 0x00000000725e8066 }\n\n if use_env:\n os.environ['GEOPM_FREQUENCY_MAP'] = json.dumps(frequency_map)\n else:\n if 'GEOPM_FREQUENCY_MAP' in os.environ:\n os.environ.pop('GEOPM_FREQUENCY_MAP')\n for i, (region_name, frequency) in enumerate(frequency_map.items()):\n region_hash = known_hashes[region_name]\n policy['HASH_{}'.format(i)] = int(region_hash)\n policy['FREQ_{}'.format(i)] = frequency\n\n return policy\n\nclass TestIntegration(unittest.TestCase):\n def setUp(self):\n self.longMessage = True\n self._agent = 'power_governor'\n 
self._options = {'power_budget': 150}\n self._tmp_files = []\n self._output = None\n self._power_limit = geopm_test_launcher.geopmread(\"MSR::PKG_POWER_LIMIT:PL1_POWER_LIMIT board 0\")\n self._frequency = geopm_test_launcher.geopmread(\"MSR::PERF_CTL:FREQ board 0\")\n self._original_freq_map_env = os.environ.get('GEOPM_FREQUENCY_MAP')\n\n def tearDown(self):\n geopm_test_launcher.geopmwrite(\"MSR::PKG_POWER_LIMIT:PL1_POWER_LIMIT board 0 \" + str(self._power_limit))\n geopm_test_launcher.geopmwrite(\"MSR::PERF_CTL:FREQ board 0 \" + str(self._frequency))\n if sys.exc_info() == (None, None, None) and os.getenv('GEOPM_KEEP_FILES') is None:\n if self._output is not None:\n self._output.remove_files()\n for ff in self._tmp_files:\n try:\n os.remove(ff)\n except OSError:\n pass\n if self._original_freq_map_env is None:\n if 'GEOPM_FREQUENCY_MAP' in os.environ:\n os.environ.pop('GEOPM_FREQUENCY_MAP')\n else:\n os.environ['GEOPM_FREQUENCY_MAP'] = self._original_freq_map_env\n\n def assertNear(self, a, b, epsilon=0.05, msg=''):\n denom = a if a != 0 else 1\n if abs((a - b) / denom) >= epsilon:\n self.fail('The fractional difference between {a} and {b} is greater than {epsilon}. {msg}'.format(a=a, b=b, epsilon=epsilon, msg=msg))\n\n def create_progress_df(self, df):\n # Build a df with only the first region entry and the exit.\n df = df.reset_index(drop=True)\n last_index = 0\n filtered_df = pandas.DataFrame()\n row_list = []\n progress_1s = df['REGION_PROGRESS'].loc[df['REGION_PROGRESS'] == 1]\n for index, _ in progress_1s.iteritems():\n row = df.loc[last_index:index].head(1)\n row_list += [row[['TIME', 'REGION_PROGRESS', 'REGION_RUNTIME']]]\n row = df.loc[last_index:index].tail(1)\n row_list += [row[['TIME', 'REGION_PROGRESS', 'REGION_RUNTIME']]]\n last_index = index + 1 # Set the next starting index to be one past where we are\n filtered_df = pandas.concat(row_list)\n return filtered_df\n\n def test_report_and_trace_generation(self):\n name = 'test_report_and_trace_generation'\n report_path = name + '.report'\n trace_path = name + '.trace'\n num_node = 4\n num_rank = 16\n app_conf = geopmpy.io.BenchConf(name + '_app.config')\n self._tmp_files.append(app_conf.get_path())\n app_conf.append_region('sleep', 1.0)\n agent_conf = geopmpy.io.AgentConf(name + '_agent.config', self._agent, self._options)\n self._tmp_files.append(agent_conf.get_path())\n launcher = geopm_test_launcher.TestLauncher(app_conf, agent_conf, report_path, trace_path)\n launcher.set_num_node(num_node)\n launcher.set_num_rank(num_rank)\n launcher.run(name)\n\n self._output = geopmpy.io.AppOutput(report_path, trace_path + '*')\n node_names = self._output.get_node_names()\n self.assertEqual(num_node, len(node_names))\n for nn in node_names:\n report = self._output.get_report_data(node_name=nn)\n self.assertNotEqual(0, len(report))\n trace = self._output.get_trace_data(node_name=nn)\n self.assertNotEqual(0, len(trace))\n\n def test_no_report_and_trace_generation(self):\n name = 'test_no_report_and_trace_generation'\n num_node = 4\n num_rank = 16\n app_conf = geopmpy.io.BenchConf(name + '_app.config')\n self._tmp_files.append(app_conf.get_path())\n app_conf.append_region('sleep', 1.0)\n agent_conf = geopmpy.io.AgentConf(name + '_agent.config', self._agent, self._options)\n self._tmp_files.append(agent_conf.get_path())\n launcher = geopm_test_launcher.TestLauncher(app_conf, agent_conf)\n launcher.set_num_node(num_node)\n launcher.set_num_rank(num_rank)\n launcher.run(name)\n\n @unittest.skipUnless('mr-fusion' in socket.gethostname(), 
\"This test only enabled on known working systems.\")\n def test_report_and_trace_generation_pthread(self):\n name = 'test_report_and_trace_generation_pthread'\n report_path = name + '.report'\n trace_path = name + '.trace'\n num_node = 4\n num_rank = 16\n app_conf = geopmpy.io.BenchConf(name + '_app.config')\n self._tmp_files.append(app_conf.get_path())\n app_conf.append_region('sleep', 1.0)\n agent_conf = geopmpy.io.AgentConf(name + '_agent.config', self._agent, self._options)\n self._tmp_files.append(agent_conf.get_path())\n launcher = geopm_test_launcher.TestLauncher(app_conf, agent_conf, report_path, trace_path)\n launcher.set_num_node(num_node)\n launcher.set_num_rank(num_rank)\n launcher.set_pmpi_ctl('pthread')\n launcher.run(name)\n\n self._output = geopmpy.io.AppOutput(report_path, trace_path + '*')\n node_names = self._output.get_node_names()\n self.assertEqual(num_node, len(node_names))\n for nn in node_names:\n report = self._output.get_report_data(node_name=nn)\n self.assertNotEqual(0, len(report))\n trace = self._output.get_trace_data(node_name=nn)\n self.assertNotEqual(0, len(trace))\n\n @unittest.skipUnless(geopm_test_launcher.detect_launcher() != \"aprun\",\n 'ALPS does not support multi-application launch on the same nodes.')\n @util.skip_unless_batch()\n def test_report_and_trace_generation_application(self):\n name = 'test_report_and_trace_generation_application'\n report_path = name + '.report'\n trace_path = name + '.trace'\n num_node = 4\n num_rank = 16\n app_conf = geopmpy.io.BenchConf(name + '_app.config')\n self._tmp_files.append(app_conf.get_path())\n app_conf.append_region('sleep', 1.0)\n agent_conf = geopmpy.io.AgentConf(name + '_agent.config', self._agent, self._options)\n self._tmp_files.append(agent_conf.get_path())\n launcher = geopm_test_launcher.TestLauncher(app_conf, agent_conf, report_path, trace_path)\n launcher.set_num_node(num_node)\n launcher.set_num_rank(num_rank)\n launcher.set_pmpi_ctl('application')\n launcher.run(name)\n\n self._output = geopmpy.io.AppOutput(report_path, trace_path + '*')\n node_names = self._output.get_node_names()\n self.assertEqual(num_node, len(node_names))\n for nn in node_names:\n report = self._output.get_report_data(node_name=nn)\n self.assertNotEqual(0, len(report))\n trace = self._output.get_trace_data(node_name=nn)\n self.assertNotEqual(0, len(trace))\n\n @unittest.skipUnless(geopm_test_launcher.detect_launcher() == \"srun\" and os.getenv('SLURM_NODELIST') is None,\n 'Requires non-sbatch SLURM session for alloc\\'d and idle nodes.')\n def test_report_generation_all_nodes(self):\n name = 'test_report_generation_all_nodes'\n report_path = name + '.report'\n num_node = 1\n num_rank = 1\n delay = 1.0\n app_conf = geopmpy.io.BenchConf(name + '_app.config')\n self._tmp_files.append(app_conf.get_path())\n app_conf.append_region('sleep', delay)\n agent_conf = geopmpy.io.AgentConf(name + '_agent.config', self._agent, self._options)\n self._tmp_files.append(agent_conf.get_path())\n launcher = geopm_test_launcher.TestLauncher(app_conf, agent_conf, report_path)\n launcher.set_num_node(num_node)\n launcher.set_num_rank(num_rank)\n time.sleep(5) # Wait a moment to finish cleaning-up from a previous test\n idle_nodes = launcher.get_idle_nodes()\n idle_nodes_copy = list(idle_nodes)\n alloc_nodes = launcher.get_alloc_nodes()\n launcher.write_log(name, 'Idle nodes : {nodes}'.format(nodes=idle_nodes))\n launcher.write_log(name, 'Alloc\\'d nodes : {nodes}'.format(nodes=alloc_nodes))\n node_names = []\n for nn in idle_nodes_copy:\n 
launcher.set_node_list(nn.split()) # Hack to convert string to list\n try:\n launcher.run(name)\n node_names += nn.split()\n except subprocess.CalledProcessError as e:\n if e.returncode == 1 and nn not in launcher.get_idle_nodes():\n launcher.write_log(name, '{node} has disappeared from the idle list!'.format(node=nn))\n idle_nodes.remove(nn)\n else:\n launcher.write_log(name, 'Return code = {code}'.format(code=e.returncode))\n raise e\n ao = geopmpy.io.AppOutput(report_path, do_cache=False)\n sleep_data = ao.get_report_data(node_name=nn, region='sleep')\n app_data = ao.get_app_total_data(node_name=nn)\n self.assertNotEqual(0, len(sleep_data))\n self.assertNear(delay, sleep_data['runtime'].item())\n self.assertGreater(app_data['runtime'].item(), sleep_data['runtime'].item())\n self.assertEqual(1, sleep_data['count'].item())\n\n self.assertEqual(len(node_names), len(idle_nodes))\n\n def test_runtime(self):\n name = 'test_runtime'\n report_path = name + '.report'\n num_node = 1\n num_rank = 5\n delay = 3.0\n app_conf = geopmpy.io.BenchConf(name + '_app.config')\n self._tmp_files.append(app_conf.get_path())\n app_conf.append_region('sleep', delay)\n agent_conf = geopmpy.io.AgentConf(name + '_agent.config', self._agent, self._options)\n self._tmp_files.append(agent_conf.get_path())\n launcher = geopm_test_launcher.TestLauncher(app_conf, agent_conf, report_path)\n launcher.set_num_node(num_node)\n launcher.set_num_rank(num_rank)\n launcher.run(name)\n self._output = geopmpy.io.AppOutput(report_path)\n node_names = self._output.get_node_names()\n self.assertEqual(num_node, len(node_names))\n for nn in node_names:\n report = self._output.get_report_data(node_name=nn, region='sleep')\n app_total = self._output.get_app_total_data(node_name=nn)\n self.assertNear(delay, report['runtime'].item())\n self.assertGreater(app_total['runtime'].item(), report['runtime'].item())\n\n def test_runtime_epoch(self):\n name = 'test_runtime_epoch'\n report_path = name + '.report'\n num_node = 1\n num_rank = 5\n delay = 3.0\n app_conf = geopmpy.io.BenchConf(name + '_app.config')\n self._tmp_files.append(app_conf.get_path())\n app_conf.append_region('sleep', delay)\n app_conf.append_region('spin', delay)\n agent_conf = geopmpy.io.AgentConf(name + '_agent.config', self._agent, self._options)\n self._tmp_files.append(agent_conf.get_path())\n launcher = geopm_test_launcher.TestLauncher(app_conf, agent_conf, report_path)\n launcher.set_num_node(num_node)\n launcher.set_num_rank(num_rank)\n launcher.run(name)\n self._output = geopmpy.io.AppOutput(report_path)\n node_names = self._output.get_node_names()\n self.assertEqual(num_node, len(node_names))\n for nn in node_names:\n spin_data = self._output.get_report_data(node_name=nn, region='spin')\n sleep_data = self._output.get_report_data(node_name=nn, region='sleep')\n epoch_data = self._output.get_report_data(node_name=nn, region='epoch')\n total_runtime = sleep_data['runtime'].item() + spin_data['runtime'].item()\n self.assertNear(total_runtime, epoch_data['runtime'].item())\n\n def test_epoch_data_valid(self):\n name = 'test_epoch_data_valid'\n report_path = name + '.report'\n num_node = 1\n num_rank = 1\n big_o = 1.0\n loop_count = 10\n app_conf = geopmpy.io.BenchConf(name + '_app.config')\n self._tmp_files.append(app_conf.get_path())\n app_conf.set_loop_count(loop_count)\n app_conf.append_region('spin-unmarked', big_o)\n agent_conf = geopmpy.io.AgentConf(name + '_agent.config', self._agent, self._options)\n self._tmp_files.append(agent_conf.get_path())\n launcher = 
geopm_test_launcher.TestLauncher(app_conf, agent_conf, report_path)\n launcher.set_num_node(num_node)\n launcher.set_num_rank(num_rank)\n launcher.run(name)\n\n report = geopmpy.io.RawReport(report_path)\n node_names = report.host_names()\n self.assertEqual(num_node, len(node_names))\n for nn in node_names:\n regions = report.region_names(nn)\n self.assertTrue('model-init' not in regions)\n totals = report.raw_totals(nn)\n unmarked = report.raw_region(nn, 'unmarked-region')\n epoch = report.raw_epoch(nn)\n\n # Epoch has valid data\n self.assertGreater(epoch['runtime (sec)'], 0)\n self.assertGreater(epoch['sync-runtime (sec)'], 0)\n self.assertGreater(epoch['package-energy (joules)'], 0)\n self.assertGreater(epoch['dram-energy (joules)'], 0)\n self.assertGreater(epoch['power (watts)'], 0)\n self.assertGreater(epoch['frequency (%)'], 0)\n self.assertGreater(epoch['frequency (Hz)'], 0)\n self.assertEqual(epoch['count'], loop_count)\n\n # Runtime\n self.assertTrue(totals['runtime (sec)'] > unmarked['runtime (sec)'] >= epoch['runtime (sec)'],\n '''The total runtime is NOT > the unmarked runtime or the unmarked runtime is NOT\n >= the Epoch runtime.''')\n\n # Package Energy (joules)\n self.assertTrue(totals['package-energy (joules)'] >\n unmarked['package-energy (joules)'] >=\n epoch['package-energy (joules)'],\n '''The total package energy (joules) is NOT > the unmarked package energy (joules)\n or the unmarked package energy (joules) is NOT >= the Epoch package\n energy (joules).''')\n\n # DRAM Energy\n self.assertTrue(totals['dram-energy (joules)'] >\n unmarked['dram-energy (joules)'] >=\n epoch['dram-energy (joules)'],\n '''The total dram energy is NOT > the unmarked dram energy or the unmarked\n dram energy is NOT >= the Epoch dram energy.''')\n\n # Sync-runtime\n self.assertTrue(unmarked['sync-runtime (sec)'] >= epoch['sync-runtime (sec)'],\n '''The sync-runtime for the unmarked region is NOT >= the Epoch sync-runtime.''')\n\n\n def test_runtime_nested(self):\n name = 'test_runtime_nested'\n report_path = name + '.report'\n num_node = 1\n num_rank = 1\n delay = 1.0\n loop_count = 2\n app_conf = geopmpy.io.BenchConf(name + '_app.config')\n self._tmp_files.append(app_conf.get_path())\n app_conf.set_loop_count(loop_count)\n app_conf.append_region('nested-progress', delay)\n agent_conf = geopmpy.io.AgentConf(name + '_agent.config', self._agent, self._options)\n self._tmp_files.append(agent_conf.get_path())\n launcher = geopm_test_launcher.TestLauncher(app_conf, agent_conf, report_path)\n launcher.set_num_node(num_node)\n launcher.set_num_rank(num_rank)\n launcher.run(name)\n self._output = geopmpy.io.AppOutput(report_path)\n node_names = self._output.get_node_names()\n self.assertEqual(num_node, len(node_names))\n for nn in node_names:\n spin_data = self._output.get_report_data(node_name=nn, region='spin')\n epoch_data = self._output.get_report_data(node_name=nn, region='epoch')\n app_totals = self._output.get_app_total_data(node_name=nn)\n # The spin sections of this region sleep for 'delay' seconds twice per loop.\n self.assertNear(2 * loop_count * delay, spin_data['runtime'].item())\n self.assertNear(spin_data['runtime'].item(), epoch_data['runtime'].item(), epsilon=0.01)\n self.assertGreater(app_totals['network-time'].item(), 0)\n self.assertGreater(0.1, app_totals['network-time'].item())\n self.assertEqual(loop_count, spin_data['count'].item())\n\n def test_trace_runtimes(self):\n name = 'test_trace_runtimes'\n report_path = name + '.report'\n trace_path = name + '.trace'\n num_node = 4\n 
num_rank = 16\n app_conf = geopmpy.io.BenchConf(name + '_app.config')\n self._tmp_files.append(app_conf.get_path())\n app_conf.append_region('sleep', 1.0)\n app_conf.append_region('dgemm', 1.0)\n app_conf.append_region('all2all', 1.0)\n agent_conf = geopmpy.io.AgentConf(name + '_agent.config', self._agent, self._options)\n self._tmp_files.append(agent_conf.get_path())\n launcher = geopm_test_launcher.TestLauncher(app_conf, agent_conf, report_path,\n trace_path, region_barrier=True)\n launcher.set_num_node(num_node)\n launcher.set_num_rank(num_rank)\n launcher.run(name)\n\n self._output = geopmpy.io.AppOutput(report_path, trace_path + '*')\n node_names = self._output.get_node_names()\n self.assertEqual(len(node_names), num_node)\n regions = self._output.get_region_names()\n for nn in node_names:\n trace = self._output.get_trace_data(node_name=nn)\n app_totals = self._output.get_app_total_data(node_name=nn)\n self.assertNear(trace.iloc[-1]['TIME'], app_totals['runtime'].item(), msg='Application runtime failure, node_name={}.'.format(nn))\n # Calculate runtime totals for each region in each trace, compare to report\n tt = trace.reset_index(level='index') # move 'index' field from multiindex to columns\n tt = tt.set_index(['REGION_HASH'], append=True) # add region_hash column to multiindex\n tt_reg = tt.groupby(level=['REGION_HASH'])\n for region_name in regions:\n region_data = self._output.get_report_data(node_name=nn, region=region_name)\n if (region_name not in ['unmarked-region', 'model-init', 'epoch'] and\n not region_name.startswith('MPI_') and\n region_data['sync_runtime'].item() != 0):\n region_hash = region_data['id'].item()\n trace_data = tt_reg.get_group(region_hash)\n start_idx = trace_data.iloc[0]['index']\n end_idx = trace_data.iloc[-1]['index'] + 1 # use time from sample after exiting region\n start_time = tt.loc[tt['index'] == start_idx]['TIME'].item()\n end_time = tt.loc[tt['index'] == end_idx]['TIME'].item()\n trace_elapsed_time = end_time - start_time\n msg = 'for region {rn} on node {nn}'.format(rn=region_name, nn=nn)\n self.assertNear(trace_elapsed_time, region_data['sync_runtime'].item(), msg=msg)\n #epoch\n region_data = self._output.get_report_data(node_name=nn, region='epoch')\n trace_elapsed_time = trace.iloc[-1]['TIME'] - trace['TIME'].loc[trace['EPOCH_COUNT'] == 0].iloc[0]\n msg = 'for epoch on node {nn}'.format(nn=nn)\n self.assertNear(trace_elapsed_time, region_data['runtime'].item(), msg=msg)\n\n @util.skip_unless_config_enable('bloat')\n def test_runtime_regulator(self):\n name = 'test_runtime_regulator'\n report_path = name + '.report'\n trace_path = name + '.trace'\n num_node = 1\n num_rank = 4\n loop_count = 20\n app_conf = geopmpy.io.BenchConf(name + '_app.config')\n self._tmp_files.append(app_conf.get_path())\n app_conf.set_loop_count(loop_count)\n sleep_big_o = 1.0\n spin_big_o = 0.5\n expected_region_runtime = {'spin': spin_big_o, 'sleep': sleep_big_o}\n app_conf.append_region('sleep', sleep_big_o)\n app_conf.append_region('spin', spin_big_o)\n agent_conf = geopmpy.io.AgentConf(name + '_agent.config', self._agent, self._options)\n self._tmp_files.append(agent_conf.get_path())\n launcher = geopm_test_launcher.TestLauncher(app_conf, agent_conf, report_path, trace_path, region_barrier=True)\n launcher.set_num_node(num_node)\n launcher.set_num_rank(num_rank)\n launcher.run(name)\n\n self._output = geopmpy.io.AppOutput(report_path, trace_path + '*')\n node_names = self._output.get_node_names()\n self.assertEqual(len(node_names), num_node)\n regions = 
self._output.get_region_names()\n for nn in node_names:\n app_totals = self._output.get_app_total_data(node_name=nn)\n trace = self._output.get_trace_data(node_name=nn)\n self.assertNear(trace.iloc[-1]['TIME'], app_totals['runtime'].item())\n tt = trace.set_index(['REGION_HASH'], append=True)\n tt = tt.groupby(level=['REGION_HASH'])\n for region_name in regions:\n region_data = self._output.get_report_data(node_name=nn, region=region_name)\n if region_name not in ['unmarked-region', 'model-init', 'epoch'] and not region_name.startswith('MPI_') and region_data['runtime'].item() != 0:\n trace_data = tt.get_group(region_data['id'].item())\n filtered_df = self.create_progress_df(trace_data)\n first_time = False\n epsilon = 0.001 if region_name != 'sleep' else 0.05\n for index, df in filtered_df.iterrows():\n if df['REGION_PROGRESS'] == 1:\n self.assertNear(df['REGION_RUNTIME'], expected_region_runtime[region_name], epsilon=epsilon)\n first_time = True\n if first_time is True and df['REGION_PROGRESS'] == 0:\n self.assertNear(df['REGION_RUNTIME'], expected_region_runtime[region_name], epsilon=epsilon)\n\n @util.skip_unless_run_long_tests()\n @util.skip_unless_config_enable('bloat')\n def test_region_runtimes(self):\n name = 'test_region_runtimes'\n report_path = name + '.report'\n trace_path = name + '.trace'\n num_node = 4\n num_rank = 16\n loop_count = 500\n app_conf = geopmpy.io.BenchConf(name + '_app.config')\n self._tmp_files.append(app_conf.get_path())\n app_conf.append_region('dgemm', 8.0)\n app_conf.set_loop_count(loop_count)\n agent_conf = geopmpy.io.AgentConf(name + '_agent.config', self._agent, self._options)\n self._tmp_files.append(agent_conf.get_path())\n launcher = geopm_test_launcher.TestLauncher(app_conf, agent_conf, report_path, trace_path, time_limit=900)\n launcher.set_num_node(num_node)\n launcher.set_num_rank(num_rank)\n launcher.run(name)\n\n self._output = geopmpy.io.AppOutput(report_path, trace_path + '*')\n node_names = self._output.get_node_names()\n self.assertEqual(len(node_names), num_node)\n\n # Calculate region times from traces\n region_times = collections.defaultdict(lambda: collections.defaultdict(dict))\n for nn in node_names:\n tt = self._output.get_trace_data(node_name=nn).set_index(['REGION_HASH'], append=True).groupby(level=['REGION_HASH'])\n\n for region_hash, data in tt:\n filtered_df = self.create_progress_df(data)\n filtered_df = filtered_df.diff()\n # Since I'm not separating out the progress 0's from 1's, when I do the diff I only care about the\n # case where 1 - 0 = 1 for the progress column.\n filtered_df = filtered_df.loc[filtered_df['REGION_PROGRESS'] == 1]\n\n if len(filtered_df) > 1:\n launcher.write_log(name, 'Region elapsed time stats from {} - {} :\\n{}'\\\n .format(nn, region_hash, filtered_df['TIME'].describe()))\n filtered_df['TIME'].describe()\n region_times[nn][region_hash] = filtered_df\n\n launcher.write_log(name, '{}'.format('-' * 80))\n\n # Loop through the reports to see if the region runtimes line up with what was calculated from the trace files above.\n regions = self._output.get_region_names()\n write_regions = True\n for nn in node_names:\n for region_name in regions:\n rr = self._output.get_report_data(node_name=nn, region=region_name)\n if (region_name != 'epoch' and\n rr['id'].item() != 0 and\n rr['count'].item() > 1):\n if write_regions:\n launcher.write_log(name, 'Region {} is {}.'.format(rr['id'].item(), region_name))\n runtime = rr['sync_runtime'].item()\n self.assertNear(runtime,\n 
region_times[nn][rr['id'].item()]['TIME'].sum())\n write_regions = False\n\n # Test to ensure every region detected in the trace is captured in the report.\n for nn in node_names:\n report_ids = []\n for region_name in regions:\n rr = self._output.get_report_data(node_name=nn, region=region_name)\n report_ids.append(rr['id'].item())\n for region_hash in region_times[nn].keys():\n self.assertTrue(region_hash in report_ids, msg='Report from {} missing region_hash {}'.format(nn, region_hash))\n\n def test_progress(self):\n name = 'test_progress'\n report_path = name + '.report'\n num_node = 1\n num_rank = 4\n delay = 3.0\n app_conf = geopmpy.io.BenchConf(name + '_app.config')\n self._tmp_files.append(app_conf.get_path())\n app_conf.append_region('sleep-progress', delay)\n agent_conf = geopmpy.io.AgentConf(name + '_agent.config', self._agent, self._options)\n self._tmp_files.append(agent_conf.get_path())\n launcher = geopm_test_launcher.TestLauncher(app_conf, agent_conf, report_path)\n launcher.set_num_node(num_node)\n launcher.set_num_rank(num_rank)\n launcher.run(name)\n self._output = geopmpy.io.AppOutput(report_path)\n node_names = self._output.get_node_names()\n self.assertEqual(len(node_names), num_node)\n for nn in node_names:\n sleep_data = self._output.get_report_data(node_name=nn, region='sleep')\n app_total = self._output.get_app_total_data(node_name=nn)\n self.assertNear(delay, sleep_data['runtime'].item())\n self.assertGreater(app_total['runtime'].item(), sleep_data['runtime'].item())\n self.assertEqual(1, sleep_data['count'].item())\n\n def test_count(self):\n name = 'test_count'\n report_path = name + '.report'\n trace_path = name + '.trace'\n num_node = 1\n num_rank = 4\n delay = 0.01\n loop_count = 100\n app_conf = geopmpy.io.BenchConf(name + '_app.config')\n self._tmp_files.append(app_conf.get_path())\n app_conf.set_loop_count(loop_count)\n app_conf.append_region('spin', delay)\n agent_conf = geopmpy.io.AgentConf(name + '_agent.config', self._agent, self._options)\n self._tmp_files.append(agent_conf.get_path())\n launcher = geopm_test_launcher.TestLauncher(app_conf, agent_conf, report_path, trace_path)\n launcher.set_num_node(num_node)\n launcher.set_num_rank(num_rank)\n launcher.run(name)\n self._output = geopmpy.io.AppOutput(report_path, trace_path + '*')\n node_names = self._output.get_node_names()\n self.assertEqual(len(node_names), num_node)\n for nn in node_names:\n trace_data = self._output.get_trace_data(node_name=nn)\n spin_data = self._output.get_report_data(node_name=nn, region='spin')\n epoch_data = self._output.get_report_data(node_name=nn, region='epoch')\n self.assertNear(delay * loop_count, spin_data['runtime'].item())\n self.assertEqual(loop_count, spin_data['count'].item())\n self.assertEqual(loop_count, epoch_data['count'].item())\n self.assertEqual(loop_count, trace_data['EPOCH_COUNT'][-1])\n\n @util.skip_unless_run_long_tests()\n def test_scaling(self):\n \"\"\"\n This test will start at ${num_node} nodes and ranks. It will then calls check_run() to\n ensure that commands can be executed successfully on all of the allocated compute nodes.\n Afterwards it will run the specified app config on each node and verify the reports. 
When\n complete it will double num_node and run the steps again.\n\n WARNING: This test can take a long time to run depending on the number of starting nodes and\n the size of the allocation.\n \"\"\"\n name = 'test_scaling'\n report_path = name + '.report'\n num_node = 2\n loop_count = 100\n\n app_conf = geopmpy.io.BenchConf(name + '_app.config')\n self._tmp_files.append(app_conf.get_path())\n app_conf.append_region('dgemm', 1.0)\n app_conf.append_region('all2all', 1.0)\n app_conf.set_loop_count(loop_count)\n agent_conf = geopmpy.io.AgentConf(name + '_agent.config', self._agent, self._options)\n self._tmp_files.append(agent_conf.get_path())\n launcher = geopm_test_launcher.TestLauncher(app_conf, agent_conf, report_path, time_limit=900)\n\n check_successful = True\n while check_successful:\n launcher.set_num_node(num_node)\n launcher.set_num_rank(num_node)\n try:\n launcher.check_run(name)\n except subprocess.CalledProcessError as e:\n # If we exceed the available nodes in the allocation ALPS/SLURM give a rc of 1\n # All other rc's are real errors\n if e.returncode != 1:\n raise e\n check_successful = False\n if check_successful:\n launcher.write_log(name, 'About to run on {} nodes.'.format(num_node))\n launcher.run(name)\n self._output = geopmpy.io.AppOutput(report_path)\n node_names = self._output.get_node_names()\n self.assertEqual(len(node_names), num_node)\n for nn in node_names:\n dgemm_data = self._output.get_report_data(node_name=nn, region='dgemm')\n all2all_data = self._output.get_report_data(node_name=nn, region='all2all')\n self.assertEqual(loop_count, dgemm_data['count'].item())\n self.assertEqual(loop_count, all2all_data['count'].item())\n self.assertGreater(dgemm_data['runtime'].item(), 0.0)\n self.assertGreater(all2all_data['runtime'].item(), 0.0)\n num_node *= 2\n self._output.remove_files()\n\n @util.skip_unless_run_long_tests()\n def test_power_consumption(self):\n name = 'test_power_consumption'\n report_path = name + '.report'\n trace_path = name + '.trace'\n num_node = 4\n num_rank = 16\n loop_count = 500\n app_conf = geopmpy.io.BenchConf(name + '_app.config')\n self._tmp_files.append(app_conf.get_path())\n app_conf.append_region('dgemm', 8.0)\n app_conf.set_loop_count(loop_count)\n\n fam, mod = geopm_test_launcher.get_platform()\n if fam == 6 and mod == 87:\n # budget for KNL\n self._options['power_budget'] = 130\n else:\n self._options['power_budget'] = 200\n gov_agent_conf_path = name + '_gov_agent.config'\n self._tmp_files.append(gov_agent_conf_path)\n gov_agent_conf = geopmpy.io.AgentConf(name + '_agent.config', self._agent, self._options)\n launcher = geopm_test_launcher.TestLauncher(app_conf, gov_agent_conf, report_path,\n trace_path, time_limit=900)\n launcher.set_num_node(num_node)\n launcher.set_num_rank(num_rank)\n launcher.write_log(name, 'Power cap = {}W'.format(self._options['power_budget']))\n launcher.run(name)\n\n self._output = geopmpy.io.AppOutput(report_path, trace_path + '*')\n node_names = self._output.get_node_names()\n self.assertEqual(num_node, len(node_names))\n all_power_data = {}\n # Total power consumed will be Socket(s) + DRAM\n for nn in node_names:\n tt = self._output.get_trace_data(node_name=nn)\n\n first_epoch_index = tt.loc[tt['EPOCH_COUNT'] == 0][:1].index[0]\n epoch_dropped_data = tt[first_epoch_index:] # Drop all startup data\n\n power_data = epoch_dropped_data.filter(regex='ENERGY')\n power_data['TIME'] = epoch_dropped_data['TIME']\n power_data = power_data.diff().dropna()\n power_data.rename(columns={'TIME': 'ELAPSED_TIME'}, 
inplace=True)\n power_data = power_data.loc[(power_data != 0).all(axis=1)] # Will drop any row that is all 0's\n\n pkg_energy_cols = [s for s in power_data.keys() if 'ENERGY_PACKAGE' in s]\n dram_energy_cols = [s for s in power_data.keys() if 'ENERGY_DRAM' in s]\n power_data['SOCKET_POWER'] = power_data[pkg_energy_cols].sum(axis=1) / power_data['ELAPSED_TIME']\n power_data['DRAM_POWER'] = power_data[dram_energy_cols].sum(axis=1) / power_data['ELAPSED_TIME']\n power_data['COMBINED_POWER'] = power_data['SOCKET_POWER'] + power_data['DRAM_POWER']\n\n pandas.set_option('display.width', 100)\n launcher.write_log(name, 'Power stats from {} :\\n{}'.format(nn, power_data.describe()))\n\n all_power_data[nn] = power_data\n\n for node_name, power_data in all_power_data.items():\n # Allow for overages of 2% at the 75th percentile.\n self.assertGreater(self._options['power_budget'] * 1.02, power_data['SOCKET_POWER'].quantile(.75))\n\n # TODO Checks on the maximum power computed during the run?\n # TODO Checks to see how much power was left on the table?\n\n @util.skip_unless_run_long_tests()\n @util.skip_unless_batch()\n def test_power_balancer(self):\n name = 'test_power_balancer'\n num_node = 4\n num_rank = 16\n loop_count = 500\n # Require that the balancer moves the maximum dgemm runtime at\n # least 1/4 the distance to the mean dgemm runtime under the\n # governor.\n margin_factor = 0.25\n app_conf = geopmpy.io.BenchConf(name + '_app.config')\n self._tmp_files.append(app_conf.get_path())\n app_conf.append_region('dgemm-imbalance', 8.0)\n app_conf.append_region('all2all', 0.05)\n app_conf.set_loop_count(loop_count)\n\n # Update app config with imbalance\n alloc_nodes = geopm_test_launcher.TestLauncher.get_alloc_nodes()\n for nn in range(len(alloc_nodes) // 2):\n app_conf.append_imbalance(alloc_nodes[nn], 0.5)\n\n fam, mod = geopm_test_launcher.get_platform()\n if fam == 6 and mod == 87:\n # budget for KNL\n power_budget = 130\n else:\n power_budget = 200\n self._options = {'power_budget': power_budget}\n gov_agent_conf_path = name + '_gov_agent.config'\n bal_agent_conf_path = name + '_bal_agent.config'\n self._tmp_files.append(gov_agent_conf_path)\n self._tmp_files.append(bal_agent_conf_path)\n\n agent_list = ['power_governor', 'power_balancer']\n path_dict = {'power_governor': gov_agent_conf_path, 'power_balancer': bal_agent_conf_path}\n agent_runtime = dict()\n for agent in agent_list:\n agent_conf = geopmpy.io.AgentConf(path_dict[agent], agent, self._options)\n run_name = '{}_{}'.format(name, agent)\n report_path = '{}.report'.format(run_name)\n trace_path = '{}.trace'.format(run_name)\n launcher = geopm_test_launcher.TestLauncher(app_conf, agent_conf, report_path,\n trace_path, time_limit=2700)\n launcher.set_num_node(num_node)\n launcher.set_num_rank(num_rank)\n launcher.write_log(run_name, 'Power cap = {}W'.format(power_budget))\n launcher.run(run_name)\n\n self._output = geopmpy.io.AppOutput(report_path, trace_path + '*')\n node_names = self._output.get_node_names()\n self.assertEqual(num_node, len(node_names))\n\n power_limits = []\n # Total power consumed will be Socket(s) + DRAM\n for nn in node_names:\n tt = self._output.get_trace_data(node_name=nn)\n\n first_epoch_index = tt.loc[tt['EPOCH_COUNT'] == 0][:1].index[0]\n epoch_dropped_data = tt[first_epoch_index:] # Drop all startup data\n\n power_data = epoch_dropped_data.filter(regex='ENERGY')\n power_data['TIME'] = epoch_dropped_data['TIME']\n power_data = power_data.diff().dropna()\n power_data.rename(columns={'TIME': 'ELAPSED_TIME'}, 
inplace=True)\n power_data = power_data.loc[(power_data != 0).all(axis=1)] # Will drop any row that is all 0's\n\n pkg_energy_cols = [s for s in power_data.keys() if 'ENERGY_PACKAGE' in s]\n dram_energy_cols = [s for s in power_data.keys() if 'ENERGY_DRAM' in s]\n power_data['SOCKET_POWER'] = power_data[pkg_energy_cols].sum(axis=1) / power_data['ELAPSED_TIME']\n power_data['DRAM_POWER'] = power_data[dram_energy_cols].sum(axis=1) / power_data['ELAPSED_TIME']\n power_data['COMBINED_POWER'] = power_data['SOCKET_POWER'] + power_data['DRAM_POWER']\n\n pandas.set_option('display.width', 100)\n launcher.write_log(name, 'Power stats from {} {} :\\n{}'.format(agent, nn, power_data.describe()))\n\n # Get final power limit set on the node\n if agent == 'power_balancer':\n power_limits.append(epoch_dropped_data['POWER_LIMIT'][-1])\n\n if agent == 'power_balancer':\n avg_power_limit = sum(power_limits) / len(power_limits)\n self.assertTrue(avg_power_limit <= power_budget)\n\n min_runtime = float('nan')\n max_runtime = float('nan')\n node_names = self._output.get_node_names()\n runtime_list = []\n for node_name in node_names:\n epoch_data = self._output.get_report_data(node_name=node_name, region='dgemm')\n runtime_list.append(epoch_data['runtime'].item())\n if agent == 'power_governor':\n mean_runtime = sum(runtime_list) / len(runtime_list)\n max_runtime = max(runtime_list)\n margin = margin_factor * (max_runtime - mean_runtime)\n\n agent_runtime[agent] = max(runtime_list)\n\n self.assertGreater(agent_runtime['power_governor'] - margin,\n agent_runtime['power_balancer'],\n \"governor runtime: {}, balancer runtime: {}, margin: {}\".format(\n agent_runtime['power_governor'], agent_runtime['power_balancer'], margin))\n\n def test_progress_exit(self):\n \"\"\"\n Check that when we always see progress exit before the next entry.\n Make sure that progress only decreases when a new region is entered.\n \"\"\"\n name = 'test_progress_exit'\n report_path = name + '.report'\n trace_path = name + '.trace'\n num_node = 1\n num_rank = 16\n loop_count = 100\n big_o = 0.1\n app_conf = geopmpy.io.BenchConf(name + '_app.config')\n self._tmp_files.append(app_conf.get_path())\n app_conf.set_loop_count(loop_count)\n app_conf.append_region('dgemm-progress', big_o)\n app_conf.append_region('spin-progress', big_o)\n agent_conf = geopmpy.io.AgentConf(name + '_agent.config', self._agent, self._options)\n self._tmp_files.append(agent_conf.get_path())\n launcher = geopm_test_launcher.TestLauncher(app_conf, agent_conf, report_path, trace_path, region_barrier=True)\n launcher.set_num_node(num_node)\n launcher.set_num_rank(num_rank)\n launcher.run(name)\n\n self._output = geopmpy.io.AppOutput(report_path, trace_path + '*')\n node_names = self._output.get_node_names()\n self.assertEqual(num_node, len(node_names))\n\n for nn in node_names:\n tt = self._output.get_trace_data(node_name=nn)\n tt = tt.set_index(['REGION_HASH'], append=True)\n tt = tt.groupby(level=['REGION_HASH'])\n for region_hash, data in tt:\n tmp = data['REGION_PROGRESS'].diff()\n #@todo legacy branch?\n # Look for changes in progress that are more negative\n # than can be expected due to extrapolation error.\n if region_hash == 8300189175:\n negative_progress = tmp.loc[(tmp > -1) & (tmp < -0.1)]\n launcher.write_log(name, '{}'.format(negative_progress))\n self.assertEqual(0, len(negative_progress))\n\n @util.skip_unless_run_long_tests()\n @util.skip_unless_optimized()\n def test_sample_rate(self):\n \"\"\"\n Check that sample rate is regular and fast.\n \"\"\"\n 
name = 'test_sample_rate'\n report_path = name + '.report'\n trace_path = name + '.trace'\n num_node = 1\n num_rank = 16\n loop_count = 10\n big_o = 10.0\n region = 'dgemm-progress'\n max_mean = 0.01 # 10 millisecond max sample period\n max_nstd = 0.1 # 10% normalized standard deviation (std / mean)\n app_conf = geopmpy.io.BenchConf(name + '_app.config')\n self._tmp_files.append(app_conf.get_path())\n app_conf.set_loop_count(loop_count)\n app_conf.append_region(region, big_o)\n agent_conf = geopmpy.io.AgentConf(name + '_agent.config', self._agent, self._options)\n self._tmp_files.append(agent_conf.get_path())\n launcher = geopm_test_launcher.TestLauncher(app_conf, agent_conf, report_path, trace_path)\n launcher.set_num_node(num_node)\n launcher.set_num_rank(num_rank)\n launcher.run(name)\n self._output = geopmpy.io.AppOutput(report_path, trace_path + '*')\n node_names = self._output.get_node_names()\n self.assertEqual(num_node, len(node_names))\n\n for nn in node_names:\n tt = self._output.get_trace_data(node_name=nn)\n delta_t = tt['TIME'].diff()\n delta_t = delta_t.loc[delta_t != 0]\n self.assertGreater(max_mean, delta_t.mean())\n # WARNING : The following line may mask issues in the sampling rate. To do a fine grained analysis, comment\n # out the next line and do NOT run on the BSP. This will require modifications to the launcher or manual testing.\n size_orig = len(delta_t)\n delta_t = delta_t[(delta_t - delta_t.mean()) < 3*delta_t.std()] # Only keep samples within 3 stds of the mean\n self.assertGreater(0.06, 1 - (float(len(delta_t)) / size_orig))\n self.assertGreater(max_nstd, delta_t.std() / delta_t.mean())\n\n def test_network_times(self):\n name = 'test_network_times'\n report_path = name + '.report'\n trace_path = name + '.trace'\n num_node = 4\n num_rank = 16\n app_conf = geopmpy.io.BenchConf(name + '_app.config')\n self._tmp_files.append(app_conf.get_path())\n app_conf.append_region('sleep', 1.0)\n app_conf.append_region('dgemm', 1.0)\n app_conf.append_region('all2all', 1.0)\n agent_conf = geopmpy.io.AgentConf(name + '_agent.config', self._agent, self._options)\n self._tmp_files.append(agent_conf.get_path())\n launcher = geopm_test_launcher.TestLauncher(app_conf, agent_conf, report_path, trace_path)\n launcher.set_num_node(num_node)\n launcher.set_num_rank(num_rank)\n launcher.run(name)\n\n self._output = geopmpy.io.AppOutput(report_path, trace_path + '*')\n node_names = self._output.get_node_names()\n self.assertEqual(len(node_names), num_node)\n for nn in node_names:\n all2all_data = self._output.get_report_data(node_name=nn, region='all2all')\n sleep_data = self._output.get_report_data(node_name=nn, region='sleep')\n dgemm_data = self._output.get_report_data(node_name=nn, region='dgemm')\n barrier_data = self._output.get_report_data(node_name=nn, region='MPI_Barrier')\n unmarked_data = self._output.get_report_data(node_name=nn, region='unmarked-region')\n epoch_data = self._output.get_report_data(node_name=nn, region='epoch')\n app_total = self._output.get_app_total_data(node_name=nn)\n self.assertEqual(0, unmarked_data['count'].item())\n # Since MPI time is is counted if any rank on a node is in\n # an MPI call, but region time is counted only when all\n # ranks on a node are in a region, we must use the\n # unmarked-region time as our error term when comparing\n # MPI time and all2all time.\n mpi_epsilon = max(unmarked_data['runtime'].item() / all2all_data['network_time'].item(), 0.05)\n self.assertNear(all2all_data['network_time'].item(), all2all_data['runtime'].item(), 
mpi_epsilon)\n self.assertNear(all2all_data['network_time'].item() + barrier_data['network_time'].item(),\n epoch_data['network_time'].item())\n # TODO: inconsistent; can we just use _ everywhere?\n self.assertNear(all2all_data['network_time'].item() + barrier_data['network_time'].item(),\n app_total['network-time'].item())\n self.assertEqual(0, unmarked_data['network_time'].item())\n self.assertEqual(0, sleep_data['network_time'].item())\n self.assertEqual(0, dgemm_data['network_time'].item())\n\n def test_ignore_runtime(self):\n name = 'test_ignore_runtime'\n report_path = name + '.report'\n trace_path = name + '.trace'\n num_node = 4\n num_rank = 16\n app_conf = geopmpy.io.BenchConf(name + '_app.config')\n self._tmp_files.append(app_conf.get_path())\n app_conf.append_region('ignore', 1.0)\n app_conf.append_region('dgemm', 1.0)\n app_conf.append_region('all2all', 1.0)\n agent_conf = geopmpy.io.AgentConf(name + '_agent.config', self._agent, self._options)\n self._tmp_files.append(agent_conf.get_path())\n launcher = geopm_test_launcher.TestLauncher(app_conf, agent_conf, report_path, trace_path)\n launcher.set_num_node(num_node)\n launcher.set_num_rank(num_rank)\n launcher.run(name)\n\n self._output = geopmpy.io.AppOutput(report_path, trace_path + '*')\n node_names = self._output.get_node_names()\n self.assertEqual(len(node_names), num_node)\n for nn in node_names:\n ignore_data = self._output.get_report_data(node_name=nn, region='ignore')\n app_data = self._output.get_app_total_data(node_name=nn)\n self.assertNear(ignore_data['runtime'].item(),\n app_data['ignore-runtime'].item(), 0.00005)\n\n @util.skip_unless_config_enable('ompt')\n def test_unmarked_ompt(self):\n name = 'test_unmarked_ompt'\n report_path = name + '.report'\n num_node = 4\n num_rank = 16\n app_conf = geopmpy.io.BenchConf(name + '_app.config')\n self._tmp_files.append(app_conf.get_path())\n app_conf.append_region('stream-unmarked', 1.0)\n app_conf.append_region('dgemm-unmarked', 1.0)\n app_conf.append_region('all2all-unmarked', 1.0)\n agent_conf = geopmpy.io.AgentConf(name + '_agent.config', self._agent, self._options)\n self._tmp_files.append(agent_conf.get_path())\n launcher = geopm_test_launcher.TestLauncher(app_conf, agent_conf, report_path)\n launcher.set_num_node(num_node)\n launcher.set_num_rank(num_rank)\n launcher.run(name)\n\n self._output = geopmpy.io.AppOutput(report_path)\n node_names = self._output.get_node_names()\n self.assertEqual(len(node_names), num_node)\n stream_id = None\n region_names = self._output.get_region_names()\n stream_name = [key for key in region_names if key.lower().find('stream') != -1][0]\n for nn in node_names:\n stream_data = self._output.get_report_data(node_name=nn, region=stream_name)\n found = False\n for name in region_names:\n if stream_name in name: # account for numbers at end of OMPT region names\n found = True\n self.assertTrue(found)\n self.assertEqual(1, stream_data['count'].item())\n if stream_id:\n self.assertEqual(stream_id, stream_data['id'].item())\n else:\n stream_id = stream_data['id'].item()\n ompt_regions = [key for key in region_names if key.startswith('[OMPT]')]\n self.assertLessEqual(2, len(ompt_regions))\n self.assertTrue(('MPI_Alltoall' in region_names))\n gemm_region = [key for key in region_names if key.lower().find('gemm') != -1]\n self.assertLessEqual(1, len(gemm_region))\n\n def _test_agent_frequency_map(self, name, use_env=False):\n min_freq = geopm_test_launcher.geopmread(\"CPUINFO::FREQ_MIN board 0\")\n max_freq = 
geopm_test_launcher.geopmread(\"CPUINFO::FREQ_MAX board 0\")\n sticker_freq = geopm_test_launcher.geopmread(\"CPUINFO::FREQ_STICKER board 0\")\n freq_step = geopm_test_launcher.geopmread(\"CPUINFO::FREQ_STEP board 0\")\n self._agent = \"frequency_map\"\n report_path = name + '.report'\n trace_path = name + '.trace'\n num_node = 1\n num_rank = 4\n loop_count = 5\n dgemm_bigo = 15.0\n stream_bigo = 1.0\n dgemm_bigo_jlse = 35.647\n dgemm_bigo_quartz = 29.12\n stream_bigo_jlse = 1.6225\n stream_bigo_quartz = 1.7941\n hostname = socket.gethostname()\n if hostname.endswith('.alcf.anl.gov'):\n dgemm_bigo = dgemm_bigo_jlse\n stream_bigo = stream_bigo_jlse\n elif hostname.startswith('mcfly'):\n dgemm_bigo = 42.0\n stream_bigo = 1.75\n elif hostname.startswith('quartz'):\n dgemm_bigo = dgemm_bigo_quartz\n stream_bigo = stream_bigo_quartz\n\n app_conf = geopmpy.io.BenchConf(name + '_app.config')\n self._tmp_files.append(app_conf.get_path())\n app_conf.set_loop_count(loop_count)\n app_conf.append_region('dgemm', dgemm_bigo)\n app_conf.append_region('stream', stream_bigo)\n app_conf.append_region('all2all', 1.0)\n app_conf.write()\n freq_map = {}\n freq_map['dgemm'] = sticker_freq\n freq_map['stream'] = sticker_freq - 2 * freq_step\n freq_map['all2all'] = min_freq\n self._options = create_frequency_map_policy(min_freq, max_freq, freq_map, use_env)\n agent_conf = geopmpy.io.AgentConf(name + '_agent.config', self._agent, self._options)\n self._tmp_files.append(agent_conf.get_path())\n launcher = geopm_test_launcher.TestLauncher(app_conf, agent_conf, report_path,\n trace_path, region_barrier=True, time_limit=900)\n launcher.set_num_node(num_node)\n launcher.set_num_rank(num_rank)\n launcher.run(name)\n\n self._output = geopmpy.io.AppOutput(report_path, trace_path + '*')\n node_names = self._output.get_node_names()\n self.assertEqual(len(node_names), num_node)\n regions = self._output.get_region_names()\n for nn in node_names:\n for region_name in regions:\n region_data = self._output.get_report_data(node_name=nn, region=region_name)\n if (region_name in ['dgemm', 'stream', 'all2all']):\n #todo verify trace frequencies\n #todo verify agent report augment frequecies\n msg = region_name + \" frequency should be near assigned map frequency\"\n self.assertNear(region_data['frequency'].item(), freq_map[region_name] / sticker_freq * 100, msg=msg)\n\n def test_agent_frequency_map_env(self):\n \"\"\"\n Test of the FrequencyMapAgent, setting a map through GEOPM_FREQUENCY_MAP.\n \"\"\"\n self._test_agent_frequency_map('test_agent_frequency_map_env', use_env=True)\n\n def test_agent_frequency_map_policy(self):\n \"\"\"\n Test of the FrequencyMapAgent, setting a map through the policy.\n \"\"\"\n self._test_agent_frequency_map('test_agent_frequency_map_policy', use_env=False)\n\n def test_agent_energy_efficient_single_region(self):\n \"\"\"\n Test of the EnergyEfficientAgent against single region loop.\n \"\"\"\n name = 'test_energy_efficient_single_region'\n min_freq = geopm_test_launcher.geopmread(\"CPUINFO::FREQ_MIN board 0\")\n sticker_freq = geopm_test_launcher.geopmread(\"CPUINFO::FREQ_STICKER board 0\")\n freq_step = geopm_test_launcher.geopmread(\"CPUINFO::FREQ_STEP board 0\")\n self._agent = \"energy_efficient\"\n report_path = name + '.report'\n trace_path = name + '.trace'\n num_node = 1\n num_rank = 4\n loop_count = 100\n app_conf = geopmpy.io.BenchConf(name + '_app.config')\n self._tmp_files.append(app_conf.get_path())\n app_conf.set_loop_count(loop_count)\n app_conf.append_region('spin', 0.1)\n 
self._options = {'frequency_min': min_freq,\n 'frequency_max': sticker_freq}\n agent_conf = geopmpy.io.AgentConf(name + '_agent.config', self._agent, self._options)\n self._tmp_files.append(agent_conf.get_path())\n launcher = geopm_test_launcher.TestLauncher(app_conf, agent_conf, report_path, trace_path)\n launcher.set_num_node(num_node)\n launcher.set_num_rank(num_rank)\n launcher.run(name)\n self._output = geopmpy.io.AppOutput(report_path, trace_path + '*')\n node_names = self._output.get_node_names()\n self.assertEqual(len(node_names), num_node)\n regions = self._output.get_region_names()\n for nn in node_names:\n for region_name in regions:\n report = geopmpy.io.RawReport(report_path)\n if (region_name in ['spin']):\n region = report.raw_region(nn, region_name)\n msg = region_name + \" frequency should be minimum frequency as specified by policy\"\n self.assertEqual(region['requested-online-frequency'], min_freq, msg=msg) # freq should reduce\n\n\n @util.skip_unless_run_long_tests()\n @util.skip_unless_cpufreq()\n @util.skip_unless_batch()\n def test_agent_energy_efficient(self):\n \"\"\"\n Test of the EnergyEfficientAgent.\n \"\"\"\n name = 'test_energy_efficient_sticker'\n min_freq = geopm_test_launcher.geopmread(\"CPUINFO::FREQ_MIN board 0\")\n max_freq = geopm_test_launcher.geopmread(\"CPUINFO::FREQ_MAX board 0\")\n sticker_freq = geopm_test_launcher.geopmread(\"CPUINFO::FREQ_STICKER board 0\")\n freq_step = geopm_test_launcher.geopmread(\"CPUINFO::FREQ_STEP board 0\")\n self._agent = \"energy_efficient\"\n num_node = 1\n num_rank = 4\n loop_count = 200\n dgemm_bigo = 15.0\n stream_bigo = 1.0\n dgemm_bigo_jlse = 35.647\n dgemm_bigo_quartz = 29.12\n stream_bigo_jlse = 1.6225\n stream_bigo_quartz = 1.7941\n hostname = socket.gethostname()\n if hostname.endswith('.alcf.anl.gov'):\n dgemm_bigo = dgemm_bigo_jlse\n stream_bigo = stream_bigo_jlse\n elif hostname.startswith('mcfly'):\n dgemm_bigo = 42.0\n stream_bigo = 1.75\n elif hostname.startswith('quartz'):\n dgemm_bigo = dgemm_bigo_quartz\n stream_bigo = stream_bigo_quartz\n\n run = ['_sticker', '_nan_nan']\n for rr in run:\n report_path = name + rr + '.report'\n trace_path = name + rr + '.trace'\n app_conf = geopmpy.io.BenchConf(name + '_app.config')\n self._tmp_files.append(app_conf.get_path())\n app_conf.set_loop_count(loop_count)\n app_conf.append_region('dgemm', dgemm_bigo)\n app_conf.append_region('stream', stream_bigo)\n app_conf.write()\n if rr == '_sticker':\n self._options = {'frequency_min': sticker_freq,\n 'frequency_max': sticker_freq}\n freq = sticker_freq\n else:\n self._options = {'frequency_min': min_freq,\n 'frequency_max': sticker_freq}\n agent_conf = geopmpy.io.AgentConf(name + '_agent.config', self._agent, self._options)\n self._tmp_files.append(agent_conf.get_path())\n launcher = geopm_test_launcher.TestLauncher(app_conf, agent_conf, report_path,\n trace_path, region_barrier=True, time_limit=900)\n launcher.set_num_node(num_node)\n launcher.set_num_rank(num_rank)\n launcher.run(name + rr)\n\n # compare the app_total runtime and energy and assert within bounds\n report_path = name + run[0] + '.report'\n trace_path = name + run[0] + '.trace'\n sticker_out = geopmpy.io.AppOutput(report_path, trace_path + '*')\n report_path = name + run[1] + '.report'\n trace_path = name + run[1] + '.trace'\n nan_out = geopmpy.io.AppOutput(report_path, trace_path + '*')\n for nn in nan_out.get_node_names():\n sticker_app_total = sticker_out.get_app_total_data(node_name=nn)\n nan_app_total = nan_out.get_app_total_data(node_name=nn)\n 
runtime_savings_epoch = (sticker_app_total['runtime'].item() - nan_app_total['runtime'].item()) / sticker_app_total['runtime'].item()\n energy_savings_epoch = (sticker_app_total['energy-package'].item() - nan_app_total['energy-package'].item()) / sticker_app_total['energy-package'].item()\n self.assertLess(-0.1, runtime_savings_epoch) # want -10% or better\n self.assertLess(0.0, energy_savings_epoch)\n\n\nclass TestIntegrationGeopmio(unittest.TestCase):\n ''' Tests of geopmread and geopmwrite.'''\n def setUp(self):\n self.skip_warning_string = 'Incompatible CPU'\n\n def check_output(self, args, expected):\n try:\n proc = subprocess.Popen([self.exec_name] + args,\n stdout=subprocess.PIPE, stderr=subprocess.STDOUT)\n for exp in expected:\n line = proc.stdout.readline()\n while self.skip_warning_string.encode() in line:\n line = proc.stdout.readline()\n self.assertIn(exp.encode(), line)\n for line in proc.stdout:\n if self.skip_warning_string.encode() not in line:\n self.assertNotIn(b'Error', line)\n except subprocess.CalledProcessError as ex:\n sys.stderr.write('{}\\n'.format(ex.output))\n\n def check_output_range(self, args, min_exp, max_exp):\n try:\n proc = subprocess.Popen([self.exec_name] + args,\n stdout=subprocess.PIPE, stderr=subprocess.STDOUT)\n for line in proc.stdout:\n if self.skip_warning_string.encode() in line:\n continue\n if line.startswith(b'0x'):\n value = int(line)\n else:\n value = float(line)\n self.assertLessEqual(min_exp, value, msg=\"Value read for {} smaller than {}: {}.\".format(args, min_exp, value))\n self.assertGreaterEqual(max_exp, value, msg=\"Value read for {} larger than {}: {}.\".format(args, max_exp, value))\n except subprocess.CalledProcessError as ex:\n sys.stderr.write('{}\\n'.format(ex.output))\n\n def check_no_error(self, args):\n try:\n proc = subprocess.Popen([self.exec_name] + args,\n stdout=subprocess.PIPE, stderr=subprocess.STDOUT)\n for line in proc.stdout:\n if self.skip_warning_string.encode() not in line:\n self.assertNotIn(b'Error', line)\n except subprocess.CalledProcessError as ex:\n sys.stderr.write('{}\\n'.format(ex.output))\n\n def test_geopmread_command_line(self):\n '''\n Check that geopmread commandline arguments work.\n '''\n self.exec_name = \"geopmread\"\n\n # no args\n self.check_no_error([])\n\n # domain flag\n self.check_output(['--domain'], ['board', 'package', 'core', 'cpu',\n 'board_memory', 'package_memory',\n 'board_nic', 'package_nic',\n 'board_accelerator', 'package_accelerator'])\n self.check_output(['--domain', 'TIME'], ['cpu'])\n\n # read signal\n self.check_no_error(['TIME', 'board', '0'])\n\n # info\n self.check_no_error(['--info'])\n self.check_output(['--info', 'TIME'], ['Time in seconds'])\n\n # errors\n read_err = 'domain type and domain index are required'\n self.check_output(['TIME'], [read_err])\n self.check_output(['TIME', 'board'], [read_err])\n self.check_output(['TIME', 'board', 'bad'], ['invalid domain index'])\n self.check_output(['FREQUENCY', 'package', '111'], ['cannot read signal'])\n self.check_output(['ENERGY_PACKAGE', 'cpu', '0'], ['cannot read signal'])\n self.check_output(['INVALID', 'board', '0'], ['cannot read signal'])\n self.check_output(['--domain', 'INVALID'], ['unable to determine signal type'])\n self.check_output(['--domain', '--info'], ['info about domain not implemented'])\n\n @util.skip_unless_batch()\n def test_geopmread_all_signal_agg(self):\n '''\n Check that all reported signals can be read for board, aggregating if necessary.\n '''\n self.exec_name = \"geopmread\"\n all_signals 
= []\n try:\n proc = subprocess.Popen([self.exec_name],\n stdout=subprocess.PIPE, stderr=subprocess.STDOUT)\n for line in proc.stdout:\n if self.skip_warning_string.encode() not in line:\n all_signals.append(line.strip())\n except subprocess.CalledProcessError as ex:\n sys.stderr.write('{}\\n'.format(ex.output))\n for sig in all_signals:\n self.check_no_error([sig.decode(), 'board', '0'])\n\n @util.skip_unless_batch()\n def test_geopmread_signal_value(self):\n '''\n Check that some specific signals give a sane value.\n '''\n self.exec_name = \"geopmread\"\n signal_range = {\n \"POWER_PACKAGE\": (20, 400),\n \"FREQUENCY\": (1.0e8, 5.0e9),\n \"TIME\": (0, 10), # time in sec to start geopmread\n \"TEMPERATURE_CORE\": (0, 100)\n }\n\n for signal, val_range in signal_range.items():\n try:\n self.check_no_error([signal, \"board\", \"0\"])\n except:\n raise\n pass # skip missing signals\n else:\n self.check_output_range([signal, \"board\", \"0\"], *val_range)\n\n def test_geopmread_custom_msr(self):\n '''\n Check that MSRIOGroup picks up additional MSRs in path.\n '''\n self.exec_name = \"geopmread\"\n path = os.path.join(\n os.path.dirname(\n os.path.dirname(\n os.path.realpath(__file__))),\n 'examples/custom_msr/')\n custom_env = os.environ.copy()\n custom_env['GEOPM_PLUGIN_PATH'] = path\n all_signals = []\n try:\n proc = subprocess.Popen([self.exec_name], env=custom_env,\n stdout=subprocess.PIPE, stderr=subprocess.STDOUT)\n for line in proc.stdout:\n if self.skip_warning_string.encode() not in line:\n all_signals.append(line.strip())\n except subprocess.CalledProcessError as ex:\n sys.stderr.write('{}\\n'.format(ex.output))\n self.assertIn(b'MSR::CORE_PERF_LIMIT_REASONS#', all_signals)\n\n def test_geopmwrite_command_line(self):\n '''\n Check that geopmwrite commandline arguments work.\n '''\n self.exec_name = \"geopmwrite\"\n\n # no args\n self.check_no_error([])\n\n # domain flag\n self.check_output(['--domain'], ['board', 'package', 'core', 'cpu',\n 'board_memory', 'package_memory',\n 'board_nic', 'package_nic',\n 'board_accelerator', 'package_accelerator'])\n self.check_no_error(['--domain', 'FREQUENCY'])\n\n # info\n self.check_no_error(['--info'])\n self.check_output(['--info', 'FREQUENCY'], ['processor frequency'])\n\n # errors\n write_err = 'domain type, domain index, and value are required'\n self.check_output(['FREQUENCY'], [write_err])\n self.check_output(['FREQUENCY', 'board'], [write_err])\n self.check_output(['FREQUENCY', 'board', '0'], [write_err])\n self.check_output(['FREQUENCY', 'board', 'bad', '0'], ['invalid domain index'])\n self.check_output(['FREQUENCY', 'board', '0', 'bad'], ['invalid write value'])\n self.check_output(['FREQUENCY', 'package', '111', '0'], ['cannot write control'])\n self.check_output(['FREQUENCY', 'board_nic', '0', '0'], ['cannot write control'])\n self.check_output(['INVALID', 'board', '0', '0'], ['cannot write control'])\n self.check_output(['--domain', 'INVALID'], ['unable to determine control type'])\n self.check_output(['--domain', '--info'], ['info about domain not implemented'])\n\n @util.skip_unless_batch()\n def test_geopmwrite_set_freq(self):\n '''\n Check that geopmwrite can be used to set frequency.\n '''\n def read_stdout_line(stdout):\n line = stdout.readline()\n while self.skip_warning_string.encode() in line:\n line = stdout.readline()\n return line.strip()\n\n def read_current_freq(domain, signal='FREQUENCY'):\n read_proc = subprocess.Popen(['geopmread', signal, domain, '0'],\n stdout=subprocess.PIPE, stderr=subprocess.STDOUT)\n freq = 
read_stdout_line(read_proc.stdout)\n freq = float(freq)\n return freq\n\n def read_min_max_freq():\n read_proc = subprocess.Popen(['geopmread', 'CPUINFO::FREQ_MIN', 'board', '0'],\n stdout=subprocess.PIPE, stderr=subprocess.STDOUT)\n min_freq = read_stdout_line(read_proc.stdout)\n min_freq = float(int(float(min_freq)/1e8)*1e8) # convert to multiple of 1e8\n read_proc = subprocess.Popen(['geopmread', 'CPUINFO::FREQ_MAX', 'board', '0'],\n stdout=subprocess.PIPE, stderr=subprocess.STDOUT)\n max_freq = read_stdout_line(read_proc.stdout)\n max_freq = float(int(float(max_freq)/1e8)*1e8)\n return min_freq, max_freq\n\n self.exec_name = \"geopmwrite\"\n\n read_proc = subprocess.Popen(['geopmread', '--domain', 'FREQUENCY'],\n stdout=subprocess.PIPE, stderr=subprocess.STDOUT)\n read_domain = read_stdout_line(read_proc.stdout).decode()\n write_proc = subprocess.Popen([self.exec_name, '--domain', 'FREQUENCY'],\n stdout=subprocess.PIPE, stderr=subprocess.STDOUT)\n write_domain = read_stdout_line(write_proc.stdout).decode()\n min_freq, max_freq = read_min_max_freq()\n\n old_freq = read_current_freq(write_domain, 'MSR::PERF_CTL:FREQ')\n self.assertLess(old_freq, max_freq * 2)\n self.assertGreater(old_freq, min_freq - 1e8)\n\n # set to min and check\n self.check_no_error(['FREQUENCY', write_domain, '0', str(min_freq)])\n result = read_current_freq(read_domain)\n self.assertEqual(min_freq, result)\n # set to max and check\n self.check_no_error(['FREQUENCY', write_domain, '0', str(max_freq)])\n result = read_current_freq(read_domain)\n self.assertEqual(max_freq, result)\n\n self.check_no_error(['FREQUENCY', write_domain, '0', str(old_freq)])\n\n\nclass TestIntegrationGeopmagent(unittest.TestCase):\n ''' Tests of geopmagent.'''\n def setUp(self):\n self.exec_name = 'geopmagent'\n self.skip_warning_string = 'Incompatible CPU frequency driver/governor'\n\n def check_output(self, args, expected):\n try:\n proc = subprocess.Popen([self.exec_name] + args,\n stdout=subprocess.PIPE, stderr=subprocess.STDOUT)\n for exp in expected:\n line = proc.stdout.readline()\n while self.skip_warning_string.encode() in line or line == b'\\n':\n line = proc.stdout.readline()\n self.assertIn(exp.encode(), line)\n for line in proc.stdout:\n if self.skip_warning_string.encode() not in line:\n self.assertNotIn(b'Error', line)\n except subprocess.CalledProcessError as ex:\n sys.stderr.write('{}\\n'.format(ex.output))\n\n def check_json_output(self, args, expected):\n try:\n proc = subprocess.Popen([self.exec_name] + args,\n stdout=subprocess.PIPE, stderr=subprocess.STDOUT)\n except subprocess.CalledProcessError as ex:\n sys.stderr.write('{}\\n'.format(ex.output))\n line = proc.stdout.readline()\n while self.skip_warning_string.encode() in line or line == b'\\n':\n line = proc.stdout.readline()\n try:\n out_json = json.loads(line.decode())\n except ValueError:\n self.fail('Could not convert json string: {}\\n'.format(line))\n self.assertEqual(expected, out_json)\n for line in proc.stdout:\n if self.skip_warning_string.encode() not in line:\n self.assertNotIn(b'Error', line)\n\n def check_no_error(self, args):\n try:\n proc = subprocess.Popen([self.exec_name] + args,\n stdout=subprocess.PIPE, stderr=subprocess.STDOUT)\n for line in proc.stdout:\n if self.skip_warning_string.encode() not in line:\n self.assertNotIn(b'Error', line)\n except subprocess.CalledProcessError as ex:\n sys.stderr.write('{}\\n'.format(ex.output))\n\n def test_geopmagent_command_line(self):\n '''\n Check that geopmagent commandline arguments work.\n '''\n # no 
args\n agent_names = ['monitor', 'power_balancer', 'power_governor',\n 'energy_efficient', 'frequency_map']\n self.check_output([], agent_names)\n\n # help message\n self.check_output(['--help'], ['Usage'])\n\n # version\n self.check_no_error(['--version'])\n\n # agent policy and sample names\n for agent in agent_names:\n self.check_output(['--agent', agent],\n ['Policy', 'Sample'])\n\n # policy file\n self.check_json_output(['--agent', 'monitor', '--policy', 'None'],\n {})\n self.check_json_output(['--agent', 'power_governor', '--policy', '150'],\n {'POWER_PACKAGE_LIMIT_TOTAL': 150})\n # default value policy\n self.check_json_output(['--agent', 'power_governor', '--policy', 'NAN'],\n {'POWER_PACKAGE_LIMIT_TOTAL': 'NAN'})\n self.check_json_output(['--agent', 'power_governor', '--policy', 'nan'],\n {'POWER_PACKAGE_LIMIT_TOTAL': 'NAN'})\n self.check_json_output(['--agent', 'energy_efficient', '--policy', 'nan,nan'],\n {'FREQ_MIN': 'NAN', 'FREQ_MAX': 'NAN'})\n self.check_json_output(['--agent', 'energy_efficient', '--policy', '1.2e9,nan'],\n {'FREQ_MIN': 1.2e9, 'FREQ_MAX': 'NAN'})\n self.check_json_output(['--agent', 'energy_efficient', '--policy', 'nan,1.3e9'],\n {'FREQ_MIN': 'NAN', 'FREQ_MAX': 1.3e9})\n # unspecified policy values are accepted\n self.check_json_output(['--agent', 'power_balancer', '--policy', '150'],\n {'POWER_PACKAGE_LIMIT_TOTAL': 150})\n\n # errors\n self.check_output(['--agent', 'power_governor', '--policy', 'None'],\n ['not a valid floating-point number', 'Invalid argument'])\n self.check_output(['--agent', 'monitor', '--policy', '300'],\n ['agent takes no parameters', 'Invalid argument'])\n self.check_output(['--agent', 'energy_efficient', '--policy', '2.0e9,5.0e9,4.5e9,6.7,4.2'],\n ['Number of policies', 'Invalid argument'])\n\n\nif __name__ == '__main__':\n unittest.main()\n"
] | [
[
"pandas.DataFrame",
"pandas.concat",
"pandas.set_option"
]
] |
lilleswing/Reinvent-1 | [
"ac4e3e6fa6379c6f4af883478dfd1b3407933ada"
] | [
"running_modes/utils/general.py"
] | [
"import time\n\nimport numpy as np\nimport torch\n\n\ndef to_tensor(tensor):\n if isinstance(tensor, np.ndarray):\n tensor = torch.from_numpy(tensor)\n if torch.cuda.is_available():\n return torch.autograd.Variable(tensor).cuda()\n return torch.autograd.Variable(tensor)\n\n\ndef set_default_device_cuda():\n \"\"\"Sets the default device (cpu or cuda) used for all tensors.\"\"\"\n if torch.cuda.is_available() == False:\n tensor = torch.FloatTensor\n torch.set_default_tensor_type(tensor)\n return False\n else: # device_name == \"cuda\":\n tensor = torch.cuda.FloatTensor # pylint: disable=E1101\n torch.set_default_tensor_type(tensor)\n return True\n\n\ndef estimate_run_time(start_time, n_steps, step):\n time_elapsed = int(time.time() - start_time)\n time_left = (time_elapsed * ((n_steps - step) / (step + 1)))\n summary = {\"elapsed\": time_elapsed, \"left\": time_left}\n return summary"
] | [
[
"torch.set_default_tensor_type",
"torch.autograd.Variable",
"torch.cuda.is_available",
"torch.from_numpy"
]
] |
Shreyashwaghe/monk_v1 | [
"62f34a52f242772186ffff7e56764e958fbcd920",
"62f34a52f242772186ffff7e56764e958fbcd920"
] | [
"monk/system_unit_tests/pytorch/test_block_resnet_v2.py",
"monk/system_unit_tests/pytorch/test_layer_average_pooling1d.py"
] | [
"import os\nimport sys\nsys.path.append(\"../../../monk/\");\nimport psutil\n\nfrom pytorch_prototype import prototype\nfrom compare_prototype import compare\nfrom common import print_start\nfrom common import print_status\n\nimport torch\nimport numpy as np\nfrom pytorch.losses.return_loss import load_loss\n\n\ndef test_block_resnet_v2(system_dict):\n forward = True;\n\n test = \"test_block_resnet_v2\";\n system_dict[\"total_tests\"] += 1;\n print_start(test, system_dict[\"total_tests\"])\n if(forward):\n try:\n gtf = prototype(verbose=0);\n gtf.Prototype(\"sample-project-1\", \"sample-experiment-1\");\n\n\n network = [];\n network.append(gtf.resnet_v2_block(output_channels=32, stride=1, downsample=True));\n network.append(gtf.resnet_v2_block(output_channels=32, stride=1, downsample=False));\n gtf.Compile_Network(network, data_shape=(1, 64, 64), use_gpu=False);\n\n x = torch.randn(1, 1, 64, 64);\n y = gtf.system_dict[\"local\"][\"model\"](x); \n\n system_dict[\"successful_tests\"] += 1;\n print_status(\"Pass\");\n\n except Exception as e:\n system_dict[\"failed_tests_exceptions\"].append(e);\n system_dict[\"failed_tests_lists\"].append(test);\n forward = False;\n print_status(\"Fail\");\n else:\n system_dict[\"skipped_tests_lists\"].append(test);\n print_status(\"Skipped\");\n\n return system_dict\n",
"import os\nimport sys\nsys.path.append(\"../../../monk/\");\nimport psutil\n\nfrom pytorch_prototype import prototype\nfrom compare_prototype import compare\nfrom common import print_start\nfrom common import print_status\n\nimport torch\nimport numpy as np\nfrom pytorch.losses.return_loss import load_loss\n\n\ndef test_layer_average_pooling1d(system_dict):\n forward = True;\n\n test = \"test_layer_average_pooling1d\";\n system_dict[\"total_tests\"] += 1;\n print_start(test, system_dict[\"total_tests\"])\n if(forward):\n try:\n gtf = prototype(verbose=0);\n gtf.Prototype(\"sample-project-1\", \"sample-experiment-1\");\n\n\n network = [];\n network.append(gtf.average_pooling1d());\n gtf.Compile_Network(network, data_shape=(3, 128), use_gpu=False);\n\n x = torch.randn(1, 3, 128);\n y = gtf.system_dict[\"local\"][\"model\"](x); \n\n system_dict[\"successful_tests\"] += 1;\n print_status(\"Pass\");\n\n except Exception as e:\n system_dict[\"failed_tests_exceptions\"].append(e);\n system_dict[\"failed_tests_lists\"].append(test);\n forward = False;\n print_status(\"Fail\");\n else:\n system_dict[\"skipped_tests_lists\"].append(test);\n print_status(\"Skipped\");\n\n return system_dict\n"
] | [
[
"torch.randn"
],
[
"torch.randn"
]
] |
teristam/spiketoolk | [
"0ae7adabce46cf620c3627ee0093d890996ef355"
] | [
"spiketoolkit/preprocessing/center.py"
] | [
"from spikeextractors import RecordingExtractor\nfrom .transform import TransformRecording\nimport numpy as np\n\n\nclass CenterRecording(TransformRecording):\n preprocessor_name = 'Center'\n\n def __init__(self, recording, mode, seconds, n_snippets):\n if not isinstance(recording, RecordingExtractor):\n raise ValueError(\"'recording' must be a RecordingExtractor\")\n self._scalar = 1\n self._mode = mode\n self._seconds = seconds\n self._n_snippets = n_snippets\n assert self._mode in ['mean', 'median'], \"'mode' can be 'mean' or 'median'\"\n\n # use n_snippets of equal duration equally distributed on the recording\n n_snippets = int(n_snippets)\n assert n_snippets > 0, \"'n_snippets' must be positive\"\n snip_len = seconds / n_snippets * recording.get_sampling_frequency()\n\n if seconds * recording.get_sampling_frequency() >= recording.get_num_frames():\n traces = recording.get_traces()\n else:\n # skip initial and final part\n snip_start = np.linspace(snip_len // 2, recording.get_num_frames()-int(1.5*snip_len), n_snippets)\n traces_snippets = recording.get_snippets(reference_frames=snip_start, snippet_len=snip_len)\n traces_snippets = traces_snippets.swapaxes(0, 1)\n traces = traces_snippets.reshape((traces_snippets.shape[0],\n traces_snippets.shape[1] * traces_snippets.shape[2]))\n if self._mode == 'mean':\n self._offset = -np.mean(traces, axis=1)\n else:\n self._offset = -np.median(traces, axis=1)\n dtype = str(recording.get_dtype())\n if 'uint' in dtype:\n if 'numpy' in dtype:\n dtype = str(dtype).replace(\"<class '\", \"\").replace(\"'>\", \"\")\n # drop 'numpy'\n dtype = dtype.split('.')[1]\n dtype = dtype[1:]\n TransformRecording.__init__(self, recording, scalar=self._scalar, offset=self._offset, dtype=dtype)\n self._kwargs = {'recording': recording.make_serialized_dict(), 'mode': mode, 'seconds': seconds,\n 'n_snippets': n_snippets}\n\n\ndef center(recording, mode='median', seconds=10., n_snippets=10):\n '''\n Removes the offset of the traces channel by channel.\n\n Parameters\n ----------\n recording: RecordingExtractor\n The recording extractor to be transformed\n mode: str\n 'median' (default) or 'mean'\n seconds: float\n Number of seconds used to compute center\n n_snippets: int\n Number of snippets in which the total 'seconds' are divided spanning the recording duration\n\n Returns\n -------\n center: CenterRecording\n The output recording extractor object\n '''\n return CenterRecording(recording=recording, mode=mode, seconds=seconds, n_snippets=n_snippets)\n"
] | [
[
"numpy.median",
"numpy.mean"
]
] |
DoubleE1/Keras-GAN | [
"775eb82b18cb146203295f19c937d4290de2953f"
] | [
"dcgan/mnist/InceptionScore.py"
] | [
"# calculate inception score for cifar-10 in Keras\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom math import floor\nfrom numpy import ones, expand_dims, log, mean, std, exp\nfrom numpy.random import shuffle\nfrom keras.applications.inception_v3 import InceptionV3, preprocess_input\nfrom keras.datasets import cifar10\nfrom skimage.transform import resize\nfrom numpy import asarray\nfrom PIL import Image\nimport os.path\nfrom os import path\nfrom IPython.display import clear_output\n\n# scale an array of images to a new size\ndef scale_images(images, new_shape):\n images_list = list()\n for image in images:\n # resize with nearest neighbor interpolation\n new_image = resize(image, new_shape, 0)\n # store\n images_list.append(new_image)\n return asarray(images_list)\n\ndef crop_center(img):\n #hardcoded for now\n left = 143\n top = 58\n right = 513\n bottom = 427\n # Crop the center of the image\n return np.asarray(img.crop((left, top, right, bottom)))\n\n# assumes images have any shape and pixels in [0,255]\ndef calculate_inception_score(images, n_split=10, eps=1E-16):\n # load inception v3 model\n model = InceptionV3()\n # enumerate splits of images/predictions\n scores = list()\n n_part = floor(images.shape[0] / n_split)\n for i in range(n_split):\n # retrieve images\n ix_start, ix_end = i * n_part, (i+1) * n_part\n subset = images[ix_start:ix_end]\n # convert from uint8 to float32\n print(i, ix_end, ix_start, n_part)\n subset = subset.astype('float32')\n # scale images to the required size\n subset = scale_images(subset, (299,299,1))\n # pre-process images, scale to [-1,1]\n subset = preprocess_input(subset)\n # predict p(y|x)\n p_yx = model.predict(subset)\n # calculate p(y)\n p_y = expand_dims(p_yx.mean(axis=0), 0)\n # calculate KL divergence using log probabilities\n kl_d = p_yx * (log(p_yx + eps) - log(p_y + eps))\n # sum over classes\n sum_kl_d = kl_d.sum(axis=1)\n # average over images\n avg_kl_d = mean(sum_kl_d)\n # undo the log\n is_score = exp(avg_kl_d)\n # store\n scores.append(is_score)\n # print(i)\n # average across images\n is_avg, is_std = mean(scores), std(scores)\n return is_avg, is_std\n\nimage_path = \"Keras-GAN/dcgan/mnist/single_mnist_images\"\n\nif path.exists(image_path):\n images = []\n head_tail = path.split(image_path)\n for i in range(2):\n head_tail = head_tail[0]\n head_tail = path.split(head_tail)\n\n if ~image_path.endswith('/'):\n image_path = image_path + '/'\n print(image_path)\n\n for i in range(5000):\n if path.exists(image_path + str(f\"{i}.png\")):\n new_image_path = image_path + str(f\"{i}.png\")\n print(\"Loaded image: \", str(f\"{i}.png\"))\n img = Image.open(new_image_path)\n img = crop_center(img)\n\n # append the image into a list\n images.append(img)\n\n clear_output()\n\n # convert the list into array\n images = np.asarray(images)\n print(images.shape)\n\n # calculates the average and standard deviation inception scores\n is_avg, is_std = calculate_inception_score(images)\n print(f\"The inception score for {head_tail[1]}\")\n print('average inception score:', is_avg, 'standard deviation inception scores:', is_std)\nelse:\n print(\"Image path not found\")"
] | [
[
"numpy.asarray",
"numpy.exp",
"numpy.log",
"numpy.std",
"numpy.mean"
]
] |
kaylode/Custom-Template | [
"b2f11bfacf2b03b793476a19781f9046fab6fd82"
] | [
"theseus/utilities/cuda.py"
] | [
"\"\"\" CUDA / AMP utils\nHacked together by / Copyright 2020 Ross Wightman\n\"\"\"\nimport torch\nfrom typing import Any\nfrom theseus.utilities.loggers.observer import LoggerObserver\n\nLOGGER = LoggerObserver.getLogger('main')\n\ndef get_devices_info(device_names=\"0\"):\n\n if device_names.startswith('cuda'):\n device_names = device_names.split('cuda:')[1]\n elif device_names.startswith('cpu'):\n return \"CPU\"\n\n devices_info = \"\"\n for i, device_id in enumerate(device_names.split(',')):\n p = torch.cuda.get_device_properties(i)\n devices_info += f\"CUDA:{device_id} ({p.name}, {p.total_memory / 1024 ** 2}MB)\\n\" # bytes to MB\n return devices_info\n\ndef get_device(name='cpu') -> torch.device:\n if name.startswith('cuda'):\n if not torch.cuda.is_available():\n LOGGER.text(\"CUDA is not available. Using CPU...\", level=LoggerObserver.WARN)\n name = 'cpu'\n return torch.device(name)\n\ndef move_to(obj: Any, device: torch.device):\n \"\"\"Credit: https://discuss.pytorch.org/t/pytorch-tensor-to-device-for-a-list-of-dict/66283\n Arguments:\n obj {dict, list} -- Object to be moved to device\n device {torch.device} -- Device that object will be moved to\n Raises:\n TypeError: object is of type that is not implemented to process\n Returns:\n type(obj) -- same object but moved to specified device\n \"\"\"\n if torch.is_tensor(obj) or isinstance(obj, torch.nn.Module):\n return obj.to(device)\n if isinstance(obj, dict):\n res = {k: move_to(v, device) for k, v in obj.items()}\n return res\n if isinstance(obj, list):\n return [move_to(v, device) for v in obj]\n if isinstance(obj, tuple):\n return tuple(move_to(list(obj), device))\n \n return obj\n\ndef detach(obj: Any):\n \"\"\"Credit: https://discuss.pytorch.org/t/pytorch-tensor-to-device-for-a-list-of-dict/66283\n Arguments:\n obj {dict, list} -- Object to be moved to cpu\n Raises:\n TypeError: Invalid type for detach\n Returns:\n type(obj) -- same object but moved to cpu\n \"\"\"\n if torch.is_tensor(obj):\n return obj.detach()\n if isinstance(obj, dict):\n res = {k: detach(v) for k, v in obj.items()}\n return res\n if isinstance(obj, list):\n return [detach(v) for v in obj]\n if isinstance(obj, tuple):\n return tuple(detach(list(obj)))\n raise TypeError(\"Invalid type for detach\")"
] | [
[
"torch.cuda.get_device_properties",
"torch.is_tensor",
"torch.device",
"torch.cuda.is_available"
]
] |
wangx1996/CenterPillarNet | [
"4be3d53265b8ecb1f9572612fa87f7acd8c57669"
] | [
"src/config/train_config.py"
] | [
"\"\"\"\n# -*- coding: utf-8 -*-\n-----------------------------------------------------------------------------------\n# Author: Nguyen Mau Dung\n# DoC: 2020.08.17\n# email: [email protected]\n-----------------------------------------------------------------------------------\n# Description: The configurations of the project will be defined here\n\"\"\"\n\nimport os\nimport argparse\n\nimport torch\nfrom easydict import EasyDict as edict\nimport kitti_config as cnf\n\ndef parse_train_configs():\n parser = argparse.ArgumentParser(description='The Implementation using PyTorch')\n parser.add_argument('--seed', type=int, default=2020,\n help='re-produce the results with seed random')\n parser.add_argument('--saved_fn', type=str, default='fpn_resnet_18', metavar='FN',\n help='The name using for saving logs, models,...')\n\n parser.add_argument('--root-dir', type=str, default='../', metavar='PATH',\n help='The ROOT working directory')\n ####################################################################\n ############## Model configs ########################\n ####################################################################\n parser.add_argument('--arch', type=str, default='fpn_resnet_18', metavar='ARCH',\n help='The name of the model architecture')\n parser.add_argument('--pretrained_path', type=str, default=None, metavar='PATH',\n help='the path of the pretrained checkpoint')\n\n ####################################################################\n ############## Dataloader and Running configs #######\n ####################################################################\n parser.add_argument('--hflip_prob', type=float, default=0.5,\n help='The probability of horizontal flip')\n parser.add_argument('--no-val', action='store_true',\n help='If true, dont evaluate the model on the val set')\n parser.add_argument('--num_samples', type=int, default=None,\n help='Take a subset of the dataset to run and debug')\n parser.add_argument('--num_workers', type=int, default=4,\n help='Number of threads for loading data')\n parser.add_argument('--batch_size', type=int, default=16,\n help='mini-batch size (default: 16), this is the total'\n 'batch size of all GPUs on the current node when using'\n 'Data Parallel or Distributed Data Parallel')\n parser.add_argument('--print_freq', type=int, default=50, metavar='N',\n help='print frequency (default: 50)')\n parser.add_argument('--tensorboard_freq', type=int, default=50, metavar='N',\n help='frequency of saving tensorboard (default: 50)')\n parser.add_argument('--checkpoint_freq', type=int, default=2, metavar='N',\n help='frequency of saving checkpoints (default: 5)')\n ####################################################################\n ############## Training strategy ####################\n ####################################################################\n\n parser.add_argument('--start_epoch', type=int, default=1, metavar='N',\n help='the starting epoch')\n parser.add_argument('--num_epochs', type=int, default=300, metavar='N',\n help='number of total epochs to run')\n parser.add_argument('--lr_type', type=str, default='cosin',\n help='the type of learning rate scheduler (cosin or multi_step or one_cycle)')\n parser.add_argument('--lr', type=float, default=0.003, metavar='LR',\n help='initial learning rate')\n parser.add_argument('--minimum_lr', type=float, default=1e-7, metavar='MIN_LR',\n help='minimum learning rate during training')\n parser.add_argument('--momentum', type=float, default=0.949, metavar='M',\n help='momentum')\n 
parser.add_argument('-wd', '--weight_decay', type=float, default=0., metavar='WD',\n help='weight decay (default: 0.)')\n parser.add_argument('--optimizer_type', type=str, default='adam', metavar='OPTIMIZER',\n help='the type of optimizer, it can be sgd or adam')\n parser.add_argument('--steps', nargs='*', default=[150, 180],\n help='number of burn in step')\n\n ####################################################################\n ############## Loss weight ##########################\n ####################################################################\n\n ####################################################################\n ############## Distributed Data Parallel ############\n ####################################################################\n parser.add_argument('--world-size', default=-1, type=int, metavar='N',\n help='number of nodes for distributed training')\n parser.add_argument('--rank', default=-1, type=int, metavar='N',\n help='node rank for distributed training')\n parser.add_argument('--dist-url', default='tcp://127.0.0.1:29500', type=str,\n help='url used to set up distributed training')\n parser.add_argument('--dist-backend', default='nccl', type=str,\n help='distributed backend')\n parser.add_argument('--gpu_idx', default=0, type=int,\n help='GPU index to use.')\n parser.add_argument('--no_cuda', action='store_true',\n help='If true, cuda is not used.')\n parser.add_argument('--multiprocessing-distributed', action='store_true',\n help='Use multi-processing distributed training to launch '\n 'N processes per node, which has N GPUs. This is the '\n 'fastest way to use PyTorch for either single node or '\n 'multi node data parallel training')\n ####################################################################\n ############## Evaluation configurations ###################\n ####################################################################\n parser.add_argument('--evaluate', action='store_true',\n help='only evaluate the model, not training')\n parser.add_argument('--resume_path', type=str, default=None, metavar='PATH',\n help='the path of the resumed checkpoint')\n parser.add_argument('--K', type=int, default=50,\n help='the number of top K')\n\n configs = edict(vars(parser.parse_args()))\n\n ####################################################################\n ############## Hardware configurations #############################\n ####################################################################\n configs.device = torch.device('cpu' if configs.no_cuda else 'cuda')\n configs.ngpus_per_node = torch.cuda.device_count()\n\n configs.pin_memory = True\n configs.input_size = (cnf.BEV_WIDTH, cnf.BEV_HEIGHT)\n configs.down_ratio = 2\n configs.hm_size = (cnf.BEV_WIDTH/configs.down_ratio, cnf.BEV_HEIGHT/configs.down_ratio)\n configs.max_objects = 50\n\n configs.imagenet_pretrained = True\n configs.head_conv = 256\n configs.num_classes = 1\n configs.num_center_offset = 2\n configs.num_z = 1\n configs.num_dim = 3\n configs.num_direction = 2 # sin, cos 8 for bin cos sin\n configs.voxel_size = [0.16, 0.16, 4]\n configs.point_cloud_range =[0, -34.56, -2.73, 69.12, 34.56, 1.27]\n configs.max_number_of_points_per_voxel = 100\n\n\n configs.heads = {\n 'hm_cen': configs.num_classes,\n 'cen_offset': configs.num_center_offset,\n 'direction': configs.num_direction,\n 'z_coor': configs.num_z,\n 'dim': configs.num_dim\n }\n\n configs.num_input_features = 4\n\n ####################################################################\n ############## Dataset, logs, Checkpoints dir 
######################\n ####################################################################\n configs.dataset_dir = '/media/wx/File/data/kittidata'\n configs.checkpoints_dir = os.path.join(configs.root_dir, 'checkpoints', configs.saved_fn)\n configs.logs_dir = os.path.join(configs.root_dir, 'logs', configs.saved_fn)\n\n if not os.path.isdir(configs.checkpoints_dir):\n os.makedirs(configs.checkpoints_dir)\n if not os.path.isdir(configs.logs_dir):\n os.makedirs(configs.logs_dir)\n\n return configs\n"
] | [
[
"torch.device",
"torch.cuda.device_count"
]
] |
alimuldal/scipy | [
"713cf7df7b759e2aaeef0f81eb632f48c9b4bae0"
] | [
"scipy/special/__init__.py"
] | [
"\"\"\"\n========================================\nSpecial functions (:mod:`scipy.special`)\n========================================\n\n.. module:: scipy.special\n\nNearly all of the functions below are universal functions and follow\nbroadcasting and automatic array-looping rules. Exceptions are noted.\n\nError handling\n==============\n\nErrors are handled by returning nans, or other appropriate values.\nSome of the special function routines will emit warnings when an error\noccurs. By default this is disabled. To enable such messages use\n``errprint(1)``, and to disable such messages use ``errprint(0)``.\n\nExample:\n\n >>> print scipy.special.bdtr(-1,10,0.3)\n >>> scipy.special.errprint(1)\n >>> print scipy.special.bdtr(-1,10,0.3)\n\n.. autosummary::\n :toctree: generated/\n\n errprint\n SpecialFunctionWarning -- Warning that can be issued with ``errprint(True)``\n\nAvailable functions\n===================\n\nAiry functions\n--------------\n\n.. autosummary::\n :toctree: generated/\n\n airy -- Airy functions and their derivatives.\n airye -- Exponentially scaled Airy functions\n ai_zeros -- [+]Zeros of Airy functions Ai(x) and Ai'(x)\n bi_zeros -- [+]Zeros of Airy functions Bi(x) and Bi'(x)\n itairy --\n \n\nElliptic Functions and Integrals\n--------------------------------\n\n.. autosummary::\n :toctree: generated/\n\n ellipj -- Jacobian elliptic functions\n ellipk -- Complete elliptic integral of the first kind.\n ellipkm1 -- ellipkm1(x) == ellipk(1 - x)\n ellipkinc -- Incomplete elliptic integral of the first kind.\n ellipe -- Complete elliptic integral of the second kind.\n ellipeinc -- Incomplete elliptic integral of the second kind.\n\nBessel Functions\n----------------\n\n.. autosummary::\n :toctree: generated/\n\n jv -- Bessel function of real-valued order and complex argument.\n jn -- Alias for jv\n jve -- Exponentially scaled Bessel function.\n yn -- Bessel function of second kind (integer order).\n yv -- Bessel function of the second kind (real-valued order).\n yve -- Exponentially scaled Bessel function of the second kind.\n kn -- Modified Bessel function of the second kind (integer order).\n kv -- Modified Bessel function of the second kind (real order).\n kve -- Exponentially scaled modified Bessel function of the second kind.\n iv -- Modified Bessel function.\n ive -- Exponentially scaled modified Bessel function.\n hankel1 -- Hankel function of the first kind.\n hankel1e -- Exponentially scaled Hankel function of the first kind.\n hankel2 -- Hankel function of the second kind.\n hankel2e -- Exponentially scaled Hankel function of the second kind.\n\nThe following is not an universal function:\n\n.. autosummary::\n :toctree: generated/\n\n lmbda -- [+]Sequence of lambda functions with arbitrary order v.\n\nZeros of Bessel Functions\n^^^^^^^^^^^^^^^^^^^^^^^^^\n\nThese are not universal functions:\n\n.. autosummary::\n :toctree: generated/\n\n jnjnp_zeros -- [+]Zeros of integer-order Bessel functions and derivatives sorted in order.\n jnyn_zeros -- [+]Zeros of integer-order Bessel functions and derivatives as separate arrays.\n jn_zeros -- [+]Zeros of Jn(x)\n jnp_zeros -- [+]Zeros of Jn'(x)\n yn_zeros -- [+]Zeros of Yn(x)\n ynp_zeros -- [+]Zeros of Yn'(x)\n y0_zeros -- [+]Complex zeros: Y0(z0)=0 and values of Y0'(z0)\n y1_zeros -- [+]Complex zeros: Y1(z1)=0 and values of Y1'(z1)\n y1p_zeros -- [+]Complex zeros of Y1'(z1')=0 and values of Y1(z1')\n\nFaster versions of common Bessel Functions\n^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n\n.. 
autosummary::\n :toctree: generated/\n\n j0 -- Bessel function of order 0.\n j1 -- Bessel function of order 1.\n y0 -- Bessel function of second kind of order 0.\n y1 -- Bessel function of second kind of order 1.\n i0 -- Modified Bessel function of order 0.\n i0e -- Exponentially scaled modified Bessel function of order 0.\n i1 -- Modified Bessel function of order 1.\n i1e -- Exponentially scaled modified Bessel function of order 1.\n k0 -- Modified Bessel function of the second kind of order 0.\n k0e -- Exponentially scaled modified Bessel function of the second kind of order 0.\n k1 -- Modified Bessel function of the second kind of order 1.\n k1e -- Exponentially scaled modified Bessel function of the second kind of order 1.\n\nIntegrals of Bessel Functions\n^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n\n.. autosummary::\n :toctree: generated/\n\n itj0y0 -- Basic integrals of j0 and y0 from 0 to x.\n it2j0y0 -- Integrals of (1-j0(t))/t from 0 to x and y0(t)/t from x to inf.\n iti0k0 -- Basic integrals of i0 and k0 from 0 to x.\n it2i0k0 -- Integrals of (i0(t)-1)/t from 0 to x and k0(t)/t from x to inf.\n besselpoly -- Integral of a Bessel function: Jv(2* a* x) * x[+]lambda from x=0 to 1.\n\nDerivatives of Bessel Functions\n^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n\n.. autosummary::\n :toctree: generated/\n\n jvp -- Nth derivative of Jv(v,z)\n yvp -- Nth derivative of Yv(v,z)\n kvp -- Nth derivative of Kv(v,z)\n ivp -- Nth derivative of Iv(v,z)\n h1vp -- Nth derivative of H1v(v,z)\n h2vp -- Nth derivative of H2v(v,z)\n\nSpherical Bessel Functions\n^^^^^^^^^^^^^^^^^^^^^^^^^^\n\nThese are not universal functions:\n\n.. autosummary::\n :toctree: generated/\n\n sph_jn -- [+]Sequence of spherical Bessel functions, jn(z)\n sph_yn -- [+]Sequence of spherical Bessel functions, yn(z)\n sph_jnyn -- [+]Sequence of spherical Bessel functions, jn(z) and yn(z)\n sph_in -- [+]Sequence of spherical Bessel functions, in(z)\n sph_kn -- [+]Sequence of spherical Bessel functions, kn(z)\n sph_inkn -- [+]Sequence of spherical Bessel functions, in(z) and kn(z)\n\nRiccati-Bessel Functions\n^^^^^^^^^^^^^^^^^^^^^^^^\n\nThese are not universal functions:\n\n.. autosummary::\n :toctree: generated/\n\n riccati_jn -- [+]Sequence of Ricatti-Bessel functions of first kind.\n riccati_yn -- [+]Sequence of Ricatti-Bessel functions of second kind.\n\nStruve Functions\n----------------\n\n.. autosummary::\n :toctree: generated/\n\n struve -- Struve function --- Hv(x)\n modstruve -- Modified Struve function --- Lv(x)\n itstruve0 -- Integral of H0(t) from 0 to x\n it2struve0 -- Integral of H0(t)/t from x to Inf.\n itmodstruve0 -- Integral of L0(t) from 0 to x.\n\n\nRaw Statistical Functions\n-------------------------\n\n.. seealso:: :mod:`scipy.stats`: Friendly versions of these functions.\n\n.. 
autosummary::\n :toctree: generated/\n\n bdtr -- Sum of terms 0 through k of the binomial pdf.\n bdtrc -- Sum of terms k+1 through n of the binomial pdf.\n bdtri -- Inverse of bdtr\n bdtrik --\n bdtrin --\n btdtr -- Integral from 0 to x of beta pdf.\n btdtri -- Quantiles of beta distribution\n btdtria --\n btdtrib --\n fdtr -- Integral from 0 to x of F pdf.\n fdtrc -- Integral from x to infinity under F pdf.\n fdtri -- Inverse of fdtrc\n fdtridfd -- \n gdtr -- Integral from 0 to x of gamma pdf.\n gdtrc -- Integral from x to infinity under gamma pdf.\n gdtria -- Inverse with respect to `a` of gdtr.\n gdtrib -- Inverse with respect to `b` of gdtr.\n gdtrix -- Inverse with respect to `x` of gdtr.\n nbdtr -- Sum of terms 0 through k of the negative binomial pdf.\n nbdtrc -- Sum of terms k+1 to infinity under negative binomial pdf.\n nbdtri -- Inverse of nbdtr\n nbdtrik --\n nbdtrin --\n ncfdtr -- CDF of non-central t distribution.\n ncfdtridfd -- Find degrees of freedom (denominator) of noncentral F distribution.\n ncfdtridfn -- Find degrees of freedom (numerator) of noncentral F distribution.\n ncfdtri -- Inverse CDF of noncentral F distribution.\n ncfdtrinc -- Find noncentrality parameter of noncentral F distribution.\n nctdtr -- CDF of noncentral t distribution.\n nctdtridf -- Find degrees of freedom of noncentral t distribution.\n nctdtrit -- Inverse CDF of noncentral t distribution.\n nctdtrinc -- Find noncentrality parameter of noncentral t distribution.\n nrdtrimn -- Find mean of normal distribution from cdf and std.\n nrdtrisd -- Find std of normal distribution from cdf and mean.\n pdtr -- Sum of terms 0 through k of the Poisson pdf.\n pdtrc -- Sum of terms k+1 to infinity of the Poisson pdf.\n pdtri -- Inverse of pdtr\n pdtrik --\n stdtr -- Integral from -infinity to t of the Student-t pdf.\n stdtridf --\n stdtrit --\n chdtr -- Integral from 0 to x of the Chi-square pdf.\n chdtrc -- Integral from x to infnity of Chi-square pdf.\n chdtri -- Inverse of chdtrc.\n chdtriv --\n ndtr -- Integral from -infinity to x of standard normal pdf\n log_ndtr -- Logarithm of integral from -infinity to x of standard normal pdf\n ndtri -- Inverse of ndtr (quantiles)\n chndtr --\n chndtridf --\n chndtrinc --\n chndtrix --\n smirnov -- Kolmogorov-Smirnov complementary CDF for one-sided test statistic (Dn+ or Dn-)\n smirnovi -- Inverse of smirnov.\n kolmogorov -- The complementary CDF of the (scaled) two-sided test statistic (Kn*) valid for large n.\n kolmogi -- Inverse of kolmogorov\n tklmbda -- Tukey-Lambda CDF\n logit --\n expit --\n boxcox -- Compute the Box-Cox transformation.\n boxcox1p -- Compute the Box-Cox transformation of 1 + x.\n inv_boxcox -- Compute the inverse of the Box-Cox tranformation.\n inv_boxcox1p -- Compute the inverse of the Box-Cox transformation of 1 + x.\n\n\nInformation Theory Functions\n----------------------------\n\n.. autosummary::\n :toctree: generated/\n\n entr -- entr(x) = -x*log(x)\n rel_entr -- rel_entr(x, y) = x*log(x/y)\n kl_div -- kl_div(x, y) = x*log(x/y) - x + y\n huber -- Huber loss function.\n pseudo_huber -- Pseudo-Huber loss function.\n\n\nGamma and Related Functions\n---------------------------\n\n.. 
autosummary::\n :toctree: generated/\n\n gamma -- Gamma function.\n gammaln -- Log transformation of the gamma function.\n gammasgn -- Sign of the gamma function.\n gammainc -- Incomplete gamma integral.\n gammaincinv -- Inverse of gammainc.\n gammaincc -- Complemented incomplete gamma integral.\n gammainccinv -- Inverse of gammaincc.\n beta -- Beta function.\n betaln -- Log of the absolute value of the beta function.\n betainc -- Incomplete beta integral.\n betaincinv -- Inverse of betainc.\n psi -- Logarithmic derivative of the gamma function.\n rgamma -- One divided by the gamma function.\n polygamma -- Nth derivative of psi function.\n multigammaln -- Log of the multivariate gamma.\n digamma -- Digamma function (derivative of the logarithm of gamma).\n poch -- The Pochhammer symbol (rising factorial).\n\n\nError Function and Fresnel Integrals\n------------------------------------\n\n.. autosummary::\n :toctree: generated/\n\n erf -- Error function.\n erfc -- Complemented error function (1- erf(x))\n erfcx -- Scaled complemented error function exp(x**2)*erfc(x)\n erfi -- Imaginary error function, -i erf(i x)\n erfinv -- Inverse of error function\n erfcinv -- Inverse of erfc\n wofz -- Fadeeva function.\n dawsn -- Dawson's integral.\n fresnel -- Fresnel sine and cosine integrals.\n fresnel_zeros -- Complex zeros of both Fresnel integrals\n modfresnelp -- Modified Fresnel integrals F_+(x) and K_+(x)\n modfresnelm -- Modified Fresnel integrals F_-(x) and K_-(x)\n\nThese are not universal functions:\n\n.. autosummary::\n :toctree: generated/\n\n erf_zeros -- [+]Complex zeros of erf(z)\n fresnelc_zeros -- [+]Complex zeros of Fresnel cosine integrals\n fresnels_zeros -- [+]Complex zeros of Fresnel sine integrals\n\nLegendre Functions\n------------------\n\n.. autosummary::\n :toctree: generated/\n\n lpmv -- Associated Legendre Function of arbitrary non-negative degree v.\n sph_harm -- Spherical Harmonics (complex-valued) Y^m_n(theta,phi)\n\nThese are not universal functions:\n\n.. autosummary::\n :toctree: generated/\n\n clpmn -- [+]Associated Legendre Function of the first kind for complex arguments.\n lpn -- [+]Legendre Functions (polynomials) of the first kind\n lqn -- [+]Legendre Functions of the second kind.\n lpmn -- [+]Associated Legendre Function of the first kind for real arguments.\n lqmn -- [+]Associated Legendre Function of the second kind.\n\nEllipsoidal Harmonics\n---------------------\n\n.. autosummary::\n :toctree: generated/\n\n ellip_harm -- Ellipsoidal harmonic E\n ellip_harm_2 -- Ellipsoidal harmonic F\n ellip_normal -- Ellipsoidal normalization constant\n\nOrthogonal polynomials\n----------------------\n\nThe following functions evaluate values of orthogonal polynomials:\n\n.. autosummary::\n :toctree: generated/\n\n assoc_laguerre\n eval_legendre\n eval_chebyt\n eval_chebyu\n eval_chebyc\n eval_chebys\n eval_jacobi\n eval_laguerre\n eval_genlaguerre\n eval_hermite\n eval_hermitenorm\n eval_gegenbauer\n eval_sh_legendre\n eval_sh_chebyt\n eval_sh_chebyu\n eval_sh_jacobi\n\nThe functions below, in turn, return the polynomial coefficients in\n:class:`~.orthopoly1d` objects, which function similarly as :ref:`numpy.poly1d`.\nThe :class:`~.orthopoly1d` class also has an attribute ``weights`` which returns\nthe roots, weights, and total weights for the appropriate form of Gaussian\nquadrature. 
These are returned in an ``n x 3`` array with roots in the first\ncolumn, weights in the second column, and total weights in the final column.\nNote that :class:`~.orthopoly1d` objects are converted to ``poly1d`` when doing\narithmetic, and lose information of the original orthogonal polynomial.\n\n.. autosummary::\n :toctree: generated/\n\n legendre -- [+]Legendre polynomial P_n(x) (lpn -- for function).\n chebyt -- [+]Chebyshev polynomial T_n(x)\n chebyu -- [+]Chebyshev polynomial U_n(x)\n chebyc -- [+]Chebyshev polynomial C_n(x)\n chebys -- [+]Chebyshev polynomial S_n(x)\n jacobi -- [+]Jacobi polynomial P^(alpha,beta)_n(x)\n laguerre -- [+]Laguerre polynomial, L_n(x)\n genlaguerre -- [+]Generalized (Associated) Laguerre polynomial, L^alpha_n(x)\n hermite -- [+]Hermite polynomial H_n(x)\n hermitenorm -- [+]Normalized Hermite polynomial, He_n(x)\n gegenbauer -- [+]Gegenbauer (Ultraspherical) polynomials, C^(alpha)_n(x)\n sh_legendre -- [+]shifted Legendre polynomial, P*_n(x)\n sh_chebyt -- [+]shifted Chebyshev polynomial, T*_n(x)\n sh_chebyu -- [+]shifted Chebyshev polynomial, U*_n(x)\n sh_jacobi -- [+]shifted Jacobi polynomial, J*_n(x) = G^(p,q)_n(x)\n\n.. warning::\n\n Computing values of high-order polynomials (around ``order > 20``) using\n polynomial coefficients is numerically unstable. To evaluate polynomial\n values, the ``eval_*`` functions should be used instead.\n\nRoots and weights for orthogonal polynomials\n\n.. autosummary::\n :toctree: generated/\n\n c_roots\n cg_roots\n h_roots\n he_roots\n j_roots\n js_roots\n l_roots\n la_roots\n p_roots\n ps_roots\n s_roots\n t_roots\n ts_roots\n u_roots\n us_roots\n\n\nHypergeometric Functions\n------------------------\n\n.. autosummary::\n :toctree: generated/\n\n hyp2f1 -- Gauss hypergeometric function (2F1)\n hyp1f1 -- Confluent hypergeometric function (1F1)\n hyperu -- Confluent hypergeometric function (U)\n hyp0f1 -- Confluent hypergeometric limit function (0F1)\n hyp2f0 -- Hypergeometric function (2F0)\n hyp1f2 -- Hypergeometric function (1F2)\n hyp3f0 -- Hypergeometric function (3F0)\n\n\nParabolic Cylinder Functions\n----------------------------\n\n.. autosummary::\n :toctree: generated/\n\n pbdv -- Parabolic cylinder function Dv(x) and derivative.\n pbvv -- Parabolic cylinder function Vv(x) and derivative.\n pbwa -- Parabolic cylinder function W(a,x) and derivative.\n\nThese are not universal functions:\n\n.. autosummary::\n :toctree: generated/\n\n pbdv_seq -- [+]Sequence of parabolic cylinder functions Dv(x)\n pbvv_seq -- [+]Sequence of parabolic cylinder functions Vv(x)\n pbdn_seq -- [+]Sequence of parabolic cylinder functions Dn(z), complex z\n\nMathieu and Related Functions\n-----------------------------\n\n.. autosummary::\n :toctree: generated/\n\n mathieu_a -- Characteristic values for even solution (ce_m)\n mathieu_b -- Characteristic values for odd solution (se_m)\n\nThese are not universal functions:\n\n.. autosummary::\n :toctree: generated/\n\n mathieu_even_coef -- [+]sequence of expansion coefficients for even solution\n mathieu_odd_coef -- [+]sequence of expansion coefficients for odd solution\n\nThe following return both function and first derivative:\n\n.. 
autosummary::\n :toctree: generated/\n\n mathieu_cem -- Even Mathieu function\n mathieu_sem -- Odd Mathieu function\n mathieu_modcem1 -- Even modified Mathieu function of the first kind\n mathieu_modcem2 -- Even modified Mathieu function of the second kind\n mathieu_modsem1 -- Odd modified Mathieu function of the first kind\n mathieu_modsem2 -- Odd modified Mathieu function of the second kind\n\nSpheroidal Wave Functions\n-------------------------\n\n.. autosummary::\n :toctree: generated/\n\n pro_ang1 -- Prolate spheroidal angular function of the first kind\n pro_rad1 -- Prolate spheroidal radial function of the first kind\n pro_rad2 -- Prolate spheroidal radial function of the second kind\n obl_ang1 -- Oblate spheroidal angular function of the first kind\n obl_rad1 -- Oblate spheroidal radial function of the first kind\n obl_rad2 -- Oblate spheroidal radial function of the second kind\n pro_cv -- Compute characteristic value for prolate functions\n obl_cv -- Compute characteristic value for oblate functions\n pro_cv_seq -- Compute sequence of prolate characteristic values\n obl_cv_seq -- Compute sequence of oblate characteristic values\n\nThe following functions require pre-computed characteristic value:\n\n.. autosummary::\n :toctree: generated/\n\n pro_ang1_cv -- Prolate spheroidal angular function of the first kind\n pro_rad1_cv -- Prolate spheroidal radial function of the first kind\n pro_rad2_cv -- Prolate spheroidal radial function of the second kind\n obl_ang1_cv -- Oblate spheroidal angular function of the first kind\n obl_rad1_cv -- Oblate spheroidal radial function of the first kind\n obl_rad2_cv -- Oblate spheroidal radial function of the second kind\n\nKelvin Functions\n----------------\n\n.. autosummary::\n :toctree: generated/\n\n kelvin -- All Kelvin functions (order 0) and derivatives.\n kelvin_zeros -- [+]Zeros of All Kelvin functions (order 0) and derivatives\n ber -- Kelvin function ber x\n bei -- Kelvin function bei x\n berp -- Derivative of Kelvin function ber x\n beip -- Derivative of Kelvin function bei x\n ker -- Kelvin function ker x\n kei -- Kelvin function kei x\n kerp -- Derivative of Kelvin function ker x\n keip -- Derivative of Kelvin function kei x\n\nThese are not universal functions:\n\n.. autosummary::\n :toctree: generated/\n\n ber_zeros -- [+]Zeros of Kelvin function bei x\n bei_zeros -- [+]Zeros of Kelvin function ber x\n berp_zeros -- [+]Zeros of derivative of Kelvin function ber x\n beip_zeros -- [+]Zeros of derivative of Kelvin function bei x\n ker_zeros -- [+]Zeros of Kelvin function kei x\n kei_zeros -- [+]Zeros of Kelvin function ker x\n kerp_zeros -- [+]Zeros of derivative of Kelvin function ker x\n keip_zeros -- [+]Zeros of derivative of Kelvin function kei x\n\nCombinatorics\n-------------\n\n.. autosummary::\n :toctree: generated/\n\n comb -- [+]Combinations of N things taken k at a time, \"N choose k\"\n perm -- [+]Permutations of N things taken k at a time, \"k-permutations of N\"\n\nOther Special Functions\n-----------------------\n\n.. autosummary::\n :toctree: generated/\n\n agm -- Arithmetic-Geometric Mean\n bernoulli -- Bernoulli numbers\n binom -- Binomial coefficient.\n diric -- Dirichlet function (periodic sinc)\n euler -- Euler numbers\n expn -- Exponential integral.\n exp1 -- Exponential integral of order 1 (for complex argument)\n expi -- Another exponential integral -- Ei(x)\n factorial -- The factorial function, n! = special.gamma(n+1)\n factorial2 -- Double factorial, (n!)!\n factorialk -- [+](...((n!)!)!...)! 
where there are k '!'\n shichi -- Hyperbolic sine and cosine integrals.\n sici -- Integral of the sinc and \"cosinc\" functions.\n spence -- Dilogarithm integral.\n lambertw -- Lambert W function\n zeta -- Riemann zeta function of two arguments.\n zetac -- Standard Riemann zeta function minus 1.\n\nConvenience Functions\n---------------------\n\n.. autosummary::\n :toctree: generated/\n\n cbrt -- Cube root.\n exp10 -- 10 raised to the x power.\n exp2 -- 2 raised to the x power.\n radian -- radian angle given degrees, minutes, and seconds.\n cosdg -- cosine of the angle given in degrees.\n sindg -- sine of the angle given in degrees.\n tandg -- tangent of the angle given in degrees.\n cotdg -- cotangent of the angle given in degrees.\n log1p -- log(1+x)\n expm1 -- exp(x)-1\n cosm1 -- cos(x)-1\n round -- round the argument to the nearest integer. If argument ends in 0.5 exactly, pick the nearest even integer.\n xlogy -- x*log(y)\n xlog1py -- x*log1p(y)\n exprel -- (exp(x)-1)/x\n sinc -- sin(x)/x\n\n.. [+] in the description indicates a function which is not a universal\n.. function and does not follow broadcasting and automatic\n.. array-looping rules.\n\n\"\"\"\n\nfrom __future__ import division, print_function, absolute_import\n\nfrom ._ufuncs import *\n\nfrom .basic import *\nfrom . import specfun\nfrom . import orthogonal\nfrom .orthogonal import *\nfrom .spfun_stats import multigammaln\nfrom ._ellip_harm import ellip_harm, ellip_harm_2, ellip_normal\nfrom .lambertw import lambertw\n\n\n__all__ = [s for s in dir() if not s.startswith('_')]\n\nfrom numpy.dual import register_func\nregister_func('i0',i0)\ndel register_func\n\nfrom numpy.testing import Tester\ntest = Tester().test\n"
] | [
[
"numpy.dual.register_func",
"numpy.testing.Tester"
]
] |
pradip026/passengerCOVIDscan | [
"1ebbe23beb91963679a97d8e9fe45354c47bbbff"
] | [
"passengerCOVIDscan/glove_detection/tensorflow_infer.py"
] | [
"# -*- coding:utf-8 -*-\nimport cv2\nimport time\nimport argparse\nimport os\nimport numpy as np\nfrom PIL import Image\n#from keras.models import model_from_json\nfrom .utils.anchor_generator import generate_anchors\nfrom .utils.anchor_decode import decode_bbox\nfrom .utils.nms import single_class_non_max_suppression\nfrom .load_model.tensorflow_loader import load_tf_model, tf_inference\n\nMODEL_PATH = os.path.join(os.path.dirname(os.path.abspath(__file__)), \"models/face_mask_detection.pb\")\nsess, graph = load_tf_model(MODEL_PATH)\n# anchor configuration\nfeature_map_sizes = [[33, 33], [17, 17], [9, 9], [5, 5], [3, 3]]\nanchor_sizes = [[0.04, 0.056], [0.08, 0.11], [0.16, 0.22], [0.32, 0.45], [0.64, 0.72]]\nanchor_ratios = [[1, 0.62, 0.42]] * 5\n\n# generate anchors\nanchors = generate_anchors(feature_map_sizes, anchor_sizes, anchor_ratios)\n\n# for inference , the batch size is 1, the model output shape is [1, N, 4],\n# so we expand dim for anchors to [1, anchor_num, 4]\nanchors_exp = np.expand_dims(anchors, axis=0)\n\nid2class = {0: 'glove', 1: 'Noglove'}\n\n\ndef inference(image,\n conf_thresh=0.5,\n iou_thresh=0.4,\n target_shape=(160, 160),\n draw_result=True,\n show_result=True\n ):\n '''\n Main function of detection inference\n :param image: 3D numpy array of image\n :param conf_thresh: the min threshold of classification probabity.\n :param iou_thresh: the IOU threshold of NMS\n :param target_shape: the model input size.\n :param draw_result: whether to daw bounding box to the image.\n :param show_result: whether to display the image.\n :return:\n '''\n # image = np.copy(image)\n output_info = []\n height, width, _ = image.shape\n image_resized = cv2.resize(image, target_shape)\n image_np = image_resized / 255.0 # 归一化到0~1\n image_exp = np.expand_dims(image_np, axis=0)\n y_bboxes_output, y_cls_output = tf_inference(sess, graph, image_exp)\n\n # remove the batch dimension, for batch is always 1 for inference.\n y_bboxes = decode_bbox(anchors_exp, y_bboxes_output)[0]\n y_cls = y_cls_output[0]\n # To speed up, do single class NMS, not multiple classes NMS.\n bbox_max_scores = np.max(y_cls, axis=1)\n bbox_max_score_classes = np.argmax(y_cls, axis=1)\n\n # keep_idx is the alive bounding box after nms.\n keep_idxs = single_class_non_max_suppression(y_bboxes,\n bbox_max_scores,\n conf_thresh=conf_thresh,\n iou_thresh=iou_thresh,\n )\n\n for idx in keep_idxs:\n conf = float(bbox_max_scores[idx])\n class_id = bbox_max_score_classes[idx]\n bbox = y_bboxes[idx]\n # clip the coordinate, avoid the value exceed the image boundary.\n xmin = max(0, int(bbox[0] * width))\n ymin = max(0, int(bbox[1] * height))\n xmax = min(int(bbox[2] * width), width)\n ymax = min(int(bbox[3] * height), height)\n\n if draw_result:\n if class_id == 0:\n color = (0, 255, 0)\n else:\n color = (255, 0, 0)\n cv2.rectangle(image, (xmin, ymin), (xmax, ymax), color, 2)\n cv2.putText(image, \"%s: %.2f\" % (id2class[class_id], conf), (xmin + 2, ymin - 2),\n cv2.FONT_HERSHEY_SIMPLEX, 0.8, color)\n output_info.append([class_id, conf, xmin, ymin, xmax, ymax])\n\n if show_result:\n Image.fromarray(image).show()\n return output_info\n\n\ndef run_on_video(video_path, output_video_name, conf_thresh):\n cap = cv2.VideoCapture(video_path)\n height = cap.get(cv2.CAP_PROP_FRAME_HEIGHT)\n width = cap.get(cv2.CAP_PROP_FRAME_WIDTH)\n fps = cap.get(cv2.CAP_PROP_FPS)\n fourcc = cv2.VideoWriter_fourcc(*'XVID')\n # writer = cv2.VideoWriter(output_video_name, fourcc, int(fps), (int(width), int(height)))\n total_frames = 
cap.get(cv2.CAP_PROP_FRAME_COUNT)\n if not cap.isOpened():\n raise ValueError(\"Video open failed.\")\n status = True\n idx = 0\n while status:\n start_stamp = time.time()\n status, img_raw = cap.read()\n read_frame_stamp = time.time()\n if (status):\n # convert the frame from BGR to RGB only after a frame was successfully read\n img_raw = cv2.cvtColor(img_raw, cv2.COLOR_BGR2RGB)\n inference(img_raw,\n conf_thresh,\n iou_thresh=0.5,\n target_shape=(260, 260),\n draw_result=True,\n show_result=False)\n cv2.imshow('image', img_raw[:, :, ::-1])\n cv2.waitKey(1)\n inference_stamp = time.time()\n # writer.write(img_raw)\n write_frame_stamp = time.time()\n idx += 1\n print(\"%d of %d\" % (idx, total_frames))\n print(\"read_frame:%f, infer time:%f, write time:%f\" % (read_frame_stamp - start_stamp,\n inference_stamp - read_frame_stamp,\n write_frame_stamp - inference_stamp))\n # writer.release()\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description=\"Glove Detection\")\n parser.add_argument('--img-mode', type=int, default=1, help='set 1 to run on image, 0 to run on video.')\n parser.add_argument('--img-path', type=str, help='path to your image.')\n parser.add_argument('--video-path', type=str, default='0', help='path to your video, `0` means to use camera.')\n # parser.add_argument('--hdf5', type=str, help='keras hdf5 file')\n args = parser.parse_args()\n if args.img_mode:\n imgPath = args.img_path\n img = cv2.imread(imgPath)\n img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n inference(img, show_result=True, target_shape=(260, 260))\n else:\n video_path = args.video_path\n if args.video_path == '0':\n video_path = 0\n run_on_video(video_path, '', conf_thresh=0.5)\n"
] | [
[
"numpy.expand_dims",
"numpy.argmax",
"numpy.max"
]
] |
KokBob/InitProject | [
"63b7cefb9a130118db9ff5405c5dd87bbe34e9f3"
] | [
"data_postprocessing_10.py"
] | [
"# -*- coding: utf-8 -*-\r\n\"\"\"\r\n20181010\r\nciklaminima\r\n\"\"\"\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport os \r\nimport pandas as pd\r\nimport _dataPostprLib_ as lib\r\nimport seaborn as sns\r\nimport importlib \r\n#%%\r\nsns.set()\r\n#sns.set_context(\"poster\")\r\nsns.set_context(\"paper\")\r\n#sns.color_palette(\"Paired\")\r\nseq_col_brew = sns.color_palette('hls', 12)\r\nsns.set_palette(seq_col_brew)\r\n\r\nplt.close('all')\r\npath_glob = r'U:\\projects\\0005_Moventas_RCA\\40_measurement'\r\ntest_bench_name = ['Data_test_run_63526_PPH-5700', 'Data_test_run_63527_PPH-5700']\r\n#%%\r\npath_test_bench_i = path_glob + '\\\\' + test_bench_name[0]\r\npath_meas = os.listdir(path_test_bench_i)\r\n#%% \r\ni = 0\r\n\r\nlc_repos = []\r\nfor lc in path_meas:\r\n \r\n#load_collection = path_meas[0] \r\n load_collection = lc \r\n #load_collection = path_meas[-1] \r\n path_mea_i = path_test_bench_i + '\\\\' + load_collection \r\n meas_i = os.listdir(path_mea_i)\r\n \r\n data_repos = []\r\n for mf in meas_i:\r\n h_,r_,freq_,name_ = lib.catch_mea(mf)\r\n mea_file = path_mea_i + '\\\\' + mf \r\n data_i = pd.read_csv(mea_file,sep=';',header=3, skiprows = [4])\r\n t_i = lib.time_vector(freq_,data_i)\r\n mea_dict = {'data': data_i, \r\n 't': t_i,\r\n 'name': name_,\r\n 'load': load_collection}\r\n \r\n data_repos.append(mea_dict)\r\n# lib.plot_Torque_Temp_pls1(data_repos)\r\n# lib.plot_Torque_Temp_pls2(data_repos)\r\n lib.plot_Torque_Temp_pls(data_repos)\r\n lc_repos.append(data_repos)\r\n# data_repos_actual = data_repos[i]\r\n#%%\r\n# lib.plot_Torque_Temp_pls1(data_repos)\r\n# lib.plot_Torque_Temp_pls2(data_repos)\r\n# lib.plot_Torque_Temp_pls(data_repos)\r\n# i += 1"
] | [
[
"pandas.read_csv",
"matplotlib.pyplot.close"
]
] |
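Each record above pairs a code field (the raw file contents) with an apis field listing the fully qualified library calls found in that file, e.g. numpy.expand_dims and numpy.max for the glove-detection script, or pandas.read_csv and matplotlib.pyplot.close for the post-processing script. The extraction pipeline actually used to build this dataset is not shown here; the following is only a minimal sketch, using Python's standard ast module, of how such fully qualified call names could be recovered from a source string by resolving import aliases. The names extract_api_calls and TRACKED are illustrative assumptions, not part of the dataset's tooling.

import ast

# Libraries whose calls we report; purely an illustrative choice, not the dataset's actual filter.
TRACKED = {"numpy", "pandas", "scipy", "sklearn", "cv2", "matplotlib", "PIL"}

def extract_api_calls(source):
    """Return sorted, fully qualified names of calls into TRACKED libraries."""
    tree = ast.parse(source)
    aliases = {}  # local binding -> fully qualified name it refers to

    # Record bindings introduced by imports, e.g.
    #   import numpy as np                        -> {"np": "numpy"}
    #   from sklearn import metrics as skmetrics  -> {"skmetrics": "sklearn.metrics"}
    # Relative imports (level > 0) are skipped, matching the external-only apis column.
    for node in ast.walk(tree):
        if isinstance(node, ast.Import):
            for a in node.names:
                if a.asname:
                    aliases[a.asname] = a.name
                else:
                    top = a.name.split(".")[0]
                    aliases[top] = top
        elif isinstance(node, ast.ImportFrom) and node.module and node.level == 0:
            for a in node.names:
                aliases[a.asname or a.name] = node.module + "." + a.name

    def qualify(func):
        # Resolve dotted call targets such as np.expand_dims -> numpy.expand_dims.
        parts = []
        while isinstance(func, ast.Attribute):
            parts.append(func.attr)
            func = func.value
        if isinstance(func, ast.Name) and func.id in aliases:
            parts.append(aliases[func.id])
            return ".".join(reversed(parts))
        return None

    calls = set()
    for node in ast.walk(tree):
        if isinstance(node, ast.Call):
            name = qualify(node.func)
            if name and name.split(".")[0] in TRACKED:
                calls.add(name)
    return sorted(calls)

if __name__ == "__main__":
    sample = "import numpy as np\nimport pandas as pd\ndf = pd.read_csv('x.csv')\nv = np.expand_dims([1, 2], axis=0)\n"
    print(extract_api_calls(sample))  # ['numpy.expand_dims', 'pandas.read_csv']

Applied to the records above, this kind of alias resolution would also account for entries such as numpy.dual.register_func and numpy.testing.Tester, which reach the apis list through from-imports rather than attribute access.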